Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-02-09 21:16:38 +08:00

Compare commits (216 commits)
@@ -5,3 +5,146 @@ from .api_registry import (

```python
    register_versions as register_versions,
    get_all_versions as get_all_versions,
)

import asyncio
from dataclasses import asdict
from typing import Callable, Optional


def first_real_override(cls: type, name: str, *, base: type = None) -> Optional[Callable]:
    """Return the *callable* override of `name` visible on `cls`, or None if every
    implementation up to (and including) `base` is the placeholder defined on `base`.

    If base is not provided, cls is assumed to have a GET_BASE_CLASS classmethod.
    """
    if base is None:
        if not hasattr(cls, "GET_BASE_CLASS"):
            raise ValueError("base is required if cls does not have a GET_BASE_CLASS; is this a valid ComfyNode subclass?")
        base = cls.GET_BASE_CLASS()
    base_attr = getattr(base, name, None)
    if base_attr is None:
        return None
    base_func = base_attr.__func__
    for c in cls.mro():  # NodeB, NodeA, ComfyNode, object …
        if c is base:  # reached the placeholder – we're done
            break
        if name in c.__dict__:  # first class that *defines* the attr
            func = getattr(c, name).__func__
            if func is not base_func:  # real override
                return getattr(cls, name)  # bound to *cls*
    return None


class _ComfyNodeInternal:
    """Class that all V3-based APIs inherit from for ComfyNode.

    This is intended to only be referenced within execution.py, as it has to handle all V3 APIs going forward."""
    @classmethod
    def GET_NODE_INFO_V1(cls):
        ...


class _NodeOutputInternal:
    """Class that all V3-based APIs inherit from for NodeOutput.

    This is intended to only be referenced within execution.py, as it has to handle all V3 APIs going forward."""
    ...


def as_pruned_dict(dataclass_obj):
    '''Return dict of dataclass object with pruned None values.'''
    return prune_dict(asdict(dataclass_obj))

def prune_dict(d: dict):
    return {k: v for k, v in d.items() if v is not None}


def is_class(obj):
    '''
    Returns True if is a class type.
    Returns False if is a class instance.
    '''
    return isinstance(obj, type)


def copy_class(cls: type) -> type:
    '''
    Copy a class and its attributes.
    '''
    if cls is None:
        return None
    cls_dict = {
        k: v for k, v in cls.__dict__.items()
        if k not in ('__dict__', '__weakref__', '__module__', '__doc__')
    }
    # new class
    new_cls = type(
        cls.__name__,
        (cls,),
        cls_dict
    )
    # metadata preservation
    new_cls.__module__ = cls.__module__
    new_cls.__doc__ = cls.__doc__
    return new_cls


class classproperty(object):
    def __init__(self, f):
        self.f = f

    def __get__(self, obj, owner):
        return self.f(owner)


# NOTE: this was ai generated and validated by hand
def shallow_clone_class(cls, new_name=None):
    '''
    Shallow clone a class while preserving super() functionality.
    '''
    new_name = new_name or f"{cls.__name__}Clone"
    # Include the original class in the bases to maintain proper inheritance
    new_bases = (cls,) + cls.__bases__
    return type(new_name, new_bases, dict(cls.__dict__))

# NOTE: this was ai generated and validated by hand
def lock_class(cls):
    '''
    Lock a class so that its top-level attributes cannot be modified.
    '''
    # Locked instance __setattr__
    def locked_instance_setattr(self, name, value):
        raise AttributeError(
            f"Cannot set attribute '{name}' on immutable instance of {type(self).__name__}"
        )
    # Locked metaclass
    class LockedMeta(type(cls)):
        def __setattr__(cls_, name, value):
            raise AttributeError(
                f"Cannot modify class attribute '{name}' on locked class '{cls_.__name__}'"
            )
    # Rebuild class with locked behavior
    locked_dict = dict(cls.__dict__)
    locked_dict['__setattr__'] = locked_instance_setattr

    return LockedMeta(cls.__name__, cls.__bases__, locked_dict)


def make_locked_method_func(type_obj, func, class_clone):
    """
    Returns a function that, when called with **inputs, will execute:
    getattr(type_obj, func).__func__(lock_class(class_clone), **inputs)

    Supports both synchronous and asynchronous methods.
    """
    locked_class = lock_class(class_clone)
    method = getattr(type_obj, func).__func__

    # Check if the original method is async
    if asyncio.iscoroutinefunction(method):
        async def wrapped_async_func(**inputs):
            return await method(locked_class, **inputs)
        return wrapped_async_func
    else:
        def wrapped_func(**inputs):
            return method(locked_class, **inputs)
        return wrapped_func
```
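Since `first_real_override` is the hook that decides whether a subclass actually implements a method or merely inherits the base placeholder, a minimal self-contained sketch of its semantics may help (the `Base`/`NodeA`/`NodeB` classes are invented for illustration; `first_real_override` is the function above):

```python
# Sketch: Base defines the placeholder; NodeA inherits it, NodeB truly overrides it.
class Base:
    @classmethod
    def GET_BASE_CLASS(cls):
        return Base

    @classmethod
    def execute(cls):  # placeholder implementation
        ...

class NodeA(Base):
    pass  # no override: only the placeholder is visible

class NodeB(NodeA):
    @classmethod
    def execute(cls):  # a real override
        return "did work"

assert first_real_override(NodeA, "execute") is None          # placeholder only
assert first_real_override(NodeB, "execute")() == "did work"  # bound to NodeB
```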
@@ -7,6 +7,9 @@ from comfy_api.internal.async_to_sync import create_sync_class

```python
from comfy_api.latest._input import ImageInput, AudioInput, MaskInput, LatentInput, VideoInput
from comfy_api.latest._input_impl import VideoFromFile, VideoFromComponents
from comfy_api.latest._util import VideoCodec, VideoContainer, VideoComponents
from comfy_api.latest._io import _IO as io  #noqa: F401
from comfy_api.latest._ui import _UI as ui  #noqa: F401
from comfy_api.latest._resources import _RESOURCES as resources  #noqa: F401
from comfy_execution.utils import get_executing_context
from comfy_execution.progress import get_progress_state, PreviewImageTuple
from PIL import Image
```
comfy_api/latest/_io.py (new file, 1603 lines): file diff suppressed because it is too large.
comfy_api/latest/_resources.py (new file, 72 lines)

```python
from __future__ import annotations
import comfy.utils
import folder_paths
import logging
from abc import ABC, abstractmethod
from typing import Any
import torch


class ResourceKey(ABC):
    Type = Any
    def __init__(self):
        ...

class TorchDictFolderFilename(ResourceKey):
    '''Key for requesting a torch file via file_name from a folder category.'''
    Type = dict[str, torch.Tensor]
    def __init__(self, folder_name: str, file_name: str):
        self.folder_name = folder_name
        self.file_name = file_name

    def __hash__(self):
        return hash((self.folder_name, self.file_name))

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, TorchDictFolderFilename):
            return False
        return self.folder_name == other.folder_name and self.file_name == other.file_name

    def __str__(self):
        return f"{self.folder_name} -> {self.file_name}"

class Resources(ABC):
    def __init__(self):
        ...

    @abstractmethod
    def get(self, key: ResourceKey, default: Any=...) -> Any:
        pass

class ResourcesLocal(Resources):
    def __init__(self):
        super().__init__()
        self.local_resources: dict[ResourceKey, Any] = {}

    def get(self, key: ResourceKey, default: Any=...) -> Any:
        cached = self.local_resources.get(key, None)
        if cached is not None:
            logging.info(f"Using cached resource '{key}'")
            return cached
        logging.info(f"Loading resource '{key}'")
        to_return = None
        if isinstance(key, TorchDictFolderFilename):
            if default is ...:
                to_return = comfy.utils.load_torch_file(folder_paths.get_full_path_or_raise(key.folder_name, key.file_name), safe_load=True)
            else:
                full_path = folder_paths.get_full_path(key.folder_name, key.file_name)
                if full_path is not None:
                    to_return = comfy.utils.load_torch_file(full_path, safe_load=True)

        if to_return is not None:
            self.local_resources[key] = to_return
            return to_return
        if default is not ...:
            return default
        raise Exception(f"Unsupported resource key type: {type(key)}")


class _RESOURCES:
    ResourceKey = ResourceKey
    TorchDictFolderFilename = TorchDictFolderFilename
    Resources = Resources
    ResourcesLocal = ResourcesLocal
```
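A short usage sketch for the cache above (the LoRA filenames are hypothetical; in real V3 nodes the executor supplies a `Resources` instance as `cls.resources`, as the `V3LoraLoader` test node later in this diff shows):

```python
# Hypothetical usage of ResourcesLocal as a per-execution cache of torch files.
cache = ResourcesLocal()
key = TorchDictFolderFilename("loras", "my_lora.safetensors")  # hypothetical file

sd = cache.get(key)  # first call loads from disk (and raises if the file is missing)
sd = cache.get(key)  # second call is served from cache.local_resources

# With an explicit default, a missing file yields the default instead of raising.
maybe_sd = cache.get(TorchDictFolderFilename("loras", "absent.safetensors"), default=None)
```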
comfy_api/latest/_ui.py (new file, 505 lines)

```python
from __future__ import annotations

import json
import os
import random
from io import BytesIO
from typing import Type

import av
import numpy as np
import torch
import torchaudio
from PIL import Image as PILImage
from PIL.PngImagePlugin import PngInfo

import folder_paths

# used for image preview
from comfy.cli_args import args
from comfy_api.latest._io import ComfyNode, FolderType, Image, _UIOutput


class SavedResult(dict):
    def __init__(self, filename: str, subfolder: str, type: FolderType):
        super().__init__(filename=filename, subfolder=subfolder, type=type.value)

    @property
    def filename(self) -> str:
        return self["filename"]

    @property
    def subfolder(self) -> str:
        return self["subfolder"]

    @property
    def type(self) -> FolderType:
        return FolderType(self["type"])


class SavedImages(_UIOutput):
    """A UI output class to represent one or more saved images, potentially animated."""
    def __init__(self, results: list[SavedResult], is_animated: bool = False):
        super().__init__()
        self.results = results
        self.is_animated = is_animated

    def as_dict(self) -> dict:
        data = {"images": self.results}
        if self.is_animated:
            data["animated"] = (True,)
        return data


class SavedAudios(_UIOutput):
    """UI wrapper around one or more audio files on disk (FLAC / MP3 / Opus)."""
    def __init__(self, results: list[SavedResult]):
        super().__init__()
        self.results = results

    def as_dict(self) -> dict:
        return {"audio": self.results}


def _get_directory_by_folder_type(folder_type: FolderType) -> str:
    if folder_type == FolderType.input:
        return folder_paths.get_input_directory()
    if folder_type == FolderType.output:
        return folder_paths.get_output_directory()
    return folder_paths.get_temp_directory()


class ImageSaveHelper:
    """A helper class with static methods to handle image saving and metadata."""

    @staticmethod
    def _convert_tensor_to_pil(image_tensor: torch.Tensor) -> PILImage.Image:
        """Converts a single torch tensor to a PIL Image."""
        return PILImage.fromarray(np.clip(255.0 * image_tensor.cpu().numpy(), 0, 255).astype(np.uint8))

    @staticmethod
    def _create_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None:
        """Creates a PngInfo object with prompt and extra_pnginfo."""
        if args.disable_metadata or cls is None or not cls.hidden:
            return None
        metadata = PngInfo()
        if cls.hidden.prompt:
            metadata.add_text("prompt", json.dumps(cls.hidden.prompt))
        if cls.hidden.extra_pnginfo:
            for x in cls.hidden.extra_pnginfo:
                metadata.add_text(x, json.dumps(cls.hidden.extra_pnginfo[x]))
        return metadata

    @staticmethod
    def _create_animated_png_metadata(cls: Type[ComfyNode] | None) -> PngInfo | None:
        """Creates a PngInfo object with prompt and extra_pnginfo for animated PNGs (APNG)."""
        if args.disable_metadata or cls is None or not cls.hidden:
            return None
        metadata = PngInfo()
        if cls.hidden.prompt:
            metadata.add(
                b"comf",
                "prompt".encode("latin-1", "strict")
                + b"\0"
                + json.dumps(cls.hidden.prompt).encode("latin-1", "strict"),
                after_idat=True,
            )
        if cls.hidden.extra_pnginfo:
            for x in cls.hidden.extra_pnginfo:
                metadata.add(
                    b"comf",
                    x.encode("latin-1", "strict")
                    + b"\0"
                    + json.dumps(cls.hidden.extra_pnginfo[x]).encode("latin-1", "strict"),
                    after_idat=True,
                )
        return metadata

    @staticmethod
    def _create_webp_metadata(pil_image: PILImage.Image, cls: Type[ComfyNode] | None) -> PILImage.Exif:
        """Creates EXIF metadata bytes for WebP images."""
        exif_data = pil_image.getexif()
        if args.disable_metadata or cls is None or cls.hidden is None:
            return exif_data
        if cls.hidden.prompt is not None:
            exif_data[0x0110] = "prompt:{}".format(json.dumps(cls.hidden.prompt))  # EXIF 0x0110 = Model
        if cls.hidden.extra_pnginfo is not None:
            inital_exif_tag = 0x010F  # EXIF 0x010f = Make
            for key, value in cls.hidden.extra_pnginfo.items():
                exif_data[inital_exif_tag] = "{}:{}".format(key, json.dumps(value))
                inital_exif_tag -= 1
        return exif_data

    @staticmethod
    def save_images(
        images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, compress_level=4,
    ) -> list[SavedResult]:
        """Saves a batch of images as individual PNG files."""
        full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path(
            filename_prefix, _get_directory_by_folder_type(folder_type), images[0].shape[1], images[0].shape[0]
        )
        results = []
        metadata = ImageSaveHelper._create_png_metadata(cls)
        for batch_number, image_tensor in enumerate(images):
            img = ImageSaveHelper._convert_tensor_to_pil(image_tensor)
            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
            file = f"{filename_with_batch_num}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level)
            results.append(SavedResult(file, subfolder, folder_type))
            counter += 1
        return results

    @staticmethod
    def get_save_images_ui(images, filename_prefix: str, cls: Type[ComfyNode] | None, compress_level=4) -> SavedImages:
        """Saves a batch of images and returns a UI object for the node output."""
        return SavedImages(
            ImageSaveHelper.save_images(
                images,
                filename_prefix=filename_prefix,
                folder_type=FolderType.output,
                cls=cls,
                compress_level=compress_level,
            )
        )

    @staticmethod
    def save_animated_png(
        images, filename_prefix: str, folder_type: FolderType, cls: Type[ComfyNode] | None, fps: float, compress_level: int
    ) -> SavedResult:
        """Saves a batch of images as a single animated PNG."""
        full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path(
            filename_prefix, _get_directory_by_folder_type(folder_type), images[0].shape[1], images[0].shape[0]
        )
        pil_images = [ImageSaveHelper._convert_tensor_to_pil(img) for img in images]
        metadata = ImageSaveHelper._create_animated_png_metadata(cls)
        file = f"{filename}_{counter:05}_.png"
        save_path = os.path.join(full_output_folder, file)
        pil_images[0].save(
            save_path,
            pnginfo=metadata,
            compress_level=compress_level,
            save_all=True,
            duration=int(1000.0 / fps),
            append_images=pil_images[1:],
        )
        return SavedResult(file, subfolder, folder_type)

    @staticmethod
    def get_save_animated_png_ui(
        images, filename_prefix: str, cls: Type[ComfyNode] | None, fps: float, compress_level: int
    ) -> SavedImages:
        """Saves an animated PNG and returns a UI object for the node output."""
        result = ImageSaveHelper.save_animated_png(
            images,
            filename_prefix=filename_prefix,
            folder_type=FolderType.output,
            cls=cls,
            fps=fps,
            compress_level=compress_level,
        )
        return SavedImages([result], is_animated=len(images) > 1)

    @staticmethod
    def save_animated_webp(
        images,
        filename_prefix: str,
        folder_type: FolderType,
        cls: Type[ComfyNode] | None,
        fps: float,
        lossless: bool,
        quality: int,
        method: int,
    ) -> SavedResult:
        """Saves a batch of images as a single animated WebP."""
        full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path(
            filename_prefix, _get_directory_by_folder_type(folder_type), images[0].shape[1], images[0].shape[0]
        )
        pil_images = [ImageSaveHelper._convert_tensor_to_pil(img) for img in images]
        pil_exif = ImageSaveHelper._create_webp_metadata(pil_images[0], cls)
        file = f"{filename}_{counter:05}_.webp"
        pil_images[0].save(
            os.path.join(full_output_folder, file),
            save_all=True,
            duration=int(1000.0 / fps),
            append_images=pil_images[1:],
            exif=pil_exif,
            lossless=lossless,
            quality=quality,
            method=method,
        )
        return SavedResult(file, subfolder, folder_type)

    @staticmethod
    def get_save_animated_webp_ui(
        images,
        filename_prefix: str,
        cls: Type[ComfyNode] | None,
        fps: float,
        lossless: bool,
        quality: int,
        method: int,
    ) -> SavedImages:
        """Saves an animated WebP and returns a UI object for the node output."""
        result = ImageSaveHelper.save_animated_webp(
            images,
            filename_prefix=filename_prefix,
            folder_type=FolderType.output,
            cls=cls,
            fps=fps,
            lossless=lossless,
            quality=quality,
            method=method,
        )
        return SavedImages([result], is_animated=len(images) > 1)


class AudioSaveHelper:
    """A helper class with static methods to handle audio saving and metadata."""
    _OPUS_RATES = [8000, 12000, 16000, 24000, 48000]

    @staticmethod
    def save_audio(
        audio: dict,
        filename_prefix: str,
        folder_type: FolderType,
        cls: Type[ComfyNode] | None,
        format: str = "flac",
        quality: str = "128k",
    ) -> list[SavedResult]:
        full_output_folder, filename, counter, subfolder, _ = folder_paths.get_save_image_path(
            filename_prefix, _get_directory_by_folder_type(folder_type)
        )

        metadata = {}
        if not args.disable_metadata and cls is not None:
            if cls.hidden.prompt is not None:
                metadata["prompt"] = json.dumps(cls.hidden.prompt)
            if cls.hidden.extra_pnginfo is not None:
                for x in cls.hidden.extra_pnginfo:
                    metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])

        results = []
        for batch_number, waveform in enumerate(audio["waveform"].cpu()):
            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
            file = f"{filename_with_batch_num}_{counter:05}_.{format}"
            output_path = os.path.join(full_output_folder, file)

            # Use original sample rate initially
            sample_rate = audio["sample_rate"]

            # Handle Opus sample rate requirements
            if format == "opus":
                if sample_rate > 48000:
                    sample_rate = 48000
                elif sample_rate not in AudioSaveHelper._OPUS_RATES:
                    # Find the next highest supported rate
                    for rate in sorted(AudioSaveHelper._OPUS_RATES):
                        if rate > sample_rate:
                            sample_rate = rate
                            break
                    if sample_rate not in AudioSaveHelper._OPUS_RATES:  # Fallback if still not supported
                        sample_rate = 48000

                # Resample if necessary
                if sample_rate != audio["sample_rate"]:
                    waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate)

            # Create output with specified format
            output_buffer = BytesIO()
            output_container = av.open(output_buffer, mode="w", format=format)

            # Set metadata on the container
            for key, value in metadata.items():
                output_container.metadata[key] = value

            # Set up the output stream with appropriate properties
            if format == "opus":
                out_stream = output_container.add_stream("libopus", rate=sample_rate)
                if quality == "64k":
                    out_stream.bit_rate = 64000
                elif quality == "96k":
                    out_stream.bit_rate = 96000
                elif quality == "128k":
                    out_stream.bit_rate = 128000
                elif quality == "192k":
                    out_stream.bit_rate = 192000
                elif quality == "320k":
                    out_stream.bit_rate = 320000
            elif format == "mp3":
                out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
                if quality == "V0":
                    # TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
                    out_stream.codec_context.qscale = 1
                elif quality == "128k":
                    out_stream.bit_rate = 128000
                elif quality == "320k":
                    out_stream.bit_rate = 320000
            else:  # format == "flac"
                out_stream = output_container.add_stream("flac", rate=sample_rate)

            frame = av.AudioFrame.from_ndarray(
                waveform.movedim(0, 1).reshape(1, -1).float().numpy(),
                format="flt",
                layout="mono" if waveform.shape[0] == 1 else "stereo",
            )
            frame.sample_rate = sample_rate
            frame.pts = 0
            output_container.mux(out_stream.encode(frame))

            # Flush encoder
            output_container.mux(out_stream.encode(None))

            # Close containers
            output_container.close()

            # Write the output to file
            output_buffer.seek(0)
            with open(output_path, "wb") as f:
                f.write(output_buffer.getbuffer())

            results.append(SavedResult(file, subfolder, folder_type))
            counter += 1

        return results

    @staticmethod
    def get_save_audio_ui(
        audio, filename_prefix: str, cls: Type[ComfyNode] | None, format: str = "flac", quality: str = "128k",
    ) -> SavedAudios:
        """Save and instantly wrap for UI."""
        return SavedAudios(
            AudioSaveHelper.save_audio(
                audio,
                filename_prefix=filename_prefix,
                folder_type=FolderType.output,
                cls=cls,
                format=format,
                quality=quality,
            )
        )


class PreviewImage(_UIOutput):
    def __init__(self, image: Image.Type, animated: bool = False, cls: Type[ComfyNode] = None, **kwargs):
        self.values = ImageSaveHelper.save_images(
            image,
            filename_prefix="ComfyUI_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for _ in range(5)),
            folder_type=FolderType.temp,
            cls=cls,
            compress_level=1,
        )
        self.animated = animated

    def as_dict(self):
        return {
            "images": self.values,
            "animated": (self.animated,)
        }


class PreviewMask(PreviewImage):
    def __init__(self, mask: PreviewMask.Type, animated: bool = False, cls: ComfyNode = None, **kwargs):
        preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
        super().__init__(preview, animated, cls, **kwargs)


# class UILatent(_UIOutput):
#     def __init__(self, values: list[SavedResult | dict], **kwargs):
#         output_dir = folder_paths.get_temp_directory()
#         type = "temp"
#         prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
#         compress_level = 1
#         filename_prefix = "ComfyUI"
#
#         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
#
#         # support save metadata for latent sharing
#         prompt_info = ""
#         if prompt is not None:
#             prompt_info = json.dumps(prompt)
#
#         metadata = None
#         if not args.disable_metadata:
#             metadata = {"prompt": prompt_info}
#             if extra_pnginfo is not None:
#                 for x in extra_pnginfo:
#                     metadata[x] = json.dumps(extra_pnginfo[x])
#
#         file = f"{filename}_{counter:05}_.latent"
#
#         results: list[FileLocator] = []
#         results.append({
#             "filename": file,
#             "subfolder": subfolder,
#             "type": "output"
#         })
#
#         file = os.path.join(full_output_folder, file)
#
#         output = {}
#         output["latent_tensor"] = samples["samples"].contiguous()
#         output["latent_format_version_0"] = torch.tensor([])
#
#         comfy.utils.save_torch_file(output, file, metadata=metadata)
#
#         self.values = values
#
#     def as_dict(self):
#         return {
#             "latents": self.values,
#         }


class PreviewAudio(_UIOutput):
    def __init__(self, audio: dict, cls: Type[ComfyNode] = None, **kwargs):
        self.values = AudioSaveHelper.save_audio(
            audio,
            filename_prefix="ComfyUI_temp_" + "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(5)),
            folder_type=FolderType.temp,
            cls=cls,
            format="flac",
            quality="128k",
        )

    def as_dict(self) -> dict:
        return {"audio": self.values}


class PreviewVideo(_UIOutput):
    def __init__(self, values: list[SavedResult | dict], **kwargs):
        self.values = values

    def as_dict(self):
        return {"images": self.values, "animated": (True,)}


class PreviewUI3D(_UIOutput):
    def __init__(self, model_file, camera_info, **kwargs):
        self.model_file = model_file
        self.camera_info = camera_info

    def as_dict(self):
        return {"result": [self.model_file, self.camera_info]}


class PreviewText(_UIOutput):
    def __init__(self, value: str, **kwargs):
        self.value = value

    def as_dict(self):
        return {"text": (self.value,)}


class _UI:
    SavedResult = SavedResult
    SavedImages = SavedImages
    SavedAudios = SavedAudios
    ImageSaveHelper = ImageSaveHelper
    AudioSaveHelper = AudioSaveHelper
    PreviewImage = PreviewImage
    PreviewMask = PreviewMask
    PreviewAudio = PreviewAudio
    PreviewVideo = PreviewVideo
    PreviewUI3D = PreviewUI3D
    PreviewText = PreviewText
```
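For orientation, a sketch of how a node might hand these helpers to the frontend from its `execute` method (a hypothetical node; assumes the `io` and `ui` modules from `comfy_api.latest`, with `define_schema` elided):

```python
# Hypothetical V3 node body: save the image batch, then wrap it as UI output.
from comfy_api.latest import io, ui

class SaveImagesExample(io.ComfyNode):
    # define_schema() elided for brevity

    @classmethod
    def execute(cls, images) -> io.NodeOutput:
        saved = ui.ImageSaveHelper.get_save_images_ui(images, filename_prefix="ComfyUI", cls=cls)
        return io.NodeOutput(ui=saved)
```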
@@ -6,6 +6,7 @@ from comfy_api.latest import (

```python
)
from typing import Type, TYPE_CHECKING
from comfy_api.internal.async_to_sync import create_sync_class
from comfy_api.latest import io, ui, resources  #noqa: F401


class ComfyAPIAdapter_v0_0_2(ComfyAPI_latest):
```
comfy_extras/nodes_v1_test.py (new file, 77 lines)

```python
import torch
from comfy.comfy_types.node_typing import ComfyNodeABC, IO
import asyncio
from comfy.utils import ProgressBar
import time


class TestNode(ComfyNodeABC):

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": (IO.IMAGE,),
                "some_int": (IO.INT, {"display_name": "new_name",
                                      "min": 0, "max": 127, "default": 42,
                                      "tooltip": "My tooltip 😎", "display": "slider"}),
                "combo": (IO.COMBO, {"options": ["a", "b", "c"], "tooltip": "This is a combo input"}),
                "combo2": (IO.COMBO, {"options": ["a", "b", "c"], "multi_select": True, "tooltip": "This is a combo input"}),
            },
            "optional": {
                "xyz": ("XYZ",),
                "mask": (IO.MASK,),
            }
        }

    RETURN_TYPES = (IO.INT, IO.IMAGE)
    RETURN_NAMES = ("INT", "img🖼️")
    OUTPUT_TOOLTIPS = (None, "This is an image")
    FUNCTION = "do_thing"

    OUTPUT_NODE = True

    CATEGORY = "v3 nodes"

    def do_thing(self, image: torch.Tensor, some_int: int, combo: str, combo2: list[str], xyz=None, mask: torch.Tensor=None):
        return (some_int, image)


class TestSleep(ComfyNodeABC):
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": (IO.ANY, {}),
                "seconds": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 0.01, "tooltip": "The amount of seconds to sleep."}),
            },
            "hidden": {
                "unique_id": "UNIQUE_ID",
            },
        }
    RETURN_TYPES = (IO.ANY,)
    FUNCTION = "sleep"

    CATEGORY = "_for_testing"

    async def sleep(self, value, seconds, unique_id):
        pbar = ProgressBar(seconds, node_id=unique_id)
        start = time.time()
        expiration = start + seconds
        now = start
        while now < expiration:
            now = time.time()
            pbar.update_absolute(now - start)
            await asyncio.sleep(0.02)
        return (value,)


NODE_CLASS_MAPPINGS = {
    "V1TestNode1": TestNode,
    "V1TestSleep": TestSleep,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "V1TestNode1": "V1 Test Node",
    "V1TestSleep": "V1 Test Sleep",
}
```
comfy_extras/nodes_v3_test.py (new file, 285 lines)

```python
import torch
import time
from comfy_api.latest import io, ui, resources, _io
import logging  # noqa
import folder_paths
import comfy.utils
import comfy.sd
import asyncio


@io.comfytype(io_type="XYZ")
class XYZ(io.ComfyTypeIO):
    Type = tuple[int, str]


class V3TestNode(io.ComfyNode):
    # NOTE: this is here just to test that state is not leaking
    def __init__(self):
        super().__init__()
        self.hahajkunless = ";)"

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="V3_01_TestNode1",
            display_name="V3 Test Node",
            category="v3 nodes",
            description="This is a funky V3 node test.",
            inputs=[
                io.Image.Input("image", display_name="new_image"),
                XYZ.Input("xyz", optional=True),
                io.Custom("JKL").Input("jkl", optional=True),
                io.Mask.Input("mask", display_name="mask haha", optional=True),
                io.Int.Input("some_int", display_name="new_name", min=0, max=127, default=42,
                             tooltip="My tooltip 😎", display_mode=io.NumberDisplay.slider),
                io.Combo.Input("combo", options=["a", "b", "c"], tooltip="This is a combo input"),
                io.MultiCombo.Input("combo2", options=["a", "b", "c"]),
                io.MultiType.Input(io.Int.Input("int_multitype", display_name="haha"), types=[io.Float]),
                io.MultiType.Input("multitype", types=[io.Mask, io.Float, io.Int], optional=True),
                # ComboInput("combo", image_upload=True, image_folder=FolderType.output,
                #            remote=RemoteOptions(
                #                route="/internal/files/output",
                #                refresh_button=True,
                #            ),
                #            tooltip="This is a combo input"),
                # IntegerInput("some_int", display_name="new_name", min=0, tooltip="My tooltip 😎", display=NumberDisplay.slider, ),
                # ComboDynamicInput("mask", behavior=InputBehavior.optional),
                # IntegerInput("some_int", display_name="new_name", min=0, tooltip="My tooltip 😎", display=NumberDisplay.slider,
                #              dependent_inputs=[ComboDynamicInput("mask", behavior=InputBehavior.optional)],
                #              dependent_values=[lambda my_value: IO.STRING if my_value < 5 else IO.NUMBER],
                #              ),
                # ["option1", "option2". "option3"]
                # ComboDynamicInput["sdfgjhl", [ComboDynamicOptions("option1", [IntegerInput("some_int", display_name="new_name", min=0, tooltip="My tooltip 😎", display=NumberDisplay.slider, ImageInput(), MaskInput(), String()]),
                #                               CombyDynamicOptons("option2", [])
                #                               ]]
            ],
            outputs=[
                io.Int.Output(),
                io.Image.Output(display_name="img🖼️", tooltip="This is an image"),
            ],
            hidden=[
                io.Hidden.prompt,
                io.Hidden.auth_token_comfy_org,
                io.Hidden.unique_id,
            ],
            is_output_node=True,
        )

    @classmethod
    def validate_inputs(cls, image: io.Image.Type, some_int: int, combo: io.Combo.Type, combo2: io.MultiCombo.Type, xyz: XYZ.Type=None, mask: io.Mask.Type=None, **kwargs):
        if some_int < 0:
            raise Exception("some_int must be greater than 0")
        if combo == "c":
            raise Exception("combo must be a or b")
        return True

    @classmethod
    def execute(cls, image: io.Image.Type, some_int: int, combo: io.Combo.Type, combo2: io.MultiCombo.Type, xyz: XYZ.Type=None, mask: io.Mask.Type=None, **kwargs):
        if hasattr(cls, "hahajkunless"):
            raise Exception("The 'cls' variable leaked instance state between runs!")
        if hasattr(cls, "doohickey"):
            raise Exception("The 'cls' variable leaked state on class properties between runs!")
        try:
            cls.doohickey = "LOLJK"
        except AttributeError:
            pass
        return io.NodeOutput(some_int, image, ui=ui.PreviewImage(image, cls=cls))


class V3LoraLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="V3_LoraLoader",
            display_name="V3 LoRA Loader",
            category="v3 nodes",
            description="LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together.",
            inputs=[
                io.Model.Input("model", tooltip="The diffusion model the LoRA will be applied to."),
                io.Clip.Input("clip", tooltip="The CLIP model the LoRA will be applied to."),
                io.Combo.Input(
                    "lora_name",
                    options=folder_paths.get_filename_list("loras"),
                    tooltip="The name of the LoRA."
                ),
                io.Float.Input(
                    "strength_model",
                    default=1.0,
                    min=-100.0,
                    max=100.0,
                    step=0.01,
                    tooltip="How strongly to modify the diffusion model. This value can be negative."
                ),
                io.Float.Input(
                    "strength_clip",
                    default=1.0,
                    min=-100.0,
                    max=100.0,
                    step=0.01,
                    tooltip="How strongly to modify the CLIP model. This value can be negative."
                ),
            ],
            outputs=[
                io.Model.Output(),
                io.Clip.Output(),
            ],
        )

    @classmethod
    def execute(cls, model: io.Model.Type, clip: io.Clip.Type, lora_name: str, strength_model: float, strength_clip: float, **kwargs):
        if strength_model == 0 and strength_clip == 0:
            return io.NodeOutput(model, clip)

        lora = cls.resources.get(resources.TorchDictFolderFilename("loras", lora_name))

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return io.NodeOutput(model_lora, clip_lora)


class NInputsTest(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="V3_NInputsTest",
            display_name="V3 N Inputs Test",
            inputs=[
                _io.AutogrowDynamic.Input("nmock", template_input=io.Image.Input("image"), min=1, max=3),
                _io.AutogrowDynamic.Input("nmock2", template_input=io.Int.Input("int"), optional=True, min=1, max=4),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def validate_inputs(cls, nmock, nmock2):
        return True

    @classmethod
    def fingerprint_inputs(cls, nmock, nmock2):
        return time.time()

    @classmethod
    def check_lazy_status(cls, **kwargs) -> list[str]:
        need = [name for name in kwargs if kwargs[name] is None]
        return need

    @classmethod
    def execute(cls, nmock, nmock2):
        first_image = nmock[0]
        all_images = []
        for img in nmock:
            if img.shape != first_image.shape:
                img = img.movedim(-1, 1)
                img = comfy.utils.common_upscale(img, first_image.shape[2], first_image.shape[1], "lanczos", "center")
                img = img.movedim(1, -1)
            all_images.append(img)
        combined_image = torch.cat(all_images, dim=0)
        return io.NodeOutput(combined_image)


class V3TestSleep(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="V3_TestSleep",
            display_name="V3 Test Sleep",
            category="_for_testing",
            description="Test async sleep functionality.",
            inputs=[
                io.AnyType.Input("value", display_name="Value"),
                io.Float.Input("seconds", display_name="Seconds", default=1.0, min=0.0, max=9999.0, step=0.01, tooltip="The amount of seconds to sleep."),
            ],
            outputs=[
                io.AnyType.Output(),
            ],
            hidden=[
                io.Hidden.unique_id,
            ],
            is_experimental=True,
        )

    @classmethod
    async def execute(cls, value: io.AnyType.Type, seconds: io.Float.Type, **kwargs):
        logging.info(f"V3TestSleep: {cls.hidden.unique_id}")
        pbar = comfy.utils.ProgressBar(seconds, node_id=cls.hidden.unique_id)
        start = time.time()
        expiration = start + seconds
        now = start
        while now < expiration:
            now = time.time()
            pbar.update_absolute(now - start)
            await asyncio.sleep(0.02)
        return io.NodeOutput(value)


class V3DummyStart(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="V3_DummyStart",
            display_name="V3 Dummy Start",
            category="v3 nodes",
            description="This is a dummy start node.",
            inputs=[],
            outputs=[
                io.Custom("XYZ").Output(),
            ],
        )

    @classmethod
    def execute(cls):
        return io.NodeOutput(None)


class V3DummyEnd(io.ComfyNode):
    COOL_VALUE = 123

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="V3_DummyEnd",
            display_name="V3 Dummy End",
            category="v3 nodes",
            description="This is a dummy end node.",
            inputs=[
                io.Custom("XYZ").Input("xyz"),
            ],
            outputs=[],
            is_output_node=True,
        )

    @classmethod
    def custom_action(cls):
        return 456

    @classmethod
    def execute(cls, xyz: io.Custom("XYZ").Type):
        logging.info(f"V3DummyEnd: {cls.COOL_VALUE}")
        logging.info(f"V3DummyEnd: {cls.custom_action()}")
        return


class V3DummyEndInherit(V3DummyEnd):
    @classmethod
    def define_schema(cls):
        schema = super().define_schema()
        schema.node_id = "V3_DummyEndInherit"
        schema.display_name = "V3 Dummy End Inherit"
        return schema

    @classmethod
    def execute(cls, xyz: io.Custom("XYZ").Type):
        logging.info(f"V3DummyEndInherit: {cls.COOL_VALUE}")
        return super().execute(xyz)


NODES_LIST: list[type[io.ComfyNode]] = [
    V3TestNode,
    V3LoraLoader,
    NInputsTest,
    V3TestSleep,
    V3DummyStart,
    V3DummyEnd,
    V3DummyEndInherit,
]
```
comfy_extras/v3/nodes_ace.py (new file, 57 lines)

```python
from __future__ import annotations

import torch

import comfy.model_management
import node_helpers
from comfy_api.latest import io


class TextEncodeAceStepAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TextEncodeAceStepAudio_V3",
            category="conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("tags", multiline=True, dynamic_prompts=True),
                io.String.Input("lyrics", multiline=True, dynamic_prompts=True),
                io.Float.Input("lyrics_strength", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Conditioning.Output()],
        )

    @classmethod
    def execute(cls, clip, tags, lyrics, lyrics_strength) -> io.NodeOutput:
        conditioning = clip.encode_from_tokens_scheduled(clip.tokenize(tags, lyrics=lyrics))
        conditioning = node_helpers.conditioning_set_values(conditioning, {"lyrics_strength": lyrics_strength})
        return io.NodeOutput(conditioning)


class EmptyAceStepLatentAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptyAceStepLatentAudio_V3",
            category="latent/audio",
            inputs=[
                io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
                io.Int.Input(
                    "batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."
                ),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, seconds, batch_size) -> io.NodeOutput:
        length = int(seconds * 44100 / 512 / 8)
        latent = torch.zeros([batch_size, 8, 16, length], device=comfy.model_management.intermediate_device())
        return io.NodeOutput({"samples": latent, "type": "audio"})


NODES_LIST: list[type[io.ComfyNode]] = [
    EmptyAceStepLatentAudio,
    TextEncodeAceStepAudio,
]
```
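For a concrete sense of the latent sizing in `EmptyAceStepLatentAudio.execute` (my reading: 44100 waveform samples per second, with a combined downsampling factor of 512 * 8 = 4096 between waveform samples and latent positions):

```python
# Default of 120 seconds of 44.1 kHz audio:
length = int(120.0 * 44100 / 512 / 8)
assert length == 1291  # -> a [batch_size, 8, 16, 1291] latent tensor
```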
comfy_extras/v3/nodes_advanced_samplers.py (new file, 128 lines)

```python
import numpy as np
import torch
from tqdm.auto import trange

import comfy.model_patcher
import comfy.samplers
import comfy.utils
from comfy.k_diffusion.sampling import to_d
from comfy_api.latest import io


@torch.no_grad()
def sample_lcm_upscale(
    model, x, sigmas, extra_args=None, callback=None, disable=None, total_upscale=2.0, upscale_method="bislerp", upscale_steps=None
):
    extra_args = {} if extra_args is None else extra_args

    if upscale_steps is None:
        upscale_steps = max(len(sigmas) // 2 + 1, 2)
    else:
        upscale_steps += 1
        upscale_steps = min(upscale_steps, len(sigmas) + 1)

    upscales = np.linspace(1.0, total_upscale, upscale_steps)[1:]

    orig_shape = x.size()
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})

        x = denoised
        if i < len(upscales):
            x = comfy.utils.common_upscale(
                x, round(orig_shape[-1] * upscales[i]), round(orig_shape[-2] * upscales[i]), upscale_method, "disabled"
            )

        if sigmas[i + 1] > 0:
            x += sigmas[i + 1] * torch.randn_like(x)
    return x


class SamplerLCMUpscale(io.ComfyNode):
    UPSCALE_METHODS = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]

    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="SamplerLCMUpscale_V3",
            category="sampling/custom_sampling/samplers",
            inputs=[
                io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
                io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
                io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS),
            ],
            outputs=[io.Sampler.Output()],
        )

    @classmethod
    def execute(cls, scale_ratio, scale_steps, upscale_method) -> io.NodeOutput:
        if scale_steps < 0:
            scale_steps = None
        sampler = comfy.samplers.KSAMPLER(
            sample_lcm_upscale,
            extra_options={
                "total_upscale": scale_ratio,
                "upscale_steps": scale_steps,
                "upscale_method": upscale_method,
            },
        )
        return io.NodeOutput(sampler)


@torch.no_grad()
def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
    extra_args = {} if extra_args is None else extra_args

    temp = [0]

    def post_cfg_function(args):
        temp[0] = args["uncond_denoised"]
        return args["denoised"]

    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(
        model_options, post_cfg_function, disable_cfg1_optimization=True
    )

    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        sigma_hat = sigmas[i]
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x - denoised + temp[0], sigmas[i], denoised)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
        dt = sigmas[i + 1] - sigma_hat
        x = x + d * dt
    return x


class SamplerEulerCFGpp(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="SamplerEulerCFGpp_V3",
            display_name="SamplerEulerCFG++ _V3",
            category="_for_testing",
            inputs=[
                io.Combo.Input("version", options=["regular", "alternative"]),
            ],
            outputs=[io.Sampler.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, version) -> io.NodeOutput:
        if version == "alternative":
            sampler = comfy.samplers.KSAMPLER(sample_euler_pp)
        else:
            sampler = comfy.samplers.ksampler("euler_cfg_pp")
        return io.NodeOutput(sampler)


NODES_LIST: list[type[io.ComfyNode]] = [
    SamplerEulerCFGpp,
    SamplerLCMUpscale,
]
```
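The upscale schedule used by `sample_lcm_upscale` is easy to inspect in isolation; a small sketch of the default path, where roughly the first half of the sampling steps each apply a growing scale factor:

```python
import numpy as np

# With 8 sigmas (7 sampling steps) and total_upscale=2.0, the default
# upscale_steps = max(8 // 2 + 1, 2) = 5 yields 4 intermediate factors.
upscales = np.linspace(1.0, 2.0, max(8 // 2 + 1, 2))[1:]
print(upscales)  # [1.25 1.5  1.75 2.  ], applied on the first 4 of the 7 steps
```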
comfy_extras/v3/nodes_align_your_steps.py (new file, 84 lines)

```python
# from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html

import numpy as np
import torch

from comfy_api.latest import io


def loglinear_interp(t_steps, num_steps):
    """Performs log-linear interpolation of a given array of decreasing numbers."""
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])

    new_xs = np.linspace(0, 1, num_steps)
    new_ys = np.interp(new_xs, xs, ys)

    return np.exp(new_ys)[::-1].copy()


NOISE_LEVELS = {
    "SD1": [
        14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177,
        1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582,
    ],
    "SDXL": [
        14.6146412293, 6.3184485287, 3.7681790315, 2.1811480769, 1.3405244945,
        0.8620721141, 0.5550693289, 0.3798540708, 0.2332364134, 0.1114188177, 0.0291671582,
    ],
    "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002],
}


class AlignYourStepsScheduler(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="AlignYourStepsScheduler_V3",
            category="sampling/custom_sampling/schedulers",
            inputs=[
                io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),
                io.Int.Input("steps", default=10, min=1, max=10000),
                io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[io.Sigmas.Output()],
        )

    @classmethod
    def execute(cls, model_type, steps, denoise) -> io.NodeOutput:
        total_steps = steps
        if denoise < 1.0:
            if denoise <= 0.0:
                return io.NodeOutput(torch.FloatTensor([]))
            total_steps = round(steps * denoise)

        sigmas = NOISE_LEVELS[model_type][:]
        if (steps + 1) != len(sigmas):
            sigmas = loglinear_interp(sigmas, steps + 1)

        sigmas = sigmas[-(total_steps + 1):]
        sigmas[-1] = 0
        return io.NodeOutput(torch.FloatTensor(sigmas))


NODES_LIST: list[type[io.ComfyNode]] = [
    AlignYourStepsScheduler,
]
```
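To see what `loglinear_interp` does with the tables above: it maps the decreasing sigma list into log space, resamples it at evenly spaced points, and maps back, so the endpoints are preserved exactly while the interior values are re-spaced:

```python
# Resampling the 11-entry SD1 schedule down to 5 sigmas keeps both endpoints.
sigmas = loglinear_interp(NOISE_LEVELS["SD1"], 5)
print(sigmas[0], sigmas[-1])  # 14.6146412293 and 0.0291671582, as in the table
```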
comfy_extras/v3/nodes_apg.py (new file, 98 lines)

```python
import torch

from comfy_api.latest import io


def project(v0, v1):
    v1 = torch.nn.functional.normalize(v1, dim=[-1, -2, -3])
    v0_parallel = (v0 * v1).sum(dim=[-1, -2, -3], keepdim=True) * v1
    v0_orthogonal = v0 - v0_parallel
    return v0_parallel, v0_orthogonal


class APG(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="APG_V3",
            display_name="Adaptive Projected Guidance _V3",
            category="sampling/custom_sampling",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input(
                    "eta",
                    default=1.0,
                    min=-10.0,
                    max=10.0,
                    step=0.01,
                    tooltip="Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1.",
                ),
                io.Float.Input(
                    "norm_threshold",
                    default=5.0,
                    min=0.0,
                    max=50.0,
                    step=0.1,
                    tooltip="Normalize the guidance vector to this value; normalization is disabled at a setting of 0.",
                ),
                io.Float.Input(
                    "momentum",
                    default=0.0,
                    min=-5.0,
                    max=1.0,
                    step=0.01,
                    tooltip="Controls a running average of guidance during diffusion, disabled at a setting of 0.",
                ),
            ],
            outputs=[io.Model.Output()],
        )

    @classmethod
    def execute(cls, model, eta, norm_threshold, momentum) -> io.NodeOutput:
        running_avg = 0
        prev_sigma = None

        def pre_cfg_function(args):
            nonlocal running_avg, prev_sigma

            if len(args["conds_out"]) == 1:
                return args["conds_out"]

            cond = args["conds_out"][0]
            uncond = args["conds_out"][1]
            sigma = args["sigma"][0]
            cond_scale = args["cond_scale"]

            if prev_sigma is not None and sigma > prev_sigma:
                running_avg = 0
            prev_sigma = sigma

            guidance = cond - uncond

            if momentum != 0:
                if not torch.is_tensor(running_avg):
                    running_avg = guidance
                else:
                    running_avg = momentum * running_avg + guidance
                guidance = running_avg

            if norm_threshold > 0:
                guidance_norm = guidance.norm(p=2, dim=[-1, -2, -3], keepdim=True)
                scale = torch.minimum(torch.ones_like(guidance_norm), norm_threshold / guidance_norm)
                guidance = guidance * scale

            guidance_parallel, guidance_orthogonal = project(guidance, cond)
            modified_guidance = guidance_orthogonal + eta * guidance_parallel

            modified_cond = (uncond + modified_guidance) + (cond - uncond) / cond_scale

            return [modified_cond, uncond] + args["conds_out"][2:]

        m = model.clone()
        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    APG,
]
```
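The `project` helper decomposes the guidance vector into components parallel and orthogonal to the normalized conditional prediction, which is what lets `eta` rescale only the parallel part. A quick property check (4-D tensors as used during sampling; float64 just to keep the tolerances tight):

```python
import torch

v0 = torch.randn(2, 4, 8, 8, dtype=torch.float64)
v1 = torch.randn(2, 4, 8, 8, dtype=torch.float64)
v0_par, v0_orth = project(v0, v1)

# The two components reconstruct v0 and are orthogonal per sample.
assert torch.allclose(v0_par + v0_orth, v0)
dot = (v0_par * v0_orth).sum(dim=[-1, -2, -3])
assert torch.allclose(dot, torch.zeros_like(dot), atol=1e-9)
```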
139
comfy_extras/v3/nodes_attention_multiply.py
Normal file
139
comfy_extras/v3/nodes_attention_multiply.py
Normal file
@ -0,0 +1,139 @@
from comfy_api.latest import io


def attention_multiply(attn, model, q, k, v, out):
    m = model.clone()
    sd = model.model_state_dict()

    for key in sd:
        if key.endswith("{}.to_q.bias".format(attn)) or key.endswith("{}.to_q.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, q)
        if key.endswith("{}.to_k.bias".format(attn)) or key.endswith("{}.to_k.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, k)
        if key.endswith("{}.to_v.bias".format(attn)) or key.endswith("{}.to_v.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, v)
        if key.endswith("{}.to_out.0.bias".format(attn)) or key.endswith("{}.to_out.0.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, out)
    return m


class UNetSelfAttentionMultiply(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="UNetSelfAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, q, k, v, out) -> io.NodeOutput:
        return io.NodeOutput(attention_multiply("attn1", model, q, k, v, out))


class UNetCrossAttentionMultiply(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="UNetCrossAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, q, k, v, out) -> io.NodeOutput:
        return io.NodeOutput(attention_multiply("attn2", model, q, k, v, out))


class CLIPAttentionMultiply(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="CLIPAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Clip.Input("clip"),
                io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Clip.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, clip, q, k, v, out) -> io.NodeOutput:
        m = clip.clone()
        sd = m.patcher.model_state_dict()

        for key in sd:
            if key.endswith("self_attn.q_proj.weight") or key.endswith("self_attn.q_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, q)
            if key.endswith("self_attn.k_proj.weight") or key.endswith("self_attn.k_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, k)
            if key.endswith("self_attn.v_proj.weight") or key.endswith("self_attn.v_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, v)
            if key.endswith("self_attn.out_proj.weight") or key.endswith("self_attn.out_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, out)
        return io.NodeOutput(m)


class UNetTemporalAttentionMultiply(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="UNetTemporalAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("self_temporal", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("cross_structural", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("cross_temporal", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, self_structural, self_temporal, cross_structural, cross_temporal) -> io.NodeOutput:
        m = model.clone()
        sd = model.model_state_dict()

        for k in sd:
            if k.endswith("attn1.to_out.0.bias") or k.endswith("attn1.to_out.0.weight"):
                if '.time_stack.' in k:
                    m.add_patches({k: (None,)}, 0.0, self_temporal)
                else:
                    m.add_patches({k: (None,)}, 0.0, self_structural)
            elif k.endswith("attn2.to_out.0.bias") or k.endswith("attn2.to_out.0.weight"):
                if '.time_stack.' in k:
                    m.add_patches({k: (None,)}, 0.0, cross_temporal)
                else:
                    m.add_patches({k: (None,)}, 0.0, cross_structural)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPAttentionMultiply,
    UNetCrossAttentionMultiply,
    UNetSelfAttentionMultiply,
    UNetTemporalAttentionMultiply,
]
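All four nodes above follow the same pattern: select parameters by state-dict key suffix, then register a patch whose only effect is the strength_model multiplier. If I read the add_patches call correctly, an empty patch with strength_patch=0.0 and strength_model=s rescales the stored weight to s * W at patch time; that semantic is an assumption here, not something this diff shows. A small sketch of just the suffix matching, with hypothetical key names:

# Hypothetical state-dict keys, for illustration only.
sd = [
    "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight",
    "diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_k.weight",
    "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0.bias",
]

attn = "attn1"  # UNetSelfAttentionMultiply targets attn1, the cross variant attn2
for key in sd:
    if key.endswith("{}.to_q.bias".format(attn)) or key.endswith("{}.to_q.weight".format(attn)):
        print(key, "-> scaled by q")
    if key.endswith("{}.to_out.0.bias".format(attn)) or key.endswith("{}.to_out.0.weight".format(attn)):
        print(key, "-> scaled by out")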
290  comfy_extras/v3/nodes_audio.py  Normal file
@@ -0,0 +1,290 @@
from __future__ import annotations

import hashlib
import os

import av
import torch
import torchaudio

import comfy.model_management
import folder_paths
import node_helpers
from comfy_api.latest import io, ui


class EmptyLatentAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptyLatentAudio_V3",
            category="latent/audio",
            inputs=[
                io.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1),
                io.Int.Input(
                    "batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."
                ),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, seconds, batch_size) -> io.NodeOutput:
        length = round((seconds * 44100 / 2048) / 2) * 2
        latent = torch.zeros([batch_size, 64, length], device=comfy.model_management.intermediate_device())
        return io.NodeOutput({"samples": latent, "type": "audio"})


class ConditioningStableAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ConditioningStableAudio_V3",
            category="conditioning",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Float.Input("seconds_start", default=0.0, min=0.0, max=1000.0, step=0.1),
                io.Float.Input("seconds_total", default=47.0, min=0.0, max=1000.0, step=0.1),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, seconds_start, seconds_total) -> io.NodeOutput:
        return io.NodeOutput(
            node_helpers.conditioning_set_values(
                positive, {"seconds_start": seconds_start, "seconds_total": seconds_total}
            ),
            node_helpers.conditioning_set_values(
                negative, {"seconds_start": seconds_start, "seconds_total": seconds_total}
            ),
        )


class VAEEncodeAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VAEEncodeAudio_V3",
            category="latent/audio",
            inputs=[
                io.Audio.Input("audio"),
                io.Vae.Input("vae"),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, vae, audio) -> io.NodeOutput:
        sample_rate = audio["sample_rate"]
        if 44100 != sample_rate:
            waveform = torchaudio.functional.resample(audio["waveform"], sample_rate, 44100)
        else:
            waveform = audio["waveform"]
        return io.NodeOutput({"samples": vae.encode(waveform.movedim(1, -1))})


class VAEDecodeAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VAEDecodeAudio_V3",
            category="latent/audio",
            inputs=[
                io.Latent.Input("samples"),
                io.Vae.Input("vae"),
            ],
            outputs=[io.Audio.Output()],
        )

    @classmethod
    def execute(cls, vae, samples) -> io.NodeOutput:
        audio = vae.decode(samples["samples"]).movedim(-1, 1)
        std = torch.std(audio, dim=[1, 2], keepdim=True) * 5.0
        std[std < 1.0] = 1.0
        audio /= std
        return io.NodeOutput({"waveform": audio, "sample_rate": 44100})


class SaveAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SaveAudio_V3",  # frontend expects "SaveAudio" to work
            display_name="Save Audio _V3",  # frontend ignores "display_name" for this node
            category="audio",
            inputs=[
                io.Audio.Input("audio"),
                io.String.Input("filename_prefix", default="audio/ComfyUI"),
            ],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, audio, filename_prefix="ComfyUI", format="flac") -> io.NodeOutput:
        return io.NodeOutput(
            ui=ui.AudioSaveHelper.get_save_audio_ui(audio, filename_prefix=filename_prefix, cls=cls, format=format)
        )


class SaveAudioMP3(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SaveAudioMP3_V3",  # frontend expects "SaveAudioMP3" to work
            display_name="Save Audio(MP3) _V3",  # frontend ignores "display_name" for this node
            category="audio",
            inputs=[
                io.Audio.Input("audio"),
                io.String.Input("filename_prefix", default="audio/ComfyUI"),
                io.Combo.Input("quality", options=["V0", "128k", "320k"], default="V0"),
            ],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, audio, filename_prefix="ComfyUI", format="mp3", quality="V0") -> io.NodeOutput:
        return io.NodeOutput(
            ui=ui.AudioSaveHelper.get_save_audio_ui(
                audio, filename_prefix=filename_prefix, cls=cls, format=format, quality=quality
            )
        )


class SaveAudioOpus(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SaveAudioOpus_V3",  # frontend expects "SaveAudioOpus" to work
            display_name="Save Audio(Opus) _V3",  # frontend ignores "display_name" for this node
            category="audio",
            inputs=[
                io.Audio.Input("audio"),
                io.String.Input("filename_prefix", default="audio/ComfyUI"),
                io.Combo.Input("quality", options=["64k", "96k", "128k", "192k", "320k"], default="128k"),
            ],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, audio, filename_prefix="ComfyUI", format="opus", quality="128k") -> io.NodeOutput:
        return io.NodeOutput(
            ui=ui.AudioSaveHelper.get_save_audio_ui(
                audio, filename_prefix=filename_prefix, cls=cls, format=format, quality=quality
            )
        )


class PreviewAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PreviewAudio_V3",  # frontend expects "PreviewAudio" to work
            display_name="Preview Audio _V3",  # frontend ignores "display_name" for this node
            category="audio",
            inputs=[
                io.Audio.Input("audio"),
            ],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, audio) -> io.NodeOutput:
        return io.NodeOutput(ui=ui.PreviewAudio(audio, cls=cls))


class LoadAudio(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoadAudio_V3",  # frontend expects "LoadAudio" to work
            display_name="Load Audio _V3",  # frontend ignores "display_name" for this node
            category="audio",
            inputs=[
                io.Combo.Input("audio", upload=io.UploadType.audio, options=cls.get_files_options()),
            ],
            outputs=[io.Audio.Output()],
        )

    @classmethod
    def get_files_options(cls) -> list[str]:
        input_dir = folder_paths.get_input_directory()
        return sorted(folder_paths.filter_files_content_types(os.listdir(input_dir), ["audio", "video"]))

    @classmethod
    def load(cls, filepath: str) -> tuple[torch.Tensor, int]:
        with av.open(filepath) as af:
            if not af.streams.audio:
                raise ValueError("No audio stream found in the file.")

            stream = af.streams.audio[0]
            sr = stream.codec_context.sample_rate
            n_channels = stream.channels

            frames = []
            length = 0
            for frame in af.decode(streams=stream.index):
                buf = torch.from_numpy(frame.to_ndarray())
                if buf.shape[0] != n_channels:
                    buf = buf.view(-1, n_channels).t()

                frames.append(buf)
                length += buf.shape[1]

            if not frames:
                raise ValueError("No audio frames decoded.")

            wav = torch.cat(frames, dim=1)
            wav = cls.f32_pcm(wav)
            return wav, sr

    @classmethod
    def f32_pcm(cls, wav: torch.Tensor) -> torch.Tensor:
        """Convert audio to float 32 bits PCM format."""
        if wav.dtype.is_floating_point:
            return wav
        elif wav.dtype == torch.int16:
            return wav.float() / (2 ** 15)
        elif wav.dtype == torch.int32:
            return wav.float() / (2 ** 31)
        raise ValueError(f"Unsupported wav dtype: {wav.dtype}")

    @classmethod
    def execute(cls, audio) -> io.NodeOutput:
        waveform, sample_rate = cls.load(folder_paths.get_annotated_filepath(audio))
        return io.NodeOutput({"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate})

    @classmethod
    def fingerprint_inputs(s, audio):
        image_path = folder_paths.get_annotated_filepath(audio)
        m = hashlib.sha256()
        with open(image_path, "rb") as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def validate_inputs(s, audio):
        if not folder_paths.exists_annotated_filepath(audio):
            return "Invalid audio file: {}".format(audio)
        return True


NODES_LIST: list[type[io.ComfyNode]] = [
    ConditioningStableAudio,
    EmptyLatentAudio,
    LoadAudio,
    PreviewAudio,
    SaveAudio,
    SaveAudioMP3,
    SaveAudioOpus,
    VAEDecodeAudio,
    VAEEncodeAudio,
]
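EmptyLatentAudio's length formula assumes 44.1 kHz audio and a 2048-sample latent hop, rounding to an even latent length; a quick check of the arithmetic for the 47.6 s default:

def audio_latent_length(seconds: float) -> int:
    # 44100 samples/s, 2048 samples per latent step, rounded to an even count
    return round((seconds * 44100 / 2048) / 2) * 2

print(audio_latent_length(47.6))  # 1024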
217  comfy_extras/v3/nodes_camera_trajectory.py  Normal file
@@ -0,0 +1,217 @@
from __future__ import annotations

import numpy as np
import torch
from einops import rearrange

import comfy.model_management
import nodes
from comfy_api.latest import io

CAMERA_DICT = {
    "base_T_norm": 1.5,
    "base_angle": np.pi / 3,
    "Static": {"angle": [0.0, 0.0, 0.0], "T": [0.0, 0.0, 0.0]},
    "Pan Up": {"angle": [0.0, 0.0, 0.0], "T": [0.0, -1.0, 0.0]},
    "Pan Down": {"angle": [0.0, 0.0, 0.0], "T": [0.0, 1.0, 0.0]},
    "Pan Left": {"angle": [0.0, 0.0, 0.0], "T": [-1.0, 0.0, 0.0]},
    "Pan Right": {"angle": [0.0, 0.0, 0.0], "T": [1.0, 0.0, 0.0]},
    "Zoom In": {"angle": [0.0, 0.0, 0.0], "T": [0.0, 0.0, 2.0]},
    "Zoom Out": {"angle": [0.0, 0.0, 0.0], "T": [0.0, 0.0, -2.0]},
    "Anti Clockwise (ACW)": {"angle": [0.0, 0.0, -1.0], "T": [0.0, 0.0, 0.0]},
    "ClockWise (CW)": {"angle": [0.0, 0.0, 1.0], "T": [0.0, 0.0, 0.0]},
}


def process_pose_params(cam_params, width=672, height=384, original_pose_width=1280, original_pose_height=720, device="cpu"):
    def get_relative_pose(cam_params):
        """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py"""
        abs_w2cs = [cam_param.w2c_mat for cam_param in cam_params]
        abs_c2ws = [cam_param.c2w_mat for cam_param in cam_params]
        cam_to_origin = 0
        target_cam_c2w = np.array([[1, 0, 0, 0], [0, 1, 0, -cam_to_origin], [0, 0, 1, 0], [0, 0, 0, 1]])
        abs2rel = target_cam_c2w @ abs_w2cs[0]
        ret_poses = [target_cam_c2w] + [abs2rel @ abs_c2w for abs_c2w in abs_c2ws[1:]]
        return np.array(ret_poses, dtype=np.float32)

    """Modified from https://github.com/hehao13/CameraCtrl/blob/main/inference.py"""
    cam_params = [Camera(cam_param) for cam_param in cam_params]

    sample_wh_ratio = width / height
    pose_wh_ratio = original_pose_width / original_pose_height  # Assuming placeholder ratios, change as needed

    if pose_wh_ratio > sample_wh_ratio:
        resized_ori_w = height * pose_wh_ratio
        for cam_param in cam_params:
            cam_param.fx = resized_ori_w * cam_param.fx / width
    else:
        resized_ori_h = width / pose_wh_ratio
        for cam_param in cam_params:
            cam_param.fy = resized_ori_h * cam_param.fy / height

    intrinsic = np.asarray(
        [[cam_param.fx * width, cam_param.fy * height, cam_param.cx * width, cam_param.cy * height] for cam_param in cam_params],
        dtype=np.float32,
    )

    K = torch.as_tensor(intrinsic)[None]  # [1, n_frame, 4]
    c2ws = get_relative_pose(cam_params)
    c2ws = torch.as_tensor(c2ws)[None]  # [1, n_frame, 4, 4]
    plucker_embedding = ray_condition(K, c2ws, height, width, device=device)[0].permute(0, 3, 1, 2).contiguous()  # V, 6, H, W
    plucker_embedding = plucker_embedding[None]
    return rearrange(plucker_embedding, "b f c h w -> b f h w c")[0]


class Camera:
    """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py"""

    def __init__(self, entry):
        fx, fy, cx, cy = entry[1:5]
        self.fx = fx
        self.fy = fy
        self.cx = cx
        self.cy = cy
        c2w_mat = np.array(entry[7:]).reshape(4, 4)
        self.c2w_mat = c2w_mat
        self.w2c_mat = np.linalg.inv(c2w_mat)


def ray_condition(K, c2w, H, W, device):
    """Copied from https://github.com/hehao13/CameraCtrl/blob/main/inference.py"""
    # c2w: B, V, 4, 4
    # K: B, V, 4

    B = K.shape[0]

    j, i = torch.meshgrid(
        torch.linspace(0, H - 1, H, device=device, dtype=c2w.dtype),
        torch.linspace(0, W - 1, W, device=device, dtype=c2w.dtype),
        indexing="ij",
    )
    i = i.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5  # [B, HxW]
    j = j.reshape([1, 1, H * W]).expand([B, 1, H * W]) + 0.5  # [B, HxW]

    fx, fy, cx, cy = K.chunk(4, dim=-1)  # B,V, 1

    zs = torch.ones_like(i)  # [B, HxW]
    xs = (i - cx) / fx * zs
    ys = (j - cy) / fy * zs
    zs = zs.expand_as(ys)

    directions = torch.stack((xs, ys, zs), dim=-1)  # B, V, HW, 3
    directions = directions / directions.norm(dim=-1, keepdim=True)  # B, V, HW, 3

    rays_d = directions @ c2w[..., :3, :3].transpose(-1, -2)  # B, V, 3, HW
    rays_o = c2w[..., :3, 3]  # B, V, 3
    rays_o = rays_o[:, :, None].expand_as(rays_d)  # B, V, 3, HW
    # c2w @ directions
    rays_dxo = torch.cross(rays_o, rays_d)
    plucker = torch.cat([rays_dxo, rays_d], dim=-1)
    plucker = plucker.reshape(B, c2w.shape[1], H, W, 6)  # B, V, H, W, 6
    # plucker = plucker.permute(0, 1, 4, 2, 3)
    return plucker


def get_camera_motion(angle, T, speed, n=81):
    def compute_R_form_rad_angle(angles):
        theta_x, theta_y, theta_z = angles
        Rx = np.array([[1, 0, 0], [0, np.cos(theta_x), -np.sin(theta_x)], [0, np.sin(theta_x), np.cos(theta_x)]])

        Ry = np.array([[np.cos(theta_y), 0, np.sin(theta_y)], [0, 1, 0], [-np.sin(theta_y), 0, np.cos(theta_y)]])

        Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0], [np.sin(theta_z), np.cos(theta_z), 0], [0, 0, 1]])

        R = np.dot(Rz, np.dot(Ry, Rx))
        return R

    RT = []
    for i in range(n):
        _angle = (i / n) * speed * (CAMERA_DICT["base_angle"]) * angle
        R = compute_R_form_rad_angle(_angle)
        _T = (i / n) * speed * (CAMERA_DICT["base_T_norm"]) * (T.reshape(3, 1))
        _RT = np.concatenate([R, _T], axis=1)
        RT.append(_RT)
    RT = np.stack(RT)
    return RT


class WanCameraEmbedding(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanCameraEmbedding_V3",
            category="camera",
            inputs=[
                io.Combo.Input(
                    "camera_pose",
                    options=[
                        "Static",
                        "Pan Up",
                        "Pan Down",
                        "Pan Left",
                        "Pan Right",
                        "Zoom In",
                        "Zoom Out",
                        "Anti Clockwise (ACW)",
                        "ClockWise (CW)",
                    ],
                    default="Static",
                ),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Float.Input("speed", default=1.0, min=0, max=10.0, step=0.1, optional=True),
                io.Float.Input("fx", default=0.5, min=0, max=1, step=0.000000001, optional=True),
                io.Float.Input("fy", default=0.5, min=0, max=1, step=0.000000001, optional=True),
                io.Float.Input("cx", default=0.5, min=0, max=1, step=0.01, optional=True),
                io.Float.Input("cy", default=0.5, min=0, max=1, step=0.01, optional=True),
            ],
            outputs=[
                io.WanCameraEmbedding.Output(display_name="camera_embedding"),
                io.Int.Output(display_name="width"),
                io.Int.Output(display_name="height"),
                io.Int.Output(display_name="length"),
            ],
        )

    @classmethod
    def execute(cls, camera_pose, width, height, length, speed=1.0, fx=0.5, fy=0.5, cx=0.5, cy=0.5) -> io.NodeOutput:
        """
        Use camera trajectory as extrinsic parameters to calculate Plücker embeddings (Sitzmann et al., 2021).
        Adapted from https://github.com/aigc-apps/VideoX-Fun/blob/main/comfyui/comfyui_nodes.py
        """
        motion_list = [camera_pose]
        angle = np.array(CAMERA_DICT[motion_list[0]]["angle"])
        T = np.array(CAMERA_DICT[motion_list[0]]["T"])
        RT = get_camera_motion(angle, T, speed, length)

        trajs = []
        for cp in RT.tolist():
            traj = [fx, fy, cx, cy, 0, 0]
            traj.extend(cp[0])
            traj.extend(cp[1])
            traj.extend(cp[2])
            traj.extend([0, 0, 0, 1])
            trajs.append(traj)

        cam_params = np.array([[float(x) for x in pose] for pose in trajs])
        cam_params = np.concatenate([np.zeros_like(cam_params[:, :1]), cam_params], 1)
        control_camera_video = process_pose_params(cam_params, width=width, height=height)
        control_camera_video = control_camera_video.permute([3, 0, 1, 2]).unsqueeze(0).to(device=comfy.model_management.intermediate_device())

        control_camera_video = torch.concat(
            [torch.repeat_interleave(control_camera_video[:, :, 0:1], repeats=4, dim=2), control_camera_video[:, :, 1:]], dim=2
        ).transpose(1, 2)

        # Reshape, transpose, and view into desired shape
        b, f, c, h, w = control_camera_video.shape
        control_camera_video = control_camera_video.contiguous().view(b, f // 4, 4, c, h, w).transpose(2, 3)
        control_camera_video = control_camera_video.contiguous().view(b, f // 4, c * 4, h, w).transpose(1, 2)

        return io.NodeOutput(control_camera_video, width, height, length)


NODES_LIST: list[type[io.ComfyNode]] = [
    WanCameraEmbedding,
]
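get_camera_motion ramps both the Euler angles and the translation linearly over n frames, scaled by base_angle and base_T_norm. A minimal numpy check for the "Pan Up" preset, where the rotation stays the identity and only the translation column grows (mirroring, not importing, the function above):

import numpy as np

n, speed, base_T_norm = 81, 1.0, 1.5
T = np.array([0.0, -1.0, 0.0])  # "Pan Up" translation direction
RT = np.stack([
    np.concatenate([np.eye(3), (i / n) * speed * base_T_norm * T.reshape(3, 1)], axis=1)
    for i in range(n)
])
print(RT.shape)      # (81, 3, 4) — one [R|T] extrinsic per frame
print(RT[-1][:, 3])  # [ 0. -1.4815  0. ] — translation approaches base_T_norm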
32  comfy_extras/v3/nodes_canny.py  Normal file
@@ -0,0 +1,32 @@
from __future__ import annotations

from kornia.filters import canny

import comfy.model_management
from comfy_api.latest import io


class Canny(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="Canny_V3",
            category="image/preprocessors",
            inputs=[
                io.Image.Input("image"),
                io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),
                io.Float.Input("high_threshold", default=0.8, min=0.01, max=0.99, step=0.01),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, low_threshold, high_threshold) -> io.NodeOutput:
        output = canny(image.to(comfy.model_management.get_torch_device()).movedim(-1, 1), low_threshold, high_threshold)
        img_out = output[1].to(comfy.model_management.intermediate_device()).repeat(1, 3, 1, 1).movedim(1, -1)
        return io.NodeOutput(img_out)


NODES_LIST: list[type[io.ComfyNode]] = [
    Canny,
]
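Canny_V3 is a thin wrapper over kornia.filters.canny, which (to my reading of the kornia API) returns a (magnitude, edges) pair of B x 1 x H x W tensors; the node keeps the binary edges and broadcasts them to three channels. A sketch with a random stand-in image:

import torch
from kornia.filters import canny

img = torch.rand(1, 3, 64, 64)  # stand-in batch, channels-first as the node arranges it
magnitude, edges = canny(img, low_threshold=0.4, high_threshold=0.8)
print(magnitude.shape, edges.shape)  # both torch.Size([1, 1, 64, 64])
img_out = edges.repeat(1, 3, 1, 1).movedim(1, -1)  # mirrors the node's output layout
print(img_out.shape)  # torch.Size([1, 64, 64, 3])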
89  comfy_extras/v3/nodes_cfg.py  Normal file
@@ -0,0 +1,89 @@
from __future__ import annotations

import torch

from comfy_api.latest import io


# https://github.com/WeichenFan/CFG-Zero-star
def optimized_scale(positive, negative):
    positive_flat = positive.reshape(positive.shape[0], -1)
    negative_flat = negative.reshape(negative.shape[0], -1)

    # Calculate dot product
    dot_product = torch.sum(positive_flat * negative_flat, dim=1, keepdim=True)

    # Squared norm of the unconditional prediction
    squared_norm = torch.sum(negative_flat ** 2, dim=1, keepdim=True) + 1e-8

    # st_star = v_cond^T * v_uncond / ||v_uncond||^2
    st_star = dot_product / squared_norm

    return st_star.reshape([positive.shape[0]] + [1] * (positive.ndim - 1))


class CFGZeroStar(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="CFGZeroStar_V3",
            category="advanced/guidance",
            inputs=[
                io.Model.Input("model"),
            ],
            outputs=[io.Model.Output(display_name="patched_model")],
        )

    @classmethod
    def execute(cls, model) -> io.NodeOutput:
        m = model.clone()

        def cfg_zero_star(args):
            guidance_scale = args['cond_scale']
            x = args['input']
            cond_p = args['cond_denoised']
            uncond_p = args['uncond_denoised']
            out = args["denoised"]
            alpha = optimized_scale(x - cond_p, x - uncond_p)

            return out + uncond_p * (alpha - 1.0) + guidance_scale * uncond_p * (1.0 - alpha)

        m.set_model_sampler_post_cfg_function(cfg_zero_star)
        return io.NodeOutput(m)


class CFGNorm(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="CFGNorm_V3",
            category="advanced/guidance",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("strength", default=1.0, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[io.Model.Output(display_name="patched_model")],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, strength) -> io.NodeOutput:
        m = model.clone()

        def cfg_norm(args):
            cond_p = args['cond_denoised']
            pred_text_ = args["denoised"]

            norm_full_cond = torch.norm(cond_p, dim=1, keepdim=True)
            norm_pred_text = torch.norm(pred_text_, dim=1, keepdim=True)
            scale = (norm_full_cond / (norm_pred_text + 1e-8)).clamp(min=0.0, max=1.0)
            return pred_text_ * scale * strength

        m.set_model_sampler_post_cfg_function(cfg_norm)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    CFGNorm,
    CFGZeroStar,
]
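optimized_scale computes, per batch element, the projection coefficient st_star = <positive, negative> / ||negative||^2 over the flattened tensors. A standalone numeric check (re-declaring the function from the diff above):

import torch

def optimized_scale(positive, negative):  # as in the diff above
    positive_flat = positive.reshape(positive.shape[0], -1)
    negative_flat = negative.reshape(negative.shape[0], -1)
    dot_product = torch.sum(positive_flat * negative_flat, dim=1, keepdim=True)
    squared_norm = torch.sum(negative_flat ** 2, dim=1, keepdim=True) + 1e-8
    return (dot_product / squared_norm).reshape([positive.shape[0]] + [1] * (positive.ndim - 1))

positive = torch.randn(2, 4, 8, 8)
negative = torch.randn(2, 4, 8, 8)
expected = (positive.reshape(2, -1) * negative.reshape(2, -1)).sum(1) / negative.reshape(2, -1).pow(2).sum(1)
print(torch.allclose(optimized_scale(positive, negative).flatten(), expected, atol=1e-4))  # True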
79  comfy_extras/v3/nodes_clip_sdxl.py  Normal file
@@ -0,0 +1,79 @@
from __future__ import annotations

import nodes
from comfy_api.latest import io


class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeSDXLRefiner_V3",
            category="advanced/conditioning",
            inputs=[
                io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
                io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.String.Input("text", multiline=True, dynamic_prompts=True),
                io.Clip.Input("clip"),
            ],
            outputs=[io.Conditioning.Output()],
        )

    @classmethod
    def execute(cls, ascore, width, height, text, clip) -> io.NodeOutput:
        tokens = clip.tokenize(text)
        conditioning = clip.encode_from_tokens_scheduled(
            tokens, add_dict={"aesthetic_score": ascore, "width": width, "height": height}
        )
        return io.NodeOutput(conditioning)


class CLIPTextEncodeSDXL(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeSDXL_V3",
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("crop_w", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("crop_h", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("target_width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("target_height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.String.Input("text_g", multiline=True, dynamic_prompts=True),
                io.String.Input("text_l", multiline=True, dynamic_prompts=True),
            ],
            outputs=[io.Conditioning.Output()],
        )

    @classmethod
    def execute(cls, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l) -> io.NodeOutput:
        tokens = clip.tokenize(text_g)
        tokens["l"] = clip.tokenize(text_l)["l"]
        if len(tokens["l"]) != len(tokens["g"]):
            empty = clip.tokenize("")
            while len(tokens["l"]) < len(tokens["g"]):
                tokens["l"] += empty["l"]
            while len(tokens["l"]) > len(tokens["g"]):
                tokens["g"] += empty["g"]
        conditioning = clip.encode_from_tokens_scheduled(
            tokens,
            add_dict={
                "width": width,
                "height": height,
                "crop_w": crop_w,
                "crop_h": crop_h,
                "target_width": target_width,
                "target_height": target_height,
            },
        )
        return io.NodeOutput(conditioning)


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodeSDXL,
    CLIPTextEncodeSDXLRefiner,
]
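CLIPTextEncodeSDXL tokenizes text_g and text_l separately, then pads the shorter stream with empty-prompt tokens so both have the same number of chunks before the scheduled encode. The same balancing logic on plain lists, with stand-in chunk values:

tokens_g = [["g-chunk-1"], ["g-chunk-2"], ["g-chunk-3"]]  # stand-ins for tokenizer output
tokens_l = [["l-chunk-1"]]
empty_g, empty_l = [["empty-g"]], [["empty-l"]]           # stand-ins for clip.tokenize("")

while len(tokens_l) < len(tokens_g):
    tokens_l += empty_l
while len(tokens_l) > len(tokens_g):
    tokens_g += empty_g

print(len(tokens_g), len(tokens_l))  # 3 3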
226  comfy_extras/v3/nodes_compositing.py  Normal file
@@ -0,0 +1,226 @@
from __future__ import annotations

from enum import Enum

import torch

import comfy.utils
from comfy_api.latest import io


def resize_mask(mask, shape):
    return torch.nn.functional.interpolate(
        mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[0], shape[1]), mode="bilinear"
    ).squeeze(1)


class PorterDuffMode(Enum):
    ADD = 0
    CLEAR = 1
    DARKEN = 2
    DST = 3
    DST_ATOP = 4
    DST_IN = 5
    DST_OUT = 6
    DST_OVER = 7
    LIGHTEN = 8
    MULTIPLY = 9
    OVERLAY = 10
    SCREEN = 11
    SRC = 12
    SRC_ATOP = 13
    SRC_IN = 14
    SRC_OUT = 15
    SRC_OVER = 16
    XOR = 17


def porter_duff_composite(
    src_image: torch.Tensor, src_alpha: torch.Tensor, dst_image: torch.Tensor, dst_alpha: torch.Tensor, mode: PorterDuffMode
):
    # convert mask to alpha
    src_alpha = 1 - src_alpha
    dst_alpha = 1 - dst_alpha
    # premultiply alpha
    src_image = src_image * src_alpha
    dst_image = dst_image * dst_alpha

    # composite ops below assume alpha-premultiplied images
    if mode == PorterDuffMode.ADD:
        out_alpha = torch.clamp(src_alpha + dst_alpha, 0, 1)
        out_image = torch.clamp(src_image + dst_image, 0, 1)
    elif mode == PorterDuffMode.CLEAR:
        out_alpha = torch.zeros_like(dst_alpha)
        out_image = torch.zeros_like(dst_image)
    elif mode == PorterDuffMode.DARKEN:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.min(src_image, dst_image)
    elif mode == PorterDuffMode.DST:
        out_alpha = dst_alpha
        out_image = dst_image
    elif mode == PorterDuffMode.DST_ATOP:
        out_alpha = src_alpha
        out_image = src_alpha * dst_image + (1 - dst_alpha) * src_image
    elif mode == PorterDuffMode.DST_IN:
        out_alpha = src_alpha * dst_alpha
        out_image = dst_image * src_alpha
    elif mode == PorterDuffMode.DST_OUT:
        out_alpha = (1 - src_alpha) * dst_alpha
        out_image = (1 - src_alpha) * dst_image
    elif mode == PorterDuffMode.DST_OVER:
        out_alpha = dst_alpha + (1 - dst_alpha) * src_alpha
        out_image = dst_image + (1 - dst_alpha) * src_image
    elif mode == PorterDuffMode.LIGHTEN:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.max(src_image, dst_image)
    elif mode == PorterDuffMode.MULTIPLY:
        out_alpha = src_alpha * dst_alpha
        out_image = src_image * dst_image
    elif mode == PorterDuffMode.OVERLAY:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = torch.where(2 * dst_image < dst_alpha, 2 * src_image * dst_image,
                                src_alpha * dst_alpha - 2 * (dst_alpha - src_image) * (src_alpha - dst_image))
    elif mode == PorterDuffMode.SCREEN:
        out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha
        out_image = src_image + dst_image - src_image * dst_image
    elif mode == PorterDuffMode.SRC:
        out_alpha = src_alpha
        out_image = src_image
    elif mode == PorterDuffMode.SRC_ATOP:
        out_alpha = dst_alpha
        out_image = dst_alpha * src_image + (1 - src_alpha) * dst_image
    elif mode == PorterDuffMode.SRC_IN:
        out_alpha = src_alpha * dst_alpha
        out_image = src_image * dst_alpha
    elif mode == PorterDuffMode.SRC_OUT:
        out_alpha = (1 - dst_alpha) * src_alpha
        out_image = (1 - dst_alpha) * src_image
    elif mode == PorterDuffMode.SRC_OVER:
        out_alpha = src_alpha + (1 - src_alpha) * dst_alpha
        out_image = src_image + (1 - src_alpha) * dst_image
    elif mode == PorterDuffMode.XOR:
        out_alpha = (1 - dst_alpha) * src_alpha + (1 - src_alpha) * dst_alpha
        out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image
    else:
        return None, None

    # back to non-premultiplied alpha
    out_image = torch.where(out_alpha > 1e-5, out_image / out_alpha, torch.zeros_like(out_image))
    out_image = torch.clamp(out_image, 0, 1)
    # convert alpha to mask
    out_alpha = 1 - out_alpha
    return out_image, out_alpha


class PorterDuffImageComposite(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PorterDuffImageComposite_V3",
            display_name="Porter-Duff Image Composite _V3",
            category="mask/compositing",
            inputs=[
                io.Image.Input("source"),
                io.Mask.Input("source_alpha"),
                io.Image.Input("destination"),
                io.Mask.Input("destination_alpha"),
                io.Combo.Input("mode", options=[mode.name for mode in PorterDuffMode], default=PorterDuffMode.DST.name),
            ],
            outputs=[io.Image.Output(), io.Mask.Output()],
        )

    @classmethod
    def execute(
        cls, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode
    ) -> io.NodeOutput:
        batch_size = min(len(source), len(source_alpha), len(destination), len(destination_alpha))
        out_images = []
        out_alphas = []

        for i in range(batch_size):
            src_image = source[i]
            dst_image = destination[i]

            assert src_image.shape[2] == dst_image.shape[2]  # inputs need to have same number of channels

            src_alpha = source_alpha[i].unsqueeze(2)
            dst_alpha = destination_alpha[i].unsqueeze(2)

            if dst_alpha.shape[:2] != dst_image.shape[:2]:
                upscale_input = dst_alpha.unsqueeze(0).permute(0, 3, 1, 2)
                upscale_output = comfy.utils.common_upscale(
                    upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center'
                )
                dst_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0)
            if src_image.shape != dst_image.shape:
                upscale_input = src_image.unsqueeze(0).permute(0, 3, 1, 2)
                upscale_output = comfy.utils.common_upscale(
                    upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center'
                )
                src_image = upscale_output.permute(0, 2, 3, 1).squeeze(0)
            if src_alpha.shape != dst_alpha.shape:
                upscale_input = src_alpha.unsqueeze(0).permute(0, 3, 1, 2)
                upscale_output = comfy.utils.common_upscale(
                    upscale_input, dst_alpha.shape[1], dst_alpha.shape[0], upscale_method='bicubic', crop='center'
                )
                src_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0)

            out_image, out_alpha = porter_duff_composite(src_image, src_alpha, dst_image, dst_alpha, PorterDuffMode[mode])

            out_images.append(out_image)
            out_alphas.append(out_alpha.squeeze(2))

        return io.NodeOutput(torch.stack(out_images), torch.stack(out_alphas))


class SplitImageWithAlpha(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SplitImageWithAlpha_V3",
            display_name="Split Image with Alpha _V3",
            category="mask/compositing",
            inputs=[
                io.Image.Input("image"),
            ],
            outputs=[io.Image.Output(), io.Mask.Output()],
        )

    @classmethod
    def execute(cls, image: torch.Tensor) -> io.NodeOutput:
        out_images = [i[:, :, :3] for i in image]
        out_alphas = [i[:, :, 3] if i.shape[2] > 3 else torch.ones_like(i[:, :, 0]) for i in image]
        return io.NodeOutput(torch.stack(out_images), 1.0 - torch.stack(out_alphas))


class JoinImageWithAlpha(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="JoinImageWithAlpha_V3",
            display_name="Join Image with Alpha _V3",
            category="mask/compositing",
            inputs=[
                io.Image.Input("image"),
                io.Mask.Input("alpha"),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image: torch.Tensor, alpha: torch.Tensor) -> io.NodeOutput:
        batch_size = min(len(image), len(alpha))
        out_images = []

        alpha = 1.0 - resize_mask(alpha, image.shape[1:])
        for i in range(batch_size):
            out_images.append(torch.cat((image[i][:, :, :3], alpha[i].unsqueeze(2)), dim=2))

        return io.NodeOutput(torch.stack(out_images))


NODES_LIST: list[type[io.ComfyNode]] = [
    JoinImageWithAlpha,
    PorterDuffImageComposite,
    SplitImageWithAlpha,
]
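porter_duff_composite first inverts ComfyUI masks into alphas and premultiplies the images; the per-mode formulas are then the classic Porter-Duff algebra. A toy check of SRC_OVER with uniform values: a 50%-opaque white source over a fully opaque black destination should yield alpha 1.0 and a mid-gray result:

import torch

src_alpha = torch.tensor(0.5)   # already converted from mask (1 - mask)
dst_alpha = torch.tensor(1.0)
src_image = torch.tensor(1.0) * src_alpha  # premultiplied white source
dst_image = torch.tensor(0.0) * dst_alpha  # premultiplied black destination

out_alpha = src_alpha + (1 - src_alpha) * dst_alpha
out_image = src_image + (1 - src_alpha) * dst_image
print(out_alpha.item(), (out_image / out_alpha).item())  # 1.0 0.5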
60  comfy_extras/v3/nodes_cond.py  Normal file
@@ -0,0 +1,60 @@
from __future__ import annotations

from comfy_api.latest import io


class CLIPTextEncodeControlnet(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="CLIPTextEncodeControlnet_V3",
            category="_for_testing/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.Conditioning.Input("conditioning"),
                io.String.Input("text", multiline=True, dynamic_prompts=True),
            ],
            outputs=[io.Conditioning.Output()],
        )

    @classmethod
    def execute(cls, clip, conditioning, text) -> io.NodeOutput:
        tokens = clip.tokenize(text)
        cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
        c = []
        for t in conditioning:
            n = [t[0], t[1].copy()]
            n[1]['cross_attn_controlnet'] = cond
            n[1]['pooled_output_controlnet'] = pooled
            c.append(n)
        return io.NodeOutput(c)


class T5TokenizerOptions(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="T5TokenizerOptions_V3",
            category="_for_testing/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.Int.Input("min_padding", default=0, min=0, max=10000, step=1),
                io.Int.Input("min_length", default=0, min=0, max=10000, step=1),
            ],
            outputs=[io.Clip.Output()],
        )

    @classmethod
    def execute(cls, clip, min_padding, min_length) -> io.NodeOutput:
        clip = clip.clone()
        for t5_type in ["t5xxl", "pile_t5xl", "t5base", "mt5xl", "umt5xxl"]:
            clip.set_tokenizer_option("{}_min_padding".format(t5_type), min_padding)
            clip.set_tokenizer_option("{}_min_length".format(t5_type), min_length)

        return io.NodeOutput(clip)


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodeControlnet,
    T5TokenizerOptions,
]
141  comfy_extras/v3/nodes_controlnet.py  Normal file
@@ -0,0 +1,141 @@
import comfy.utils
from comfy.cldm.control_types import UNION_CONTROLNET_TYPES
from comfy_api.latest import io


class SetUnionControlNetType(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SetUnionControlNetType_V3",
            category="conditioning/controlnet",
            inputs=[
                io.ControlNet.Input("control_net"),
                io.Combo.Input("type", options=["auto"] + list(UNION_CONTROLNET_TYPES.keys())),
            ],
            outputs=[
                io.ControlNet.Output(),
            ],
        )

    @classmethod
    def execute(cls, control_net, type) -> io.NodeOutput:
        control_net = control_net.copy()
        type_number = UNION_CONTROLNET_TYPES.get(type, -1)
        if type_number >= 0:
            control_net.set_extra_arg("control_type", [type_number])
        else:
            control_net.set_extra_arg("control_type", [])

        return io.NodeOutput(control_net)


class ControlNetApplyAdvanced(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ControlNetApplyAdvanced_V3",
            display_name="Apply ControlNet _V3",
            category="conditioning/controlnet",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.ControlNet.Input("control_net"),
                io.Image.Input("image"),
                io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001),
                io.Vae.Input("vae", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
            ],
        )

    @classmethod
    def execute(
        cls, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None, extra_concat=[]
    ) -> io.NodeOutput:
        if strength == 0:
            return io.NodeOutput(positive, negative)

        control_hint = image.movedim(-1, 1)
        cnets = {}

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()

                prev_cnet = d.get("control", None)
                if prev_cnet in cnets:
                    c_net = cnets[prev_cnet]
                else:
                    c_net = control_net.copy().set_cond_hint(
                        control_hint, strength, (start_percent, end_percent), vae=vae, extra_concat=extra_concat
                    )
                    c_net.set_previous_controlnet(prev_cnet)
                    cnets[prev_cnet] = c_net

                d["control"] = c_net
                d["control_apply_to_uncond"] = False
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return io.NodeOutput(out[0], out[1])


class ControlNetInpaintingAliMamaApply(ControlNetApplyAdvanced):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ControlNetInpaintingAliMamaApply_V3",
            category="conditioning/controlnet",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.ControlNet.Input("control_net"),
                io.Vae.Input("vae"),
                io.Image.Input("image"),
                io.Mask.Input("mask"),
                io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
            ],
        )

    @classmethod
    def execute(
        cls, positive, negative, control_net, vae, image, mask, strength, start_percent, end_percent
    ) -> io.NodeOutput:
        extra_concat = []
        if control_net.concat_mask:
            mask = 1.0 - mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
            mask_apply = comfy.utils.common_upscale(mask, image.shape[2], image.shape[1], "bilinear", "center").round()
            image = image * mask_apply.movedim(1, -1).repeat(1, 1, 1, image.shape[3])
            extra_concat = [mask]

        return super().execute(
            positive,
            negative,
            control_net,
            image,
            strength,
            start_percent,
            end_percent,
            vae=vae,
            extra_concat=extra_concat,
        )


NODES_LIST: list[type[io.ComfyNode]] = [
    ControlNetApplyAdvanced,
    SetUnionControlNetType,
    ControlNetInpaintingAliMamaApply,
]
146  comfy_extras/v3/nodes_cosmos.py  Normal file
@@ -0,0 +1,146 @@
from __future__ import annotations

import torch

import comfy.latent_formats
import comfy.model_management
import comfy.utils
import nodes
from comfy_api.latest import io


class EmptyCosmosLatentVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="EmptyCosmosLatentVideo_V3",
            category="latent/video",
            inputs=[
                io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, width, height, length, batch_size) -> io.NodeOutput:
        latent = torch.zeros(
            [batch_size, 16, ((length - 1) // 8) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device()
        )
        return io.NodeOutput({"samples": latent})


def vae_encode_with_padding(vae, image, width, height, length, padding=0):
    pixels = comfy.utils.common_upscale(image[..., :3].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
    pixel_len = min(pixels.shape[0], length)
    padded_length = min(length, (((pixel_len - 1) // 8) + 1 + padding) * 8 - 7)
    padded_pixels = torch.ones((padded_length, height, width, 3)) * 0.5
    padded_pixels[:pixel_len] = pixels[:pixel_len]
    latent_len = ((pixel_len - 1) // 8) + 1
    latent_temp = vae.encode(padded_pixels)
    return latent_temp[:, :, :latent_len]


class CosmosImageToVideoLatent(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="CosmosImageToVideoLatent_V3",
            category="conditioning/inpaint",
            inputs=[
                io.Vae.Input("vae"),
                io.Int.Input("width", default=1280, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=704, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Image.Input("start_image", optional=True),
                io.Image.Input("end_image", optional=True),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, vae, width, height, length, batch_size, start_image=None, end_image=None) -> io.NodeOutput:
        latent = torch.zeros([1, 16, ((length - 1) // 8) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        if start_image is None and end_image is None:
            out_latent = {}
            out_latent["samples"] = latent
            return io.NodeOutput(out_latent)

        mask = torch.ones(
            [latent.shape[0], 1, ((length - 1) // 8) + 1, latent.shape[-2], latent.shape[-1]],
            device=comfy.model_management.intermediate_device(),
        )

        if start_image is not None:
            latent_temp = vae_encode_with_padding(vae, start_image, width, height, length, padding=1)
            latent[:, :, :latent_temp.shape[-3]] = latent_temp
            mask[:, :, :latent_temp.shape[-3]] *= 0.0

        if end_image is not None:
            latent_temp = vae_encode_with_padding(vae, end_image, width, height, length, padding=0)
            latent[:, :, -latent_temp.shape[-3]:] = latent_temp
            mask[:, :, -latent_temp.shape[-3]:] *= 0.0

        out_latent = {}
        out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1))
        out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1))
        return io.NodeOutput(out_latent)


class CosmosPredict2ImageToVideoLatent(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="CosmosPredict2ImageToVideoLatent_V3",
            category="conditioning/inpaint",
            inputs=[
                io.Vae.Input("vae"),
                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=93, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Image.Input("start_image", optional=True),
                io.Image.Input("end_image", optional=True),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, vae, width, height, length, batch_size, start_image=None, end_image=None) -> io.NodeOutput:
        latent = torch.zeros([1, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        if start_image is None and end_image is None:
            out_latent = {}
            out_latent["samples"] = latent
            return io.NodeOutput(out_latent)

        mask = torch.ones(
            [latent.shape[0], 1, ((length - 1) // 4) + 1, latent.shape[-2], latent.shape[-1]],
            device=comfy.model_management.intermediate_device(),
        )

        if start_image is not None:
            latent_temp = vae_encode_with_padding(vae, start_image, width, height, length, padding=1)
            latent[:, :, :latent_temp.shape[-3]] = latent_temp
            mask[:, :, :latent_temp.shape[-3]] *= 0.0

        if end_image is not None:
            latent_temp = vae_encode_with_padding(vae, end_image, width, height, length, padding=0)
            latent[:, :, -latent_temp.shape[-3]:] = latent_temp
            mask[:, :, -latent_temp.shape[-3]:] *= 0.0

        out_latent = {}
        latent_format = comfy.latent_formats.Wan21()
        latent = latent_format.process_out(latent) * mask + latent * (1.0 - mask)
        out_latent["samples"] = latent.repeat((batch_size, ) + (1,) * (latent.ndim - 1))
        out_latent["noise_mask"] = mask.repeat((batch_size, ) + (1,) * (mask.ndim - 1))
        return io.NodeOutput(out_latent)


NODES_LIST: list[type[io.ComfyNode]] = [
    CosmosImageToVideoLatent,
    CosmosPredict2ImageToVideoLatent,
    EmptyCosmosLatentVideo,
]
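Both Cosmos latent nodes derive the latent frame count as ((length - 1) // 8) + 1, an 8x temporal compression with the first frame kept whole, while the Predict2 variant uses a factor of 4. Checking the defaults:

def latent_frames(length: int, factor: int) -> int:
    return ((length - 1) // factor) + 1

print(latent_frames(121, 8))  # 16 latent frames for the 121-frame default
print(latent_frames(93, 4))   # 24 latent frames for CosmosPredict2's 93-frame default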
1035  comfy_extras/v3/nodes_custom_sampler.py  Normal file
File diff suppressed because it is too large
50  comfy_extras/v3/nodes_differential_diffusion.py  Normal file
@@ -0,0 +1,50 @@
from __future__ import annotations

import torch

from comfy_api.latest import io


class DifferentialDiffusion(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="DifferentialDiffusion_V3",
            display_name="Differential Diffusion _V3",
            category="_for_testing",
            inputs=[
                io.Model.Input("model"),
            ],
            outputs=[
                io.Model.Output(),
            ],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model):
        model = model.clone()
        model.set_model_denoise_mask_function(cls.forward)
        return io.NodeOutput(model)

    @classmethod
    def forward(cls, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict):
        model = extra_options["model"]
        step_sigmas = extra_options["sigmas"]
        sigma_to = model.inner_model.model_sampling.sigma_min
        if step_sigmas[-1] > sigma_to:
            sigma_to = step_sigmas[-1]
        sigma_from = step_sigmas[0]

        ts_from = model.inner_model.model_sampling.timestep(sigma_from)
        ts_to = model.inner_model.model_sampling.timestep(sigma_to)
        current_ts = model.inner_model.model_sampling.timestep(sigma[0])

        threshold = (current_ts - ts_to) / (ts_from - ts_to)

        return (denoise_mask >= threshold).to(denoise_mask.dtype)


NODES_LIST: list[type[io.ComfyNode]] = [
    DifferentialDiffusion,
]
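forward() turns the soft denoise mask into a hard mask whose coverage grows as sampling proceeds: threshold starts near 1 at the first step and falls toward 0 at the last, so high-mask pixels are rewritten for the whole run and low-mask pixels only unlock near the end. A toy illustration with made-up threshold values, not ComfyUI's actual sigma/timestep schedules:

import torch

denoise_mask = torch.tensor([0.1, 0.4, 0.7, 1.0])  # per-pixel change strengths
for threshold in (1.0, 0.6, 0.3, 0.0):              # stand-in for (t - t_to) / (t_from - t_to)
    print(threshold, (denoise_mask >= threshold).to(denoise_mask.dtype).tolist())
# 1.0 [0.0, 0.0, 0.0, 1.0]
# 0.6 [0.0, 0.0, 1.0, 1.0]
# 0.3 [0.0, 1.0, 1.0, 1.0]
# 0.0 [1.0, 1.0, 1.0, 1.0]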
34  comfy_extras/v3/nodes_edit_model.py  Normal file
@@ -0,0 +1,34 @@
from __future__ import annotations

import node_helpers
from comfy_api.latest import io


class ReferenceLatent(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ReferenceLatent_V3",
            category="advanced/conditioning/edit_models",
            description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Latent.Input("latent", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ]
        )

    @classmethod
    def execute(cls, conditioning, latent=None):
        if latent is not None:
            conditioning = node_helpers.conditioning_set_values(
                conditioning, {"reference_latents": [latent["samples"]]}, append=True
            )
        return io.NodeOutput(conditioning)


NODES_LIST: list[type[io.ComfyNode]] = [
    ReferenceLatent,
]
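Chaining works because append=True accumulates values instead of overwriting them. A simplified dict-based stand-in for that semantics (the real conditioning structure handled by node_helpers is more involved; this is only an assumed illustration):

def set_values(cond, values, append=False):
    out = dict(cond)
    for key, value in values.items():
        if append and key in out:
            out[key] = out[key] + value  # accumulate across chained nodes
        else:
            out[key] = value
    return out

cond = {}
cond = set_values(cond, {"reference_latents": ["latent_A"]}, append=True)
cond = set_values(cond, {"reference_latents": ["latent_B"]}, append=True)
print(cond["reference_latents"])  # ['latent_A', 'latent_B']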
122  comfy_extras/v3/nodes_flux.py  Normal file
@@ -0,0 +1,122 @@
from __future__ import annotations

import comfy.utils
import node_helpers
from comfy_api.latest import io

PREFERED_KONTEXT_RESOLUTIONS = [
    (672, 1568),
    (688, 1504),
    (720, 1456),
    (752, 1392),
    (800, 1328),
    (832, 1248),
    (880, 1184),
    (944, 1104),
    (1024, 1024),
    (1104, 944),
    (1184, 880),
    (1248, 832),
    (1328, 800),
    (1392, 752),
    (1456, 720),
    (1504, 688),
    (1568, 672),
]


class CLIPTextEncodeFlux(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeFlux_V3",
            category="advanced/conditioning/flux",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
                io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
                io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, clip_l, t5xxl, guidance):
        tokens = clip.tokenize(clip_l)
        tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]

        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}))


class FluxGuidance(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="FluxGuidance_V3",
            category="advanced/conditioning/flux",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, conditioning, guidance):
        c = node_helpers.conditioning_set_values(conditioning, {"guidance": guidance})
        return io.NodeOutput(c)


class FluxDisableGuidance(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="FluxDisableGuidance_V3",
            category="advanced/conditioning/flux",
            description="This node completely disables the guidance embed on Flux and Flux like models",
            inputs=[
                io.Conditioning.Input("conditioning"),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, conditioning):
        c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
        return io.NodeOutput(c)


class FluxKontextImageScale(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="FluxKontextImageScale_V3",
            category="advanced/conditioning/flux",
            description="This node resizes the image to one that is more optimal for flux kontext.",
            inputs=[
                io.Image.Input("image"),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image):
        width = image.shape[2]
        height = image.shape[1]
        aspect_ratio = width / height
        _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)
        image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1)
        return io.NodeOutput(image)


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodeFlux,
    FluxDisableGuidance,
    FluxGuidance,
    FluxKontextImageScale,
]
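FluxKontextImageScale picks the bucket whose aspect ratio is closest to the input before resizing. A standalone check of that selection logic (abridged resolution list):

PREFERED_KONTEXT_RESOLUTIONS = [(672, 1568), (1024, 1024), (1568, 672)]  # abridged

def nearest_bucket(width, height):
    aspect_ratio = width / height
    return min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS)[1:]

print(nearest_bucket(1920, 1080))  # (1568, 672), the closest bucket to 16:9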
131  comfy_extras/v3/nodes_freelunch.py  Normal file
@@ -0,0 +1,131 @@
#code originally taken from: https://github.com/ChenyangSi/FreeU (under MIT License)

from __future__ import annotations

import logging

import torch

from comfy_api.latest import io


def Fourier_filter(x, threshold, scale):
    # FFT
    x_freq = torch.fft.fftn(x.float(), dim=(-2, -1))
    x_freq = torch.fft.fftshift(x_freq, dim=(-2, -1))

    B, C, H, W = x_freq.shape
    mask = torch.ones((B, C, H, W), device=x.device)

    crow, ccol = H // 2, W // 2
    mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale
    x_freq = x_freq * mask

    # IFFT
    x_freq = torch.fft.ifftshift(x_freq, dim=(-2, -1))
    x_filtered = torch.fft.ifftn(x_freq, dim=(-2, -1)).real

    return x_filtered.to(x.dtype)


class FreeU(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="FreeU_V3",
            category="model_patches/unet",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01),
                io.Float.Input("b2", default=1.2, min=0.0, max=10.0, step=0.01),
                io.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01),
                io.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, b1, b2, s1, s2):
        model_channels = model.model.model_config.unet_config["model_channels"]
        scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)}
        on_cpu_devices = {}

        def output_block_patch(h, hsp, transformer_options):
            scale = scale_dict.get(int(h.shape[1]), None)
            if scale is not None:
                h[:, :h.shape[1] // 2] = h[:, :h.shape[1] // 2] * scale[0]
                if hsp.device not in on_cpu_devices:
                    try:
                        hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
                    except Exception:
                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
                        on_cpu_devices[hsp.device] = True
                        hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                else:
                    hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)

            return h, hsp

        m = model.clone()
        m.set_model_output_block_patch(output_block_patch)
        return io.NodeOutput(m)


class FreeU_V2(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="FreeU_V2_V3",
            category="model_patches/unet",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01),
                io.Float.Input("b2", default=1.4, min=0.0, max=10.0, step=0.01),
                io.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01),
                io.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, b1, b2, s1, s2):
        model_channels = model.model.model_config.unet_config["model_channels"]
        scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)}
        on_cpu_devices = {}

        def output_block_patch(h, hsp, transformer_options):
            scale = scale_dict.get(int(h.shape[1]), None)
            if scale is not None:
                hidden_mean = h.mean(1).unsqueeze(1)
                B = hidden_mean.shape[0]
                hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True)
                hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True)
                hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3)

                h[:, :h.shape[1] // 2] = h[:, :h.shape[1] // 2] * ((scale[0] - 1) * hidden_mean + 1)

                if hsp.device not in on_cpu_devices:
                    try:
                        hsp = Fourier_filter(hsp, threshold=1, scale=scale[1])
                    except Exception:
                        logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device))
                        on_cpu_devices[hsp.device] = True
                        hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)
                else:
                    hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device)

            return h, hsp

        m = model.clone()
        m.set_model_output_block_patch(output_block_patch)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    FreeU,
    FreeU_V2,
]
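With threshold=1, Fourier_filter rescales only the few bins around the (shifted) DC component, so scale=0 strips the global offset while leaving fine detail mostly intact. A quick sanity check, assuming the Fourier_filter defined above is in scope:

import torch

x = torch.randn(1, 4, 32, 32) + 5.0  # toy activation with a strong DC offset
y = Fourier_filter(x, threshold=1, scale=0.0)
print(round(x.mean().item(), 3), round(y.mean().item(), 3))  # ~5.0 -> ~0.0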
110  comfy_extras/v3/nodes_fresca.py  Normal file
@@ -0,0 +1,110 @@
# Code based on https://github.com/WikiChao/FreSca (MIT License)

from __future__ import annotations

import torch
import torch.fft as fft

from comfy_api.latest import io


def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
    """
    Apply frequency-dependent scaling to an image tensor using Fourier transforms.

    Parameters:
        x: Input tensor of shape (B, C, H, W)
        scale_low: Scaling factor for low-frequency components (default: 1.0)
        scale_high: Scaling factor for high-frequency components (default: 1.5)
        freq_cutoff: Number of frequency indices around center to consider as low-frequency (default: 20)

    Returns:
        x_filtered: Filtered version of x in spatial domain with frequency-specific scaling applied.
    """
    # Preserve input dtype and device
    dtype, device = x.dtype, x.device

    # Convert to float32 for FFT computations
    x = x.to(torch.float32)

    # 1) Apply FFT and shift low frequencies to center
    x_freq = fft.fftn(x, dim=(-2, -1))
    x_freq = fft.fftshift(x_freq, dim=(-2, -1))

    # 2) Initialize mask with high-frequency scaling factor
    mask = torch.ones(x_freq.shape, device=device) * scale_high
    m = mask
    for d in range(len(x_freq.shape) - 2):
        dim = d + 2
        cc = x_freq.shape[dim] // 2
        f_c = min(freq_cutoff, cc)
        m = m.narrow(dim, cc - f_c, f_c * 2)

    # Apply low-frequency scaling factor to center region
    m[:] = scale_low

    # 3) Apply frequency-specific scaling
    x_freq = x_freq * mask

    # 4) Convert back to spatial domain
    x_freq = fft.ifftshift(x_freq, dim=(-2, -1))
    x_filtered = fft.ifftn(x_freq, dim=(-2, -1)).real

    # 5) Restore original dtype
    x_filtered = x_filtered.to(dtype)

    return x_filtered


class FreSca(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="FreSca_V3",
            display_name="FreSca _V3",
            category="_for_testing",
            description="Applies frequency-dependent scaling to the guidance",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,
                               tooltip="Scaling factor for low-frequency components"),
                io.Float.Input("scale_high", default=1.25, min=0, max=10, step=0.01,
                               tooltip="Scaling factor for high-frequency components"),
                io.Int.Input("freq_cutoff", default=20, min=1, max=10000, step=1,
                             tooltip="Number of frequency indices around center to consider as low-frequency"),
            ],
            outputs=[
                io.Model.Output(),
            ],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, scale_low, scale_high, freq_cutoff):
        def custom_cfg_function(args):
            conds_out = args["conds_out"]
            if len(conds_out) <= 1 or None in args["conds"][:2]:
                return conds_out
            cond = conds_out[0]
            uncond = conds_out[1]

            guidance = cond - uncond
            filtered_guidance = Fourier_filter(
                guidance,
                scale_low=scale_low,
                scale_high=scale_high,
                freq_cutoff=freq_cutoff,
            )
            filtered_cond = filtered_guidance + uncond

            return [filtered_cond, uncond] + conds_out[2:]

        m = model.clone()
        m.set_model_sampler_pre_cfg_function(custom_cfg_function)

        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    FreSca,
]
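The narrow() loop above works because torch.Tensor.narrow returns a view: writing scale_low through the narrowed tensor mutates the center of the original mask in place. A small demonstration:

import torch

mask = torch.ones(1, 1, 8, 8) * 1.5  # scale_high everywhere
m = mask
for dim in (2, 3):  # the two spatial dims
    cc = mask.shape[dim] // 2
    f_c = min(2, cc)  # freq_cutoff = 2
    m = m.narrow(dim, cc - f_c, f_c * 2)
m[:] = 1.0  # scale_low lands in the 4x4 center block of mask
print(mask[0, 0])  # 1.0 in the center, 1.5 at the border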
376  comfy_extras/v3/nodes_gits.py  Normal file
@@ -0,0 +1,376 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from comfy_api.latest import io
|
||||
|
||||
|
||||
def loglinear_interp(t_steps, num_steps):
|
||||
"""Performs log-linear interpolation of a given array of decreasing numbers."""
|
||||
xs = np.linspace(0, 1, len(t_steps))
|
||||
ys = np.log(t_steps[::-1])
|
||||
|
||||
new_xs = np.linspace(0, 1, num_steps)
|
||||
new_ys = np.interp(new_xs, xs, ys)
|
||||
|
||||
return np.exp(new_ys)[::-1].copy()
|
||||
|
||||
|
||||
NOISE_LEVELS = {
|
||||
0.80: [
|
||||
[14.61464119, 7.49001646, 0.02916753],
|
||||
[14.61464119, 11.54541874, 6.77309084, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 3.07277966, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 2.05039096, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 2.05039096, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.2308979, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 5.85520077, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.07277966, 1.56271636, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.1956799, 1.98035145, 0.86115354, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.1956799, 1.98035145, 0.86115354, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.07277966, 1.84880662, 0.83188516, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.88507891, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.07277966, 1.84880662, 0.83188516, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.88507891, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.75677586, 2.84484982, 1.78698075, 0.803307, 0.02916753],
|
||||
],
|
||||
0.85: [
|
||||
[14.61464119, 7.49001646, 0.02916753],
|
||||
[14.61464119, 7.49001646, 1.84880662, 0.02916753],
|
||||
[14.61464119, 11.54541874, 6.77309084, 1.56271636, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.11996698, 3.07277966, 1.24153244, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.09240818, 2.84484982, 0.95350921, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.09240818, 2.84484982, 0.95350921, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.58536053, 3.1956799, 1.84880662, 0.803307, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 8.75849152, 7.49001646, 5.58536053, 3.1956799, 1.84880662, 0.803307, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 8.75849152, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.2308979, 10.90732002, 8.75849152, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.65472794, 3.07277966, 1.84880662, 0.803307, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.60512662, 2.6383388, 1.56271636, 0.72133851, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.90732002, 10.31284904, 9.75859547, 9.24142551, 8.75849152, 8.30717278, 7.88507891, 7.49001646, 6.77309084, 5.85520077, 4.65472794, 3.46139455, 2.45070267, 1.56271636, 0.72133851, 0.02916753],
|
||||
],
|
||||
0.90: [
|
||||
[14.61464119, 6.77309084, 0.02916753],
|
||||
[14.61464119, 7.49001646, 1.56271636, 0.02916753],
|
||||
[14.61464119, 7.49001646, 3.07277966, 0.95350921, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.54230714, 0.89115214, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 4.86714602, 2.54230714, 0.89115214, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.09240818, 3.07277966, 1.61558151, 0.69515091, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.11996698, 4.86714602, 3.07277966, 1.61558151, 0.69515091, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 2.95596409, 1.61558151, 0.69515091, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.24153244, 0.57119018, 0.02916753],
|
||||
[14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.24153244, 0.57119018, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.24153244, 0.57119018, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.75677586, 2.84484982, 1.84880662, 1.08895338, 0.52423614, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 4.86714602, 3.75677586, 2.84484982, 1.84880662, 1.08895338, 0.52423614, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.45427561, 3.32507086, 2.45070267, 1.61558151, 0.95350921, 0.45573691, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.45427561, 3.32507086, 2.45070267, 1.61558151, 0.95350921, 0.45573691, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.91689563, 3.07277966, 2.27973175, 1.56271636, 0.95350921, 0.45573691, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.91689563, 3.07277966, 2.27973175, 1.56271636, 0.95350921, 0.45573691, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 4.86714602, 3.91689563, 3.07277966, 2.27973175, 1.56271636, 0.95350921, 0.45573691, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.96784878, 12.2308979, 11.54541874, 10.31284904, 9.24142551, 8.75849152, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.60512662, 2.95596409, 2.19988537, 1.51179266, 0.89115214, 0.43325692, 0.02916753],
|
||||
],
|
||||
0.95: [
|
||||
[14.61464119, 6.77309084, 0.02916753],
|
||||
[14.61464119, 6.77309084, 1.56271636, 0.02916753],
|
||||
[14.61464119, 7.49001646, 2.84484982, 0.89115214, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.36326075, 0.803307, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.95596409, 1.56271636, 0.64427125, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 4.86714602, 2.95596409, 1.56271636, 0.64427125, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 4.86714602, 3.07277966, 1.91321158, 1.08895338, 0.50118381, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.07277966, 1.91321158, 1.08895338, 0.50118381, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.07277966, 1.91321158, 1.08895338, 0.50118381, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.19988537, 1.41535246, 0.803307, 0.38853383, 0.02916753],
|
||||
[14.61464119, 12.2308979, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.46139455, 2.6383388, 1.84880662, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
|
||||
[14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.46139455, 2.6383388, 1.84880662, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
|
||||
[14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 6.14220476, 4.86714602, 3.75677586, 2.95596409, 2.19988537, 1.56271636, 1.05362725, 0.64427125, 0.32104823, 0.02916753],
|
||||
[14.61464119, 12.96784878, 10.90732002, 8.75849152, 7.49001646, 6.44769001, 5.58536053, 4.65472794, 3.60512662, 2.95596409, 2.19988537, 1.56271636, 1.05362725, 0.64427125, 0.32104823, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.65472794, 3.60512662, 2.95596409, 2.19988537, 1.56271636, 1.05362725, 0.64427125, 0.32104823, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.44769001, 5.58536053, 4.65472794, 3.75677586, 3.07277966, 2.45070267, 1.78698075, 1.24153244, 0.83188516, 0.50118381, 0.22545385, 0.02916753],
|
||||
[14.61464119, 12.96784878, 11.54541874, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.60512662, 2.95596409, 2.36326075, 1.72759056, 1.24153244, 0.83188516, 0.50118381, 0.22545385, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.60512662, 2.95596409, 2.36326075, 1.72759056, 1.24153244, 0.83188516, 0.50118381, 0.22545385, 0.02916753],
|
||||
[14.61464119, 13.76078796, 12.2308979, 10.90732002, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.45427561, 3.75677586, 3.07277966, 2.45070267, 1.91321158, 1.46270394, 1.05362725, 0.72133851, 0.43325692, 0.19894916, 0.02916753],
|
||||
],
|
||||
1.00: [
|
||||
[14.61464119, 1.56271636, 0.02916753],
|
||||
[14.61464119, 6.77309084, 0.95350921, 0.02916753],
|
||||
[14.61464119, 6.77309084, 2.36326075, 0.803307, 0.02916753],
|
||||
[14.61464119, 7.11996698, 3.07277966, 1.56271636, 0.59516323, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.41535246, 0.57119018, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.86115354, 0.38853383, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.86115354, 0.38853383, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 4.86714602, 3.07277966, 1.98035145, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.07277966, 1.98035145, 1.24153244, 0.72133851, 0.34370604, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.27973175, 1.51179266, 0.95350921, 0.54755926, 0.25053367, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.36326075, 1.61558151, 1.08895338, 0.72133851, 0.41087446, 0.17026083, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.36326075, 1.61558151, 1.08895338, 0.72133851, 0.41087446, 0.17026083, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.12350607, 1.56271636, 1.08895338, 0.72133851, 0.41087446, 0.17026083, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.19988537, 1.61558151, 1.162866, 0.803307, 0.50118381, 0.27464288, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.75849152, 7.49001646, 5.85520077, 4.65472794, 3.75677586, 3.07277966, 2.45070267, 1.84880662, 1.36964464, 1.01931262, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.75849152, 7.49001646, 6.14220476, 5.09240818, 4.26497746, 3.46139455, 2.84484982, 2.19988537, 1.67050016, 1.24153244, 0.92192322, 0.64427125, 0.43325692, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.75849152, 7.49001646, 6.14220476, 5.09240818, 4.26497746, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.12534678, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 12.2308979, 9.24142551, 8.30717278, 7.49001646, 6.14220476, 5.09240818, 4.26497746, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.12534678, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 12.2308979, 9.24142551, 8.30717278, 7.49001646, 6.77309084, 5.85520077, 5.09240818, 4.26497746, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.12534678, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.05: [
|
||||
[14.61464119, 0.95350921, 0.02916753],
|
||||
[14.61464119, 6.77309084, 0.89115214, 0.02916753],
|
||||
[14.61464119, 6.77309084, 2.05039096, 0.72133851, 0.02916753],
|
||||
[14.61464119, 6.77309084, 2.84484982, 1.28281462, 0.52423614, 0.02916753],
|
||||
[14.61464119, 6.77309084, 3.07277966, 1.61558151, 0.803307, 0.34370604, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.56271636, 0.803307, 0.34370604, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.95350921, 0.52423614, 0.22545385, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.07277966, 1.98035145, 1.24153244, 0.74807048, 0.41087446, 0.17026083, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.27973175, 1.51179266, 0.95350921, 0.59516323, 0.34370604, 0.13792117, 0.02916753],
|
||||
[14.61464119, 7.49001646, 5.09240818, 3.46139455, 2.45070267, 1.61558151, 1.08895338, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.09240818, 3.46139455, 2.45070267, 1.61558151, 1.08895338, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.36326075, 1.61558151, 1.08895338, 0.72133851, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.45070267, 1.72759056, 1.24153244, 0.86115354, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.19988537, 1.61558151, 1.162866, 0.83188516, 0.59516323, 0.38853383, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.84484982, 2.19988537, 1.67050016, 1.28281462, 0.95350921, 0.72133851, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.36326075, 1.84880662, 1.41535246, 1.08895338, 0.83188516, 0.61951244, 0.45573691, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.20157266, 0.95350921, 0.74807048, 0.57119018, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.30717278, 7.11996698, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.45070267, 1.91321158, 1.51179266, 1.20157266, 0.95350921, 0.74807048, 0.57119018, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 8.30717278, 7.11996698, 5.85520077, 4.65472794, 3.60512662, 2.95596409, 2.45070267, 1.98035145, 1.61558151, 1.32549286, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.41087446, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.10: [
|
||||
[14.61464119, 0.89115214, 0.02916753],
|
||||
[14.61464119, 2.36326075, 0.72133851, 0.02916753],
|
||||
[14.61464119, 5.85520077, 1.61558151, 0.57119018, 0.02916753],
|
||||
[14.61464119, 6.77309084, 2.45070267, 1.08895338, 0.45573691, 0.02916753],
|
||||
[14.61464119, 6.77309084, 2.95596409, 1.56271636, 0.803307, 0.34370604, 0.02916753],
|
||||
[14.61464119, 6.77309084, 3.07277966, 1.61558151, 0.89115214, 0.4783645, 0.19894916, 0.02916753],
|
||||
[14.61464119, 6.77309084, 3.07277966, 1.84880662, 1.08895338, 0.64427125, 0.34370604, 0.13792117, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.84484982, 1.61558151, 0.95350921, 0.54755926, 0.27464288, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.95596409, 1.91321158, 1.24153244, 0.803307, 0.4783645, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.05039096, 1.41535246, 0.95350921, 0.64427125, 0.41087446, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.27973175, 1.61558151, 1.12534678, 0.803307, 0.54755926, 0.36617002, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.32507086, 2.45070267, 1.72759056, 1.24153244, 0.89115214, 0.64427125, 0.45573691, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 5.09240818, 3.60512662, 2.84484982, 2.05039096, 1.51179266, 1.08895338, 0.803307, 0.59516323, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 5.09240818, 3.60512662, 2.84484982, 2.12350607, 1.61558151, 1.24153244, 0.95350921, 0.72133851, 0.54755926, 0.41087446, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.1956799, 2.45070267, 1.91321158, 1.51179266, 1.20157266, 0.95350921, 0.74807048, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.86115354, 0.69515091, 0.54755926, 0.43325692, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 11.54541874, 7.49001646, 5.85520077, 4.45427561, 3.46139455, 2.84484982, 2.19988537, 1.72759056, 1.36964464, 1.08895338, 0.89115214, 0.72133851, 0.59516323, 0.4783645, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.15: [
|
||||
[14.61464119, 0.83188516, 0.02916753],
|
||||
[14.61464119, 1.84880662, 0.59516323, 0.02916753],
|
||||
[14.61464119, 5.85520077, 1.56271636, 0.52423614, 0.02916753],
|
||||
[14.61464119, 5.85520077, 1.91321158, 0.83188516, 0.34370604, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.45070267, 1.24153244, 0.59516323, 0.25053367, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.51179266, 0.803307, 0.41087446, 0.17026083, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.56271636, 0.89115214, 0.50118381, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 6.77309084, 3.07277966, 1.84880662, 1.12534678, 0.72133851, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 6.77309084, 3.07277966, 1.91321158, 1.24153244, 0.803307, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 2.95596409, 1.91321158, 1.24153244, 0.803307, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.05039096, 1.36964464, 0.95350921, 0.69515091, 0.4783645, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.803307, 0.59516323, 0.43325692, 0.29807833, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.803307, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.07277966, 2.19988537, 1.61558151, 1.24153244, 0.95350921, 0.74807048, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.78698075, 1.32549286, 1.01931262, 0.803307, 0.64427125, 0.50118381, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.78698075, 1.32549286, 1.01931262, 0.803307, 0.64427125, 0.52423614, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.12534678, 0.89115214, 0.72133851, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.12534678, 0.89115214, 0.72133851, 0.59516323, 0.50118381, 0.41087446, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.86714602, 3.1956799, 2.45070267, 1.84880662, 1.41535246, 1.12534678, 0.89115214, 0.72133851, 0.59516323, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.20: [
|
||||
[14.61464119, 0.803307, 0.02916753],
|
||||
[14.61464119, 1.56271636, 0.52423614, 0.02916753],
|
||||
[14.61464119, 2.36326075, 0.92192322, 0.36617002, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.24153244, 0.59516323, 0.25053367, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.05039096, 0.95350921, 0.45573691, 0.17026083, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.45070267, 1.24153244, 0.64427125, 0.29807833, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.45070267, 1.36964464, 0.803307, 0.45573691, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.61558151, 0.95350921, 0.59516323, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.67050016, 1.08895338, 0.74807048, 0.50118381, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.95596409, 1.84880662, 1.24153244, 0.83188516, 0.59516323, 0.41087446, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 3.07277966, 1.98035145, 1.36964464, 0.95350921, 0.69515091, 0.50118381, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 6.77309084, 3.46139455, 2.36326075, 1.56271636, 1.08895338, 0.803307, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 6.77309084, 3.46139455, 2.45070267, 1.61558151, 1.162866, 0.86115354, 0.64427125, 0.50118381, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.12350607, 1.51179266, 1.08895338, 0.83188516, 0.64427125, 0.50118381, 0.41087446, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.19988537, 1.61558151, 1.20157266, 0.92192322, 0.72133851, 0.57119018, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.19988537, 1.61558151, 1.24153244, 0.95350921, 0.74807048, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 7.49001646, 4.65472794, 3.07277966, 2.19988537, 1.61558151, 1.24153244, 0.95350921, 0.74807048, 0.59516323, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.25: [
|
||||
[14.61464119, 0.72133851, 0.02916753],
|
||||
[14.61464119, 1.56271636, 0.50118381, 0.02916753],
|
||||
[14.61464119, 2.05039096, 0.803307, 0.32104823, 0.02916753],
|
||||
[14.61464119, 2.36326075, 0.95350921, 0.43325692, 0.17026083, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.24153244, 0.59516323, 0.27464288, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.51179266, 0.803307, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.36326075, 1.24153244, 0.72133851, 0.41087446, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.45070267, 1.36964464, 0.83188516, 0.52423614, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.61558151, 0.98595673, 0.64427125, 0.43325692, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.67050016, 1.08895338, 0.74807048, 0.52423614, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.803307, 0.59516323, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.95596409, 1.84880662, 1.24153244, 0.86115354, 0.64427125, 0.4783645, 0.36617002, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.95596409, 1.84880662, 1.28281462, 0.92192322, 0.69515091, 0.52423614, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.95596409, 1.91321158, 1.32549286, 0.95350921, 0.72133851, 0.54755926, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.95596409, 1.91321158, 1.32549286, 0.95350921, 0.72133851, 0.57119018, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.95596409, 1.91321158, 1.32549286, 0.95350921, 0.74807048, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 3.07277966, 2.05039096, 1.41535246, 1.05362725, 0.803307, 0.61951244, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 3.07277966, 2.05039096, 1.41535246, 1.05362725, 0.803307, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 3.07277966, 2.05039096, 1.46270394, 1.08895338, 0.83188516, 0.66947293, 0.54755926, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.30: [
|
||||
[14.61464119, 0.72133851, 0.02916753],
|
||||
[14.61464119, 1.24153244, 0.43325692, 0.02916753],
|
||||
[14.61464119, 1.56271636, 0.59516323, 0.22545385, 0.02916753],
|
||||
[14.61464119, 1.84880662, 0.803307, 0.36617002, 0.13792117, 0.02916753],
|
||||
[14.61464119, 2.36326075, 1.01931262, 0.52423614, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.36964464, 0.74807048, 0.41087446, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.56271636, 0.89115214, 0.54755926, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.61558151, 0.95350921, 0.61951244, 0.41087446, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.45070267, 1.36964464, 0.83188516, 0.54755926, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.45070267, 1.41535246, 0.92192322, 0.64427125, 0.45573691, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.6383388, 1.56271636, 1.01931262, 0.72133851, 0.50118381, 0.36617002, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.05362725, 0.74807048, 0.54755926, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.08895338, 0.77538133, 0.57119018, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.59516323, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.83188516, 0.64427125, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.72759056, 1.162866, 0.83188516, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.84484982, 1.78698075, 1.24153244, 0.92192322, 0.72133851, 0.57119018, 0.4783645, 0.41087446, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.35: [
|
||||
[14.61464119, 0.69515091, 0.02916753],
|
||||
[14.61464119, 0.95350921, 0.34370604, 0.02916753],
|
||||
[14.61464119, 1.56271636, 0.57119018, 0.19894916, 0.02916753],
|
||||
[14.61464119, 1.61558151, 0.69515091, 0.29807833, 0.09824532, 0.02916753],
|
||||
[14.61464119, 1.84880662, 0.83188516, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.162866, 0.64427125, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.36964464, 0.803307, 0.50118381, 0.32104823, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.41535246, 0.83188516, 0.54755926, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.56271636, 0.95350921, 0.64427125, 0.45573691, 0.32104823, 0.22545385, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.56271636, 0.95350921, 0.64427125, 0.45573691, 0.34370604, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.61558151, 1.01931262, 0.72133851, 0.52423614, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.61558151, 1.01931262, 0.72133851, 0.52423614, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.61558151, 1.05362725, 0.74807048, 0.54755926, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.72759056, 1.12534678, 0.803307, 0.59516323, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 3.07277966, 1.72759056, 1.12534678, 0.803307, 0.59516323, 0.4783645, 0.38853383, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.45070267, 1.51179266, 1.01931262, 0.74807048, 0.57119018, 0.45573691, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.6383388, 1.61558151, 1.08895338, 0.803307, 0.61951244, 0.50118381, 0.41087446, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.6383388, 1.61558151, 1.08895338, 0.803307, 0.64427125, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 5.85520077, 2.6383388, 1.61558151, 1.08895338, 0.803307, 0.64427125, 0.52423614, 0.45573691, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.40: [
|
||||
[14.61464119, 0.59516323, 0.02916753],
|
||||
[14.61464119, 0.95350921, 0.34370604, 0.02916753],
|
||||
[14.61464119, 1.08895338, 0.43325692, 0.13792117, 0.02916753],
|
||||
[14.61464119, 1.56271636, 0.64427125, 0.27464288, 0.09824532, 0.02916753],
|
||||
[14.61464119, 1.61558151, 0.803307, 0.43325692, 0.22545385, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.05039096, 0.95350921, 0.54755926, 0.34370604, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.24153244, 0.72133851, 0.43325692, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.24153244, 0.74807048, 0.50118381, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.28281462, 0.803307, 0.52423614, 0.36617002, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.28281462, 0.803307, 0.54755926, 0.38853383, 0.29807833, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.41535246, 0.86115354, 0.59516323, 0.43325692, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.64427125, 0.45573691, 0.34370604, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.64427125, 0.4783645, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.56271636, 0.98595673, 0.69515091, 0.52423614, 0.41087446, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.56271636, 1.01931262, 0.72133851, 0.54755926, 0.43325692, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.61558151, 1.05362725, 0.74807048, 0.57119018, 0.45573691, 0.38853383, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.61951244, 0.50118381, 0.41087446, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.61951244, 0.50118381, 0.43325692, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.84484982, 1.61558151, 1.08895338, 0.803307, 0.64427125, 0.52423614, 0.45573691, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
],
|
||||
1.45: [
|
||||
[14.61464119, 0.59516323, 0.02916753],
|
||||
[14.61464119, 0.803307, 0.25053367, 0.02916753],
|
||||
[14.61464119, 0.95350921, 0.34370604, 0.09824532, 0.02916753],
|
||||
[14.61464119, 1.24153244, 0.54755926, 0.25053367, 0.09824532, 0.02916753],
|
||||
[14.61464119, 1.56271636, 0.72133851, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
|
||||
[14.61464119, 1.61558151, 0.803307, 0.45573691, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 1.91321158, 0.95350921, 0.57119018, 0.36617002, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.19988537, 1.08895338, 0.64427125, 0.41087446, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.24153244, 0.74807048, 0.50118381, 0.34370604, 0.25053367, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.24153244, 0.74807048, 0.50118381, 0.36617002, 0.27464288, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.28281462, 0.803307, 0.54755926, 0.41087446, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.28281462, 0.803307, 0.57119018, 0.43325692, 0.34370604, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.28281462, 0.83188516, 0.59516323, 0.45573691, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
[14.61464119, 2.45070267, 1.28281462, 0.83188516, 0.59516323, 0.45573691, 0.36617002, 0.32104823, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
|
||||
        [14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.69515091, 0.52423614, 0.41087446, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.84484982, 1.51179266, 0.95350921, 0.69515091, 0.52423614, 0.43325692, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.84484982, 1.56271636, 0.98595673, 0.72133851, 0.54755926, 0.45573691, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.84484982, 1.56271636, 1.01931262, 0.74807048, 0.57119018, 0.4783645, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.84484982, 1.56271636, 1.01931262, 0.74807048, 0.59516323, 0.50118381, 0.43325692, 0.38853383, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
    ],
    1.50: [
        [14.61464119, 0.54755926, 0.02916753],
        [14.61464119, 0.803307, 0.25053367, 0.02916753],
        [14.61464119, 0.86115354, 0.32104823, 0.09824532, 0.02916753],
        [14.61464119, 1.24153244, 0.54755926, 0.25053367, 0.09824532, 0.02916753],
        [14.61464119, 1.56271636, 0.72133851, 0.36617002, 0.19894916, 0.09824532, 0.02916753],
        [14.61464119, 1.61558151, 0.803307, 0.45573691, 0.27464288, 0.17026083, 0.09824532, 0.02916753],
        [14.61464119, 1.61558151, 0.83188516, 0.52423614, 0.34370604, 0.25053367, 0.17026083, 0.09824532, 0.02916753],
        [14.61464119, 1.84880662, 0.95350921, 0.59516323, 0.38853383, 0.27464288, 0.19894916, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 1.84880662, 0.95350921, 0.59516323, 0.41087446, 0.29807833, 0.22545385, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 1.84880662, 0.95350921, 0.61951244, 0.43325692, 0.32104823, 0.25053367, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.19988537, 1.12534678, 0.72133851, 0.50118381, 0.36617002, 0.27464288, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.19988537, 1.12534678, 0.72133851, 0.50118381, 0.36617002, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.57119018, 0.43325692, 0.34370604, 0.29807833, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.57119018, 0.43325692, 0.34370604, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.59516323, 0.45573691, 0.36617002, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.36326075, 1.24153244, 0.803307, 0.59516323, 0.45573691, 0.38853383, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.45070267, 1.32549286, 0.86115354, 0.64427125, 0.50118381, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.45070267, 1.36964464, 0.92192322, 0.69515091, 0.54755926, 0.45573691, 0.41087446, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
        [14.61464119, 2.45070267, 1.41535246, 0.95350921, 0.72133851, 0.57119018, 0.4783645, 0.43325692, 0.38853383, 0.36617002, 0.34370604, 0.32104823, 0.29807833, 0.27464288, 0.25053367, 0.22545385, 0.19894916, 0.17026083, 0.13792117, 0.09824532, 0.02916753],
    ],
}

class GITSScheduler(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="GITSScheduler_V3",
            category="sampling/custom_sampling/schedulers",
            inputs=[
                io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05),
                io.Int.Input("steps", default=10, min=2, max=1000),
                io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[
                io.Sigmas.Output(),
            ],
        )

    @classmethod
    def execute(cls, coeff, steps, denoise):
        total_steps = steps
        if denoise < 1.0:
            if denoise <= 0.0:
                return io.NodeOutput(torch.FloatTensor([]))
            total_steps = round(steps * denoise)

        if steps <= 20:
            sigmas = NOISE_LEVELS[round(coeff, 2)][steps-2][:]
        else:
            sigmas = NOISE_LEVELS[round(coeff, 2)][-1][:]
            sigmas = loglinear_interp(sigmas, steps + 1)

        sigmas = sigmas[-(total_steps + 1):]
        sigmas[-1] = 0
        return io.NodeOutput(torch.FloatTensor(sigmas))


NODES_LIST: list[type[io.ComfyNode]] = [
    GITSScheduler,
]
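
A minimal sketch of the lookup-and-slice logic above, assuming NOISE_LEVELS carries an entry for the default coeff 1.20 shaped like the 1.40/1.50 tables shown:

    steps, denoise = 12, 0.5
    total_steps = round(steps * denoise)       # 6 denoising steps actually run
    sigmas = NOISE_LEVELS[1.20][steps - 2][:]  # table row indexed by step count
    sigmas = sigmas[-(total_steps + 1):]       # keep only the 7-value low-noise tail
    sigmas[-1] = 0                             # the final sigma is forced to exactly zero
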
comfy_extras/v3/nodes_hidream.py (new file, 71 lines)
@@ -0,0 +1,71 @@
from __future__ import annotations

import comfy.model_management
import comfy.sd
import folder_paths
from comfy_api.latest import io


class QuadrupleCLIPLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="QuadrupleCLIPLoader_V3",
            category="advanced/loaders",
            description="[Recipes]\n\nhidream: long clip-l, long clip-g, t5xxl, llama_8b_3.1_instruct",
            inputs=[
                io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name3", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name4", options=folder_paths.get_filename_list("text_encoders")),
            ],
            outputs=[
                io.Clip.Output(),
            ]
        )

    @classmethod
    def execute(cls, clip_name1, clip_name2, clip_name3, clip_name4):
        clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
        clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
        clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
        clip_path4 = folder_paths.get_full_path_or_raise("text_encoders", clip_name4)
        return io.NodeOutput(
            comfy.sd.load_clip(
                ckpt_paths=[clip_path1, clip_path2, clip_path3, clip_path4],
                embedding_directory=folder_paths.get_folder_paths("embeddings"),
            )
        )


class CLIPTextEncodeHiDream(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeHiDream_V3",
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
                io.String.Input("clip_g", multiline=True, dynamic_prompts=True),
                io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
                io.String.Input("llama", multiline=True, dynamic_prompts=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ]
        )

    @classmethod
    def execute(cls, clip, clip_l, clip_g, t5xxl, llama):
        tokens = clip.tokenize(clip_g)
        tokens["l"] = clip.tokenize(clip_l)["l"]
        tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
        tokens["llama"] = clip.tokenize(llama)["llama"]
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodeHiDream,
    QuadrupleCLIPLoader,
]
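
CLIPTextEncodeHiDream tokenizes each text box separately and merges the per-stream token lists into one dict, so the four prompts are allowed to differ. A sketch of that usage, assuming `clip` is the object produced by QuadrupleCLIPLoader and that the tokenizer exposes the same stream keys as the code above:

    tokens = clip.tokenize("a watercolor fox")              # clip-g stream, base dict
    tokens["l"] = clip.tokenize("fox, painterly")["l"]      # clip-l may carry its own prompt
    tokens["t5xxl"] = clip.tokenize("a fox painted in watercolor")["t5xxl"]
    tokens["llama"] = clip.tokenize("a fox painted in watercolor")["llama"]
    cond = clip.encode_from_tokens_scheduled(tokens)
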
comfy_extras/v3/nodes_hunyuan.py (new file, 169 lines)
@@ -0,0 +1,169 @@
from __future__ import annotations

import torch

import comfy.model_management
import comfy.utils  # used by HunyuanImageToVideo.execute for common_upscale
import node_helpers
import nodes
from comfy_api.latest import io

class CLIPTextEncodeHunyuanDiT(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeHunyuanDiT_V3",
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("bert", multiline=True, dynamic_prompts=True),
                io.String.Input("mt5xl", multiline=True, dynamic_prompts=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, bert, mt5xl):
        tokens = clip.tokenize(bert)
        tokens["mt5xl"] = clip.tokenize(mt5xl)["mt5xl"]

        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))

class EmptyHunyuanLatentVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptyHunyuanLatentVideo_V3",
            category="latent/video",
            inputs=[
                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=25, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, width, height, length, batch_size):
        latent = torch.zeros(
            [batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8],
            device=comfy.model_management.intermediate_device(),
        )
        return io.NodeOutput({"samples": latent})
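
The latent shape encodes the video VAE's 4x temporal and 8x spatial compression. With the defaults above:

    length, height, width = 25, 480, 848
    t = ((length - 1) // 4) + 1      # 7: every 4 frames share a latent slot, plus the first frame
    h, w = height // 8, width // 8   # 60, 106
    # -> torch.zeros([batch_size, 16, 7, 60, 106])
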
PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = (
    "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: "
    "1. The main content and theme of the video."
    "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
    "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
    "4. background environment, light, style and atmosphere."
    "5. camera angles, movements, and transitions used in the video:<|eot_id|>\n\n"
    "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)

class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TextEncodeHunyuanVideo_ImageToVideo_V3",
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.ClipVisionOutput.Input("clip_vision_output"),
                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
                io.Int.Input(
                    "image_interleave",
                    default=2,
                    min=1,
                    max=512,
                    tooltip="How much the image influences things vs the text prompt. Higher number means more influence from the text prompt.",
                ),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, clip_vision_output, prompt, image_interleave):
        tokens = clip.tokenize(
            prompt, llama_template=PROMPT_TEMPLATE_ENCODE_VIDEO_I2V,
            image_embeds=clip_vision_output.mm_projected,
            image_interleave=image_interleave,
        )
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))

class HunyuanImageToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="HunyuanImageToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=53, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Combo.Input("guidance_type", options=["v1 (concat)", "v2 (replace)", "custom"]),
                io.Image.Input("start_image", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, vae, width, height, length, batch_size, guidance_type, start_image=None):
        latent = torch.zeros(
            [batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8],
            device=comfy.model_management.intermediate_device(),
        )
        out_latent = {}

        if start_image is not None:
            start_image = comfy.utils.common_upscale(
                start_image[:length, :, :, :3].movedim(-1, 1), width, height, "bilinear", "center"
            ).movedim(1, -1)

            concat_latent_image = vae.encode(start_image)
            mask = torch.ones(
                (1, 1, latent.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]),
                device=start_image.device,
                dtype=start_image.dtype,
            )
            mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0

            if guidance_type == "v1 (concat)":
                cond = {"concat_latent_image": concat_latent_image, "concat_mask": mask}
            elif guidance_type == "v2 (replace)":
                cond = {'guiding_frame_index': 0}
                latent[:, :, :concat_latent_image.shape[2]] = concat_latent_image
                out_latent["noise_mask"] = mask
            elif guidance_type == "custom":
                cond = {"ref_latent": concat_latent_image}

            positive = node_helpers.conditioning_set_values(positive, cond)

        out_latent["samples"] = latent
        return io.NodeOutput(positive, out_latent)
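
All three guidance modes hand their dict to node_helpers.conditioning_set_values, which attaches the keys to every conditioning entry. A rough sketch of that behavior (a simplification, not the real helper):

    def conditioning_set_values_sketch(conditioning, values):
        # hypothetical simplification: copy each (embedding, options) pair
        # and merge the new keys into the options dict
        return [[emb, {**opts, **values}] for emb, opts in conditioning]
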
NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodeHunyuanDiT,
    EmptyHunyuanLatentVideo,
    HunyuanImageToVideo,
    TextEncodeHunyuanVideo_ImageToVideo,
]
comfy_extras/v3/nodes_hunyuan3d.py (new file, 672 lines)
@@ -0,0 +1,672 @@
from __future__ import annotations

import json
import os
import struct

import numpy as np
import torch

import comfy.model_management
import comfy.utils  # used by voxel_to_mesh_surfnet for ProgressBar
import folder_paths
from comfy.cli_args import args
from comfy.ldm.modules.diffusionmodules.mmdit import (
    get_1d_sincos_pos_embed_from_grid_torch,
)
from comfy_api.latest import io

class VOXEL:
    def __init__(self, data):
        self.data = data


class MESH:
    def __init__(self, vertices, faces):
        self.vertices = vertices
        self.faces = faces

def voxel_to_mesh(voxels, threshold=0.5, device=None):
    if device is None:
        device = torch.device("cpu")
    voxels = voxels.to(device)

    binary = (voxels > threshold).float()
    padded = torch.nn.functional.pad(binary, (1, 1, 1, 1, 1, 1), 'constant', 0)

    D, H, W = binary.shape

    neighbors = torch.tensor([
        [0, 0, 1],
        [0, 0, -1],
        [0, 1, 0],
        [0, -1, 0],
        [1, 0, 0],
        [-1, 0, 0]
    ], device=device)

    z, y, x = torch.meshgrid(
        torch.arange(D, device=device),
        torch.arange(H, device=device),
        torch.arange(W, device=device),
        indexing='ij'
    )
    voxel_indices = torch.stack([z.flatten(), y.flatten(), x.flatten()], dim=1)

    solid_mask = binary.flatten() > 0
    solid_indices = voxel_indices[solid_mask]

    corner_offsets = [
        torch.tensor([
            [0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]
        ], device=device),
        torch.tensor([
            [0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]
        ], device=device),
        torch.tensor([
            [0, 1, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1]
        ], device=device),
        torch.tensor([
            [0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]
        ], device=device),
        torch.tensor([
            [1, 0, 1], [1, 1, 1], [1, 1, 0], [1, 0, 0]
        ], device=device),
        torch.tensor([
            [0, 1, 0], [0, 1, 1], [0, 0, 1], [0, 0, 0]
        ], device=device)
    ]

    all_vertices = []
    all_indices = []

    vertex_count = 0

    for face_idx, offset in enumerate(neighbors):
        neighbor_indices = solid_indices + offset

        padded_indices = neighbor_indices + 1

        is_exposed = padded[
            padded_indices[:, 0],
            padded_indices[:, 1],
            padded_indices[:, 2]
        ] == 0

        if not is_exposed.any():
            continue

        exposed_indices = solid_indices[is_exposed]

        corners = corner_offsets[face_idx].unsqueeze(0)

        face_vertices = exposed_indices.unsqueeze(1) + corners

        all_vertices.append(face_vertices.reshape(-1, 3))

        num_faces = exposed_indices.shape[0]
        face_indices = torch.arange(
            vertex_count,
            vertex_count + 4 * num_faces,
            device=device
        ).reshape(-1, 4)

        all_indices.append(torch.stack([face_indices[:, 0], face_indices[:, 1], face_indices[:, 2]], dim=1))
        all_indices.append(torch.stack([face_indices[:, 0], face_indices[:, 2], face_indices[:, 3]], dim=1))

        vertex_count += 4 * num_faces

    if len(all_vertices) > 0:
        vertices = torch.cat(all_vertices, dim=0)
        faces = torch.cat(all_indices, dim=0)
    else:
        vertices = torch.zeros((1, 3))
        faces = torch.zeros((1, 3))

    v_min = 0
    v_max = max(voxels.shape)

    vertices = vertices - (v_min + v_max) / 2

    scale = (v_max - v_min) / 2
    if scale > 0:
        vertices = vertices / scale

    vertices = torch.fliplr(vertices)
    return vertices, faces
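
Each exposed voxel face contributes one quad (four vertices, two triangles), so a single solid voxel should produce exactly six quads. A quick smoke test under that assumption:

    vox = torch.zeros(3, 3, 3)
    vox[1, 1, 1] = 1.0
    verts, tris = voxel_to_mesh(vox, threshold=0.5)
    assert verts.shape[0] == 24 and tris.shape[0] == 12  # 6 faces x (4 verts, 2 tris)
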
def voxel_to_mesh_surfnet(voxels, threshold=0.5, device=None):
    if device is None:
        device = torch.device("cpu")
    voxels = voxels.to(device)

    D, H, W = voxels.shape

    padded = torch.nn.functional.pad(voxels, (1, 1, 1, 1, 1, 1), 'constant', 0)
    z, y, x = torch.meshgrid(
        torch.arange(D, device=device),
        torch.arange(H, device=device),
        torch.arange(W, device=device),
        indexing='ij'
    )
    cell_positions = torch.stack([z.flatten(), y.flatten(), x.flatten()], dim=1)

    corner_offsets = torch.tensor([
        [0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
        [0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]
    ], device=device)

    corner_values = torch.zeros((cell_positions.shape[0], 8), device=device)
    for c, (dz, dy, dx) in enumerate(corner_offsets):
        corner_values[:, c] = padded[
            cell_positions[:, 0] + dz,
            cell_positions[:, 1] + dy,
            cell_positions[:, 2] + dx
        ]

    corner_signs = corner_values > threshold
    has_inside = torch.any(corner_signs, dim=1)
    has_outside = torch.any(~corner_signs, dim=1)
    contains_surface = has_inside & has_outside

    active_cells = cell_positions[contains_surface]
    active_signs = corner_signs[contains_surface]
    active_values = corner_values[contains_surface]

    if active_cells.shape[0] == 0:
        return torch.zeros((0, 3), device=device), torch.zeros((0, 3), dtype=torch.long, device=device)

    edges = torch.tensor([
        [0, 1], [0, 2], [0, 4], [1, 3],
        [1, 5], [2, 3], [2, 6], [3, 7],
        [4, 5], [4, 6], [5, 7], [6, 7]
    ], device=device)

    cell_vertices = {}
    progress = comfy.utils.ProgressBar(100)

    for edge_idx, (e1, e2) in enumerate(edges):
        progress.update(1)
        crossing = active_signs[:, e1] != active_signs[:, e2]
        if not crossing.any():
            continue

        cell_indices = torch.nonzero(crossing, as_tuple=True)[0]

        v1 = active_values[cell_indices, e1]
        v2 = active_values[cell_indices, e2]

        t = torch.zeros_like(v1, device=device)
        denom = v2 - v1
        valid = denom != 0
        t[valid] = (threshold - v1[valid]) / denom[valid]
        t[~valid] = 0.5

        p1 = corner_offsets[e1].float()
        p2 = corner_offsets[e2].float()

        intersection = p1.unsqueeze(0) + t.unsqueeze(1) * (p2.unsqueeze(0) - p1.unsqueeze(0))

        for i, point in zip(cell_indices.tolist(), intersection):
            if i not in cell_vertices:
                cell_vertices[i] = []
            cell_vertices[i].append(point)

    # Calculate the final vertices as the average of intersection points for each cell
    vertices = []
    vertex_lookup = {}

    vert_progress_mod = round(len(cell_vertices)/50)

    for i, points in cell_vertices.items():
        if not i % vert_progress_mod:
            progress.update(1)

        if points:
            vertex = torch.stack(points).mean(dim=0)
            vertex = vertex + active_cells[i].float()
            vertex_lookup[tuple(active_cells[i].tolist())] = len(vertices)
            vertices.append(vertex)

    if not vertices:
        return torch.zeros((0, 3), device=device), torch.zeros((0, 3), dtype=torch.long, device=device)

    final_vertices = torch.stack(vertices)

    inside_corners_mask = active_signs
    outside_corners_mask = ~active_signs

    inside_counts = inside_corners_mask.sum(dim=1, keepdim=True).float()
    outside_counts = outside_corners_mask.sum(dim=1, keepdim=True).float()

    inside_pos = torch.zeros((active_cells.shape[0], 3), device=device)
    outside_pos = torch.zeros((active_cells.shape[0], 3), device=device)

    for i in range(8):
        mask_inside = inside_corners_mask[:, i].unsqueeze(1)
        mask_outside = outside_corners_mask[:, i].unsqueeze(1)
        inside_pos += corner_offsets[i].float().unsqueeze(0) * mask_inside
        outside_pos += corner_offsets[i].float().unsqueeze(0) * mask_outside

    inside_pos /= inside_counts
    outside_pos /= outside_counts
    gradients = inside_pos - outside_pos

    pos_dirs = torch.tensor([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]
    ], device=device)

    cross_products = [
        torch.linalg.cross(pos_dirs[i].float(), pos_dirs[j].float())
        for i in range(3) for j in range(i+1, 3)
    ]

    faces = []
    all_keys = set(vertex_lookup.keys())

    face_progress_mod = round(len(active_cells)/38*3)

    for pair_idx, (i, j) in enumerate([(0,1), (0,2), (1,2)]):
        dir_i = pos_dirs[i]
        dir_j = pos_dirs[j]
        cross_product = cross_products[pair_idx]

        ni_positions = active_cells + dir_i
        nj_positions = active_cells + dir_j
        diag_positions = active_cells + dir_i + dir_j

        alignments = torch.matmul(gradients, cross_product)

        valid_quads = []
        quad_indices = []

        for idx, active_cell in enumerate(active_cells):
            if not idx % face_progress_mod:
                progress.update(1)
            cell_key = tuple(active_cell.tolist())
            ni_key = tuple(ni_positions[idx].tolist())
            nj_key = tuple(nj_positions[idx].tolist())
            diag_key = tuple(diag_positions[idx].tolist())

            if cell_key in all_keys and ni_key in all_keys and nj_key in all_keys and diag_key in all_keys:
                v0 = vertex_lookup[cell_key]
                v1 = vertex_lookup[ni_key]
                v2 = vertex_lookup[nj_key]
                v3 = vertex_lookup[diag_key]

                valid_quads.append((v0, v1, v2, v3))
                quad_indices.append(idx)

        for q_idx, (v0, v1, v2, v3) in enumerate(valid_quads):
            cell_idx = quad_indices[q_idx]
            if alignments[cell_idx] > 0:
                faces.append(torch.tensor([v0, v1, v3], device=device, dtype=torch.long))
                faces.append(torch.tensor([v0, v3, v2], device=device, dtype=torch.long))
            else:
                faces.append(torch.tensor([v0, v3, v1], device=device, dtype=torch.long))
                faces.append(torch.tensor([v0, v2, v3], device=device, dtype=torch.long))

    if faces:
        faces = torch.stack(faces)
    else:
        faces = torch.zeros((0, 3), dtype=torch.long, device=device)

    v_min = 0
    v_max = max(D, H, W)

    final_vertices = final_vertices - (v_min + v_max) / 2

    scale = (v_max - v_min) / 2
    if scale > 0:
        final_vertices = final_vertices / scale

    final_vertices = torch.fliplr(final_vertices)

    return final_vertices, faces
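
Unlike the basic extractor, this SurfaceNets variant places one vertex per surface-crossing cell at the mean of its edge intersections. The `t` computed per edge is plain linear interpolation of the iso-crossing; for example:

    v1, v2, threshold = 0.2, 0.8, 0.5
    t = (threshold - v1) / (v2 - v1)  # 0.5: the iso-surface crosses at the edge midpoint
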
def save_glb(vertices, faces, filepath, metadata=None):
    """
    Save PyTorch tensor vertices and faces as a GLB file without external dependencies.

    Parameters:
    vertices: torch.Tensor of shape (N, 3) - The vertex coordinates
    faces: torch.Tensor of shape (M, 3) - The face indices (triangle faces)
    filepath: str - Output filepath (should end with .glb)
    metadata: dict or None - Optional key/value pairs embedded under asset.extras
    """

    # Convert tensors to numpy arrays
    vertices_np = vertices.cpu().numpy().astype(np.float32)
    faces_np = faces.cpu().numpy().astype(np.uint32)

    vertices_buffer = vertices_np.tobytes()
    indices_buffer = faces_np.tobytes()

    def pad_to_4_bytes(buffer):
        padding_length = (4 - (len(buffer) % 4)) % 4
        return buffer + b'\x00' * padding_length

    vertices_buffer_padded = pad_to_4_bytes(vertices_buffer)
    indices_buffer_padded = pad_to_4_bytes(indices_buffer)

    buffer_data = vertices_buffer_padded + indices_buffer_padded

    vertices_byte_length = len(vertices_buffer)
    vertices_byte_offset = 0
    indices_byte_length = len(indices_buffer)
    indices_byte_offset = len(vertices_buffer_padded)

    gltf = {
        "asset": {"version": "2.0", "generator": "ComfyUI"},
        "buffers": [
            {
                "byteLength": len(buffer_data)
            }
        ],
        "bufferViews": [
            {
                "buffer": 0,
                "byteOffset": vertices_byte_offset,
                "byteLength": vertices_byte_length,
                "target": 34962  # ARRAY_BUFFER
            },
            {
                "buffer": 0,
                "byteOffset": indices_byte_offset,
                "byteLength": indices_byte_length,
                "target": 34963  # ELEMENT_ARRAY_BUFFER
            }
        ],
        "accessors": [
            {
                "bufferView": 0,
                "byteOffset": 0,
                "componentType": 5126,  # FLOAT
                "count": len(vertices_np),
                "type": "VEC3",
                "max": vertices_np.max(axis=0).tolist(),
                "min": vertices_np.min(axis=0).tolist()
            },
            {
                "bufferView": 1,
                "byteOffset": 0,
                "componentType": 5125,  # UNSIGNED_INT
                "count": faces_np.size,
                "type": "SCALAR"
            }
        ],
        "meshes": [
            {
                "primitives": [
                    {
                        "attributes": {
                            "POSITION": 0
                        },
                        "indices": 1,
                        "mode": 4  # TRIANGLES
                    }
                ]
            }
        ],
        "nodes": [
            {
                "mesh": 0
            }
        ],
        "scenes": [
            {
                "nodes": [0]
            }
        ],
        "scene": 0
    }

    if metadata is not None:
        gltf["asset"]["extras"] = metadata

    # Convert the JSON to bytes
    gltf_json = json.dumps(gltf).encode('utf8')

    def pad_json_to_4_bytes(buffer):
        padding_length = (4 - (len(buffer) % 4)) % 4
        return buffer + b' ' * padding_length

    gltf_json_padded = pad_json_to_4_bytes(gltf_json)

    # Create the GLB header
    # Magic glTF
    glb_header = struct.pack('<4sII', b'glTF', 2, 12 + 8 + len(gltf_json_padded) + 8 + len(buffer_data))

    # Create JSON chunk header (chunk type 0)
    json_chunk_header = struct.pack('<II', len(gltf_json_padded), 0x4E4F534A)  # "JSON" in little endian

    # Create BIN chunk header (chunk type 1)
    bin_chunk_header = struct.pack('<II', len(buffer_data), 0x004E4942)  # "BIN\0" in little endian

    # Write the GLB file
    with open(filepath, 'wb') as f:
        f.write(glb_header)
        f.write(json_chunk_header)
        f.write(gltf_json_padded)
        f.write(bin_chunk_header)
        f.write(buffer_data)

    return filepath
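
The writer emits a standard GLB 2.0 container: a 12-byte header (magic, version, total length) followed by a length-prefixed JSON chunk and a BIN chunk. A sketch that re-reads a written file to verify the header, using a hypothetical output path:

    with open("output/mesh/ComfyUI_00001_.glb", "rb") as f:  # hypothetical path
        magic, version, total_length = struct.unpack('<4sII', f.read(12))
        assert magic == b'glTF' and version == 2
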
class EmptyLatentHunyuan3Dv2(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptyLatentHunyuan3Dv2_V3",
            category="latent/3d",
            inputs=[
                io.Int.Input("resolution", default=3072, min=1, max=8192),
                io.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch.")
            ],
            outputs=[
                io.Latent.Output()
            ]
        )

    @classmethod
    def execute(cls, resolution, batch_size):
        latent = torch.zeros([batch_size, 64, resolution], device=comfy.model_management.intermediate_device())
        return io.NodeOutput({"samples": latent, "type": "hunyuan3dv2"})


class Hunyuan3Dv2Conditioning(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="Hunyuan3Dv2Conditioning_V3",
            category="conditioning/video_models",
            inputs=[
                io.ClipVisionOutput.Input("clip_vision_output")
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative")
            ]
        )

    @classmethod
    def execute(cls, clip_vision_output):
        embeds = clip_vision_output.last_hidden_state
        positive = [[embeds, {}]]
        negative = [[torch.zeros_like(embeds), {}]]
        return io.NodeOutput(positive, negative)

class Hunyuan3Dv2ConditioningMultiView(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="Hunyuan3Dv2ConditioningMultiView_V3",
            category="conditioning/video_models",
            inputs=[
                io.ClipVisionOutput.Input("front", optional=True),
                io.ClipVisionOutput.Input("left", optional=True),
                io.ClipVisionOutput.Input("back", optional=True),
                io.ClipVisionOutput.Input("right", optional=True)
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative")
            ]
        )

    @classmethod
    def execute(cls, front=None, left=None, back=None, right=None):
        all_embeds = [front, left, back, right]
        out = []
        pos_embeds = None
        for i, e in enumerate(all_embeds):
            if e is not None:
                if pos_embeds is None:
                    pos_embeds = get_1d_sincos_pos_embed_from_grid_torch(e.last_hidden_state.shape[-1], torch.arange(4))
                out.append(e.last_hidden_state + pos_embeds[i].reshape(1, 1, -1))

        embeds = torch.cat(out, dim=1)
        positive = [[embeds, {}]]
        negative = [[torch.zeros_like(embeds), {}]]
        return io.NodeOutput(positive, negative)
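
The sincos table gives each camera slot (front/left/back/right) a fixed offset, so otherwise order-free CLIP-vision sequences stay distinguishable after concatenation. A shape sketch, with the embedding width as an assumed example value:

    pos_embeds = get_1d_sincos_pos_embed_from_grid_torch(1024, torch.arange(4))
    print(pos_embeds.shape)  # (4, 1024): one row per view slot, added to every token of that view
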
class SaveGLB(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SaveGLB_V3",
            category="3d",
            is_output_node=True,
            inputs=[
                io.Mesh.Input("mesh"),
                io.String.Input("filename_prefix", default="mesh/ComfyUI")
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo]
        )

    @classmethod
    def execute(cls, mesh, filename_prefix):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, folder_paths.get_output_directory())
        results = []

        metadata = {}
        if not args.disable_metadata:
            if cls.hidden.prompt is not None:
                metadata["prompt"] = json.dumps(cls.hidden.prompt)
            if cls.hidden.extra_pnginfo is not None:
                for x in cls.hidden.extra_pnginfo:
                    metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])

        for i in range(mesh.vertices.shape[0]):
            f = f"{filename}_{counter:05}_.glb"
            save_glb(mesh.vertices[i], mesh.faces[i], os.path.join(full_output_folder, f), metadata)
            results.append({
                "filename": f,
                "subfolder": subfolder,
                "type": "output"
            })
            counter += 1

        return io.NodeOutput(ui={"ui": {"3d": results}})  # TODO: do we need an additional type of preview for this?


class VAEDecodeHunyuan3D(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VAEDecodeHunyuan3D_V3",
            category="latent/3d",
            inputs=[
                io.Latent.Input("samples"),
                io.Vae.Input("vae"),
                io.Int.Input("num_chunks", default=8000, min=1000, max=500000),
                io.Int.Input("octree_resolution", default=256, min=16, max=512)
            ],
            outputs=[
                io.Voxel.Output()
            ]
        )

    @classmethod
    def execute(cls, vae, samples, num_chunks, octree_resolution):
        voxels = VOXEL(vae.decode(samples["samples"], vae_options={"num_chunks": num_chunks, "octree_resolution": octree_resolution}))
        return io.NodeOutput(voxels)


class VoxelToMesh(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VoxelToMesh_V3",
            category="3d",
            inputs=[
                io.Voxel.Input("voxel"),
                io.Combo.Input("algorithm", options=["surface net", "basic"]),
                io.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01)
            ],
            outputs=[
                io.Mesh.Output()
            ]
        )

    @classmethod
    def execute(cls, voxel, algorithm, threshold):
        vertices = []
        faces = []

        if algorithm == "basic":
            mesh_function = voxel_to_mesh
        elif algorithm == "surface net":
            mesh_function = voxel_to_mesh_surfnet

        for x in voxel.data:
            v, f = mesh_function(x, threshold=threshold, device=None)
            vertices.append(v)
            faces.append(f)

        return io.NodeOutput(MESH(torch.stack(vertices), torch.stack(faces)))


class VoxelToMeshBasic(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VoxelToMeshBasic_V3",
            category="3d",
            inputs=[
                io.Voxel.Input("voxel"),
                io.Float.Input("threshold", default=0.6, min=-1.0, max=1.0, step=0.01)
            ],
            outputs=[
                io.Mesh.Output()
            ]
        )

    @classmethod
    def execute(cls, voxel, threshold):
        vertices = []
        faces = []
        for x in voxel.data:
            v, f = voxel_to_mesh(x, threshold=threshold, device=None)
            vertices.append(v)
            faces.append(f)

        return io.NodeOutput(MESH(torch.stack(vertices), torch.stack(faces)))


NODES_LIST: list[type[io.ComfyNode]] = [
    EmptyLatentHunyuan3Dv2,
    Hunyuan3Dv2Conditioning,
    Hunyuan3Dv2ConditioningMultiView,
    SaveGLB,
    VAEDecodeHunyuan3D,
    VoxelToMesh,
    VoxelToMeshBasic,
]
comfy_extras/v3/nodes_hypernetwork.py (new file, 136 lines)
@@ -0,0 +1,136 @@
from __future__ import annotations

import logging

import torch

import comfy.utils
import folder_paths
from comfy_api.latest import io

def load_hypernetwork_patch(path, strength):
    sd = comfy.utils.load_torch_file(path, safe_load=True)
    activation_func = sd.get('activation_func', 'linear')
    is_layer_norm = sd.get('is_layer_norm', False)
    use_dropout = sd.get('use_dropout', False)
    activate_output = sd.get('activate_output', False)
    last_layer_dropout = sd.get('last_layer_dropout', False)

    valid_activation = {
        "linear": torch.nn.Identity,
        "relu": torch.nn.ReLU,
        "leakyrelu": torch.nn.LeakyReLU,
        "elu": torch.nn.ELU,
        "swish": torch.nn.Hardswish,
        "tanh": torch.nn.Tanh,
        "sigmoid": torch.nn.Sigmoid,
        "softsign": torch.nn.Softsign,
        "mish": torch.nn.Mish,
    }

    # Unknown activation functions cannot be reconstructed, so bail out early;
    # the caller (HypernetworkLoader.execute) checks for None before patching.
    if activation_func not in valid_activation:
        logging.error(
            "Unsupported Hypernetwork format, if you report it I might implement it. {} {} {} {} {} {}".format(
                path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout
            )
        )
        return None

    out = {}

    for d in sd:
        try:
            dim = int(d)
        except Exception:
            continue

        output = []
        for index in [0, 1]:
            attn_weights = sd[dim][index]
            keys = attn_weights.keys()

            linears = filter(lambda a: a.endswith(".weight"), keys)
            linears = list(map(lambda a: a[:-len(".weight")], linears))
            layers = []

            i = 0
            while i < len(linears):
                lin_name = linears[i]
                last_layer = (i == (len(linears) - 1))
                penultimate_layer = (i == (len(linears) - 2))

                lin_weight = attn_weights['{}.weight'.format(lin_name)]
                lin_bias = attn_weights['{}.bias'.format(lin_name)]
                layer = torch.nn.Linear(lin_weight.shape[1], lin_weight.shape[0])
                layer.load_state_dict({"weight": lin_weight, "bias": lin_bias})
                layers.append(layer)
                if activation_func != "linear":
                    if (not last_layer) or (activate_output):
                        layers.append(valid_activation[activation_func]())
                if is_layer_norm:
                    i += 1
                    ln_name = linears[i]
                    ln_weight = attn_weights['{}.weight'.format(ln_name)]
                    ln_bias = attn_weights['{}.bias'.format(ln_name)]
                    ln = torch.nn.LayerNorm(ln_weight.shape[0])
                    ln.load_state_dict({"weight": ln_weight, "bias": ln_bias})
                    layers.append(ln)
                if use_dropout:
                    if (not last_layer) and (not penultimate_layer or last_layer_dropout):
                        layers.append(torch.nn.Dropout(p=0.3))
                i += 1

            output.append(torch.nn.Sequential(*layers))
        out[dim] = torch.nn.ModuleList(output)

    class hypernetwork_patch:
        def __init__(self, hypernet, strength):
            self.hypernet = hypernet
            self.strength = strength

        def __call__(self, q, k, v, extra_options):
            dim = k.shape[-1]
            if dim in self.hypernet:
                hn = self.hypernet[dim]
                k = k + hn[0](k) * self.strength
                v = v + hn[1](v) * self.strength

            return q, k, v

        def to(self, device):
            for d in self.hypernet.keys():
                self.hypernet[d] = self.hypernet[d].to(device)
            return self

    return hypernetwork_patch(out, strength)
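
The returned patch applies a residual MLP to the attention keys and values, leaving queries untouched. A toy-shape sketch of that update (layer sizes here are illustrative; the real ones come from the loaded checkpoint):

    k = torch.randn(2, 77, 768)
    hn_k = torch.nn.Sequential(
        torch.nn.Linear(768, 1536),  # hypothetical hidden width
        torch.nn.ReLU(),
        torch.nn.Linear(1536, 768),
    )
    strength = 1.0
    k = k + hn_k(k) * strength  # same residual pattern the patch uses for k and v
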
class HypernetworkLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="HypernetworkLoader_V3",
            category="loaders",
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")),
                io.Float.Input("strength", default=1.0, min=-10.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, hypernetwork_name, strength):
        hypernetwork_path = folder_paths.get_full_path_or_raise("hypernetworks", hypernetwork_name)
        model_hypernetwork = model.clone()
        patch = load_hypernetwork_patch(hypernetwork_path, strength)
        if patch is not None:
            model_hypernetwork.set_model_attn1_patch(patch)
            model_hypernetwork.set_model_attn2_patch(patch)
        return io.NodeOutput(model_hypernetwork)


NODES_LIST: list[type[io.ComfyNode]] = [
    HypernetworkLoader,
]
comfy_extras/v3/nodes_hypertile.py (new file, 95 lines)
@@ -0,0 +1,95 @@
"""Taken from: https://github.com/tfernd/HyperTile/"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
|
||||
from einops import rearrange
|
||||
from torch import randint
|
||||
|
||||
from comfy_api.latest import io
|
||||
|
||||
|
||||
def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
    min_value = min(min_value, value)

    # All big divisors of value (inclusive)
    divisors = [i for i in range(min_value, value + 1) if value % i == 0]

    ns = [value // i for i in divisors[:max_options]]  # has at least 1 element

    if len(ns) - 1 > 0:
        idx = randint(low=0, high=len(ns) - 1, size=(1,)).item()
    else:
        idx = 0

    return ns[idx]
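
A worked example of the divisor search:

    value, min_value, max_options = 96, 32, 2
    divisors = [i for i in range(min_value, value + 1) if value % i == 0]  # [32, 48, 96]
    ns = [value // i for i in divisors[:max_options]]                      # [3, 2]
    # the tile count along this axis is then picked at random from ns
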
class HyperTile(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="HyperTile_V3",
            category="model_patches/unet",
            inputs=[
                io.Model.Input("model"),
                io.Int.Input("tile_size", default=256, min=1, max=2048),
                io.Int.Input("swap_size", default=2, min=1, max=128),
                io.Int.Input("max_depth", default=0, min=0, max=10),
                io.Boolean.Input("scale_depth", default=False),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, tile_size, swap_size, max_depth, scale_depth):
        latent_tile_size = max(32, tile_size) // 8
        temp = None

        def hypertile_in(q, k, v, extra_options):
            nonlocal temp
            model_chans = q.shape[-2]
            orig_shape = extra_options['original_shape']
            apply_to = []
            for i in range(max_depth + 1):
                apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i)))

            if model_chans in apply_to:
                shape = extra_options["original_shape"]
                aspect_ratio = shape[-1] / shape[-2]

                hw = q.size(1)
                h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))

                factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1
                nh = random_divisor(h, latent_tile_size * factor, swap_size)
                nw = random_divisor(w, latent_tile_size * factor, swap_size)

                if nh * nw > 1:
                    q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
                    temp = (nh, nw, h, w)
                return q, k, v

            return q, k, v

        def hypertile_out(out, extra_options):
            nonlocal temp
            if temp is not None:
                nh, nw, h, w = temp
                temp = None
                out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
                out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
            return out

        m = model.clone()
        m.set_model_attn1_patch(hypertile_in)
        m.set_model_attn1_output_patch(hypertile_out)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    HyperTile,
]
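
The rearrange splits the flattened token grid into nh x nw tiles that attend independently, which is where the speedup comes from. A standalone shape check:

    from einops import rearrange
    import torch

    q = torch.randn(1, 64 * 64, 8)  # a 64x64 latent token grid, 8 channels
    tiled = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=32, w=32, nh=2, nw=2)
    print(tiled.shape)  # torch.Size([4, 1024, 8]): 4 tiles of 1024 tokens each
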
comfy_extras/v3/nodes_images.py (new file, 727 lines)
@@ -0,0 +1,727 @@
import hashlib
import os

import numpy as np
import torch
from PIL import Image, ImageOps, ImageSequence

import comfy.utils
import folder_paths
import node_helpers
import nodes
from comfy_api.latest import io, ui
from server import PromptServer

class GetImageSize(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="GetImageSize_V3",
            display_name="Get Image Size _V3",
            description="Returns width and height of the image, and passes it through unchanged.",
            category="image",
            inputs=[
                io.Image.Input("image"),
            ],
            outputs=[
                io.Int.Output(display_name="width"),
                io.Int.Output(display_name="height"),
                io.Int.Output(display_name="batch_size"),
            ],
            hidden=[io.Hidden.unique_id],
        )

    @classmethod
    def execute(cls, image) -> io.NodeOutput:
        height = image.shape[1]
        width = image.shape[2]
        batch_size = image.shape[0]

        if cls.hidden.unique_id:
            PromptServer.instance.send_progress_text(
                f"width: {width}, height: {height}\n batch size: {batch_size}", cls.hidden.unique_id
            )

        return io.NodeOutput(width, height, batch_size)
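
The index order follows ComfyUI's IMAGE tensor layout, [batch, height, width, channels]:

    image = torch.zeros(2, 480, 640, 3)
    print(image.shape[2], image.shape[1], image.shape[0])  # 640 480 2 -> width, height, batch_size
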
class ImageAddNoise(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageAddNoise_V3",
            display_name="Image Add Noise _V3",
            category="image",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=0xFFFFFFFFFFFFFFFF,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
                io.Float.Input("strength", default=0.5, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, seed, strength) -> io.NodeOutput:
        generator = torch.manual_seed(seed)
        s = torch.clip(
            (image + strength * torch.randn(image.size(), generator=generator, device="cpu").to(image)),
            min=0.0,
            max=1.0,
        )
        return io.NodeOutput(s)


class ImageCrop(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageCrop_V3",
            display_name="Image Crop _V3",
            category="image/transform",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
                io.Int.Input("height", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
                io.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
                io.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, width, height, x, y) -> io.NodeOutput:
        x = min(x, image.shape[2] - 1)
        y = min(y, image.shape[1] - 1)
        to_x = width + x
        to_y = height + y
        return io.NodeOutput(image[:, y:to_y, x:to_x, :])


class ImageFlip(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageFlip_V3",
            display_name="Image Flip _V3",
            category="image/transform",
            inputs=[
                io.Image.Input("image"),
                io.Combo.Input("flip_method", options=["x-axis: vertically", "y-axis: horizontally"]),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, flip_method) -> io.NodeOutput:
        if flip_method.startswith("x"):
            image = torch.flip(image, dims=[1])
        elif flip_method.startswith("y"):
            image = torch.flip(image, dims=[2])

        return io.NodeOutput(image)


class ImageFromBatch(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageFromBatch_V3",
            display_name="Image From Batch _V3",
            category="image/batch",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("batch_index", default=0, min=0, max=4095),
                io.Int.Input("length", default=1, min=1, max=4096),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, batch_index, length) -> io.NodeOutput:
        s_in = image
        batch_index = min(s_in.shape[0] - 1, batch_index)
        length = min(s_in.shape[0] - batch_index, length)
        s = s_in[batch_index : batch_index + length].clone()
        return io.NodeOutput(s)


class ImageRotate(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageRotate_V3",
            display_name="Image Rotate _V3",
            category="image/transform",
            inputs=[
                io.Image.Input("image"),
                io.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, rotation) -> io.NodeOutput:
        rotate_by = 0
        if rotation.startswith("90"):
            rotate_by = 1
        elif rotation.startswith("180"):
            rotate_by = 2
        elif rotation.startswith("270"):
            rotate_by = 3

        return io.NodeOutput(torch.rot90(image, k=rotate_by, dims=[2, 1]))

class ImageStitch(io.ComfyNode):
    """Upstreamed from https://github.com/kijai/ComfyUI-KJNodes"""

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageStitch_V3",
            display_name="Image Stitch _V3",
            description="Stitches image2 to image1 in the specified direction. "
            "If image2 is not provided, returns image1 unchanged. "
            "Optional spacing can be added between images.",
            category="image/transform",
            inputs=[
                io.Image.Input("image1"),
                io.Combo.Input("direction", options=["right", "down", "left", "up"], default="right"),
                io.Boolean.Input("match_image_size", default=True),
                io.Int.Input("spacing_width", default=0, min=0, max=1024, step=2),
                io.Combo.Input("spacing_color", options=["white", "black", "red", "green", "blue"], default="white"),
                io.Image.Input("image2", optional=True),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image1, direction, match_image_size, spacing_width, spacing_color, image2=None) -> io.NodeOutput:
        if image2 is None:
            return io.NodeOutput(image1)

        # Handle batch size differences
        if image1.shape[0] != image2.shape[0]:
            max_batch = max(image1.shape[0], image2.shape[0])
            if image1.shape[0] < max_batch:
                image1 = torch.cat([image1, image1[-1:].repeat(max_batch - image1.shape[0], 1, 1, 1)])
            if image2.shape[0] < max_batch:
                image2 = torch.cat([image2, image2[-1:].repeat(max_batch - image2.shape[0], 1, 1, 1)])

        # Match image sizes if requested
        if match_image_size:
            h1, w1 = image1.shape[1:3]
            h2, w2 = image2.shape[1:3]
            aspect_ratio = w2 / h2

            if direction in ["left", "right"]:
                target_h, target_w = h1, int(h1 * aspect_ratio)
            else:  # up, down
                target_w, target_h = w1, int(w1 / aspect_ratio)

            image2 = comfy.utils.common_upscale(
                image2.movedim(-1, 1), target_w, target_h, "lanczos", "disabled"
            ).movedim(1, -1)

        color_map = {
            "white": 1.0,
            "black": 0.0,
            "red": (1.0, 0.0, 0.0),
            "green": (0.0, 1.0, 0.0),
            "blue": (0.0, 0.0, 1.0),
        }

        color_val = color_map[spacing_color]

        # When not matching sizes, pad to align non-concat dimensions
        if not match_image_size:
            h1, w1 = image1.shape[1:3]
            h2, w2 = image2.shape[1:3]
            pad_value = 0.0
            if not isinstance(color_val, tuple):
                pad_value = color_val

            if direction in ["left", "right"]:
                # For horizontal concat, pad heights to match
                if h1 != h2:
                    target_h = max(h1, h2)
                    if h1 < target_h:
                        pad_h = target_h - h1
                        pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2
                        image1 = torch.nn.functional.pad(
                            image1, (0, 0, 0, 0, pad_top, pad_bottom), mode="constant", value=pad_value
                        )
                    if h2 < target_h:
                        pad_h = target_h - h2
                        pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2
                        image2 = torch.nn.functional.pad(
                            image2, (0, 0, 0, 0, pad_top, pad_bottom), mode="constant", value=pad_value
                        )
            else:  # up, down
                # For vertical concat, pad widths to match
                if w1 != w2:
                    target_w = max(w1, w2)
                    if w1 < target_w:
                        pad_w = target_w - w1
                        pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2
                        image1 = torch.nn.functional.pad(
                            image1, (0, 0, pad_left, pad_right), mode="constant", value=pad_value
                        )
                    if w2 < target_w:
                        pad_w = target_w - w2
                        pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2
                        image2 = torch.nn.functional.pad(
                            image2, (0, 0, pad_left, pad_right), mode="constant", value=pad_value
                        )

        # Ensure same number of channels
        if image1.shape[-1] != image2.shape[-1]:
            max_channels = max(image1.shape[-1], image2.shape[-1])
            if image1.shape[-1] < max_channels:
                image1 = torch.cat(
                    [
                        image1,
                        torch.ones(
                            *image1.shape[:-1],
                            max_channels - image1.shape[-1],
                            device=image1.device,
                        ),
                    ],
                    dim=-1,
                )
            if image2.shape[-1] < max_channels:
                image2 = torch.cat(
                    [
                        image2,
                        torch.ones(
                            *image2.shape[:-1],
                            max_channels - image2.shape[-1],
                            device=image2.device,
                        ),
                    ],
                    dim=-1,
                )

        # Add spacing if specified
        if spacing_width > 0:
            spacing_width = spacing_width + (spacing_width % 2)  # Ensure even

            if direction in ["left", "right"]:
                spacing_shape = (
                    image1.shape[0],
                    max(image1.shape[1], image2.shape[1]),
                    spacing_width,
                    image1.shape[-1],
                )
            else:
                spacing_shape = (
                    image1.shape[0],
                    spacing_width,
                    max(image1.shape[2], image2.shape[2]),
                    image1.shape[-1],
                )

            spacing = torch.full(spacing_shape, 0.0, device=image1.device)
            if isinstance(color_val, tuple):
                for i, c in enumerate(color_val):
                    if i < spacing.shape[-1]:
                        spacing[..., i] = c
                if spacing.shape[-1] == 4:  # Add alpha
                    spacing[..., 3] = 1.0
            else:
                spacing[..., : min(3, spacing.shape[-1])] = color_val
                if spacing.shape[-1] == 4:
                    spacing[..., 3] = 1.0

        # Concatenate images
        images = [image2, image1] if direction in ["left", "up"] else [image1, image2]
        if spacing_width > 0:
            images.insert(1, spacing)

        concat_dim = 2 if direction in ["left", "right"] else 1
        return io.NodeOutput(torch.cat(images, dim=concat_dim))
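
With match_image_size enabled, image2 is rescaled to share the concat edge while keeping its own aspect ratio. Worked numbers for direction="right":

    h1 = 512                      # image1's height fixes the shared edge
    h2, w2 = 256, 384             # image2, aspect ratio 1.5
    aspect_ratio = w2 / h2
    target_h, target_w = h1, int(h1 * aspect_ratio)  # 512, 768 before the horizontal concat
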
class LoadImage(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoadImage_V3",
            display_name="Load Image _V3",
            category="image",
            inputs=[
                io.Combo.Input(
                    "image",
                    upload=io.UploadType.image,
                    image_folder=io.FolderType.input,
                    options=cls.get_files_options(),
                ),
            ],
            outputs=[
                io.Image.Output(),
                io.Mask.Output(),
            ],
        )

    @classmethod
    def get_files_options(cls) -> list[str]:
        target_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(target_dir) if os.path.isfile(os.path.join(target_dir, f))]
        return sorted(folder_paths.filter_files_content_types(files, ["image"]))

    @classmethod
    def execute(cls, image) -> io.NodeOutput:
        img = node_helpers.pillow(Image.open, folder_paths.get_annotated_filepath(image))

        output_images = []
        output_masks = []
        w, h = None, None

        excluded_formats = ["MPO"]

        for i in ImageSequence.Iterator(img):
            i = node_helpers.pillow(ImageOps.exif_transpose, i)

            if i.mode == "I":
                i = i.point(lambda i: i * (1 / 255))
            image = i.convert("RGB")

            if len(output_images) == 0:
                w = image.size[0]
                h = image.size[1]

            if image.size[0] != w or image.size[1] != h:
                continue

            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            if "A" in i.getbands():
                mask = np.array(i.getchannel("A")).astype(np.float32) / 255.0
                mask = 1.0 - torch.from_numpy(mask)
            elif i.mode == "P" and "transparency" in i.info:
                mask = np.array(i.convert("RGBA").getchannel("A")).astype(np.float32) / 255.0
                mask = 1.0 - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
            output_images.append(image)
            output_masks.append(mask.unsqueeze(0))

        if len(output_images) > 1 and img.format not in excluded_formats:
            output_image = torch.cat(output_images, dim=0)
            output_mask = torch.cat(output_masks, dim=0)
        else:
            output_image = output_images[0]
            output_mask = output_masks[0]

        return io.NodeOutput(output_image, output_mask)

    @classmethod
    def fingerprint_inputs(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, "rb") as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def validate_inputs(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)
        return True
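
Note the alpha inversion in the mask branch: opaque pixels become 0 and transparent pixels become 1 in the MASK output, matching the convention that 1 marks the region to be re-noised or inpainted:

    alpha = torch.tensor([1.0, 0.0])  # opaque, transparent
    mask = 1.0 - alpha                # tensor([0., 1.])
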
class LoadImageOutput(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoadImageOutput_V3",
            display_name="Load Image (from Outputs) _V3",
            description="Load an image from the output folder. "
            "When the refresh button is clicked, the node will update the image list "
            "and automatically select the first image, allowing for easy iteration.",
            category="image",
            inputs=[
                io.Combo.Input(
                    "image",
                    upload=io.UploadType.image,
                    image_folder=io.FolderType.output,
                    remote=io.RemoteOptions(
                        route="/internal/files/output",
                        refresh_button=True,
                        control_after_refresh="first",
                    ),
                ),
            ],
            outputs=[
                io.Image.Output(),
                io.Mask.Output(),
            ],
        )

    @classmethod
    def execute(cls, image) -> io.NodeOutput:
        img = node_helpers.pillow(Image.open, folder_paths.get_annotated_filepath(image))

        output_images = []
        output_masks = []
        w, h = None, None

        excluded_formats = ["MPO"]

        for i in ImageSequence.Iterator(img):
            i = node_helpers.pillow(ImageOps.exif_transpose, i)

            if i.mode == "I":
                i = i.point(lambda i: i * (1 / 255))
            image = i.convert("RGB")

            if len(output_images) == 0:
                w = image.size[0]
                h = image.size[1]

            if image.size[0] != w or image.size[1] != h:
                continue

            image = np.array(image).astype(np.float32) / 255.0
            image = torch.from_numpy(image)[None,]
            if "A" in i.getbands():
                mask = np.array(i.getchannel("A")).astype(np.float32) / 255.0
                mask = 1.0 - torch.from_numpy(mask)
            elif i.mode == "P" and "transparency" in i.info:
                mask = np.array(i.convert("RGBA").getchannel("A")).astype(np.float32) / 255.0
                mask = 1.0 - torch.from_numpy(mask)
            else:
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
            output_images.append(image)
            output_masks.append(mask.unsqueeze(0))

        if len(output_images) > 1 and img.format not in excluded_formats:
            output_image = torch.cat(output_images, dim=0)
            output_mask = torch.cat(output_masks, dim=0)
        else:
            output_image = output_images[0]
            output_mask = output_masks[0]

        return io.NodeOutput(output_image, output_mask)

    @classmethod
    def fingerprint_inputs(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        m = hashlib.sha256()
        with open(image_path, "rb") as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def validate_inputs(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)
        return True


class PreviewImage(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PreviewImage_V3",
            display_name="Preview Image _V3",
            description="Preview the input images.",
            category="image",
            inputs=[
                io.Image.Input("images", tooltip="The images to preview."),
            ],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, images) -> io.NodeOutput:
        return io.NodeOutput(ui=ui.PreviewImage(images, cls=cls))


class RepeatImageBatch(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RepeatImageBatch_V3",
            display_name="Repeat Image Batch _V3",
            category="image/batch",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("amount", default=1, min=1, max=4096),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, amount) -> io.NodeOutput:
        return io.NodeOutput(image.repeat((amount, 1, 1, 1)))


class ResizeAndPadImage(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ResizeAndPadImage_V3",
            display_name="Resize and Pad Image _V3",
            category="image/transform",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("target_width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
                io.Int.Input("target_height", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
                io.Combo.Input("padding_color", options=["white", "black"]),
                io.Combo.Input("interpolation", options=["area", "bicubic", "nearest-exact", "bilinear", "lanczos"]),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, target_width, target_height, padding_color, interpolation) -> io.NodeOutput:
        batch_size, orig_height, orig_width, channels = image.shape

        scale_w = target_width / orig_width
        scale_h = target_height / orig_height
        scale = min(scale_w, scale_h)

        new_width = int(orig_width * scale)
        new_height = int(orig_height * scale)

        image_permuted = image.permute(0, 3, 1, 2)

        resized = comfy.utils.common_upscale(image_permuted, new_width, new_height, interpolation, "disabled")

        pad_value = 0.0 if padding_color == "black" else 1.0
        padded = torch.full(
            (batch_size, channels, target_height, target_width), pad_value, dtype=image.dtype, device=image.device
        )

        y_offset = (target_height - new_height) // 2
        x_offset = (target_width - new_width) // 2

        padded[:, :, y_offset : y_offset + new_height, x_offset : x_offset + new_width] = resized

        return io.NodeOutput(padded.permute(0, 2, 3, 1))
class SaveAnimatedPNG(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="SaveAnimatedPNG_V3",
|
||||
display_name="Save Animated PNG _V3",
|
||||
category="image/animation",
|
||||
inputs=[
|
||||
io.Image.Input("images"),
|
||||
io.String.Input("filename_prefix", default="ComfyUI"),
|
||||
io.Float.Input("fps", default=6.0, min=0.01, max=1000.0, step=0.01),
|
||||
io.Int.Input("compress_level", default=4, min=0, max=9),
|
||||
],
|
||||
hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
|
||||
is_output_node=True,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, images, fps, compress_level, filename_prefix="ComfyUI") -> io.NodeOutput:
|
||||
return io.NodeOutput(
|
||||
ui=ui.ImageSaveHelper.get_save_animated_png_ui(
|
||||
images=images,
|
||||
filename_prefix=filename_prefix,
|
||||
cls=cls,
|
||||
fps=fps,
|
||||
compress_level=compress_level,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class SaveAnimatedWEBP(io.ComfyNode):
|
||||
COMPRESS_METHODS = {"default": 4, "fastest": 0, "slowest": 6}
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="SaveAnimatedWEBP_V3",
|
||||
display_name="Save Animated WEBP _V3",
|
||||
category="image/animation",
|
||||
inputs=[
|
||||
io.Image.Input("images"),
|
||||
io.String.Input("filename_prefix", default="ComfyUI"),
|
||||
io.Float.Input("fps", default=6.0, min=0.01, max=1000.0, step=0.01),
|
||||
io.Boolean.Input("lossless", default=True),
|
||||
io.Int.Input("quality", default=80, min=0, max=100),
|
||||
io.Combo.Input("method", options=list(cls.COMPRESS_METHODS.keys())),
|
||||
],
|
||||
hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
|
||||
is_output_node=True,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, images, fps, filename_prefix, lossless, quality, method) -> io.NodeOutput:
|
||||
return io.NodeOutput(
|
||||
ui=ui.ImageSaveHelper.get_save_animated_webp_ui(
|
||||
images=images,
|
||||
filename_prefix=filename_prefix,
|
||||
cls=cls,
|
||||
fps=fps,
|
||||
lossless=lossless,
|
||||
quality=quality,
|
||||
method=cls.COMPRESS_METHODS.get(method)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class SaveImage(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="SaveImage_V3",
|
||||
display_name="Save Image _V3",
|
||||
description="Saves the input images to your ComfyUI output directory.",
|
||||
category="image",
|
||||
inputs=[
|
||||
io.Image.Input(
|
||||
"images",
|
||||
tooltip="The images to save.",
|
||||
),
|
||||
io.String.Input(
|
||||
"filename_prefix",
|
||||
default="ComfyUI",
|
||||
tooltip="The prefix for the file to save. This may include formatting information "
|
||||
"such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes.",
|
||||
),
|
||||
],
|
||||
hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
|
||||
is_output_node=True,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, images, filename_prefix="ComfyUI") -> io.NodeOutput:
|
||||
return io.NodeOutput(
|
||||
ui=ui.ImageSaveHelper.get_save_images_ui(images, filename_prefix=filename_prefix, cls=cls, compress_level=4)
|
||||
)
|
||||
|
||||
|
||||
NODES_LIST: list[type[io.ComfyNode]] = [
|
||||
GetImageSize,
|
||||
ImageAddNoise,
|
||||
ImageCrop,
|
||||
ImageFlip,
|
||||
ImageFromBatch,
|
||||
ImageRotate,
|
||||
ImageStitch,
|
||||
LoadImage,
|
||||
LoadImageOutput,
|
||||
PreviewImage,
|
||||
RepeatImageBatch,
|
||||
ResizeAndPadImage,
|
||||
SaveAnimatedPNG,
|
||||
SaveAnimatedWEBP,
|
||||
SaveImage,
|
||||
]
|
||||
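A note on the MASK outputs built by the load nodes above: ComfyUI masks mark the region to be denoised, so the alpha channel is inverted (opaque pixels become 0.0, transparent pixels 1.0). A minimal standalone sketch of that convention, assuming only Pillow, NumPy and PyTorch:

import numpy as np
import torch
from PIL import Image

# Toy 2x2 RGBA image: top row fully opaque, bottom row fully transparent.
pixels = np.array(
    [[[255, 0, 0, 255], [0, 255, 0, 255]],
     [[0, 0, 255, 0], [255, 255, 255, 0]]],
    dtype=np.uint8,
)
rgba = Image.fromarray(pixels, mode="RGBA")

alpha = np.array(rgba.getchannel("A")).astype(np.float32) / 255.0
mask = 1.0 - torch.from_numpy(alpha)  # same inversion as the nodes above

print(mask)  # tensor([[0., 0.], [1., 1.]])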
56  comfy_extras/v3/nodes_ip2p.py  Normal file
@@ -0,0 +1,56 @@
from __future__ import annotations

import torch

from comfy_api.latest import io


class InstructPixToPixConditioning(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="InstructPixToPixConditioning_V3",
            category="conditioning/instructpix2pix",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Image.Input("pixels"),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, pixels, vae):
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8

        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]

        concat_latent = vae.encode(pixels)

        out_latent = {}
        out_latent["samples"] = torch.zeros_like(concat_latent)

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                d = t[1].copy()
                d["concat_latent_image"] = concat_latent
                n = [t[0], d]
                c.append(n)
            out.append(c)
        return io.NodeOutput(out[0], out[1], out_latent)


NODES_LIST: list[type[io.ComfyNode]] = [
    InstructPixToPixConditioning,
]
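The center-crop arithmetic in InstructPixToPixConditioning.execute trims height and width down to multiples of 8 (the VAE downscale factor) and splits the remainder evenly between the two edges. A quick numeric check of that arithmetic, assuming only PyTorch:

import torch

pixels = torch.rand(1, 517, 723, 3)  # batch, height, width, channels

x = (pixels.shape[1] // 8) * 8         # 512 usable rows
y = (pixels.shape[2] // 8) * 8         # 720 usable columns
x_offset = (pixels.shape[1] % 8) // 2  # 2: half of the 5 leftover rows
y_offset = (pixels.shape[2] % 8) // 2  # 1: half of the 3 leftover columns

cropped = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
print(cropped.shape)  # torch.Size([1, 512, 720, 3])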
340  comfy_extras/v3/nodes_latent.py  Normal file
@@ -0,0 +1,340 @@
from __future__ import annotations

import torch

import comfy.utils
import comfy_extras.nodes_post_processing
from comfy_api.latest import io


def reshape_latent_to(target_shape, latent, repeat_batch=True):
    if latent.shape[1:] != target_shape[1:]:
        latent = comfy.utils.common_upscale(
            latent, target_shape[-1], target_shape[-2], "bilinear", "center"
        )
    if repeat_batch:
        return comfy.utils.repeat_to_batch_size(latent, target_shape[0])
    return latent


class LatentAdd(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentAdd_V3",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples1, samples2):
        samples_out = samples1.copy()

        s1 = samples1["samples"]
        s2 = samples2["samples"]

        s2 = reshape_latent_to(s1.shape, s2)
        samples_out["samples"] = s1 + s2
        return io.NodeOutput(samples_out)


class LatentSubtract(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentSubtract_V3",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples1, samples2):
        samples_out = samples1.copy()

        s1 = samples1["samples"]
        s2 = samples2["samples"]

        s2 = reshape_latent_to(s1.shape, s2)
        samples_out["samples"] = s1 - s2
        return io.NodeOutput(samples_out)


class LatentMultiply(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentMultiply_V3",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples"),
                io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples, multiplier):
        samples_out = samples.copy()

        s1 = samples["samples"]
        samples_out["samples"] = s1 * multiplier
        return io.NodeOutput(samples_out)


class LatentInterpolate(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentInterpolate_V3",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
                io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples1, samples2, ratio):
        samples_out = samples1.copy()

        s1 = samples1["samples"]
        s2 = samples2["samples"]

        s2 = reshape_latent_to(s1.shape, s2)

        m1 = torch.linalg.vector_norm(s1, dim=(1))
        m2 = torch.linalg.vector_norm(s2, dim=(1))

        s1 = torch.nan_to_num(s1 / m1)
        s2 = torch.nan_to_num(s2 / m2)

        t = (s1 * ratio + s2 * (1.0 - ratio))
        mt = torch.linalg.vector_norm(t, dim=(1))
        st = torch.nan_to_num(t / mt)

        samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio))
        return io.NodeOutput(samples_out)


class LatentBatch(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentBatch_V3",
            category="latent/batch",
            inputs=[
                io.Latent.Input("samples1"),
                io.Latent.Input("samples2"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples1, samples2):
        samples_out = samples1.copy()
        s1 = samples1["samples"]
        s2 = samples2["samples"]

        s2 = reshape_latent_to(s1.shape, s2, repeat_batch=False)
        s = torch.cat((s1, s2), dim=0)
        samples_out["samples"] = s
        samples_out["batch_index"] = (samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) +
                                      samples2.get("batch_index", [x for x in range(0, s2.shape[0])]))
        return io.NodeOutput(samples_out)


class LatentBatchSeedBehavior(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentBatchSeedBehavior_V3",
            category="latent/advanced",
            inputs=[
                io.Latent.Input("samples"),
                io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples, seed_behavior):
        samples_out = samples.copy()
        latent = samples["samples"]
        if seed_behavior == "random":
            if "batch_index" in samples_out:
                samples_out.pop("batch_index")
        elif seed_behavior == "fixed":
            batch_number = samples_out.get("batch_index", [0])[0]
            samples_out["batch_index"] = [batch_number] * latent.shape[0]

        return io.NodeOutput(samples_out)


class LatentApplyOperation(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentApplyOperation_V3",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Latent.Input("samples"),
                io.LatentOperation.Input("operation"),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples, operation):
        samples_out = samples.copy()

        s1 = samples["samples"]
        samples_out["samples"] = operation(latent=s1)
        return io.NodeOutput(samples_out)


class LatentApplyOperationCFG(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentApplyOperationCFG_V3",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.LatentOperation.Input("operation"),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, operation):
        m = model.clone()

        def pre_cfg_function(args):
            conds_out = args["conds_out"]
            if len(conds_out) == 2:
                conds_out[0] = operation(latent=(conds_out[0] - conds_out[1])) + conds_out[1]
            else:
                conds_out[0] = operation(latent=conds_out[0])
            return conds_out

        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
        return io.NodeOutput(m)


class LatentOperationTonemapReinhard(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentOperationTonemapReinhard_V3",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[
                io.LatentOperation.Output(),
            ],
        )

    @classmethod
    def execute(cls, multiplier):
        def tonemap_reinhard(latent, **kwargs):
            latent_vector_magnitude = (torch.linalg.vector_norm(latent, dim=(1)) + 0.0000000001)[:, None]
            normalized_latent = latent / latent_vector_magnitude

            mean = torch.mean(latent_vector_magnitude, dim=(1, 2, 3), keepdim=True)
            std = torch.std(latent_vector_magnitude, dim=(1, 2, 3), keepdim=True)

            top = (std * 5 + mean) * multiplier

            # Reinhard tone mapping applied to the per-position latent magnitudes
            latent_vector_magnitude *= (1.0 / top)
            new_magnitude = latent_vector_magnitude / (latent_vector_magnitude + 1.0)
            new_magnitude *= top

            return normalized_latent * new_magnitude
        return io.NodeOutput(tonemap_reinhard)


class LatentOperationSharpen(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentOperationSharpen_V3",
            category="latent/advanced/operations",
            is_experimental=True,
            inputs=[
                io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1),
                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
                io.Float.Input("alpha", default=0.1, min=0.0, max=5.0, step=0.01),
            ],
            outputs=[
                io.LatentOperation.Output(),
            ],
        )

    @classmethod
    def execute(cls, sharpen_radius, sigma, alpha):
        def sharpen(latent, **kwargs):
            luminance = (torch.linalg.vector_norm(latent, dim=(1)) + 1e-6)[:, None]
            normalized_latent = latent / luminance
            channels = latent.shape[1]

            kernel_size = sharpen_radius * 2 + 1
            kernel = comfy_extras.nodes_post_processing.gaussian_kernel(kernel_size, sigma, device=luminance.device)
            center = kernel_size // 2

            kernel *= alpha * -10
            kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0

            padded_image = torch.nn.functional.pad(
                normalized_latent, (sharpen_radius, sharpen_radius, sharpen_radius, sharpen_radius), "reflect"
            )
            sharpened = torch.nn.functional.conv2d(
                padded_image, kernel.repeat(channels, 1, 1).unsqueeze(1), padding=kernel_size // 2, groups=channels
            )[:, :, sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius]

            return luminance * sharpened
        return io.NodeOutput(sharpen)


NODES_LIST: list[type[io.ComfyNode]] = [
    LatentAdd,
    LatentApplyOperation,
    LatentApplyOperationCFG,
    LatentBatch,
    LatentBatchSeedBehavior,
    LatentInterpolate,
    LatentMultiply,
    LatentOperationSharpen,
    LatentOperationTonemapReinhard,
    LatentSubtract,
]
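LatentInterpolate above is a norm-preserving blend rather than a plain lerp: channel vectors are normalized per spatial position, interpolated, renormalized, then rescaled by the interpolated magnitudes. A standalone sketch of the same math, assuming only PyTorch (keepdim=True is used here so the divisions broadcast for any batch size):

import torch

def norm_preserving_lerp(s1, s2, ratio):
    # Channel-vector magnitude per spatial position.
    m1 = torch.linalg.vector_norm(s1, dim=1, keepdim=True)
    m2 = torch.linalg.vector_norm(s2, dim=1, keepdim=True)
    d1 = torch.nan_to_num(s1 / m1)
    d2 = torch.nan_to_num(s2 / m2)
    t = d1 * ratio + d2 * (1.0 - ratio)
    t = torch.nan_to_num(t / torch.linalg.vector_norm(t, dim=1, keepdim=True))
    return t * (m1 * ratio + m2 * (1.0 - ratio))

a = torch.randn(2, 4, 8, 8)
b = torch.randn(2, 4, 8, 8)
blended = norm_preserving_lerp(a, b, 0.5)
naive = 0.5 * (a + b)
# Random latents are nearly orthogonal, so the naive lerp shrinks magnitudes
# by roughly 1/sqrt(2) while the norm-preserving blend keeps them.
print(blended.norm().item(), naive.norm().item(), a.norm().item())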
180  comfy_extras/v3/nodes_load_3d.py  Normal file
@@ -0,0 +1,180 @@
from __future__ import annotations

import os
from pathlib import Path

import folder_paths
import nodes
from comfy_api.input_impl import VideoFromFile
from comfy_api.latest import io, ui


def normalize_path(path):
    return path.replace("\\", "/")


class Load3D(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        input_dir = os.path.join(folder_paths.get_input_directory(), "3d")

        os.makedirs(input_dir, exist_ok=True)

        input_path = Path(input_dir)
        base_path = Path(folder_paths.get_input_directory())

        files = [
            normalize_path(str(file_path.relative_to(base_path)))
            for file_path in input_path.rglob("*")
            if file_path.suffix.lower() in {".gltf", ".glb", ".obj", ".fbx", ".stl"}
        ]

        return io.Schema(
            node_id="Load3D_V3",
            display_name="Load 3D _V3",
            category="3d",
            is_experimental=True,
            inputs=[
                io.Combo.Input("model_file", options=sorted(files), upload=io.UploadType.model),
                io.Load3D.Input("image"),
                io.Int.Input("width", default=1024, min=1, max=4096, step=1),
                io.Int.Input("height", default=1024, min=1, max=4096, step=1),
            ],
            outputs=[
                io.Image.Output(display_name="image"),
                io.Mask.Output(display_name="mask"),
                io.String.Output(display_name="mesh_path"),
                io.Image.Output(display_name="normal"),
                io.Image.Output(display_name="lineart"),
                io.Load3DCamera.Output(display_name="camera_info"),
                io.Video.Output(display_name="recording_video"),
            ],
        )

    @classmethod
    def execute(cls, model_file, image, **kwargs):
        image_path = folder_paths.get_annotated_filepath(image["image"])
        mask_path = folder_paths.get_annotated_filepath(image["mask"])
        normal_path = folder_paths.get_annotated_filepath(image["normal"])
        lineart_path = folder_paths.get_annotated_filepath(image["lineart"])

        load_image_node = nodes.LoadImage()
        output_image, ignore_mask = load_image_node.load_image(image=image_path)
        ignore_image, output_mask = load_image_node.load_image(image=mask_path)
        normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path)
        lineart_image, ignore_mask3 = load_image_node.load_image(image=lineart_path)

        video = None
        if image["recording"] != "":
            recording_video_path = folder_paths.get_annotated_filepath(image["recording"])
            video = VideoFromFile(recording_video_path)

        return io.NodeOutput(
            output_image, output_mask, model_file, normal_image, lineart_image, image["camera_info"], video
        )


class Load3DAnimation(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        input_dir = os.path.join(folder_paths.get_input_directory(), "3d")

        os.makedirs(input_dir, exist_ok=True)

        input_path = Path(input_dir)
        base_path = Path(folder_paths.get_input_directory())

        files = [
            normalize_path(str(file_path.relative_to(base_path)))
            for file_path in input_path.rglob("*")
            if file_path.suffix.lower() in {".gltf", ".glb", ".fbx"}
        ]

        return io.Schema(
            node_id="Load3DAnimation_V3",
            display_name="Load 3D - Animation _V3",
            category="3d",
            is_experimental=True,
            inputs=[
                io.Combo.Input("model_file", options=sorted(files), upload=io.UploadType.model),
                io.Load3DAnimation.Input("image"),
                io.Int.Input("width", default=1024, min=1, max=4096, step=1),
                io.Int.Input("height", default=1024, min=1, max=4096, step=1),
            ],
            outputs=[
                io.Image.Output(display_name="image"),
                io.Mask.Output(display_name="mask"),
                io.String.Output(display_name="mesh_path"),
                io.Image.Output(display_name="normal"),
                io.Load3DCamera.Output(display_name="camera_info"),
                io.Video.Output(display_name="recording_video"),
            ],
        )

    @classmethod
    def execute(cls, model_file, image, **kwargs):
        image_path = folder_paths.get_annotated_filepath(image["image"])
        mask_path = folder_paths.get_annotated_filepath(image["mask"])
        normal_path = folder_paths.get_annotated_filepath(image["normal"])

        load_image_node = nodes.LoadImage()
        output_image, ignore_mask = load_image_node.load_image(image=image_path)
        ignore_image, output_mask = load_image_node.load_image(image=mask_path)
        normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path)

        video = None
        if image["recording"] != "":
            recording_video_path = folder_paths.get_annotated_filepath(image["recording"])
            video = VideoFromFile(recording_video_path)

        return io.NodeOutput(output_image, output_mask, model_file, normal_image, image["camera_info"], video)


class Preview3D(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="Preview3D_V3",  # frontend expects "Preview3D" to work
            display_name="Preview 3D _V3",
            category="3d",
            is_experimental=True,
            is_output_node=True,
            inputs=[
                io.String.Input("model_file", default="", multiline=False),
                io.Load3DCamera.Input("camera_info", optional=True),
            ],
            outputs=[],
        )

    @classmethod
    def execute(cls, model_file, camera_info=None):
        return io.NodeOutput(ui=ui.PreviewUI3D(model_file, camera_info, cls=cls))


class Preview3DAnimation(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="Preview3DAnimation_V3",  # frontend expects "Preview3DAnimation" to work
            display_name="Preview 3D - Animation _V3",
            category="3d",
            is_experimental=True,
            is_output_node=True,
            inputs=[
                io.String.Input("model_file", default="", multiline=False),
                io.Load3DCamera.Input("camera_info", optional=True),
            ],
            outputs=[],
        )

    @classmethod
    def execute(cls, model_file, camera_info=None):
        return io.NodeOutput(ui=ui.PreviewUI3D(model_file, camera_info, cls=cls))


NODES_LIST: list[type[io.ComfyNode]] = [
    Load3D,
    Load3DAnimation,
    Preview3D,
    Preview3DAnimation,
]
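Both loaders above build their model_file dropdown by recursively scanning the input/3d directory for known mesh extensions and storing paths relative to the input root. A standalone sketch of that scan (the ./input directory here is a hypothetical stand-in for folder_paths.get_input_directory()):

from pathlib import Path

base_path = Path("input")        # hypothetical stand-in for the input root
input_path = base_path / "3d"
input_path.mkdir(parents=True, exist_ok=True)

mesh_suffixes = {".gltf", ".glb", ".obj", ".fbx", ".stl"}
files = sorted(
    str(p.relative_to(base_path)).replace("\\", "/")  # same trick as normalize_path()
    for p in input_path.rglob("*")
    if p.suffix.lower() in mesh_suffixes
)
print(files)  # e.g. ["3d/chair.glb", "3d/props/crate.obj"]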
138  comfy_extras/v3/nodes_lora_extract.py  Normal file
@@ -0,0 +1,138 @@
from __future__ import annotations

import logging
import os
from enum import Enum

import torch

import comfy.model_management
import comfy.utils
import folder_paths
from comfy_api.latest import io

CLAMP_QUANTILE = 0.99


def extract_lora(diff, rank):
    conv2d = (len(diff.shape) == 4)
    kernel_size = None if not conv2d else diff.size()[2:4]
    conv2d_3x3 = conv2d and kernel_size != (1, 1)
    out_dim, in_dim = diff.size()[0:2]
    rank = min(rank, in_dim, out_dim)

    if conv2d:
        if conv2d_3x3:
            diff = diff.flatten(start_dim=1)
        else:
            diff = diff.squeeze()

    U, S, Vh = torch.linalg.svd(diff.float())
    U = U[:, :rank]
    S = S[:rank]
    U = U @ torch.diag(S)
    Vh = Vh[:rank, :]

    dist = torch.cat([U.flatten(), Vh.flatten()])
    hi_val = torch.quantile(dist, CLAMP_QUANTILE)
    low_val = -hi_val

    U = U.clamp(low_val, hi_val)
    Vh = Vh.clamp(low_val, hi_val)
    if conv2d:
        U = U.reshape(out_dim, rank, 1, 1)
        Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1])
    return (U, Vh)


class LORAType(Enum):
    STANDARD = 0
    FULL_DIFF = 1


LORA_TYPES = {
    "standard": LORAType.STANDARD,
    "full_diff": LORAType.FULL_DIFF,
}


def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, bias_diff=False):
    comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True)
    sd = model_diff.model_state_dict(filter_prefix=prefix_model)

    for k in sd:
        if k.endswith(".weight"):
            weight_diff = sd[k]
            if lora_type == LORAType.STANDARD:
                if weight_diff.ndim < 2:
                    if bias_diff:
                        output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()
                    continue
                try:
                    out = extract_lora(weight_diff, rank)
                    output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu()
                    output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu()
                except Exception:
                    logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k))
            elif lora_type == LORAType.FULL_DIFF:
                output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu()

        elif bias_diff and k.endswith(".bias"):
            output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu()
    return output_sd


class LoraSave(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoraSave_V3",
            display_name="Extract and Save Lora _V3",
            category="_for_testing",
            is_output_node=True,
            inputs=[
                io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
                io.Int.Input("rank", default=8, min=1, max=4096, step=1),
                io.Combo.Input("lora_type", options=list(LORA_TYPES.keys())),
                io.Boolean.Input("bias_diff", default=True),
                io.Model.Input(
                    id="model_diff", optional=True, tooltip="The ModelSubtract output to be converted to a lora."
                ),
                io.Clip.Input(
                    id="text_encoder_diff", optional=True, tooltip="The CLIPSubtract output to be converted to a lora."
                ),
            ],
            outputs=[],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, filename_prefix, rank, lora_type, bias_diff, model_diff=None, text_encoder_diff=None):
        if model_diff is None and text_encoder_diff is None:
            return io.NodeOutput()

        lora_type = LORA_TYPES.get(lora_type)
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
            filename_prefix, folder_paths.get_output_directory()
        )

        output_sd = {}
        if model_diff is not None:
            output_sd = calc_lora_model(
                model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, bias_diff=bias_diff
            )
        if text_encoder_diff is not None:
            output_sd = calc_lora_model(
                text_encoder_diff.patcher, rank, "", "text_encoders.", output_sd, lora_type, bias_diff=bias_diff
            )

        output_checkpoint = f"{filename}_{counter:05}_.safetensors"
        output_checkpoint = os.path.join(full_output_folder, output_checkpoint)

        comfy.utils.save_torch_file(output_sd, output_checkpoint, metadata=None)
        return io.NodeOutput()


NODES_LIST: list[type[io.ComfyNode]] = [
    LoraSave,
]
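extract_lora above is a truncated SVD: keep the top `rank` singular triplets, fold the singular values into U to get lora_up, and take the leading rows of Vh as lora_down (the quantile clamp is omitted below for brevity). A toy round-trip showing how well a genuinely low-rank weight difference is recovered, assuming only PyTorch:

import torch

torch.manual_seed(0)
rank = 4

# A weight difference that is genuinely low-rank plus a little noise.
diff = torch.randn(64, rank) @ torch.randn(rank, 32) + 0.01 * torch.randn(64, 32)

U, S, Vh = torch.linalg.svd(diff.float())
lora_up = U[:, :rank] @ torch.diag(S[:rank])  # out_dim x rank
lora_down = Vh[:rank, :]                      # rank x in_dim

err = (lora_up @ lora_down - diff).norm() / diff.norm()
print(f"relative reconstruction error: {err:.4f}")  # close to the noise floor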
34  comfy_extras/v3/nodes_lotus.py  Normal file
File diff suppressed because one or more lines are too long
528  comfy_extras/v3/nodes_lt.py  Normal file
@@ -0,0 +1,528 @@
from __future__ import annotations

import math
import sys

import av
import numpy as np
import torch

import comfy.model_management
import comfy.model_sampling
import comfy.utils
import node_helpers
import nodes
from comfy.ldm.lightricks.symmetric_patchifier import (
    SymmetricPatchifier,
    latent_to_pixel_coords,
)
from comfy_api.latest import io


def conditioning_get_any_value(conditioning, key, default=None):
    for t in conditioning:
        if key in t[1]:
            return t[1][key]
    return default


def get_noise_mask(latent):
    noise_mask = latent.get("noise_mask", None)
    latent_image = latent["samples"]
    if noise_mask is None:
        batch_size, _, latent_length, _, _ = latent_image.shape
        return torch.ones(
            (batch_size, 1, latent_length, 1, 1),
            dtype=torch.float32,
            device=latent_image.device,
        )
    return noise_mask.clone()


def get_keyframe_idxs(cond):
    keyframe_idxs = conditioning_get_any_value(cond, "keyframe_idxs", None)
    if keyframe_idxs is None:
        return None, 0
    return keyframe_idxs, torch.unique(keyframe_idxs[:, 0]).shape[0]


def encode_single_frame(output_file, image_array: np.ndarray, crf):
    container = av.open(output_file, "w", format="mp4")
    try:
        stream = container.add_stream(
            "libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"}
        )
        stream.height = image_array.shape[0]
        stream.width = image_array.shape[1]
        av_frame = av.VideoFrame.from_ndarray(image_array, format="rgb24").reformat(
            format="yuv420p"
        )
        container.mux(stream.encode(av_frame))
        container.mux(stream.encode())
    finally:
        container.close()


def decode_single_frame(video_file):
    container = av.open(video_file)
    try:
        stream = next(s for s in container.streams if s.type == "video")
        frame = next(container.decode(stream))
    finally:
        container.close()
    return frame.to_ndarray(format="rgb24")


def preprocess(image: torch.Tensor, crf=29):
    if crf == 0:
        return image

    image_array = (image[:(image.shape[0] // 2) * 2, :(image.shape[1] // 2) * 2] * 255.0).byte().cpu().numpy()
    # The local name "io" refers to comfy_api.latest.io here, so the standard
    # library io module is reached through sys.modules instead.
    with sys.modules["io"].BytesIO() as output_file:
        encode_single_frame(output_file, image_array, crf)
        video_bytes = output_file.getvalue()
    with sys.modules["io"].BytesIO(video_bytes) as video_file:
        image_array = decode_single_frame(video_file)
    return torch.tensor(image_array, dtype=image.dtype, device=image.device) / 255.0


class EmptyLTXVLatentVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptyLTXVLatentVideo_V3",
            category="latent/video/ltxv",
            inputs=[
                io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
                io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
                io.Int.Input("length", default=97, min=1, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, width, height, length, batch_size):
        latent = torch.zeros(
            [batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32],
            device=comfy.model_management.intermediate_device(),
        )
        return io.NodeOutput({"samples": latent})


class LTXVAddGuide(io.ComfyNode):
    NUM_PREFIX_FRAMES = 2
    PATCHIFIER = SymmetricPatchifier(1)

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LTXVAddGuide_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Latent.Input("latent"),
                io.Image.Input(
                    "image",
                    tooltip="Image or video to condition the latent video on. Must be 8*n + 1 frames. "
                            "If the video is not 8*n + 1 frames, it will be cropped to the nearest 8*n + 1 frames.",
                ),
                io.Int.Input(
                    "frame_idx",
                    default=0,
                    min=-9999,
                    max=9999,
                    tooltip="Frame index to start the conditioning at. "
                            "For single-frame images or videos with 1-8 frames, any frame_idx value is acceptable. "
                            "For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded "
                            "down to the nearest multiple of 8. Negative values are counted from the end of the video.",
                ),
                io.Float.Input("strength", default=1.0, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, latent, image, frame_idx, strength):
        scale_factors = vae.downscale_index_formula
        latent_image = latent["samples"]
        noise_mask = get_noise_mask(latent)

        _, _, latent_length, latent_height, latent_width = latent_image.shape
        image, t = cls._encode(vae, latent_width, latent_height, image, scale_factors)

        frame_idx, latent_idx = cls._get_latent_index(positive, latent_length, len(image), frame_idx, scale_factors)
        assert latent_idx + t.shape[2] <= latent_length, "Conditioning frames exceed the length of the latent sequence."

        num_prefix_frames = min(cls.NUM_PREFIX_FRAMES, t.shape[2])

        positive, negative, latent_image, noise_mask = cls._append_keyframe(
            positive,
            negative,
            frame_idx,
            latent_image,
            noise_mask,
            t[:, :, :num_prefix_frames],
            strength,
            scale_factors,
        )

        latent_idx += num_prefix_frames

        t = t[:, :, num_prefix_frames:]
        if t.shape[2] == 0:
            return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask})

        latent_image, noise_mask = cls._replace_latent_frames(
            latent_image,
            noise_mask,
            t,
            latent_idx,
            strength,
        )

        return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask})

    @classmethod
    def _encode(cls, vae, latent_width, latent_height, images, scale_factors):
        time_scale_factor, width_scale_factor, height_scale_factor = scale_factors
        images = images[:(images.shape[0] - 1) // time_scale_factor * time_scale_factor + 1]
        pixels = comfy.utils.common_upscale(
            images.movedim(-1, 1),
            latent_width * width_scale_factor,
            latent_height * height_scale_factor,
            "bilinear",
            crop="disabled",
        ).movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        t = vae.encode(encode_pixels)
        return encode_pixels, t

    @classmethod
    def _get_latent_index(cls, cond, latent_length, guide_length, frame_idx, scale_factors):
        time_scale_factor, _, _ = scale_factors
        _, num_keyframes = get_keyframe_idxs(cond)
        latent_count = latent_length - num_keyframes
        frame_idx = frame_idx if frame_idx >= 0 else max((latent_count - 1) * time_scale_factor + 1 + frame_idx, 0)
        if guide_length > 1 and frame_idx != 0:
            frame_idx = (frame_idx - 1) // time_scale_factor * time_scale_factor + 1
        return frame_idx, (frame_idx + time_scale_factor - 1) // time_scale_factor

    @classmethod
    def _add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors):
        keyframe_idxs, _ = get_keyframe_idxs(cond)
        _, latent_coords = cls.PATCHIFIER.patchify(guiding_latent)
        pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0)
        pixel_coords[:, 0] += frame_idx
        if keyframe_idxs is None:
            keyframe_idxs = pixel_coords
        else:
            keyframe_idxs = torch.cat([keyframe_idxs, pixel_coords], dim=2)
        return node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs})

    @classmethod
    def _append_keyframe(
        cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors
    ):
        _, latent_idx = cls._get_latent_index(
            cond=positive,
            latent_length=latent_image.shape[2],
            guide_length=guiding_latent.shape[2],
            frame_idx=frame_idx,
            scale_factors=scale_factors,
        )
        noise_mask[:, :, latent_idx:latent_idx + guiding_latent.shape[2]] = 1.0

        positive = cls._add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors)
        negative = cls._add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors)

        mask = torch.full(
            (noise_mask.shape[0], 1, guiding_latent.shape[2], 1, 1),
            1.0 - strength,
            dtype=noise_mask.dtype,
            device=noise_mask.device,
        )

        latent_image = torch.cat([latent_image, guiding_latent], dim=2)
        return positive, negative, latent_image, torch.cat([noise_mask, mask], dim=2)

    @classmethod
    def _replace_latent_frames(cls, latent_image, noise_mask, guiding_latent, latent_idx, strength):
        cond_length = guiding_latent.shape[2]
        assert latent_image.shape[2] >= latent_idx + cond_length, "Conditioning frames exceed the length of the latent sequence."

        mask = torch.full(
            (noise_mask.shape[0], 1, cond_length, 1, 1),
            1.0 - strength,
            dtype=noise_mask.dtype,
            device=noise_mask.device,
        )

        latent_image = latent_image.clone()
        noise_mask = noise_mask.clone()

        latent_image[:, :, latent_idx : latent_idx + cond_length] = guiding_latent
        noise_mask[:, :, latent_idx : latent_idx + cond_length] = mask

        return latent_image, noise_mask


class LTXVConditioning(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LTXVConditioning_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Float.Input("frame_rate", default=25.0, min=0.0, max=1000.0, step=0.01),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, frame_rate):
        positive = node_helpers.conditioning_set_values(positive, {"frame_rate": frame_rate})
        negative = node_helpers.conditioning_set_values(negative, {"frame_rate": frame_rate})
        return io.NodeOutput(positive, negative)


class LTXVCropGuides(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LTXVCropGuides_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Latent.Input("latent"),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, latent):
        latent_image = latent["samples"].clone()
        noise_mask = get_noise_mask(latent)

        _, num_keyframes = get_keyframe_idxs(positive)
        if num_keyframes == 0:
            return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask})

        latent_image = latent_image[:, :, :-num_keyframes]
        noise_mask = noise_mask[:, :, :-num_keyframes]

        positive = node_helpers.conditioning_set_values(positive, {"keyframe_idxs": None})
        negative = node_helpers.conditioning_set_values(negative, {"keyframe_idxs": None})

        return io.NodeOutput(positive, negative, {"samples": latent_image, "noise_mask": noise_mask})


class LTXVImgToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LTXVImgToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Image.Input("image"),
                io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
                io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
                io.Int.Input("length", default=97, min=9, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Float.Input("strength", default=1.0, min=0.0, max=1.0),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, image, vae, width, height, length, batch_size, strength):
        pixels = comfy.utils.common_upscale(
            image.movedim(-1, 1), width, height, "bilinear", "center"
        ).movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        t = vae.encode(encode_pixels)

        latent = torch.zeros(
            [batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32],
            device=comfy.model_management.intermediate_device(),
        )
        latent[:, :, :t.shape[2]] = t

        conditioning_latent_frames_mask = torch.ones(
            (batch_size, 1, latent.shape[2], 1, 1),
            dtype=torch.float32,
            device=latent.device,
        )
        conditioning_latent_frames_mask[:, :, :t.shape[2]] = 1.0 - strength

        return io.NodeOutput(positive, negative, {"samples": latent, "noise_mask": conditioning_latent_frames_mask})


class LTXVPreprocess(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LTXVPreprocess_V3",
            category="image",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input(
                    id="img_compression", default=35, min=0, max=100, tooltip="Amount of compression to apply on image."
                ),
            ],
            outputs=[
                io.Image.Output(display_name="output_image"),
            ],
        )

    @classmethod
    def execute(cls, image, img_compression):
        output_images = []
        for i in range(image.shape[0]):
            output_images.append(preprocess(image[i], img_compression))
        return io.NodeOutput(torch.stack(output_images))


class LTXVScheduler(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LTXVScheduler_V3",
            category="sampling/custom_sampling/schedulers",
            inputs=[
                io.Int.Input("steps", default=20, min=1, max=10000),
                io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
                io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01),
                io.Boolean.Input(
                    id="stretch",
                    default=True,
                    tooltip="Stretch the sigmas to be in the range [terminal, 1].",
                ),
                io.Float.Input(
                    id="terminal",
                    default=0.1,
                    min=0.0,
                    max=0.99,
                    step=0.01,
                    tooltip="The terminal value of the sigmas after stretching.",
                ),
                io.Latent.Input("latent", optional=True),
            ],
            outputs=[
                io.Sigmas.Output(),
            ],
        )

    @classmethod
    def execute(cls, steps, max_shift, base_shift, stretch, terminal, latent=None):
        if latent is None:
            tokens = 4096
        else:
            tokens = math.prod(latent["samples"].shape[2:])

        sigmas = torch.linspace(1.0, 0.0, steps + 1)

        x1 = 1024
        x2 = 4096
        mm = (max_shift - base_shift) / (x2 - x1)
        b = base_shift - mm * x1
        sigma_shift = (tokens) * mm + b

        power = 1
        sigmas = torch.where(
            sigmas != 0,
            math.exp(sigma_shift) / (math.exp(sigma_shift) + (1 / sigmas - 1) ** power),
            0,
        )

        if stretch:
            non_zero_mask = sigmas != 0
            non_zero_sigmas = sigmas[non_zero_mask]
            one_minus_z = 1.0 - non_zero_sigmas
            scale_factor = one_minus_z[-1] / (1.0 - terminal)
            stretched = 1.0 - (one_minus_z / scale_factor)
            sigmas[non_zero_mask] = stretched

        return io.NodeOutput(sigmas)


class ModelSamplingLTXV(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingLTXV_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
                io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01),
                io.Latent.Input("latent", optional=True),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, max_shift, base_shift, latent=None):
        m = model.clone()

        if latent is None:
            tokens = 4096
        else:
            tokens = math.prod(latent["samples"].shape[2:])

        x1 = 1024
        x2 = 4096
        mm = (max_shift - base_shift) / (x2 - x1)
        b = base_shift - mm * x1
        shift = (tokens) * mm + b

        sampling_base = comfy.model_sampling.ModelSamplingFlux
        sampling_type = comfy.model_sampling.CONST

        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config)
        model_sampling.set_parameters(shift=shift)
        m.add_object_patch("model_sampling", model_sampling)

        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    EmptyLTXVLatentVideo,
    LTXVAddGuide,
    LTXVConditioning,
    LTXVCropGuides,
    LTXVImgToVideo,
    LTXVPreprocess,
    LTXVScheduler,
    ModelSamplingLTXV,
]
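LTXVScheduler and ModelSamplingLTXV both derive their shift from the latent token count by linear interpolation: base_shift at 1024 tokens, max_shift at 4096. The shift is then applied through the exp-based remap of a linear sigma ramp. A standalone numeric sketch of that schedule, assuming only PyTorch:

import math
import torch

def ltxv_sigmas(steps, tokens, base_shift=0.95, max_shift=2.05):
    # Linear interpolation of the shift in token count: base_shift at 1024
    # tokens, max_shift at 4096 (the x1/x2 constants used by the nodes above).
    mm = (max_shift - base_shift) / (4096 - 1024)
    b = base_shift - mm * 1024
    shift = tokens * mm + b

    sigmas = torch.linspace(1.0, 0.0, steps + 1)
    return torch.where(
        sigmas != 0,
        math.exp(shift) / (math.exp(shift) + (1 / sigmas - 1)),
        torch.zeros_like(sigmas),
    )

print(ltxv_sigmas(5, tokens=4096))
# Values are pushed toward 1.0 relative to the plain linspace ramp, so more
# of the step budget is spent at high noise levels.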
116  comfy_extras/v3/nodes_lumina2.py  Normal file
@@ -0,0 +1,116 @@
from __future__ import annotations

import torch

from comfy_api.latest import io


class RenormCFG(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RenormCFG_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01),
                io.Float.Input("renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, cfg_trunc, renorm_cfg):
        def renorm_cfg_func(args):
            cond_denoised = args["cond_denoised"]
            uncond_denoised = args["uncond_denoised"]
            cond_scale = args["cond_scale"]
            timestep = args["timestep"]
            x_orig = args["input"]
            in_channels = model.model.diffusion_model.in_channels

            if timestep[0] < cfg_trunc:
                cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels]
                cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:]
                half_eps = uncond_eps + cond_scale * (cond_eps - uncond_eps)
                half_rest = cond_rest

                if float(renorm_cfg) > 0.0:
                    ori_pos_norm = torch.linalg.vector_norm(
                        cond_eps,
                        dim=tuple(range(1, len(cond_eps.shape))),
                        keepdim=True,
                    )
                    max_new_norm = ori_pos_norm * float(renorm_cfg)
                    new_pos_norm = torch.linalg.vector_norm(
                        half_eps, dim=tuple(range(1, len(half_eps.shape))), keepdim=True
                    )
                    if new_pos_norm >= max_new_norm:
                        half_eps = half_eps * (max_new_norm / new_pos_norm)
            else:
                cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels]
                cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:]
                half_eps = cond_eps
                half_rest = cond_rest

            cfg_result = torch.cat([half_eps, half_rest], dim=1)

            # cfg_result = uncond_denoised + (cond_denoised - uncond_denoised) * cond_scale

            return x_orig - cfg_result

        m = model.clone()
        m.set_model_sampler_cfg_function(renorm_cfg_func)
        return io.NodeOutput(m)


class CLIPTextEncodeLumina2(io.ComfyNode):
    SYSTEM_PROMPT = {
        "superior": "You are an assistant designed to generate superior images with the superior "
                    "degree of image-text alignment based on textual prompts or user prompts.",
        "alignment": "You are an assistant designed to generate high-quality images with the "
                     "highest degree of image-text alignment based on textual prompts.",
    }
    SYSTEM_PROMPT_TIP = "Lumina2 provides two types of system prompts. " \
                        "Superior: You are an assistant designed to generate superior images with the superior " \
                        "degree of image-text alignment based on textual prompts or user prompts. " \
                        "Alignment: You are an assistant designed to generate high-quality images with the highest " \
                        "degree of image-text alignment based on textual prompts."

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeLumina2_V3",
            display_name="CLIP Text Encode for Lumina2 _V3",
            category="conditioning",
            description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "
                        "that can be used to guide the diffusion model towards generating specific images.",
            inputs=[
                io.Combo.Input("system_prompt", options=list(cls.SYSTEM_PROMPT.keys()), tooltip=cls.SYSTEM_PROMPT_TIP),
                io.String.Input("user_prompt", multiline=True, dynamic_prompts=True, tooltip="The text to be encoded."),
                io.Clip.Input("clip", tooltip="The CLIP model used for encoding the text."),
            ],
            outputs=[
                io.Conditioning.Output(tooltip="A conditioning containing the embedded text used to guide the diffusion model."),
            ],
        )

    @classmethod
    def execute(cls, system_prompt, user_prompt, clip):
        if clip is None:
            raise RuntimeError(
                "ERROR: clip input is invalid: None\n\n"
                "If the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model."
            )
        system_prompt = cls.SYSTEM_PROMPT[system_prompt]
        prompt = f"{system_prompt} <Prompt Start> {user_prompt}"
        tokens = clip.tokenize(prompt)
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodeLumina2,
    RenormCFG,
]
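The renorm branch in RenormCFG caps the norm of the CFG-combined prediction at renorm_cfg times the norm of the conditional prediction. A toy sketch of that cap, assuming only PyTorch (torch.where is used here instead of the node's Python `if` so the cap applies per batch element):

import torch

def renorm(cond_eps, uncond_eps, cond_scale, renorm_cfg):
    half_eps = uncond_eps + cond_scale * (cond_eps - uncond_eps)
    reduce_dims = tuple(range(1, cond_eps.ndim))
    max_norm = torch.linalg.vector_norm(cond_eps, dim=reduce_dims, keepdim=True) * renorm_cfg
    new_norm = torch.linalg.vector_norm(half_eps, dim=reduce_dims, keepdim=True)
    # Rescale only where CFG pushed the prediction past the cap.
    return torch.where(new_norm >= max_norm, half_eps * (max_norm / new_norm), half_eps)

cond = torch.randn(1, 4, 8, 8)
uncond = torch.randn(1, 4, 8, 8)
out = renorm(cond, uncond, cond_scale=8.0, renorm_cfg=1.0)
print(out.flatten(1).norm(dim=1), cond.flatten(1).norm(dim=1))  # capped at cond's norm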
51  comfy_extras/v3/nodes_mahiro.py  Normal file
@@ -0,0 +1,51 @@
from __future__ import annotations

import torch
import torch.nn.functional as F

from comfy_api.latest import io


class Mahiro(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="Mahiro_V3",
            display_name="Mahiro is so cute that she deserves a better guidance function!! (。・ω・。) _V3",
            category="_for_testing",
            description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
            ],
            outputs=[
                io.Model.Output(display_name="patched_model"),
            ],
        )

    @classmethod
    def execute(cls, model):
        m = model.clone()

        def mahiro_normd(args):
            scale: float = args["cond_scale"]
            cond_p: torch.Tensor = args["cond_denoised"]
            uncond_p: torch.Tensor = args["uncond_denoised"]
            # naive leap: scale the conditional prediction directly
            leap = cond_p * scale
            # unconditional leap, used to measure agreement with the CFG result
            u_leap = uncond_p * scale
            cfg = args["denoised"]
            merge = (leap + cfg) / 2
            normu = torch.sqrt(u_leap.abs()) * u_leap.sign()
            normm = torch.sqrt(merge.abs()) * merge.sign()
            sim = F.cosine_similarity(normu, normm).mean()
            simsc = 2 * (sim + 1)
            wm = (simsc * cfg + (4 - simsc) * leap) / 4
            return wm

        m.set_model_sampler_post_cfg_function(mahiro_normd)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    Mahiro,
]
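The Mahiro patch blends the plain CFG output with the scaled positive prediction, weighted by the cosine similarity between signed-square-root-compressed versions of the two: sim lies in [-1, 1], so simsc lies in [0, 4], sweeping from pure leap to pure CFG. A standalone sketch of the blend on toy tensors, assuming only PyTorch:

import torch
import torch.nn.functional as F

def mahiro_blend(cond, uncond, cfg_result, scale):
    leap = cond * scale
    u_leap = uncond * scale
    merge = (leap + cfg_result) / 2
    # Signed square root compresses magnitudes before the direction comparison.
    normu = torch.sqrt(u_leap.abs()) * u_leap.sign()
    normm = torch.sqrt(merge.abs()) * merge.sign()
    sim = F.cosine_similarity(normu, normm).mean()
    simsc = 2 * (sim + 1)  # in [0, 4]: 0 -> all leap, 4 -> all cfg
    return (simsc * cfg_result + (4 - simsc) * leap) / 4

cond = torch.randn(1, 4, 8, 8)
uncond = torch.randn(1, 4, 8, 8)
cfg_result = uncond + 7.0 * (cond - uncond)
print(mahiro_blend(cond, uncond, cfg_result, 7.0).shape)  # torch.Size([1, 4, 8, 8])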
437  comfy_extras/v3/nodes_mask.py  Normal file
@@ -0,0 +1,437 @@
from __future__ import annotations

import numpy as np
import scipy.ndimage
import torch

import comfy.utils
import node_helpers
import nodes
from comfy_api.latest import io, ui


def composite(destination, source, x, y, mask=None, multiplier=8, resize_source=False):
    source = source.to(destination.device)
    if resize_source:
        source = torch.nn.functional.interpolate(
            source, size=(destination.shape[2], destination.shape[3]), mode="bilinear"
        )

    source = comfy.utils.repeat_to_batch_size(source, destination.shape[0])

    x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier))
    y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier))

    left, top = (x // multiplier, y // multiplier)
    right, bottom = (
        left + source.shape[3],
        top + source.shape[2],
    )

    if mask is None:
        mask = torch.ones_like(source)
    else:
        mask = mask.to(destination.device, copy=True)
        mask = torch.nn.functional.interpolate(
            mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])),
            size=(source.shape[2], source.shape[3]),
            mode="bilinear",
        )
        mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0])

    # calculate the bounds of the source that will be overlapping the destination
    # this prevents the source trying to overwrite latent pixels that are out of bounds
    # of the destination
    visible_width, visible_height = (
        destination.shape[3] - left + min(0, x),
        destination.shape[2] - top + min(0, y),
    )

    mask = mask[:, :, :visible_height, :visible_width]
    inverse_mask = torch.ones_like(mask) - mask

    source_portion = mask * source[:, :, :visible_height, :visible_width]
    destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]

    destination[:, :, top:bottom, left:right] = source_portion + destination_portion
    return destination


class LatentCompositeMasked(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LatentCompositeMasked_V3",
            display_name="Latent Composite Masked _V3",
            category="latent",
            inputs=[
                io.Latent.Input("destination"),
                io.Latent.Input("source"),
                io.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION, step=8),
                io.Boolean.Input("resize_source", default=False),
                io.Mask.Input("mask", optional=True),
            ],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, destination, source, x, y, resize_source, mask=None) -> io.NodeOutput:
        output = destination.copy()
        destination_samples = destination["samples"].clone()
        source_samples = source["samples"]
        output["samples"] = composite(destination_samples, source_samples, x, y, mask, 8, resize_source)
        return io.NodeOutput(output)


class ImageCompositeMasked(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageCompositeMasked_V3",
            display_name="Image Composite Masked _V3",
            category="image",
            inputs=[
                io.Image.Input("destination"),
                io.Image.Input("source"),
                io.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Boolean.Input("resize_source", default=False),
                io.Mask.Input("mask", optional=True),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, destination, source, x, y, resize_source, mask=None) -> io.NodeOutput:
        destination, source = node_helpers.image_alpha_fix(destination, source)
        destination = destination.clone().movedim(-1, 1)
        output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1)
        return io.NodeOutput(output)


class MaskToImage(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="MaskToImage_V3",
            display_name="Convert Mask to Image _V3",
            category="mask",
            inputs=[
                io.Mask.Input("mask"),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, mask) -> io.NodeOutput:
        return io.NodeOutput(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3))


class ImageToMask(io.ComfyNode):
    CHANNELS = ["red", "green", "blue", "alpha"]

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageToMask_V3",
            display_name="Convert Image to Mask _V3",
            category="mask",
            inputs=[
                io.Image.Input("image"),
                io.Combo.Input("channel", options=cls.CHANNELS),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, image, channel) -> io.NodeOutput:
        return io.NodeOutput(image[:, :, :, cls.CHANNELS.index(channel)])


class ImageColorToMask(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageColorToMask_V3",
            display_name="Image Color to Mask _V3",
            category="mask",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("color", default=0, min=0, max=0xFFFFFF),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, image, color) -> io.NodeOutput:
        temp = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int)
        temp = (
            torch.bitwise_left_shift(temp[:, :, :, 0], 16)
            + torch.bitwise_left_shift(temp[:, :, :, 1], 8)
            + temp[:, :, :, 2]
        )
        return io.NodeOutput(torch.where(temp == color, 1.0, 0).float())


class SolidMask(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SolidMask_V3",
            display_name="Solid Mask _V3",
            category="mask",
            inputs=[
                io.Float.Input("value", default=1.0, min=0.0, max=1.0, step=0.01),
                io.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION),
                io.Int.Input("height", default=512, min=1, max=nodes.MAX_RESOLUTION),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, value, width, height) -> io.NodeOutput:
        return io.NodeOutput(torch.full((1, height, width), value, dtype=torch.float32, device="cpu"))


class InvertMask(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="InvertMask_V3",
            display_name="Invert Mask _V3",
            category="mask",
            inputs=[
                io.Mask.Input("mask"),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, mask) -> io.NodeOutput:
        return io.NodeOutput(1.0 - mask)


class CropMask(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CropMask_V3",
            display_name="Crop Mask _V3",
            category="mask",
            inputs=[
                io.Mask.Input("mask"),
                io.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION),
                io.Int.Input("height", default=512, min=1, max=nodes.MAX_RESOLUTION),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, mask, x, y, width, height) -> io.NodeOutput:
        mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1]))
        return io.NodeOutput(mask[:, y : y + height, x : x + width])


class MaskComposite(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="MaskComposite_V3",
            display_name="Mask Composite _V3",
            category="mask",
            inputs=[
                io.Mask.Input("destination"),
                io.Mask.Input("source"),
                io.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Combo.Input("operation", options=["multiply", "add", "subtract", "and", "or", "xor"]),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, destination, source, x, y, operation) -> io.NodeOutput:
        output = destination.reshape((-1, destination.shape[-2], destination.shape[-1])).clone()
        source = source.reshape((-1, source.shape[-2], source.shape[-1]))

        left, top = (
            x,
            y,
        )
        right, bottom = (
            min(left + source.shape[-1], destination.shape[-1]),
            min(top + source.shape[-2], destination.shape[-2]),
        )
        visible_width, visible_height = (
            right - left,
            bottom - top,
        )

        source_portion = source[:, :visible_height, :visible_width]
        destination_portion = output[:, top:bottom, left:right]

        if operation == "multiply":
            output[:, top:bottom, left:right] = destination_portion * source_portion
        elif operation == "add":
            output[:, top:bottom, left:right] = destination_portion + source_portion
        elif operation == "subtract":
            output[:, top:bottom, left:right] = destination_portion - source_portion
        elif operation == "and":
            output[:, top:bottom, left:right] = torch.bitwise_and(
                destination_portion.round().bool(), source_portion.round().bool()
            ).float()
        elif operation == "or":
            output[:, top:bottom, left:right] = torch.bitwise_or(
                destination_portion.round().bool(), source_portion.round().bool()
            ).float()
        elif operation == "xor":
            output[:, top:bottom, left:right] = torch.bitwise_xor(
                destination_portion.round().bool(), source_portion.round().bool()
            ).float()

        return io.NodeOutput(torch.clamp(output, 0.0, 1.0))


class FeatherMask(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="FeatherMask_V3",
            display_name="Feather Mask _V3",
            category="mask",
            inputs=[
                io.Mask.Input("mask"),
                io.Int.Input("left", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("top", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("right", default=0, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("bottom", default=0, min=0, max=nodes.MAX_RESOLUTION),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, mask, left, top, right, bottom) -> io.NodeOutput:
        output = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).clone()

        left = min(left, output.shape[-1])
        right = min(right, output.shape[-1])
        top = min(top, output.shape[-2])
        bottom = min(bottom, output.shape[-2])

        for x in range(left):
            feather_rate = (x + 1.0) / left
            output[:, :, x] *= feather_rate

        for x in range(right):
            feather_rate = (x + 1) / right
            output[:, :, -x] *= feather_rate

        for y in range(top):
            feather_rate = (y + 1) / top
            output[:, y, :] *= feather_rate

        for y in range(bottom):
            feather_rate = (y + 1) / bottom
            output[:, -y, :] *= feather_rate

        return io.NodeOutput(output)


class GrowMask(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="GrowMask_V3",
            display_name="Grow Mask _V3",
            category="mask",
            inputs=[
                io.Mask.Input("mask"),
                io.Int.Input("expand", default=0, min=-nodes.MAX_RESOLUTION, max=nodes.MAX_RESOLUTION),
                io.Boolean.Input("tapered_corners", default=True),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, mask, expand, tapered_corners) -> io.NodeOutput:
        c = 0 if tapered_corners else 1
        kernel = np.array([[c, 1, c], [1, 1, 1], [c, 1, c]])
        mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1]))
        out = []
        for m in mask:
            output = m.numpy()
            for _ in range(abs(expand)):
                if expand < 0:
                    output = scipy.ndimage.grey_erosion(output, footprint=kernel)
                else:
                    output = scipy.ndimage.grey_dilation(output, footprint=kernel)
            output = torch.from_numpy(output)
            out.append(output)
        return io.NodeOutput(torch.stack(out, dim=0))


class ThresholdMask(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ThresholdMask_V3",
            display_name="Threshold Mask _V3",
            category="mask",
            inputs=[
                io.Mask.Input("mask"),
                io.Float.Input("value", default=0.5, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[io.Mask.Output()],
        )

    @classmethod
    def execute(cls, mask, value) -> io.NodeOutput:
        return io.NodeOutput((mask > value).float())


class MaskPreview(io.ComfyNode):
    """Mask Preview - original implement in ComfyUI_essentials.

    https://github.com/cubiq/ComfyUI_essentials/blob/9d9f4bedfc9f0321c19faf71855e228c93bd0dc9/mask.py#L81
    Upstream requested in https://github.com/Kosinkadink/rfcs/blob/main/rfcs/0000-corenodes.md#preview-nodes
    """

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="MaskPreview_V3",
            display_name="Preview Mask _V3",
            category="mask",
            inputs=[
                io.Mask.Input("masks"),
            ],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, masks):
        return io.NodeOutput(ui=ui.PreviewMask(masks))


NODES_LIST: list[type[io.ComfyNode]] = [
    CropMask,
    FeatherMask,
    GrowMask,
    ImageColorToMask,
    ImageCompositeMasked,
    ImageToMask,
    InvertMask,
    LatentCompositeMasked,
    MaskComposite,
    MaskPreview,
    MaskToImage,
    SolidMask,
    ThresholdMask,
]
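As a quick illustration of the mask semantics these nodes share (masks are float tensors in [0, 1] of shape [B, H, W]), here is a stand-alone sketch combining two masks the way MaskComposite_V3 handles its "subtract" and "and" operations:

import torch

destination = torch.zeros(1, 4, 4)
destination[:, :2, :] = 1.0          # top half on
source = torch.zeros(1, 4, 4)
source[:, :, :2] = 1.0               # left half on

# "subtract" clamps back into [0, 1] afterwards, like the node does.
subtracted = torch.clamp(destination - source, 0.0, 1.0)

# "and" rounds to booleans first, then converts back to float.
anded = torch.bitwise_and(destination.round().bool(), source.round().bool()).float()

print(subtracted[0])
print(anded[0])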
38  comfy_extras/v3/nodes_mochi.py  Normal file
@@ -0,0 +1,38 @@
from __future__ import annotations

import torch

import comfy.model_management
import nodes
from comfy_api.latest import io


class EmptyMochiLatentVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptyMochiLatentVideo_V3",
            category="latent/video",
            inputs=[
                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=25, min=7, max=nodes.MAX_RESOLUTION, step=6),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, width, height, length, batch_size=1):
        latent = torch.zeros(
            [batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8],
            device=comfy.model_management.intermediate_device(),
        )
        return io.NodeOutput({"samples": latent})


NODES_LIST: list[type[io.ComfyNode]] = [
    EmptyMochiLatentVideo,
]
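The latent shape arithmetic is the interesting part here: Mochi latents use 12 channels, the spatial dimensions are downsampled 8x, and a frame count of `length` maps to `((length - 1) // 6) + 1` temporal latents. A small sketch of that calculation:

def mochi_latent_shape(batch_size: int, width: int, height: int, length: int):
    # 12 latent channels; 8x spatial downscale; 6x temporal compression
    # with one extra latent slot for the first video frame.
    return [batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8]

print(mochi_latent_shape(1, 848, 480, 25))  # -> [1, 12, 5, 60, 106]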
387  comfy_extras/v3/nodes_model_advanced.py  Normal file
@@ -0,0 +1,387 @@
from __future__ import annotations

import torch

import comfy.latent_formats
import comfy.model_sampling
import comfy.sd
import node_helpers
import nodes
from comfy_api.latest import io


class LCM(comfy.model_sampling.EPS):
    def calculate_denoised(self, sigma, model_output, model_input):
        timestep = self.timestep(sigma).view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        x0 = model_input - model_output * sigma

        sigma_data = 0.5
        scaled_timestep = timestep * 10.0  # timestep_scaling

        c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
        c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5

        return c_out * x0 + c_skip * model_input


class ModelSamplingDiscreteDistilled(comfy.model_sampling.ModelSamplingDiscrete):
    original_timesteps = 50

    def __init__(self, model_config=None, zsnr=None):
        super().__init__(model_config, zsnr=zsnr)

        self.skip_steps = self.num_timesteps // self.original_timesteps

        sigmas_valid = torch.zeros((self.original_timesteps), dtype=torch.float32)
        for x in range(self.original_timesteps):
            sigmas_valid[self.original_timesteps - 1 - x] = self.sigmas[self.num_timesteps - 1 - x * self.skip_steps]

        self.set_sigmas(sigmas_valid)

    def timestep(self, sigma):
        log_sigma = sigma.log()
        dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
        return (dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1)).to(sigma.device)

    def sigma(self, timestep):
        t = torch.clamp(
            ((timestep.float().to(self.log_sigmas.device) - (self.skip_steps - 1)) / self.skip_steps).float(),
            min=0,
            max=(len(self.sigmas) - 1),
        )
        low_idx = t.floor().long()
        high_idx = t.ceil().long()
        w = t.frac()
        log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
        return log_sigma.exp().to(timestep.device)


class ModelSamplingDiscrete(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingDiscrete_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input("sampling", options=["eps", "v_prediction", "lcm", "x0", "img_to_img"]),
                io.Boolean.Input("zsnr", default=False),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, sampling, zsnr):
        m = model.clone()

        sampling_base = comfy.model_sampling.ModelSamplingDiscrete
        if sampling == "eps":
            sampling_type = comfy.model_sampling.EPS
        elif sampling == "v_prediction":
            sampling_type = comfy.model_sampling.V_PREDICTION
        elif sampling == "lcm":
            sampling_type = LCM
            sampling_base = ModelSamplingDiscreteDistilled
        elif sampling == "x0":
            sampling_type = comfy.model_sampling.X0
        elif sampling == "img_to_img":
            sampling_type = comfy.model_sampling.IMG_TO_IMG

        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config, zsnr=zsnr)

        m.add_object_patch("model_sampling", model_sampling)
        return io.NodeOutput(m)


class ModelSamplingStableCascade(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingStableCascade_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("shift", default=2.0, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, shift):
        m = model.clone()

        sampling_base = comfy.model_sampling.StableCascadeSampling
        sampling_type = comfy.model_sampling.EPS

        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config)
        model_sampling.set_parameters(shift)
        m.add_object_patch("model_sampling", model_sampling)
        return io.NodeOutput(m)


class ModelSamplingSD3(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingSD3_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("shift", default=3.0, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, shift, multiplier: int | float = 1000):
        m = model.clone()

        sampling_base = comfy.model_sampling.ModelSamplingDiscreteFlow
        sampling_type = comfy.model_sampling.CONST

        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config)
        model_sampling.set_parameters(shift=shift, multiplier=multiplier)
        m.add_object_patch("model_sampling", model_sampling)
        return io.NodeOutput(m)


class ModelSamplingAuraFlow(ModelSamplingSD3):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingAuraFlow_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("shift", default=1.73, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, shift, multiplier: int | float = 1.0):
        return super().execute(model, shift, multiplier)


class ModelSamplingFlux(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingFlux_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("max_shift", default=1.15, min=0.0, max=100.0, step=0.01),
                io.Float.Input("base_shift", default=0.5, min=0.0, max=100.0, step=0.01),
                io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=8),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, max_shift, base_shift, width, height):
        m = model.clone()

        x1 = 256
        x2 = 4096
        mm = (max_shift - base_shift) / (x2 - x1)
        b = base_shift - mm * x1
        shift = (width * height / (8 * 8 * 2 * 2)) * mm + b

        sampling_base = comfy.model_sampling.ModelSamplingFlux
        sampling_type = comfy.model_sampling.CONST

        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config)
        model_sampling.set_parameters(shift=shift)
        m.add_object_patch("model_sampling", model_sampling)
        return io.NodeOutput(m)


class ModelSamplingContinuousEDM(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingContinuousEDM_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input(
                    "sampling", options=["v_prediction", "edm", "edm_playground_v2.5", "eps", "cosmos_rflow"]
                ),
                io.Float.Input("sigma_max", default=120.0, min=0.0, max=1000.0, step=0.001, round=False),
                io.Float.Input("sigma_min", default=0.002, min=0.0, max=1000.0, step=0.001, round=False),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, sampling, sigma_max, sigma_min):
        m = model.clone()

        sampling_base = comfy.model_sampling.ModelSamplingContinuousEDM
        latent_format = None
        sigma_data = 1.0
        if sampling == "eps":
            sampling_type = comfy.model_sampling.EPS
        elif sampling == "edm":
            sampling_type = comfy.model_sampling.EDM
            sigma_data = 0.5
        elif sampling == "v_prediction":
            sampling_type = comfy.model_sampling.V_PREDICTION
        elif sampling == "edm_playground_v2.5":
            sampling_type = comfy.model_sampling.EDM
            sigma_data = 0.5
            latent_format = comfy.latent_formats.SDXL_Playground_2_5()
        elif sampling == "cosmos_rflow":
            sampling_type = comfy.model_sampling.COSMOS_RFLOW
            sampling_base = comfy.model_sampling.ModelSamplingCosmosRFlow

        class ModelSamplingAdvanced(sampling_base, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config)
        model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
        m.add_object_patch("model_sampling", model_sampling)
        if latent_format is not None:
            m.add_object_patch("latent_format", latent_format)
        return io.NodeOutput(m)


class ModelSamplingContinuousV(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSamplingContinuousV_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input("sampling", options=["v_prediction"]),
                io.Float.Input("sigma_max", default=500.0, min=0.0, max=1000.0, step=0.001, round=False),
                io.Float.Input("sigma_min", default=0.03, min=0.0, max=1000.0, step=0.001, round=False),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, sampling, sigma_max, sigma_min):
        m = model.clone()

        sigma_data = 1.0
        if sampling == "v_prediction":
            sampling_type = comfy.model_sampling.V_PREDICTION

        class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousV, sampling_type):
            pass

        model_sampling = ModelSamplingAdvanced(model.model.model_config)
        model_sampling.set_parameters(sigma_min, sigma_max, sigma_data)
        m.add_object_patch("model_sampling", model_sampling)
        return io.NodeOutput(m)


class RescaleCFG(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RescaleCFG_V3",
            category="advanced/model",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("multiplier", default=0.7, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, multiplier):
        def rescale_cfg(args):
            cond = args["cond"]
            uncond = args["uncond"]
            cond_scale = args["cond_scale"]
            sigma = args["sigma"]
            sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1))
            x_orig = args["input"]

            # rescale cfg has to be done on v-pred model output
            x = x_orig / (sigma * sigma + 1.0)
            cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
            uncond = ((x - (x_orig - uncond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)

            # rescale cfg
            x_cfg = uncond + cond_scale * (cond - uncond)
            ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)
            ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True)

            x_rescaled = x_cfg * (ro_pos / ro_cfg)
            x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg

            return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5)

        m = model.clone()
        m.set_model_sampler_cfg_function(rescale_cfg)
        return io.NodeOutput(m)


class ModelComputeDtype(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelComputeDtype_V3",
            category="advanced/debug/model",
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input("dtype", options=["default", "fp32", "fp16", "bf16"]),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, dtype):
        m = model.clone()
        m.set_model_compute_dtype(node_helpers.string_to_torch_dtype(dtype))
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    ModelSamplingAuraFlow,
    ModelComputeDtype,
    ModelSamplingContinuousEDM,
    ModelSamplingContinuousV,
    ModelSamplingDiscrete,
    ModelSamplingFlux,
    ModelSamplingSD3,
    ModelSamplingStableCascade,
    RescaleCFG,
]
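To make the RescaleCFG math easier to follow in isolation: the CFG combination is renormalized so its per-sample standard deviation matches that of the conditional prediction, then blended back in by `multiplier`. A sketch of just that core step on random stand-in tensors (the v-prediction conversion around it is omitted):

import torch

cond = torch.randn(2, 4, 64, 64)
uncond = torch.randn(2, 4, 64, 64)
cond_scale, multiplier = 7.5, 0.7

x_cfg = uncond + cond_scale * (cond - uncond)
ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)   # std of the conditional prediction
ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True)  # std of the CFG result

x_rescaled = x_cfg * (ro_pos / ro_cfg)                  # match the conditional std
x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg
print(float(torch.std(x_final)))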
68  comfy_extras/v3/nodes_model_downscale.py  Normal file
@@ -0,0 +1,68 @@
from __future__ import annotations

import comfy.utils
from comfy_api.latest import io


class PatchModelAddDownscale(io.ComfyNode):
    UPSCALE_METHODS = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"]

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PatchModelAddDownscale_V3",
            display_name="PatchModelAddDownscale (Kohya Deep Shrink) _V3",
            category="model_patches/unet",
            inputs=[
                io.Model.Input("model"),
                io.Int.Input("block_number", default=3, min=1, max=32, step=1),
                io.Float.Input("downscale_factor", default=2.0, min=0.1, max=9.0, step=0.001),
                io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=0.35, min=0.0, max=1.0, step=0.001),
                io.Boolean.Input("downscale_after_skip", default=True),
                io.Combo.Input("downscale_method", options=cls.UPSCALE_METHODS),
                io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(
        cls, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method
    ):
        model_sampling = model.get_model_object("model_sampling")
        sigma_start = model_sampling.percent_to_sigma(start_percent)
        sigma_end = model_sampling.percent_to_sigma(end_percent)

        def input_block_patch(h, transformer_options):
            if transformer_options["block"][1] == block_number:
                sigma = transformer_options["sigmas"][0].item()
                if sigma <= sigma_start and sigma >= sigma_end:
                    h = comfy.utils.common_upscale(
                        h,
                        round(h.shape[-1] * (1.0 / downscale_factor)),
                        round(h.shape[-2] * (1.0 / downscale_factor)),
                        downscale_method,
                        "disabled",
                    )
            return h

        def output_block_patch(h, hsp, transformer_options):
            if h.shape[2] != hsp.shape[2]:
                h = comfy.utils.common_upscale(h, hsp.shape[-1], hsp.shape[-2], upscale_method, "disabled")
            return h, hsp

        m = model.clone()
        if downscale_after_skip:
            m.set_model_input_block_patch_after_skip(input_block_patch)
        else:
            m.set_model_input_block_patch(input_block_patch)
        m.set_model_output_block_patch(output_block_patch)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    PatchModelAddDownscale,
]
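The patch above only downscales while the current sigma lies inside the window derived from start_percent/end_percent. A small sketch of that gating plus the rescale arithmetic; note that `percent_to_sigma` here is a hypothetical stand-in, since real schedules compute it from the model's sigma table:

# Hypothetical stand-in for model_sampling.percent_to_sigma(); real schedules differ.
def percent_to_sigma(percent: float) -> float:
    return 14.6 * (1.0 - percent)  # assumption: sigma falls as denoising progresses

sigma_start = percent_to_sigma(0.0)   # largest sigma: patch active from the first step
sigma_end = percent_to_sigma(0.35)    # patch turns off after 35% of the schedule

def downscaled_size(w: int, h: int, factor: float, sigma: float):
    if sigma_end <= sigma <= sigma_start:
        return round(w / factor), round(h / factor)
    return w, h

print(downscaled_size(128, 128, 2.0, sigma=10.0))  # inside window -> (64, 64)
print(downscaled_size(128, 128, 2.0, sigma=1.0))   # outside window -> (128, 128)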
422  comfy_extras/v3/nodes_model_merging.py  Normal file
@@ -0,0 +1,422 @@
from __future__ import annotations

import json
import os

import torch

import comfy.model_base
import comfy.model_management
import comfy.model_sampling
import comfy.sd
import comfy.utils
import folder_paths
from comfy.cli_args import args
from comfy_api.latest import io


def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefix=None, output_dir=None, prompt=None, extra_pnginfo=None):
    full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, output_dir)
    prompt_info = ""
    if prompt is not None:
        prompt_info = json.dumps(prompt)

    metadata = {}

    enable_modelspec = True
    if isinstance(model.model, comfy.model_base.SDXL):
        if isinstance(model.model, comfy.model_base.SDXL_instructpix2pix):
            metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-edit"
        else:
            metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base"
    elif isinstance(model.model, comfy.model_base.SDXLRefiner):
        metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
    elif isinstance(model.model, comfy.model_base.SVD_img2vid):
        metadata["modelspec.architecture"] = "stable-video-diffusion-img2vid-v1"
    elif isinstance(model.model, comfy.model_base.SD3):
        metadata["modelspec.architecture"] = "stable-diffusion-v3-medium"  # TODO: other SD3 variants
    else:
        enable_modelspec = False

    if enable_modelspec:
        metadata["modelspec.sai_model_spec"] = "1.0.0"
        metadata["modelspec.implementation"] = "sgm"
        metadata["modelspec.title"] = "{} {}".format(filename, counter)

    # TODO:
    # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512",
    # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h",
    # "v2-inpainting"

    extra_keys = {}
    model_sampling = model.get_model_object("model_sampling")
    if isinstance(model_sampling, comfy.model_sampling.ModelSamplingContinuousEDM):
        if isinstance(model_sampling, comfy.model_sampling.V_PREDICTION):
            extra_keys["edm_vpred.sigma_max"] = torch.tensor(model_sampling.sigma_max).float()
            extra_keys["edm_vpred.sigma_min"] = torch.tensor(model_sampling.sigma_min).float()

    if model.model.model_type == comfy.model_base.ModelType.EPS:
        metadata["modelspec.predict_key"] = "epsilon"
    elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION:
        metadata["modelspec.predict_key"] = "v"
        extra_keys["v_pred"] = torch.tensor([])
        if getattr(model_sampling, "zsnr", False):
            extra_keys["ztsnr"] = torch.tensor([])

    if not args.disable_metadata:
        metadata["prompt"] = prompt_info
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata[x] = json.dumps(extra_pnginfo[x])

    output_checkpoint = f"{filename}_{counter:05}_.safetensors"
    output_checkpoint = os.path.join(full_output_folder, output_checkpoint)

    comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata, extra_keys=extra_keys)


class ModelMergeSimple(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelMergeSimple_V3",
            category="advanced/model_merging",
            inputs=[
                io.Model.Input("model1"),
                io.Model.Input("model2"),
                io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01)
            ],
            outputs=[
                io.Model.Output()
            ]
        )

    @classmethod
    def execute(cls, model1, model2, ratio):
        m = model1.clone()
        kp = model2.get_key_patches("diffusion_model.")
        for k in kp:
            m.add_patches({k: kp[k]}, 1.0 - ratio, ratio)
        return io.NodeOutput(m)


class ModelSubtract(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelMergeSubtract_V3",
            category="advanced/model_merging",
            inputs=[
                io.Model.Input("model1"),
                io.Model.Input("model2"),
                io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01)
            ],
            outputs=[
                io.Model.Output()
            ]
        )

    @classmethod
    def execute(cls, model1, model2, multiplier):
        m = model1.clone()
        kp = model2.get_key_patches("diffusion_model.")
        for k in kp:
            m.add_patches({k: kp[k]}, -multiplier, multiplier)
        return io.NodeOutput(m)


class ModelAdd(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelMergeAdd_V3",
            category="advanced/model_merging",
            inputs=[
                io.Model.Input("model1"),
                io.Model.Input("model2")
            ],
            outputs=[
                io.Model.Output()
            ]
        )

    @classmethod
    def execute(cls, model1, model2):
        m = model1.clone()
        kp = model2.get_key_patches("diffusion_model.")
        for k in kp:
            m.add_patches({k: kp[k]}, 1.0, 1.0)
        return io.NodeOutput(m)


class CLIPMergeSimple(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPMergeSimple_V3",
            category="advanced/model_merging",
            inputs=[
                io.Clip.Input("clip1"),
                io.Clip.Input("clip2"),
                io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01)
            ],
            outputs=[
                io.Clip.Output()
            ]
        )

    @classmethod
    def execute(cls, clip1, clip2, ratio):
        m = clip1.clone()
        kp = clip2.get_key_patches()
        for k in kp:
            if k.endswith(".position_ids") or k.endswith(".logit_scale"):
                continue
            m.add_patches({k: kp[k]}, 1.0 - ratio, ratio)
        return io.NodeOutput(m)


class CLIPSubtract(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPMergeSubtract_V3",
            category="advanced/model_merging",
            inputs=[
                io.Clip.Input("clip1"),
                io.Clip.Input("clip2"),
                io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01)
            ],
            outputs=[
                io.Clip.Output()
            ]
        )

    @classmethod
    def execute(cls, clip1, clip2, multiplier):
        m = clip1.clone()
        kp = clip2.get_key_patches()
        for k in kp:
            if k.endswith(".position_ids") or k.endswith(".logit_scale"):
                continue
            m.add_patches({k: kp[k]}, -multiplier, multiplier)
        return io.NodeOutput(m)


class CLIPAdd(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPMergeAdd_V3",
            category="advanced/model_merging",
            inputs=[
                io.Clip.Input("clip1"),
                io.Clip.Input("clip2")
            ],
            outputs=[
                io.Clip.Output()
            ]
        )

    @classmethod
    def execute(cls, clip1, clip2):
        m = clip1.clone()
        kp = clip2.get_key_patches()
        for k in kp:
            if k.endswith(".position_ids") or k.endswith(".logit_scale"):
                continue
            m.add_patches({k: kp[k]}, 1.0, 1.0)
        return io.NodeOutput(m)


class ModelMergeBlocks(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelMergeBlocks_V3",
            category="advanced/model_merging",
            inputs=[
                io.Model.Input("model1"),
                io.Model.Input("model2"),
                io.Float.Input("input", default=1.0, min=0.0, max=1.0, step=0.01),
                io.Float.Input("middle", default=1.0, min=0.0, max=1.0, step=0.01),
                io.Float.Input("out", default=1.0, min=0.0, max=1.0, step=0.01)
            ],
            outputs=[
                io.Model.Output()
            ]
        )

    @classmethod
    def execute(cls, model1, model2, **kwargs):
        m = model1.clone()
        kp = model2.get_key_patches("diffusion_model.")
        default_ratio = next(iter(kwargs.values()))

        for k in kp:
            ratio = default_ratio
            k_unet = k[len("diffusion_model."):]

            last_arg_size = 0
            for arg in kwargs:
                if k_unet.startswith(arg) and last_arg_size < len(arg):
                    ratio = kwargs[arg]
                    last_arg_size = len(arg)

            m.add_patches({k: kp[k]}, 1.0 - ratio, ratio)
        return io.NodeOutput(m)


class CheckpointSave(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CheckpointSave_V3",
            display_name="Save Checkpoint _V3",
            category="advanced/model_merging",
            is_output_node=True,
            inputs=[
                io.Model.Input("model"),
                io.Clip.Input("clip"),
                io.Vae.Input("vae"),
                io.String.Input("filename_prefix", default="checkpoints/ComfyUI")
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo]
        )

    @classmethod
    def execute(cls, model, clip, vae, filename_prefix):
        save_checkpoint(model, clip=clip, vae=vae, filename_prefix=filename_prefix, output_dir=folder_paths.get_output_directory(), prompt=cls.hidden.prompt, extra_pnginfo=cls.hidden.extra_pnginfo)
        return io.NodeOutput()


class CLIPSave(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPSave_V3",
            category="advanced/model_merging",
            is_output_node=True,
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("filename_prefix", default="clip/ComfyUI")
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo]
        )

    @classmethod
    def execute(cls, clip, filename_prefix):
        prompt_info = ""
        if cls.hidden.prompt is not None:
            prompt_info = json.dumps(cls.hidden.prompt)

        metadata = {}
        if not args.disable_metadata:
            metadata["format"] = "pt"
            metadata["prompt"] = prompt_info
            if cls.hidden.extra_pnginfo is not None:
                for x in cls.hidden.extra_pnginfo:
                    metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])

        comfy.model_management.load_models_gpu([clip.load_model()], force_patch_weights=True)
        clip_sd = clip.get_sd()

        for prefix in ["clip_l.", "clip_g.", "clip_h.", "t5xxl.", "pile_t5xl.", "mt5xl.", "umt5xxl.", "t5base.", "gemma2_2b.", "llama.", "hydit_clip.", ""]:
            k = list(filter(lambda a: a.startswith(prefix), clip_sd.keys()))
            current_clip_sd = {}
            for x in k:
                current_clip_sd[x] = clip_sd.pop(x)
            if len(current_clip_sd) == 0:
                continue

            p = prefix[:-1]
            replace_prefix = {}
            filename_prefix_ = filename_prefix
            if len(p) > 0:
                filename_prefix_ = "{}_{}".format(filename_prefix_, p)
                replace_prefix[prefix] = ""
            replace_prefix["transformer."] = ""

            full_output_folder, filename, counter, subfolder, filename_prefix_ = folder_paths.get_save_image_path(filename_prefix_, folder_paths.get_output_directory())

            output_checkpoint = f"{filename}_{counter:05}_.safetensors"
            output_checkpoint = os.path.join(full_output_folder, output_checkpoint)

            current_clip_sd = comfy.utils.state_dict_prefix_replace(current_clip_sd, replace_prefix)

            comfy.utils.save_torch_file(current_clip_sd, output_checkpoint, metadata=metadata)
        return io.NodeOutput()


class VAESave(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VAESave_V3",
            category="advanced/model_merging",
            is_output_node=True,
            inputs=[
                io.Vae.Input("vae"),
                io.String.Input("filename_prefix", default="vae/ComfyUI_vae")
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo]
        )

    @classmethod
    def execute(cls, vae, filename_prefix):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, folder_paths.get_output_directory())
        prompt_info = ""
        if cls.hidden.prompt is not None:
            prompt_info = json.dumps(cls.hidden.prompt)

        metadata = {}
        if not args.disable_metadata:
            metadata["prompt"] = prompt_info
            if cls.hidden.extra_pnginfo is not None:
                for x in cls.hidden.extra_pnginfo:
                    metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])

        output_checkpoint = f"{filename}_{counter:05}_.safetensors"
        output_checkpoint = os.path.join(full_output_folder, output_checkpoint)

        comfy.utils.save_torch_file(vae.get_sd(), output_checkpoint, metadata=metadata)
        return io.NodeOutput()


class ModelSave(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ModelSave_V3",
            category="advanced/model_merging",
            is_output_node=True,
            inputs=[
                io.Model.Input("model"),
                io.String.Input("filename_prefix", default="diffusion_models/ComfyUI")
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo]
        )

    @classmethod
    def execute(cls, model, filename_prefix):
        save_checkpoint(model, filename_prefix=filename_prefix, output_dir=folder_paths.get_output_directory(), prompt=cls.hidden.prompt, extra_pnginfo=cls.hidden.extra_pnginfo)
        return io.NodeOutput()


NODES_LIST: list[type[io.ComfyNode]] = [
    CheckpointSave,
    CLIPAdd,
    CLIPMergeSimple,
    CLIPSave,
    CLIPSubtract,
    ModelAdd,
    ModelMergeBlocks,
    ModelMergeSimple,
    ModelSave,
    ModelSubtract,
    VAESave,
]
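The key detail in ModelMergeBlocks.execute() is how a per-key ratio is chosen: the longest input name that is a prefix of the parameter key (after stripping "diffusion_model.") wins, with the first input's value as the fallback. A stand-alone sketch of that lookup, using a hypothetical helper name:

def resolve_ratio(key: str, ratios: dict[str, float]) -> float:
    # First value acts as the default, mirroring next(iter(kwargs.values())).
    ratio = next(iter(ratios.values()))
    k_unet = key[len("diffusion_model."):]
    last_arg_size = 0
    for arg, value in ratios.items():
        # Longest matching prefix takes precedence.
        if k_unet.startswith(arg) and last_arg_size < len(arg):
            ratio = value
            last_arg_size = len(arg)
    return ratio

ratios = {"input": 1.0, "middle": 0.5, "out": 0.25}
print(resolve_ratio("diffusion_model.middle_block.1.weight", ratios))  # -> 0.5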
399  comfy_extras/v3/nodes_model_merging_model_specific.py  Normal file
@@ -0,0 +1,399 @@
from __future__ import annotations

from comfy_api.latest import io
from comfy_extras.v3.nodes_model_merging import ModelMergeBlocks


class ModelMergeSD1(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("time_embed.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("label_emb.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(12):
            inputs.append(io.Float.Input(f"input_blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        for i in range(3):
            inputs.append(io.Float.Input(f"middle_block.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        for i in range(12):
            inputs.append(io.Float.Input(f"output_blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("out.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeSD1_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeSDXL(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("time_embed.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("label_emb.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(9):
            inputs.append(io.Float.Input(f"input_blocks.{i}", default=1.0, min=0.0, max=1.0, step=0.01))

        for i in range(3):
            inputs.append(io.Float.Input(f"middle_block.{i}", default=1.0, min=0.0, max=1.0, step=0.01))

        for i in range(9):
            inputs.append(io.Float.Input(f"output_blocks.{i}", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("out.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeSDXL_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeSD3_2B(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("pos_embed.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("x_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("context_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("y_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(24):
            inputs.append(io.Float.Input(f"joint_blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeSD3_2B_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeAuraflow(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("init_x_linear.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("positional_encoding", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("cond_seq_linear.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("register_tokens", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(4):
            inputs.append(io.Float.Input(f"double_layers.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        for i in range(32):
            inputs.append(io.Float.Input(f"single_layers.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.extend([
            io.Float.Input("modF.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("final_linear.", default=1.0, min=0.0, max=1.0, step=0.01)
        ])

        return io.Schema(
            node_id="ModelMergeAuraflow_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeFlux1(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("img_in.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("time_in.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("guidance_in", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("vector_in.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("txt_in.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(19):
            inputs.append(io.Float.Input(f"double_blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        for i in range(38):
            inputs.append(io.Float.Input(f"single_blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeFlux1_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeSD35_Large(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("pos_embed.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("x_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("context_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("y_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(38):
            inputs.append(io.Float.Input(f"joint_blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeSD35_Large_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeMochiPreview(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("pos_frequencies.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t5_y_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t5_yproj.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(48):
            inputs.append(io.Float.Input(f"blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeMochiPreview_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeLTXV(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("patchify_proj.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("adaln_single.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("caption_projection.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(28):
            inputs.append(io.Float.Input(f"transformer_blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.extend([
            io.Float.Input("scale_shift_table", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("proj_out.", default=1.0, min=0.0, max=1.0, step=0.01)
        ])

        return io.Schema(
            node_id="ModelMergeLTXV_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeCosmos7B(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("pos_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("extra_pos_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("x_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("affline_norm.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(28):
            inputs.append(io.Float.Input(f"blocks.block{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeCosmos7B_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeCosmos14B(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("pos_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("extra_pos_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("x_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("affline_norm.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(36):
            inputs.append(io.Float.Input(f"blocks.block{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeCosmos14B_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeWAN2_1(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("patch_embedding.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("time_embedding.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("time_projection.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("text_embedding.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("img_emb.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(40):
            inputs.append(io.Float.Input(f"blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("head.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeWAN2_1_V3",
            category="advanced/model_merging/model_specific",
            description="1.3B model has 30 blocks, 14B model has 40 blocks. Image to video model has the extra img_emb.",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeCosmosPredict2_2B(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("pos_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("x_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedding_norm.", default=1.0, min=0.0, max=1.0, step=0.01)
        ]

        for i in range(28):
            inputs.append(io.Float.Input(f"blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))

        inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))

        return io.Schema(
            node_id="ModelMergeCosmosPredict2_2B_V3",
            category="advanced/model_merging/model_specific",
            inputs=inputs,
            outputs=[
                io.Model.Output(),
            ]
        )


class ModelMergeCosmosPredict2_14B(ModelMergeBlocks):
    @classmethod
    def define_schema(cls):
        inputs = [
            io.Model.Input("model1"),
            io.Model.Input("model2"),
            io.Float.Input("pos_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("x_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
            io.Float.Input("t_embedder.", default=1.0, min=0.0, max=1.0, step=0.01),
|
||||
io.Float.Input("t_embedding_norm.", default=1.0, min=0.0, max=1.0, step=0.01)
|
||||
]
|
||||
|
||||
for i in range(36):
|
||||
inputs.append(io.Float.Input(f"blocks.{i}.", default=1.0, min=0.0, max=1.0, step=0.01))
|
||||
|
||||
inputs.append(io.Float.Input("final_layer.", default=1.0, min=0.0, max=1.0, step=0.01))
|
||||
|
||||
return io.Schema(
|
||||
node_id="ModelMergeCosmosPredict2_14B_V3",
|
||||
category="advanced/model_merging/model_specific",
|
||||
inputs=inputs,
|
||||
outputs=[
|
||||
io.Model.Output(),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
NODES_LIST: list[type[io.ComfyNode]] = [
|
||||
ModelMergeAuraflow,
|
||||
ModelMergeCosmos14B,
|
||||
ModelMergeCosmos7B,
|
||||
ModelMergeCosmosPredict2_14B,
|
||||
ModelMergeCosmosPredict2_2B,
|
||||
ModelMergeFlux1,
|
||||
ModelMergeLTXV,
|
||||
ModelMergeMochiPreview,
|
||||
ModelMergeSD1,
|
||||
ModelMergeSD3_2B,
|
||||
ModelMergeSD35_Large,
|
||||
ModelMergeSDXL,
|
||||
ModelMergeWAN2_1,
|
||||
]
|
||||
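All of the classes above inherit execute from ModelMergeBlocks, so each Float input names a parameter-key prefix and its value is the blend ratio applied to every weight under that prefix (1.0 keeps model1, 0.0 takes model2). A minimal sketch of that per-prefix selection, assuming the longest-matching-prefix rule used by the block-merge nodes; the helper names here are illustrative, not the actual implementation:

def pick_ratio(key: str, ratios: dict[str, float], default: float = 1.0) -> float:
    # the most specific (longest) matching prefix wins,
    # e.g. "blocks.3." beats "blocks." for key "blocks.3.attn.weight"
    matches = [p for p in ratios if key.startswith(p)]
    return ratios[max(matches, key=len)] if matches else default

def merge_weight(w1, w2, ratio: float):
    # ratio == 1.0 keeps model1's weight, 0.0 takes model2's
    return w1 * ratio + w2 * (1.0 - ratio)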
comfy_extras/v3/nodes_morphology.py (new file, 108 lines)
@@ -0,0 +1,108 @@
from __future__ import annotations

import kornia.color
import torch
from kornia.morphology import (
    bottom_hat,
    closing,
    dilation,
    erosion,
    gradient,
    opening,
    top_hat,
)

import comfy.model_management
from comfy_api.latest import io


class Morphology(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="Morphology_V3",
            display_name="ImageMorphology _V3",
            category="image/postprocessing",
            inputs=[
                io.Image.Input("image"),
                io.Combo.Input("operation", options=["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"]),
                io.Int.Input("kernel_size", default=3, min=3, max=999, step=1),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image, operation, kernel_size):
        device = comfy.model_management.get_torch_device()
        kernel = torch.ones(kernel_size, kernel_size, device=device)
        image_k = image.to(device).movedim(-1, 1)
        if operation == "erode":
            output = erosion(image_k, kernel)
        elif operation == "dilate":
            output = dilation(image_k, kernel)
        elif operation == "open":
            output = opening(image_k, kernel)
        elif operation == "close":
            output = closing(image_k, kernel)
        elif operation == "gradient":
            output = gradient(image_k, kernel)
        elif operation == "top_hat":
            output = top_hat(image_k, kernel)
        elif operation == "bottom_hat":
            output = bottom_hat(image_k, kernel)
        else:
            raise ValueError(f"Invalid operation {operation} for morphology. Must be one of 'erode', 'dilate', 'open', 'close', 'gradient', 'top_hat', 'bottom_hat'")
        return io.NodeOutput(output.to(comfy.model_management.intermediate_device()).movedim(1, -1))


class ImageRGBToYUV(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageRGBToYUV_V3",
            category="image/batch",
            inputs=[
                io.Image.Input("image"),
            ],
            outputs=[
                io.Image.Output(display_name="Y"),
                io.Image.Output(display_name="U"),
                io.Image.Output(display_name="V"),
            ],
        )

    @classmethod
    def execute(cls, image):
        out = kornia.color.rgb_to_ycbcr(image.movedim(-1, 1)).movedim(1, -1)
        return io.NodeOutput(out[..., 0:1].expand_as(image), out[..., 1:2].expand_as(image), out[..., 2:3].expand_as(image))


class ImageYUVToRGB(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageYUVToRGB_V3",
            category="image/batch",
            inputs=[
                io.Image.Input("Y"),
                io.Image.Input("U"),
                io.Image.Input("V"),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, Y, U, V):
        image = torch.cat([torch.mean(Y, dim=-1, keepdim=True), torch.mean(U, dim=-1, keepdim=True), torch.mean(V, dim=-1, keepdim=True)], dim=-1)
        return io.NodeOutput(kornia.color.ycbcr_to_rgb(image.movedim(-1, 1)).movedim(1, -1))


NODES_LIST: list[type[io.ComfyNode]] = [
    ImageRGBToYUV,
    ImageYUVToRGB,
    Morphology,
]
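A hypothetical usage sketch of the layout convention above: ComfyUI images are BHWC floats in [0, 1], while kornia's morphology ops expect BCHW, hence the movedim(-1, 1) / movedim(1, -1) round-trip in Morphology.execute:

import torch
from kornia.morphology import dilation

image = torch.rand(1, 64, 64, 3)   # BHWC, as ComfyUI passes images
kernel = torch.ones(3, 3)          # square structuring element
out = dilation(image.movedim(-1, 1), kernel).movedim(1, -1)  # BCHW in, back to BHWC
assert out.shape == image.shape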
comfy_extras/v3/nodes_optimalsteps.py (new file, 64 lines)
@@ -0,0 +1,64 @@
from __future__ import annotations

import numpy as np
import torch

from comfy_api.latest import io

# from https://github.com/bebebe666/OptimalSteps


def loglinear_interp(t_steps, num_steps):
    """Performs log-linear interpolation of a given array of decreasing numbers."""
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])

    new_xs = np.linspace(0, 1, num_steps)
    new_ys = np.interp(new_xs, xs, ys)

    return np.exp(new_ys)[::-1].copy()


NOISE_LEVELS = {
    "FLUX": [0.9968, 0.9886, 0.9819, 0.975, 0.966, 0.9471, 0.9158, 0.8287, 0.5512, 0.2808, 0.001],
    "Wan": [1.0, 0.997, 0.995, 0.993, 0.991, 0.989, 0.987, 0.985, 0.98, 0.975, 0.973, 0.968, 0.96, 0.946, 0.927, 0.902, 0.864, 0.776, 0.539, 0.208, 0.001],
    "Chroma": [0.992, 0.99, 0.988, 0.985, 0.982, 0.978, 0.973, 0.968, 0.961, 0.953, 0.943, 0.931, 0.917, 0.9, 0.881, 0.858, 0.832, 0.802, 0.769, 0.731, 0.69, 0.646, 0.599, 0.55, 0.501, 0.451, 0.402, 0.355, 0.311, 0.27, 0.232, 0.199, 0.169, 0.143, 0.12, 0.101, 0.084, 0.07, 0.058, 0.048, 0.001],
}


class OptimalStepsScheduler(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="OptimalStepsScheduler_V3",
            category="sampling/custom_sampling/schedulers",
            inputs=[
                io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]),
                io.Int.Input("steps", default=20, min=3, max=1000),
                io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[
                io.Sigmas.Output(),
            ],
        )

    @classmethod
    def execute(cls, model_type, steps, denoise):
        total_steps = steps
        if denoise < 1.0:
            if denoise <= 0.0:
                return io.NodeOutput(torch.FloatTensor([]))
            total_steps = round(steps * denoise)

        sigmas = NOISE_LEVELS[model_type][:]
        if (steps + 1) != len(sigmas):
            sigmas = loglinear_interp(sigmas, steps + 1)

        sigmas = sigmas[-(total_steps + 1):]
        sigmas[-1] = 0
        return io.NodeOutput(torch.FloatTensor(sigmas))


NODES_LIST: list[type[io.ComfyNode]] = [
    OptimalStepsScheduler,
]
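A small worked example of loglinear_interp, using the FLUX table defined above: the endpoints of the schedule are preserved exactly (the interpolation grid includes 0 and 1) and the interior points are respaced log-linearly.

resampled = loglinear_interp(NOISE_LEVELS["FLUX"], 6)  # 11 entries -> 6 sigmas (5 steps)
print(len(resampled))   # 6
print(resampled[0])     # ~0.9968, first noise level kept
print(resampled[-1])    # ~0.001, last noise level kept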
comfy_extras/v3/nodes_pag.py (new file, 62 lines)
@@ -0,0 +1,62 @@
from __future__ import annotations

import comfy.model_patcher
import comfy.samplers
from comfy_api.latest import io

# Modified/simplified version of the node from: https://github.com/pamparamm/sd-perturbed-attention
# If you want the one with more options see the above repo.

# My modified one here is more basic but has fewer chances of breaking with ComfyUI updates.


class PerturbedAttentionGuidance(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PerturbedAttentionGuidance_V3",
            category="model_patches/unet",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, scale):
        unet_block = "middle"
        unet_block_id = 0
        m = model.clone()

        def perturbed_attention(q, k, v, extra_options, mask=None):
            return v

        def post_cfg_function(args):
            model = args["model"]
            cond_pred = args["cond_denoised"]
            cond = args["cond"]
            cfg_result = args["denoised"]
            sigma = args["sigma"]
            model_options = args["model_options"].copy()
            x = args["input"]

            if scale == 0:
                return cfg_result

            # Replace Self-attention with PAG
            model_options = comfy.model_patcher.set_model_options_patch_replace(model_options, perturbed_attention, "attn1", unet_block, unet_block_id)
            (pag,) = comfy.samplers.calc_cond_batch(model, [cond], x, sigma, model_options)

            return cfg_result + (cond_pred - pag) * scale

        m.set_model_sampler_post_cfg_function(post_cfg_function)

        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    PerturbedAttentionGuidance,
]
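In the code's own variables, the post-CFG hook above applies the PAG update

    denoised = cfg_result + scale * (cond_pred - pag)

where pag is the conditional prediction re-run with the middle-block self-attention replaced by an identity on the values (perturbed_attention simply returns v). At scale 0 the hook is a no-op, which the early return makes explicit.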
comfy_extras/v3/nodes_perpneg.py (new file, 114 lines)
@@ -0,0 +1,114 @@
from __future__ import annotations

import math

import torch

import comfy.model_management
import comfy.sampler_helpers
import comfy.samplers
import comfy.utils
import node_helpers
from comfy_api.latest import io


def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale):
    pos = noise_pred_pos - noise_pred_nocond
    neg = noise_pred_neg - noise_pred_nocond

    perp = neg - ((torch.mul(neg, pos).sum()) / (torch.norm(pos) ** 2)) * pos
    perp_neg = perp * neg_scale
    return noise_pred_nocond + cond_scale * (pos - perp_neg)


class Guider_PerpNeg(comfy.samplers.CFGGuider):
    def set_conds(self, positive, negative, empty_negative_prompt):
        empty_negative_prompt = node_helpers.conditioning_set_values(empty_negative_prompt, {"prompt_type": "negative"})
        self.inner_set_conds({"positive": positive, "empty_negative_prompt": empty_negative_prompt, "negative": negative})

    def set_cfg(self, cfg, neg_scale):
        self.cfg = cfg
        self.neg_scale = neg_scale

    def predict_noise(self, x, timestep, model_options={}, seed=None):
        # in CFGGuider.predict_noise, we call sampling_function(), which uses cfg_function() to compute pos & neg
        # but we'd rather do a single batch of sampling pos, neg, and empty, so we call calc_cond_batch([pos,neg,empty]) directly

        positive_cond = self.conds.get("positive", None)
        negative_cond = self.conds.get("negative", None)
        empty_cond = self.conds.get("empty_negative_prompt", None)

        if not model_options.get("disable_cfg1_optimization", False):
            if math.isclose(self.neg_scale, 0.0):
                negative_cond = None
            if math.isclose(self.cfg, 1.0):
                empty_cond = None

        conds = [positive_cond, negative_cond, empty_cond]

        out = comfy.samplers.calc_cond_batch(self.inner_model, conds, x, timestep, model_options)

        # Apply pre_cfg_functions since sampling_function() is skipped
        for fn in model_options.get("sampler_pre_cfg_function", []):
            args = {"conds": conds, "conds_out": out, "cond_scale": self.cfg, "timestep": timestep,
                    "input": x, "sigma": timestep, "model": self.inner_model, "model_options": model_options}
            out = fn(args)

        noise_pred_pos, noise_pred_neg, noise_pred_empty = out
        cfg_result = perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_empty, self.neg_scale, self.cfg)

        # normally this would be done in cfg_function, but we skipped
        # that for efficiency: we can compute the noise predictions in
        # a single call to calc_cond_batch() (rather than two)
        # so we replicate the hook here
        for fn in model_options.get("sampler_post_cfg_function", []):
            args = {
                "denoised": cfg_result,
                "cond": positive_cond,
                "uncond": negative_cond,
                "cond_scale": self.cfg,
                "model": self.inner_model,
                "uncond_denoised": noise_pred_neg,
                "cond_denoised": noise_pred_pos,
                "sigma": timestep,
                "model_options": model_options,
                "input": x,
                # not in the original call in samplers.py:cfg_function, but made available for future hooks
                "empty_cond": empty_cond,
                "empty_cond_denoised": noise_pred_empty,
            }
            cfg_result = fn(args)

        return cfg_result


class PerpNegGuider(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PerpNegGuider_V3",
            category="_for_testing",
            inputs=[
                io.Model.Input("model"),
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Conditioning.Input("empty_conditioning"),
                io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01),
                io.Float.Input("neg_scale", default=1.0, min=0.0, max=100.0, step=0.01),
            ],
            outputs=[
                io.Guider.Output(),
            ],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, positive, negative, empty_conditioning, cfg, neg_scale):
        guider = Guider_PerpNeg(model)
        guider.set_conds(positive, negative, empty_conditioning)
        guider.set_cfg(cfg, neg_scale)
        return io.NodeOutput(guider)


NODES_LIST: list[type[io.ComfyNode]] = [
    PerpNegGuider,
]
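In the notation of the code above, perp_neg projects the negative delta onto the component perpendicular to the positive delta before applying guidance:

    pos  = noise_pred_pos - noise_pred_nocond
    neg  = noise_pred_neg - noise_pred_nocond
    perp = neg - (sum(neg * pos) / ||pos||^2) * pos           # part of neg orthogonal to pos
    out  = noise_pred_nocond + cond_scale * (pos - neg_scale * perp)

so only the portion of the negative prompt that is orthogonal to the positive direction is subtracted, instead of the full CFG negative.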
comfy_extras/v3/nodes_photomaker.py (new file, 205 lines)
@@ -0,0 +1,205 @@
from __future__ import annotations

import torch
import torch.nn as nn

import comfy.clip_model
import comfy.clip_vision
import comfy.model_management
import comfy.ops
import comfy.utils
import folder_paths
from comfy_api.latest import io

# code for model from:
# https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0
VISION_CONFIG_DICT = {
    "hidden_size": 1024,
    "image_size": 224,
    "intermediate_size": 4096,
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768,
    "hidden_act": "quick_gelu",
    "model_type": "clip_vision_model",
}


class MLP(nn.Module):
    def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True, operations=comfy.ops):
        super().__init__()
        if use_residual:
            assert in_dim == out_dim
        self.layernorm = operations.LayerNorm(in_dim)
        self.fc1 = operations.Linear(in_dim, hidden_dim)
        self.fc2 = operations.Linear(hidden_dim, out_dim)
        self.use_residual = use_residual
        self.act_fn = nn.GELU()

    def forward(self, x):
        residual = x
        x = self.layernorm(x)
        x = self.fc1(x)
        x = self.act_fn(x)
        x = self.fc2(x)
        if self.use_residual:
            x = x + residual
        return x


class FuseModule(nn.Module):
    def __init__(self, embed_dim, operations):
        super().__init__()
        self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False, operations=operations)
        self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True, operations=operations)
        self.layer_norm = operations.LayerNorm(embed_dim)

    def fuse_fn(self, prompt_embeds, id_embeds):
        stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1)
        stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds
        stacked_id_embeds = self.mlp2(stacked_id_embeds)
        stacked_id_embeds = self.layer_norm(stacked_id_embeds)
        return stacked_id_embeds

    def forward(
        self,
        prompt_embeds,
        id_embeds,
        class_tokens_mask,
    ) -> torch.Tensor:
        # id_embeds shape: [b, max_num_inputs, 1, 2048]
        id_embeds = id_embeds.to(prompt_embeds.dtype)
        num_inputs = class_tokens_mask.sum().unsqueeze(0)  # TODO: check for training case
        batch_size, max_num_inputs = id_embeds.shape[:2]
        # seq_length: 77
        seq_length = prompt_embeds.shape[1]
        # flat_id_embeds shape: [b*max_num_inputs, 1, 2048]
        flat_id_embeds = id_embeds.view(
            -1, id_embeds.shape[-2], id_embeds.shape[-1]
        )
        # valid_id_mask [b*max_num_inputs]
        valid_id_mask = (
            torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :]
            < num_inputs[:, None]
        )
        valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()]

        prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1])
        class_tokens_mask = class_tokens_mask.view(-1)
        valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1])
        # slice out the image token embeddings
        image_token_embeds = prompt_embeds[class_tokens_mask]
        stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds)
        assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}"
        prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype))
        return prompt_embeds.view(batch_size, seq_length, -1)


class PhotoMakerIDEncoder(comfy.clip_model.CLIPVisionModelProjection):
    def __init__(self):
        self.load_device = comfy.model_management.text_encoder_device()
        offload_device = comfy.model_management.text_encoder_offload_device()
        dtype = comfy.model_management.text_encoder_dtype(self.load_device)

        super().__init__(VISION_CONFIG_DICT, dtype, offload_device, comfy.ops.manual_cast)
        self.visual_projection_2 = comfy.ops.manual_cast.Linear(1024, 1280, bias=False)
        self.fuse_module = FuseModule(2048, comfy.ops.manual_cast)

    def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask):
        b, num_inputs, c, h, w = id_pixel_values.shape
        id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w)

        shared_id_embeds = self.vision_model(id_pixel_values)[2]
        id_embeds = self.visual_projection(shared_id_embeds)
        id_embeds_2 = self.visual_projection_2(shared_id_embeds)

        id_embeds = id_embeds.view(b, num_inputs, 1, -1)
        id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1)

        id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1)
        return self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask)


class PhotoMakerLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PhotoMakerLoader_V3",
            category="_for_testing/photomaker",
            inputs=[
                io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")),
            ],
            outputs=[
                io.Photomaker.Output(),
            ],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, photomaker_model_name):
        photomaker_model_path = folder_paths.get_full_path_or_raise("photomaker", photomaker_model_name)
        photomaker_model = PhotoMakerIDEncoder()
        data = comfy.utils.load_torch_file(photomaker_model_path, safe_load=True)
        if "id_encoder" in data:
            data = data["id_encoder"]
        photomaker_model.load_state_dict(data)
        return io.NodeOutput(photomaker_model)


class PhotoMakerEncode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PhotoMakerEncode_V3",
            category="_for_testing/photomaker",
            inputs=[
                io.Photomaker.Input("photomaker"),
                io.Image.Input("image"),
                io.Clip.Input("clip"),
                io.String.Input("text", multiline=True, dynamic_prompts=True, default="photograph of photomaker"),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, photomaker, image, clip, text):
        special_token = "photomaker"
        pixel_values = comfy.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float()
        try:
            index = text.split(" ").index(special_token) + 1
        except ValueError:
            index = -1
        tokens = clip.tokenize(text, return_word_ids=True)
        out_tokens = {}
        for k in tokens:
            out_tokens[k] = []
            for t in tokens[k]:
                f = list(filter(lambda x: x[2] != index, t))
                while len(f) < len(t):
                    f.append(t[-1])
                out_tokens[k].append(f)

        cond, pooled = clip.encode_from_tokens(out_tokens, return_pooled=True)

        if index > 0:
            token_index = index - 1
            num_id_images = 1
            class_tokens_mask = [True if token_index <= i < token_index + num_id_images else False for i in range(77)]
            out = photomaker(
                id_pixel_values=pixel_values.unsqueeze(0), prompt_embeds=cond.to(photomaker.load_device),
                class_tokens_mask=torch.tensor(class_tokens_mask, dtype=torch.bool, device=photomaker.load_device).unsqueeze(0),
            )
        else:
            out = cond

        return io.NodeOutput([[out, {"pooled_output": pooled}]])


NODES_LIST: list[type[io.ComfyNode]] = [
    PhotoMakerEncode,
    PhotoMakerLoader,
]
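A worked shape example for FuseModule.forward, following the inline comments above (batch and count values are illustrative: one prompt, two reference ID images, and the 2048-dim ID embedding comes from the 768 + 1280 projection concat in PhotoMakerIDEncoder.forward):

# prompt_embeds:      [1, 77, 2048]
# id_embeds:          [1, 2, 1, 2048]  -> flat_id_embeds [2, 1, 2048]
# class_tokens_mask:  [1, 77] with exactly 2 True entries
# valid_id_embeds:    [2, 2048] after dropping padded inputs
# fuse_fn:            cat -> [2, 4096] -> mlp1 (+prompt residual) -> mlp2 -> [2, 2048]
# output:             [1, 77, 2048] with the 2 masked token positions replaced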
comfy_extras/v3/nodes_pixart.py (new file, 33 lines)
@@ -0,0 +1,33 @@
from __future__ import annotations

import nodes
from comfy_api.latest import io


class CLIPTextEncodePixArtAlpha(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodePixArtAlpha_V3",
            category="advanced/conditioning",
            description="Encodes text and sets the resolution conditioning for PixArt Alpha. Does not apply to PixArt Sigma.",
            inputs=[
                io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
                io.String.Input("text", multiline=True, dynamic_prompts=True),
                io.Clip.Input("clip"),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, width, height, text, clip):
        tokens = clip.tokenize(text)
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height}))


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodePixArtAlpha,
]
comfy_extras/v3/nodes_post_processing.py (new file, 255 lines)
@@ -0,0 +1,255 @@
from __future__ import annotations

import math

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

import comfy.model_management
import comfy.utils
import node_helpers
from comfy_api.latest import io


class Blend(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageBlend_V3",
            category="image/postprocessing",
            inputs=[
                io.Image.Input("image1"),
                io.Image.Input("image2"),
                io.Float.Input("blend_factor", default=0.5, min=0.0, max=1.0, step=0.01),
                io.Combo.Input("blend_mode", options=["normal", "multiply", "screen", "overlay", "soft_light", "difference"]),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
        image1, image2 = node_helpers.image_alpha_fix(image1, image2)
        image2 = image2.to(image1.device)
        if image1.shape != image2.shape:
            image2 = image2.permute(0, 3, 1, 2)
            image2 = comfy.utils.common_upscale(
                image2, image1.shape[2], image1.shape[1], upscale_method="bicubic", crop="center"
            )
            image2 = image2.permute(0, 2, 3, 1)

        blended_image = cls.blend_mode(image1, image2, blend_mode)
        blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
        blended_image = torch.clamp(blended_image, 0, 1)
        return io.NodeOutput(blended_image)

    @classmethod
    def blend_mode(cls, img1, img2, mode):
        if mode == "normal":
            return img2
        elif mode == "multiply":
            return img1 * img2
        elif mode == "screen":
            return 1 - (1 - img1) * (1 - img2)
        elif mode == "overlay":
            return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
        elif mode == "soft_light":
            return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (cls.g(img1) - img1))
        elif mode == "difference":
            return img1 - img2
        raise ValueError(f"Unsupported blend mode: {mode}")

    @classmethod
    def g(cls, x):
        return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))


class Blur(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageBlur_V3",
            category="image/postprocessing",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image: torch.Tensor, blur_radius: int, sigma: float):
        if blur_radius == 0:
            return io.NodeOutput(image)

        image = image.to(comfy.model_management.get_torch_device())
        batch_size, height, width, channels = image.shape

        kernel_size = blur_radius * 2 + 1
        kernel = gaussian_kernel(kernel_size, sigma, device=image.device).repeat(channels, 1, 1).unsqueeze(1)

        image = image.permute(0, 3, 1, 2)  # Torch wants (B, C, H, W) we use (B, H, W, C)
        padded_image = F.pad(image, (blur_radius, blur_radius, blur_radius, blur_radius), "reflect")
        blurred = F.conv2d(padded_image, kernel, padding=kernel_size // 2, groups=channels)[:, :, blur_radius:-blur_radius, blur_radius:-blur_radius]
        blurred = blurred.permute(0, 2, 3, 1)

        return io.NodeOutput(blurred.to(comfy.model_management.intermediate_device()))


def gaussian_kernel(kernel_size: int, sigma: float, device=None):
    x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size, device=device), torch.linspace(-1, 1, kernel_size, device=device), indexing="ij")
    d = torch.sqrt(x * x + y * y)
    g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
    return g / g.sum()


class Quantize(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageQuantize_V3",
            category="image/postprocessing",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("colors", default=256, min=1, max=256, step=1),
                io.Combo.Input("dither", options=["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"]),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @staticmethod
    def bayer(im, pal_im, order):
        def normalized_bayer_matrix(n):
            if n == 0:
                return np.zeros((1, 1), "float32")
            q = 4 ** n
            m = q * normalized_bayer_matrix(n - 1)
            return np.bmat(((m - 1.5, m + 0.5), (m + 1.5, m - 0.5))) / q

        num_colors = len(pal_im.getpalette()) // 3
        spread = 2 * 256 / num_colors
        bayer_n = int(math.log2(order))
        bayer_matrix = torch.from_numpy(spread * normalized_bayer_matrix(bayer_n) + 0.5)

        result = torch.from_numpy(np.array(im).astype(np.float32))
        tw = math.ceil(result.shape[0] / bayer_matrix.shape[0])
        th = math.ceil(result.shape[1] / bayer_matrix.shape[1])
        tiled_matrix = bayer_matrix.tile(tw, th).unsqueeze(-1)
        result.add_(tiled_matrix[:result.shape[0], :result.shape[1]]).clamp_(0, 255)
        result = result.to(dtype=torch.uint8)

        im = Image.fromarray(result.cpu().numpy())
        return im.quantize(palette=pal_im, dither=Image.Dither.NONE)

    @classmethod
    def execute(cls, image: torch.Tensor, colors: int, dither: str):
        batch_size, height, width, _ = image.shape
        result = torch.zeros_like(image)

        for b in range(batch_size):
            im = Image.fromarray((image[b] * 255).to(torch.uint8).numpy(), mode='RGB')

            pal_im = im.quantize(colors=colors)  # Required as described in https://github.com/python-pillow/Pillow/issues/5836

            if dither == "none":
                quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.NONE)
            elif dither == "floyd-steinberg":
                quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.FLOYDSTEINBERG)
            elif dither.startswith("bayer"):
                order = int(dither.split('-')[-1])
                quantized_image = cls.bayer(im, pal_im, order)

            quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
            result[b] = quantized_array

        return io.NodeOutput(result)


class Sharpen(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageSharpen_V3",
            category="image/postprocessing",
            inputs=[
                io.Image.Input("image"),
                io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1),
                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.01),
                io.Float.Input("alpha", default=1.0, min=0.0, max=5.0, step=0.01),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image: torch.Tensor, sharpen_radius: int, sigma: float, alpha: float):
        if sharpen_radius == 0:
            return io.NodeOutput(image)

        batch_size, height, width, channels = image.shape
        image = image.to(comfy.model_management.get_torch_device())

        kernel_size = sharpen_radius * 2 + 1
        kernel = gaussian_kernel(kernel_size, sigma, device=image.device) * -(alpha * 10)
        center = kernel_size // 2
        kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0
        kernel = kernel.repeat(channels, 1, 1).unsqueeze(1)

        tensor_image = image.permute(0, 3, 1, 2)  # Torch wants (B, C, H, W) we use (B, H, W, C)
        tensor_image = F.pad(tensor_image, (sharpen_radius, sharpen_radius, sharpen_radius, sharpen_radius), "reflect")
        sharpened = F.conv2d(tensor_image, kernel, padding=center, groups=channels)[:, :, sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius]
        sharpened = sharpened.permute(0, 2, 3, 1)

        result = torch.clamp(sharpened, 0, 1)

        return io.NodeOutput(result.to(comfy.model_management.intermediate_device()))


class ImageScaleToTotalPixels(io.ComfyNode):
    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageScaleToTotalPixels_V3",
            category="image/upscaling",
            inputs=[
                io.Image.Input("image"),
                io.Combo.Input("upscale_method", options=cls.upscale_methods),
                io.Float.Input("megapixels", default=1.0, min=0.01, max=16.0, step=0.01),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image, upscale_method, megapixels):
        samples = image.movedim(-1, 1)
        total = int(megapixels * 1024 * 1024)

        scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
        width = round(samples.shape[3] * scale_by)
        height = round(samples.shape[2] * scale_by)

        s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
        return io.NodeOutput(s.movedim(1, -1))


NODES_LIST: list[type[io.ComfyNode]] = [
    Blend,
    Blur,
    ImageScaleToTotalPixels,
    Quantize,
    Sharpen,
]
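For reference, a standalone check of the Bayer threshold-matrix recursion used in Quantize.bayer above (the inner helper is duplicated here so the snippet runs on its own):

import numpy as np

def normalized_bayer_matrix(n):
    # n=0 -> [[0.]]; each level quadruples the matrix with zero-mean offsets
    if n == 0:
        return np.zeros((1, 1), "float32")
    q = 4 ** n
    m = q * normalized_bayer_matrix(n - 1)
    return np.bmat(((m - 1.5, m + 0.5), (m + 1.5, m - 0.5))) / q

print(normalized_bayer_matrix(1))
# [[-0.375  0.125]
#  [ 0.375 -0.125]]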
comfy_extras/v3/nodes_preview_any.py (new file, 47 lines)
@@ -0,0 +1,47 @@
from __future__ import annotations

import json

from comfy_api.latest import io, ui


class PreviewAny(io.ComfyNode):
    """Originally implemented from https://github.com/rgthree/rgthree-comfy/blob/main/py/display_any.py

    upstream requested in https://github.com/Kosinkadink/rfcs/blob/main/rfcs/0000-corenodes.md#preview-nodes"""

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PreviewAny_V3",  # frontend expects "PreviewAny" to work
            display_name="Preview Any _V3",  # frontend ignores "display_name" for this node
            description="Preview any type of data by converting it to a readable text format.",
            category="utils",
            inputs=[
                io.AnyType.Input("source"),  # TODO: does not work currently, as `io.AnyType` does not define __ne__
            ],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, source=None) -> io.NodeOutput:
        value = "None"
        if isinstance(source, str):
            value = source
        elif isinstance(source, (int, float, bool)):
            value = str(source)
        elif source is not None:
            try:
                value = json.dumps(source)
            except Exception:
                try:
                    value = str(source)
                except Exception:
                    value = "source exists, but could not be serialized."

        return io.NodeOutput(ui=ui.PreviewText(value))


NODES_LIST: list[type[io.ComfyNode]] = [
    PreviewAny,
]
comfy_extras/v3/nodes_primitive.py (new file, 104 lines)
@@ -0,0 +1,104 @@
from __future__ import annotations

import sys

from comfy_api.latest import io


class String(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PrimitiveString_V3",
            display_name="String _V3",
            category="utils/primitive",
            inputs=[
                io.String.Input("value"),
            ],
            outputs=[io.String.Output()],
        )

    @classmethod
    def execute(cls, value: str) -> io.NodeOutput:
        return io.NodeOutput(value)


class StringMultiline(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PrimitiveStringMultiline_V3",
            display_name="String (Multiline) _V3",
            category="utils/primitive",
            inputs=[
                io.String.Input("value", multiline=True),
            ],
            outputs=[io.String.Output()],
        )

    @classmethod
    def execute(cls, value: str) -> io.NodeOutput:
        return io.NodeOutput(value)


class Int(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PrimitiveInt_V3",
            display_name="Int _V3",
            category="utils/primitive",
            inputs=[
                io.Int.Input("value", min=-sys.maxsize, max=sys.maxsize, control_after_generate=True),
            ],
            outputs=[io.Int.Output()],
        )

    @classmethod
    def execute(cls, value: int) -> io.NodeOutput:
        return io.NodeOutput(value)


class Float(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PrimitiveFloat_V3",
            display_name="Float _V3",
            category="utils/primitive",
            inputs=[
                io.Float.Input("value", min=-sys.maxsize, max=sys.maxsize),
            ],
            outputs=[io.Float.Output()],
        )

    @classmethod
    def execute(cls, value: float) -> io.NodeOutput:
        return io.NodeOutput(value)


class Boolean(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="PrimitiveBoolean_V3",
            display_name="Boolean _V3",
            category="utils/primitive",
            inputs=[
                io.Boolean.Input("value"),
            ],
            outputs=[io.Boolean.Output()],
        )

    @classmethod
    def execute(cls, value: bool) -> io.NodeOutput:
        return io.NodeOutput(value)


NODES_LIST: list[type[io.ComfyNode]] = [
    String,
    StringMultiline,
    Int,
    Float,
    Boolean,
]
comfy_extras/v3/nodes_rebatch.py (new file, 148 lines)
@@ -0,0 +1,148 @@
from __future__ import annotations

import torch

from comfy_api.latest import io


class ImageRebatch(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RebatchImages_V3",
            display_name="Rebatch Images _V3",
            category="image/batch",
            is_input_list=True,
            inputs=[
                io.Image.Input("images"),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[
                io.Image.Output(display_name="IMAGE", is_output_list=True),
            ],
        )

    @classmethod
    def execute(cls, images, batch_size):
        batch_size = batch_size[0]

        output_list = []
        all_images = []
        for img in images:
            for i in range(img.shape[0]):
                all_images.append(img[i:i + 1])

        for i in range(0, len(all_images), batch_size):
            output_list.append(torch.cat(all_images[i:i + batch_size], dim=0))

        return io.NodeOutput(output_list)


class LatentRebatch(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RebatchLatents_V3",
            display_name="Rebatch Latents _V3",
            category="latent/batch",
            is_input_list=True,
            inputs=[
                io.Latent.Input("latents"),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[
                io.Latent.Output(is_output_list=True),
            ],
        )

    @staticmethod
    def get_batch(latents, list_ind, offset):
        """prepare a batch out of the list of latents"""
        samples = latents[list_ind]['samples']
        shape = samples.shape
        mask = latents[list_ind]['noise_mask'] if 'noise_mask' in latents[list_ind] else torch.ones((shape[0], 1, shape[2]*8, shape[3]*8), device='cpu')
        if mask.shape[-1] != shape[-1] * 8 or mask.shape[-2] != shape[-2] * 8:
            # assign the result so the resize actually takes effect
            mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[-2]*8, shape[-1]*8), mode="bilinear")
        if mask.shape[0] < samples.shape[0]:
            mask = mask.repeat((shape[0] - 1) // mask.shape[0] + 1, 1, 1, 1)[:shape[0]]
        if 'batch_index' in latents[list_ind]:
            batch_inds = latents[list_ind]['batch_index']
        else:
            batch_inds = [x + offset for x in range(shape[0])]
        return samples, mask, batch_inds

    @staticmethod
    def get_slices(indexable, num, batch_size):
        """divides an indexable object into num slices of length batch_size, and a remainder"""
        slices = []
        for i in range(num):
            slices.append(indexable[i*batch_size:(i+1)*batch_size])
        if num * batch_size < len(indexable):
            return slices, indexable[num * batch_size:]
        else:
            return slices, None

    @staticmethod
    def slice_batch(batch, num, batch_size):
        result = [LatentRebatch.get_slices(x, num, batch_size) for x in batch]
        return list(zip(*result))

    @staticmethod
    def cat_batch(batch1, batch2):
        if batch1[0] is None:
            return batch2
        result = [torch.cat((b1, b2)) if torch.is_tensor(b1) else b1 + b2 for b1, b2 in zip(batch1, batch2)]
        return result

    @classmethod
    def execute(cls, latents, batch_size):
        batch_size = batch_size[0]

        output_list = []
        current_batch = (None, None, None)
        processed = 0

        for i in range(len(latents)):
            # fetch new entry of list
            # samples, masks, indices = self.get_batch(latents, i)
            next_batch = cls.get_batch(latents, i, processed)
            processed += len(next_batch[2])
            # set to current if current is None
            if current_batch[0] is None:
                current_batch = next_batch
            # add previous to list if dimensions do not match
            elif next_batch[0].shape[-1] != current_batch[0].shape[-1] or next_batch[0].shape[-2] != current_batch[0].shape[-2]:
                sliced, _ = cls.slice_batch(current_batch, 1, batch_size)
                output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})
                current_batch = next_batch
            # cat if everything checks out
            else:
                current_batch = cls.cat_batch(current_batch, next_batch)

            # add to list if the accumulated batch has gone above the target batch size
            if current_batch[0].shape[0] > batch_size:
                num = current_batch[0].shape[0] // batch_size
                sliced, remainder = cls.slice_batch(current_batch, num, batch_size)

                for i in range(num):
                    output_list.append({'samples': sliced[0][i], 'noise_mask': sliced[1][i], 'batch_index': sliced[2][i]})

                current_batch = remainder

        # add remainder
        if current_batch[0] is not None:
            sliced, _ = cls.slice_batch(current_batch, 1, batch_size)
            output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})

        # get rid of empty masks
        for s in output_list:
            if s['noise_mask'].mean() == 1.0:
                del s['noise_mask']

        return io.NodeOutput(output_list)


NODES_LIST: list[type[io.ComfyNode]] = [
    ImageRebatch,
    LatentRebatch,
]
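A small worked example of the ImageRebatch semantics above: batches are flattened to single frames and regrouped, so a [5, H, W, C] batch plus a [3, H, W, C] batch with batch_size=4 yields two [4, H, W, C] outputs:

import torch

images = [torch.rand(5, 8, 8, 3), torch.rand(3, 8, 8, 3)]
batch_size = 4
singles = [img[i:i + 1] for img in images for i in range(img.shape[0])]
out = [torch.cat(singles[i:i + batch_size], dim=0) for i in range(0, len(singles), batch_size)]
print([o.shape[0] for o in out])  # [4, 4]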
comfy_extras/v3/nodes_sag.py (new file, 191 lines)
@@ -0,0 +1,191 @@
from __future__ import annotations

import math

import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from torch import einsum

import comfy.samplers
from comfy.ldm.modules.attention import optimized_attention
from comfy_api.latest import io


# from comfy/ldm/modules/attention.py
# but modified to return attention scores as well as output
def attention_basic_with_sim(q, k, v, heads, mask=None, attn_precision=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # force cast to fp32 to avoid overflowing
    if attn_precision == torch.float32:
        sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
    else:
        sim = einsum('b i d, b j d -> b i j', q, k) * scale

    del q, k

    if mask is not None:
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    # attention, what we cannot get enough of
    sim = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out, sim


def create_blur_map(x0, attn, sigma=3.0, threshold=1.0):
    # reshape and GAP the attention map
    _, hw1, hw2 = attn.shape
    b, _, lh, lw = x0.shape
    attn = attn.reshape(b, -1, hw1, hw2)
    # Global Average Pool
    mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold

    total = mask.shape[-1]
    x = round(math.sqrt((lh / lw) * total))
    xx = None
    for i in range(0, math.floor(math.sqrt(total) / 2)):
        for j in [(x + i), max(1, x - i)]:
            if total % j == 0:
                xx = j
                break
        if xx is not None:
            break

    x = xx
    y = total // x

    # Reshape
    mask = (
        mask.reshape(b, x, y)
        .unsqueeze(1)
        .type(attn.dtype)
    )
    # Upsample
    mask = F.interpolate(mask, (lh, lw))

    blurred = gaussian_blur_2d(x0, kernel_size=9, sigma=sigma)
    blurred = blurred * mask + x0 * (1 - mask)
    return blurred


def gaussian_blur_2d(img, kernel_size, sigma):
    ksize_half = (kernel_size - 1) * 0.5

    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)

    pdf = torch.exp(-0.5 * (x / sigma).pow(2))

    x_kernel = pdf / pdf.sum()
    x_kernel = x_kernel.to(device=img.device, dtype=img.dtype)

    kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :])
    kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1])

    padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2]

    img = F.pad(img, padding, mode="reflect")
    return F.conv2d(img, kernel2d, groups=img.shape[-3])


class SelfAttentionGuidance(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SelfAttentionGuidance_V3",
            display_name="Self-Attention Guidance _V3",
            category="_for_testing",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01),
                io.Float.Input("blur_sigma", default=2.0, min=0.0, max=10.0, step=0.1),
            ],
            outputs=[
                io.Model.Output(),
            ],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, scale, blur_sigma):
        m = model.clone()

        attn_scores = None

        # TODO: make this work properly with chunked batches
        # currently, we can only save the attn from one UNet call
        def attn_and_record(q, k, v, extra_options):
            nonlocal attn_scores
            # if uncond, save the attention scores
            heads = extra_options["n_heads"]
            cond_or_uncond = extra_options["cond_or_uncond"]
            b = q.shape[0] // len(cond_or_uncond)
            if 1 in cond_or_uncond:
                uncond_index = cond_or_uncond.index(1)
                # do the entire attention operation, but save the attention scores to attn_scores
                (out, sim) = attention_basic_with_sim(q, k, v, heads=heads, attn_precision=extra_options["attn_precision"])
                # when using a higher batch size, I BELIEVE the result batch dimension is [uc1, ... ucn, c1, ... cn]
                n_slices = heads * b
                attn_scores = sim[n_slices * uncond_index:n_slices * (uncond_index + 1)]
                return out
            else:
                return optimized_attention(q, k, v, heads=heads, attn_precision=extra_options["attn_precision"])

        def post_cfg_function(args):
            nonlocal attn_scores
            uncond_attn = attn_scores

            sag_scale = scale
            sag_sigma = blur_sigma
            sag_threshold = 1.0
            model = args["model"]
            uncond_pred = args["uncond_denoised"]
            uncond = args["uncond"]
            cfg_result = args["denoised"]
            sigma = args["sigma"]
            model_options = args["model_options"]
            x = args["input"]
            if min(cfg_result.shape[2:]) <= 4:  # skip when too small to add padding
                return cfg_result

            # create the adversarially blurred image
            degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold)
            degraded_noised = degraded + x - uncond_pred
            # call into the UNet
            (sag,) = comfy.samplers.calc_cond_batch(model, [uncond], degraded_noised, sigma, model_options)
            return cfg_result + (degraded - sag) * sag_scale

        m.set_model_sampler_post_cfg_function(post_cfg_function, disable_cfg1_optimization=True)

        # from diffusers:
        # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch
        m.set_model_attn1_replace(attn_and_record, "middle", 0, 0)

        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    SelfAttentionGuidance,
]
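A note on the blur above: gaussian_blur_2d exploits separability, building the 2-D kernel as the outer product of a 1-D Gaussian with itself (torch.mm(x_kernel[:, None], x_kernel[None, :])), expanding it per channel, and applying it as a depthwise convolution (groups equal to the channel count). The final correction mirrors the PAG form:

    cfg_result + sag_scale * (degraded - sag)

where degraded is the adversarially blurred unconditional prediction and sag is the UNet's prediction on its re-noised version.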
comfy_extras/v3/nodes_sd3.py (new file, 145 lines)
@@ -0,0 +1,145 @@
from __future__ import annotations
|
||||
|
||||
import torch
|
||||
|
||||
import comfy.model_management
|
||||
import comfy.sd
|
||||
import folder_paths
|
||||
import nodes
|
||||
from comfy_api.latest import io
|
||||
from comfy_extras.v3.nodes_slg import SkipLayerGuidanceDiT


class TripleCLIPLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TripleCLIPLoader_V3",
            category="advanced/loaders",
            description="[Recipes]\n\nsd3: clip-l, clip-g, t5",
            inputs=[
                io.Combo.Input("clip_name1", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name2", options=folder_paths.get_filename_list("text_encoders")),
                io.Combo.Input("clip_name3", options=folder_paths.get_filename_list("text_encoders")),
            ],
            outputs=[
                io.Clip.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip_name1: str, clip_name2: str, clip_name3: str):
        clip_path1 = folder_paths.get_full_path_or_raise("text_encoders", clip_name1)
        clip_path2 = folder_paths.get_full_path_or_raise("text_encoders", clip_name2)
        clip_path3 = folder_paths.get_full_path_or_raise("text_encoders", clip_name3)
        clip = comfy.sd.load_clip(
            ckpt_paths=[clip_path1, clip_path2, clip_path3],
            embedding_directory=folder_paths.get_folder_paths("embeddings"),
        )
        return io.NodeOutput(clip)


class EmptySD3LatentImage(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="EmptySD3LatentImage_V3",
            category="latent/sd3",
            inputs=[
                io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, width: int, height: int, batch_size=1):
        latent = torch.zeros(
            [batch_size, 16, height // 8, width // 8], device=comfy.model_management.intermediate_device()
        )
        return io.NodeOutput({"samples": latent})


class CLIPTextEncodeSD3(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeSD3_V3",
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
                io.String.Input("clip_g", multiline=True, dynamic_prompts=True),
                io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
                io.Combo.Input("empty_padding", options=["none", "empty_prompt"]),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, clip_l, clip_g, t5xxl, empty_padding: str):
        no_padding = empty_padding == "none"

        tokens = clip.tokenize(clip_g)
        if len(clip_g) == 0 and no_padding:
            tokens["g"] = []

        if len(clip_l) == 0 and no_padding:
            tokens["l"] = []
        else:
            tokens["l"] = clip.tokenize(clip_l)["l"]

        if len(t5xxl) == 0 and no_padding:
            tokens["t5xxl"] = []
        else:
            tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
        # The clip-l and clip-g token streams must have the same chunk count;
        # pad the shorter one with empty-prompt tokens.
        if len(tokens["l"]) != len(tokens["g"]):
            empty = clip.tokenize("")
            while len(tokens["l"]) < len(tokens["g"]):
                tokens["l"] += empty["l"]
            while len(tokens["l"]) > len(tokens["g"]):
                tokens["g"] += empty["g"]
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))


class SkipLayerGuidanceSD3(SkipLayerGuidanceDiT):
    """
    Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers.
    Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
    Experimental implementation by Dango233@StabilityAI.
    """
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SkipLayerGuidanceSD3_V3",
            category="advanced/guidance",
            inputs=[
                io.Model.Input("model"),
                io.String.Input("layers", default="7, 8, 9", multiline=False),
                io.Float.Input("scale", default=3.0, min=0.0, max=10.0, step=0.1),
                io.Float.Input("start_percent", default=0.01, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=0.15, min=0.0, max=1.0, step=0.001),
            ],
            outputs=[
                io.Model.Output(),
            ],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, layers: str, scale: float, start_percent: float, end_percent: float):
        return super().execute(
            model=model, scale=scale, start_percent=start_percent, end_percent=end_percent, double_layers=layers
        )


NODES_LIST: list[type[io.ComfyNode]] = [
    CLIPTextEncodeSD3,
    EmptySD3LatentImage,
    SkipLayerGuidanceSD3,
    TripleCLIPLoader,
]
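
For quick reference, the balancing step in CLIPTextEncodeSD3 above keeps the clip-l and clip-g token streams the same length by padding the shorter one with empty-prompt tokens. A minimal standalone sketch of just that step (the token lists here are hypothetical placeholders, not real CLIP tokens):

def equalize_token_lengths(l_tokens, g_tokens, empty_l, empty_g):
    # Both streams must contain the same number of token chunks before encoding.
    while len(l_tokens) < len(g_tokens):
        l_tokens = l_tokens + empty_l
    while len(l_tokens) > len(g_tokens):
        g_tokens = g_tokens + empty_g
    return l_tokens, g_tokens

print(equalize_token_lengths([["a"]], [["b"], ["c"]], [["<pad>"]], [["<pad>"]]))
# ([['a'], ['<pad>']], [['b'], ['c']])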

comfy_extras/v3/nodes_sdupscale.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from __future__ import annotations

import torch

import comfy.utils
from comfy_api.latest import io


class SD_4XUpscale_Conditioning(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SD_4XUpscale_Conditioning_V3",
            category="conditioning/upscale_diffusion",
            inputs=[
                io.Image.Input("images"),
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Float.Input("scale_ratio", default=4.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("noise_augmentation", default=0.0, min=0.0, max=1.0, step=0.001),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, images, positive, negative, scale_ratio, noise_augmentation):
        width = max(1, round(images.shape[-2] * scale_ratio))
        height = max(1, round(images.shape[-3] * scale_ratio))

        pixels = comfy.utils.common_upscale(
            (images.movedim(-1, 1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center"
        )

        out_cp = []
        out_cn = []

        for t in positive:
            n = [t[0], t[1].copy()]
            n[1]["concat_image"] = pixels
            n[1]["noise_augmentation"] = noise_augmentation
            out_cp.append(n)

        for t in negative:
            n = [t[0], t[1].copy()]
            n[1]["concat_image"] = pixels
            n[1]["noise_augmentation"] = noise_augmentation
            out_cn.append(n)

        latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
        return io.NodeOutput(out_cp, out_cn, {"samples": latent})


NODES_LIST: list[type[io.ComfyNode]] = [
    SD_4XUpscale_Conditioning,
]
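
The conditioning loops in SD_4XUpscale_Conditioning above follow a common ComfyUI pattern: copy each entry's options dict before attaching new keys, so the caller's conditioning list is never mutated. A minimal sketch of that pattern on dummy tensors (shapes are illustrative only):

import torch

conditioning = [[torch.zeros(1, 77, 768), {"pooled_output": torch.zeros(1, 768)}]]
pixels = torch.zeros(1, 3, 64, 64)  # stand-in for the downscaled image

out = []
for cond_tensor, options in conditioning:
    options = options.copy()  # copy so the input list stays untouched
    options["concat_image"] = pixels
    options["noise_augmentation"] = 0.1
    out.append([cond_tensor, options])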

comfy_extras/v3/nodes_slg.py (new file, 173 lines)
@@ -0,0 +1,173 @@
from __future__ import annotations

import re

import comfy.model_patcher
import comfy.samplers
from comfy_api.latest import io


class SkipLayerGuidanceDiT(io.ComfyNode):
    """
    Enhance guidance towards detailed structure by having another set of CFG negative with skipped layers.
    Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
    Original experimental implementation for SD3 by Dango233@StabilityAI.
    """

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SkipLayerGuidanceDiT_V3",
            category="advanced/guidance",
            description="Generic version of SkipLayerGuidance node that can be used on every DiT model.",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.String.Input("double_layers", default="7, 8, 9"),
                io.String.Input("single_layers", default="7, 8, 9"),
                io.Float.Input("scale", default=3.0, min=0.0, max=10.0, step=0.1),
                io.Float.Input("start_percent", default=0.01, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=0.15, min=0.0, max=1.0, step=0.001),
                io.Float.Input("rescaling_scale", default=0.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, scale, start_percent, end_percent, double_layers="", single_layers="", rescaling_scale=0):
        # Replacement patch that skips a block entirely by returning its input unchanged.
        def skip(args, extra_args):
            return args

        model_sampling = model.get_model_object("model_sampling")
        sigma_start = model_sampling.percent_to_sigma(start_percent)
        sigma_end = model_sampling.percent_to_sigma(end_percent)

        # Parse the layer strings as comma-separated integers.
        double_layers = re.findall(r"\d+", double_layers)
        double_layers = [int(i) for i in double_layers]

        single_layers = re.findall(r"\d+", single_layers)
        single_layers = [int(i) for i in single_layers]

        if len(double_layers) == 0 and len(single_layers) == 0:
            return io.NodeOutput(model)

        def post_cfg_function(args):
            model = args["model"]
            cond_pred = args["cond_denoised"]
            cond = args["cond"]
            cfg_result = args["denoised"]
            sigma = args["sigma"]
            x = args["input"]
            model_options = args["model_options"].copy()

            for layer in double_layers:
                model_options = comfy.model_patcher.set_model_options_patch_replace(
                    model_options, skip, "dit", "double_block", layer
                )

            for layer in single_layers:
                model_options = comfy.model_patcher.set_model_options_patch_replace(
                    model_options, skip, "dit", "single_block", layer
                )

            sigma_ = sigma[0].item()
            if scale > 0 and sigma_ >= sigma_end and sigma_ <= sigma_start:
                (slg,) = comfy.samplers.calc_cond_batch(model, [cond], x, sigma, model_options)
                cfg_result = cfg_result + (cond_pred - slg) * scale
                if rescaling_scale != 0:
                    factor = cond_pred.std() / cfg_result.std()
                    factor = rescaling_scale * factor + (1 - rescaling_scale)
                    cfg_result *= factor

            return cfg_result

        m = model.clone()
        m.set_model_sampler_post_cfg_function(post_cfg_function)

        return io.NodeOutput(m)


class SkipLayerGuidanceDiTSimple(io.ComfyNode):
    """Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass."""

    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SkipLayerGuidanceDiTSimple_V3",
            category="advanced/guidance",
            description="Simple version of the SkipLayerGuidanceDiT node that only modifies the uncond pass.",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.String.Input("double_layers", default="7, 8, 9"),
                io.String.Input("single_layers", default="7, 8, 9"),
                io.Float.Input("start_percent", default=0.0, min=0.0, max=1.0, step=0.001),
                io.Float.Input("end_percent", default=1.0, min=0.0, max=1.0, step=0.001),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, start_percent, end_percent, double_layers="", single_layers=""):
        def skip(args, extra_args):
            return args

        model_sampling = model.get_model_object("model_sampling")
        sigma_start = model_sampling.percent_to_sigma(start_percent)
        sigma_end = model_sampling.percent_to_sigma(end_percent)

        double_layers = re.findall(r"\d+", double_layers)
        double_layers = [int(i) for i in double_layers]

        single_layers = re.findall(r"\d+", single_layers)
        single_layers = [int(i) for i in single_layers]

        if len(double_layers) == 0 and len(single_layers) == 0:
            return io.NodeOutput(model)

        def calc_cond_batch_function(args):
            x = args["input"]
            model = args["model"]
            conds = args["conds"]
            sigma = args["sigma"]

            model_options = args["model_options"]
            slg_model_options = model_options.copy()

            for layer in double_layers:
                slg_model_options = comfy.model_patcher.set_model_options_patch_replace(
                    slg_model_options, skip, "dit", "double_block", layer
                )

            for layer in single_layers:
                slg_model_options = comfy.model_patcher.set_model_options_patch_replace(
                    slg_model_options, skip, "dit", "single_block", layer
                )

            cond, uncond = conds
            sigma_ = sigma[0].item()
            if sigma_ >= sigma_end and sigma_ <= sigma_start and uncond is not None:
                cond_out, _ = comfy.samplers.calc_cond_batch(model, [cond, None], x, sigma, model_options)
                _, uncond_out = comfy.samplers.calc_cond_batch(model, [None, uncond], x, sigma, slg_model_options)
                out = [cond_out, uncond_out]
            else:
                out = comfy.samplers.calc_cond_batch(model, conds, x, sigma, model_options)

            return out

        m = model.clone()
        m.set_model_sampler_calc_cond_batch_function(calc_cond_batch_function)

        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    SkipLayerGuidanceDiT,
    SkipLayerGuidanceDiTSimple,
]
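
Condensed, the post-CFG update above does one extra forward pass with the listed blocks skipped and pushes the result away from that degraded prediction. A minimal sketch of just the arithmetic on dummy tensors:

import torch

scale, rescaling_scale = 3.0, 0.5
cond_pred = torch.randn(1, 16, 8, 8)   # normal cond prediction
slg_pred = torch.randn(1, 16, 8, 8)    # prediction with the chosen blocks skipped
cfg_result = torch.randn(1, 16, 8, 8)  # standard CFG output

cfg_result = cfg_result + (cond_pred - slg_pred) * scale
if rescaling_scale != 0:
    # Optionally pull the result's statistics back toward the cond prediction.
    factor = cond_pred.std() / cfg_result.std()
    factor = rescaling_scale * factor + (1 - rescaling_scale)
    cfg_result = cfg_result * factor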

comfy_extras/v3/nodes_stable3d.py (new file, 165 lines)
@@ -0,0 +1,165 @@
from __future__ import annotations

import torch

import comfy.utils
import nodes
from comfy_api.latest import io


def camera_embeddings(elevation, azimuth):
    elevation = torch.as_tensor([elevation])
    azimuth = torch.as_tensor([azimuth])
    embeddings = torch.stack(
        [
            torch.deg2rad(
                (90 - elevation) - 90
            ),  # Zero123 polar is 90-elevation
            torch.sin(torch.deg2rad(azimuth)),
            torch.cos(torch.deg2rad(azimuth)),
            torch.deg2rad(
                90 - torch.full_like(elevation, 0)
            ),
        ], dim=-1).unsqueeze(1)

    return embeddings


class StableZero123_Conditioning(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StableZero123_Conditioning_V3",
            category="conditioning/3d_models",
            inputs=[
                io.ClipVision.Input("clip_vision"),
                io.Image.Input("init_image"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("height", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Float.Input("elevation", default=0.0, min=-180.0, max=180.0, step=0.1, round=False),
                io.Float.Input("azimuth", default=0.0, min=-180.0, max=180.0, step=0.1, round=False),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth):
        output = clip_vision.encode_image(init_image)
        pooled = output.image_embeds.unsqueeze(0)
        pixels = comfy.utils.common_upscale(init_image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        t = vae.encode(encode_pixels)
        cam_embeds = camera_embeddings(elevation, azimuth)
        cond = torch.cat([pooled, cam_embeds.to(pooled.device).repeat((pooled.shape[0], 1, 1))], dim=-1)

        positive = [[cond, {"concat_latent_image": t}]]
        negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return io.NodeOutput(positive, negative, {"samples": latent})


class StableZero123_Conditioning_Batched(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StableZero123_Conditioning_Batched_V3",
            category="conditioning/3d_models",
            inputs=[
                io.ClipVision.Input("clip_vision"),
                io.Image.Input("init_image"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("height", default=256, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Float.Input("elevation", default=0.0, min=-180.0, max=180.0, step=0.1, round=False),
                io.Float.Input("azimuth", default=0.0, min=-180.0, max=180.0, step=0.1, round=False),
                io.Float.Input("elevation_batch_increment", default=0.0, min=-180.0, max=180.0, step=0.1, round=False),
                io.Float.Input("azimuth_batch_increment", default=0.0, min=-180.0, max=180.0, step=0.1, round=False),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment):
        output = clip_vision.encode_image(init_image)
        pooled = output.image_embeds.unsqueeze(0)
        pixels = comfy.utils.common_upscale(init_image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        t = vae.encode(encode_pixels)

        cam_embeds = []
        for i in range(batch_size):
            cam_embeds.append(camera_embeddings(elevation, azimuth))
            elevation += elevation_batch_increment
            azimuth += azimuth_batch_increment

        cam_embeds = torch.cat(cam_embeds, dim=0)
        cond = torch.cat([comfy.utils.repeat_to_batch_size(pooled, batch_size), cam_embeds], dim=-1)

        positive = [[cond, {"concat_latent_image": t}]]
        negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
        latent = torch.zeros([batch_size, 4, height // 8, width // 8])
        return io.NodeOutput(positive, negative, {"samples": latent, "batch_index": [0] * batch_size})


class SV3D_Conditioning(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SV3D_Conditioning_V3",
            category="conditioning/3d_models",
            inputs=[
                io.ClipVision.Input("clip_vision"),
                io.Image.Input("init_image"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=576, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("height", default=576, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("video_frames", default=21, min=1, max=4096),
                io.Float.Input("elevation", default=0.0, min=-90.0, max=90.0, step=0.1, round=False),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, clip_vision, init_image, vae, width, height, video_frames, elevation):
        output = clip_vision.encode_image(init_image)
        pooled = output.image_embeds.unsqueeze(0)
        pixels = comfy.utils.common_upscale(init_image.movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        t = vae.encode(encode_pixels)

        azimuth = 0
        azimuth_increment = 360 / (max(video_frames, 2) - 1)

        elevations = []
        azimuths = []
        for i in range(video_frames):
            elevations.append(elevation)
            azimuths.append(azimuth)
            azimuth += azimuth_increment

        positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]]
        negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t), "elevation": elevations, "azimuth": azimuths}]]
        latent = torch.zeros([video_frames, 4, height // 8, width // 8])
        return io.NodeOutput(positive, negative, {"samples": latent})


NODES_LIST: list[type[io.ComfyNode]] = [
    StableZero123_Conditioning,
    StableZero123_Conditioning_Batched,
    SV3D_Conditioning,
]
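
To make the embedding layout above concrete, here is the camera_embeddings() math evaluated for a single (elevation, azimuth) pair; the four channels are the shifted polar angle, sin/cos of the azimuth, and a constant reference-elevation term:

import torch

elevation, azimuth = 10.0, 45.0
e = torch.as_tensor([elevation])
a = torch.as_tensor([azimuth])
emb = torch.stack([
    torch.deg2rad((90 - e) - 90),             # Zero123 polar is 90 - elevation
    torch.sin(torch.deg2rad(a)),
    torch.cos(torch.deg2rad(a)),
    torch.deg2rad(90 - torch.zeros_like(e)),  # constant reference elevation
], dim=-1).unsqueeze(1)
print(emb.shape)  # torch.Size([1, 1, 4])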

comfy_extras/v3/nodes_stable_cascade.py (new file, 143 lines)
@@ -0,0 +1,143 @@
"""
|
||||
This file is part of ComfyUI.
|
||||
Copyright (C) 2024 Stability AI
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
import torch
|
||||
|
||||
import comfy.utils
|
||||
import nodes
|
||||
from comfy_api.latest import io
|
||||
|
||||
|
||||
class StableCascade_EmptyLatentImage(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="StableCascade_EmptyLatentImage_V3",
|
||||
category="latent/stable_cascade",
|
||||
inputs=[
|
||||
io.Int.Input("width", default=1024, min=256, max=nodes.MAX_RESOLUTION, step=8),
|
||||
io.Int.Input("height", default=1024, min=256, max=nodes.MAX_RESOLUTION, step=8),
|
||||
io.Int.Input("compression", default=42, min=4, max=128, step=1),
|
||||
io.Int.Input("batch_size", default=1, min=1, max=4096),
|
||||
],
|
||||
outputs=[
|
||||
io.Latent.Output(display_name="stage_c"),
|
||||
io.Latent.Output(display_name="stage_b"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, width, height, compression, batch_size=1):
|
||||
c_latent = torch.zeros([batch_size, 16, height // compression, width // compression])
|
||||
b_latent = torch.zeros([batch_size, 4, height // 4, width // 4])
|
||||
return io.NodeOutput({"samples": c_latent}, {"samples": b_latent})
|
||||
|
||||
|
||||
class StableCascade_StageC_VAEEncode(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="StableCascade_StageC_VAEEncode_V3",
|
||||
category="latent/stable_cascade",
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
io.Vae.Input("vae"),
|
||||
io.Int.Input("compression", default=42, min=4, max=128, step=1),
|
||||
],
|
||||
outputs=[
|
||||
io.Latent.Output(display_name="stage_c"),
|
||||
io.Latent.Output(display_name="stage_b"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, image, vae, compression):
|
||||
width = image.shape[-2]
|
||||
height = image.shape[-3]
|
||||
out_width = (width // compression) * vae.downscale_ratio
|
||||
out_height = (height // compression) * vae.downscale_ratio
|
||||
|
||||
s = comfy.utils.common_upscale(image.movedim(-1, 1), out_width, out_height, "bicubic", "center").movedim(1, -1)
|
||||
|
||||
c_latent = vae.encode(s[:, :, :, :3])
|
||||
b_latent = torch.zeros([c_latent.shape[0], 4, (height // 8) * 2, (width // 8) * 2])
|
||||
return io.NodeOutput({"samples": c_latent}, {"samples": b_latent})
|
||||
|
||||
|
||||
class StableCascade_StageB_Conditioning(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="StableCascade_StageB_Conditioning_V3",
|
||||
category="conditioning/stable_cascade",
|
||||
inputs=[
|
||||
io.Conditioning.Input("conditioning"),
|
||||
io.Latent.Input("stage_c"),
|
||||
],
|
||||
outputs=[
|
||||
io.Conditioning.Output(),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, conditioning, stage_c):
|
||||
c = []
|
||||
for t in conditioning:
|
||||
d = t[1].copy()
|
||||
d["stable_cascade_prior"] = stage_c["samples"]
|
||||
n = [t[0], d]
|
||||
c.append(n)
|
||||
return io.NodeOutput(c)
|
||||
|
||||
|
||||
class StableCascade_SuperResolutionControlnet(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="StableCascade_SuperResolutionControlnet_V3",
|
||||
category="_for_testing/stable_cascade",
|
||||
is_experimental=True,
|
||||
inputs=[
|
||||
io.Image.Input("image"),
|
||||
io.Vae.Input("vae"),
|
||||
],
|
||||
outputs=[
|
||||
io.Image.Output(display_name="controlnet_input"),
|
||||
io.Latent.Output(display_name="stage_c"),
|
||||
io.Latent.Output(display_name="stage_b"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, image, vae):
|
||||
width = image.shape[-2]
|
||||
height = image.shape[-3]
|
||||
batch_size = image.shape[0]
|
||||
controlnet_input = vae.encode(image[:, :, :, :3]).movedim(1, -1)
|
||||
|
||||
c_latent = torch.zeros([batch_size, 16, height // 16, width // 16])
|
||||
b_latent = torch.zeros([batch_size, 4, height // 2, width // 2])
|
||||
return io.NodeOutput(controlnet_input, {"samples": c_latent}, {"samples": b_latent})
|
||||
|
||||
|
||||
NODES_LIST: list[type[io.ComfyNode]] = [
|
||||
StableCascade_EmptyLatentImage,
|
||||
StableCascade_StageB_Conditioning,
|
||||
StableCascade_StageC_VAEEncode,
|
||||
StableCascade_SuperResolutionControlnet,
|
||||
]
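
The latent-size arithmetic the Stable Cascade nodes above share is easy to verify by hand; here it is evaluated for the default 1024x1024 resolution and compression of 42:

width, height, compression, batch_size = 1024, 1024, 42, 1

stage_c_shape = [batch_size, 16, height // compression, width // compression]
stage_b_shape = [batch_size, 4, height // 4, width // 4]
print(stage_c_shape)  # [1, 16, 24, 24]
print(stage_b_shape)  # [1, 4, 256, 256]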

comfy_extras/v3/nodes_string.py (new file, 380 lines)
@@ -0,0 +1,380 @@
from __future__ import annotations

import re

from comfy_api.latest import io


class StringConcatenate(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StringConcatenate_V3",
            display_name="Concatenate _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string_a", multiline=True),
                io.String.Input("string_b", multiline=True),
                io.String.Input("delimiter", multiline=False, default=""),
            ],
            outputs=[
                io.String.Output(),
            ],
        )

    @classmethod
    def execute(cls, string_a, string_b, delimiter):
        return io.NodeOutput(delimiter.join((string_a, string_b)))


class StringSubstring(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StringSubstring_V3",
            display_name="Substring _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
                io.Int.Input("start"),
                io.Int.Input("end"),
            ],
            outputs=[
                io.String.Output(),
            ],
        )

    @classmethod
    def execute(cls, string, start, end):
        return io.NodeOutput(string[start:end])


class StringLength(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StringLength_V3",
            display_name="Length _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
            ],
            outputs=[
                io.Int.Output(display_name="length"),
            ],
        )

    @classmethod
    def execute(cls, string):
        return io.NodeOutput(len(string))


class CaseConverter(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CaseConverter_V3",
            display_name="Case Converter _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
                io.Combo.Input("mode", options=["UPPERCASE", "lowercase", "Capitalize", "Title Case"]),
            ],
            outputs=[
                io.String.Output(),
            ],
        )

    @classmethod
    def execute(cls, string, mode):
        if mode == "UPPERCASE":
            result = string.upper()
        elif mode == "lowercase":
            result = string.lower()
        elif mode == "Capitalize":
            result = string.capitalize()
        elif mode == "Title Case":
            result = string.title()
        else:
            result = string

        return io.NodeOutput(result)


class StringTrim(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StringTrim_V3",
            display_name="Trim _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
                io.Combo.Input("mode", options=["Both", "Left", "Right"]),
            ],
            outputs=[
                io.String.Output(),
            ],
        )

    @classmethod
    def execute(cls, string, mode):
        if mode == "Both":
            result = string.strip()
        elif mode == "Left":
            result = string.lstrip()
        elif mode == "Right":
            result = string.rstrip()
        else:
            result = string

        return io.NodeOutput(result)


class StringReplace(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StringReplace_V3",
            display_name="Replace _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
                io.String.Input("find", multiline=True),
                io.String.Input("replace", multiline=True),
            ],
            outputs=[
                io.String.Output(),
            ],
        )

    @classmethod
    def execute(cls, string, find, replace):
        return io.NodeOutput(string.replace(find, replace))


class StringContains(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StringContains_V3",
            display_name="Contains _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
                io.String.Input("substring", multiline=True),
                io.Boolean.Input("case_sensitive", default=True),
            ],
            outputs=[
                io.Boolean.Output(display_name="contains"),
            ],
        )

    @classmethod
    def execute(cls, string, substring, case_sensitive):
        if case_sensitive:
            contains = substring in string
        else:
            contains = substring.lower() in string.lower()

        return io.NodeOutput(contains)


class StringCompare(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="StringCompare_V3",
            display_name="Compare _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string_a", multiline=True),
                io.String.Input("string_b", multiline=True),
                io.Combo.Input("mode", options=["Starts With", "Ends With", "Equal"]),
                io.Boolean.Input("case_sensitive", default=True),
            ],
            outputs=[
                io.Boolean.Output(),
            ],
        )

    @classmethod
    def execute(cls, string_a, string_b, mode, case_sensitive):
        if case_sensitive:
            a = string_a
            b = string_b
        else:
            a = string_a.lower()
            b = string_b.lower()

        if mode == "Equal":
            return io.NodeOutput(a == b)
        elif mode == "Starts With":
            return io.NodeOutput(a.startswith(b))
        elif mode == "Ends With":
            return io.NodeOutput(a.endswith(b))


class RegexMatch(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RegexMatch_V3",
            display_name="Regex Match _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
                io.String.Input("regex_pattern", multiline=True),
                io.Boolean.Input("case_insensitive", default=True),
                io.Boolean.Input("multiline", default=False),
                io.Boolean.Input("dotall", default=False),
            ],
            outputs=[
                io.Boolean.Output(display_name="matches"),
            ],
        )

    @classmethod
    def execute(cls, string, regex_pattern, case_insensitive, multiline, dotall):
        flags = 0

        if case_insensitive:
            flags |= re.IGNORECASE
        if multiline:
            flags |= re.MULTILINE
        if dotall:
            flags |= re.DOTALL

        try:
            match = re.search(regex_pattern, string, flags)
            result = match is not None
        except re.error:
            result = False

        return io.NodeOutput(result)


class RegexExtract(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RegexExtract_V3",
            display_name="Regex Extract _V3",
            category="utils/string",
            inputs=[
                io.String.Input("string", multiline=True),
                io.String.Input("regex_pattern", multiline=True),
                io.Combo.Input("mode", options=["First Match", "All Matches", "First Group", "All Groups"]),
                io.Boolean.Input("case_insensitive", default=True),
                io.Boolean.Input("multiline", default=False),
                io.Boolean.Input("dotall", default=False),
                io.Int.Input("group_index", default=1, min=0, max=100),
            ],
            outputs=[
                io.String.Output(),
            ],
        )

    @classmethod
    def execute(cls, string, regex_pattern, mode, case_insensitive, multiline, dotall, group_index):
        join_delimiter = "\n"

        flags = 0
        if case_insensitive:
            flags |= re.IGNORECASE
        if multiline:
            flags |= re.MULTILINE
        if dotall:
            flags |= re.DOTALL

        try:
            if mode == "First Match":
                match = re.search(regex_pattern, string, flags)
                if match:
                    result = match.group(0)
                else:
                    result = ""
            elif mode == "All Matches":
                matches = re.findall(regex_pattern, string, flags)
                if matches:
                    if isinstance(matches[0], tuple):
                        result = join_delimiter.join([m[0] for m in matches])
                    else:
                        result = join_delimiter.join(matches)
                else:
                    result = ""
            elif mode == "First Group":
                match = re.search(regex_pattern, string, flags)
                if match and len(match.groups()) >= group_index:
                    result = match.group(group_index)
                else:
                    result = ""
            elif mode == "All Groups":
                matches = re.finditer(regex_pattern, string, flags)
                results = []
                for match in matches:
                    if match.groups() and len(match.groups()) >= group_index:
                        results.append(match.group(group_index))
                result = join_delimiter.join(results)
            else:
                result = ""
        except re.error:
            result = ""

        return io.NodeOutput(result)


class RegexReplace(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="RegexReplace_V3",
            display_name="Regex Replace _V3",
            category="utils/string",
            description="Find and replace text using regex patterns.",
            inputs=[
                io.String.Input("string", multiline=True),
                io.String.Input("regex_pattern", multiline=True),
                io.String.Input("replace", multiline=True),
                io.Boolean.Input("case_insensitive", default=True, optional=True),
                io.Boolean.Input("multiline", default=False, optional=True),
                io.Boolean.Input("dotall", default=False, optional=True, tooltip="When enabled, the dot (.) character will match any character including newline characters. When disabled, dots won't match newlines."),
                io.Int.Input("count", default=0, min=0, max=100, optional=True, tooltip="Maximum number of replacements to make. Set to 0 to replace all occurrences (default). Set to 1 to replace only the first match, 2 for the first two matches, etc."),
            ],
            outputs=[
                io.String.Output(),
            ],
        )

    @classmethod
    def execute(cls, string, regex_pattern, replace, case_insensitive=True, multiline=False, dotall=False, count=0):
        flags = 0

        if case_insensitive:
            flags |= re.IGNORECASE
        if multiline:
            flags |= re.MULTILINE
        if dotall:
            flags |= re.DOTALL
        result = re.sub(regex_pattern, replace, string, count=count, flags=flags)
        return io.NodeOutput(result)


NODES_LIST: list[type[io.ComfyNode]] = [
    CaseConverter,
    RegexExtract,
    RegexMatch,
    RegexReplace,
    StringCompare,
    StringConcatenate,
    StringContains,
    StringLength,
    StringReplace,
    StringSubstring,
    StringTrim,
]
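
The three Regex* nodes above share the same flag composition; here it is in isolation, showing how the boolean inputs map onto Python's re flags:

import re

case_insensitive, multiline, dotall = True, False, True

flags = 0
if case_insensitive:
    flags |= re.IGNORECASE
if multiline:
    flags |= re.MULTILINE
if dotall:
    flags |= re.DOTALL

# With re.DOTALL, "." also matches the newline between the words.
print(bool(re.search(r"foo.bar", "FOO\nbar", flags)))  # True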

comfy_extras/v3/nodes_tcfg.py (new file, 70 lines)
@@ -0,0 +1,70 @@
"""TCFG: Tangential Damping Classifier-free Guidance - (arXiv: https://arxiv.org/abs/2503.18137)"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import torch
|
||||
|
||||
from comfy_api.latest import io
|
||||
|
||||
|
||||
def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor:
|
||||
"""Drop tangential components from uncond score to align with cond score."""
|
||||
# (B, 1, ...)
|
||||
batch_num = cond_score.shape[0]
|
||||
cond_score_flat = cond_score.reshape(batch_num, 1, -1).float()
|
||||
uncond_score_flat = uncond_score.reshape(batch_num, 1, -1).float()
|
||||
|
||||
# Score matrix A (B, 2, ...)
|
||||
score_matrix = torch.cat((uncond_score_flat, cond_score_flat), dim=1)
|
||||
try:
|
||||
_, _, Vh = torch.linalg.svd(score_matrix, full_matrices=False)
|
||||
except RuntimeError:
|
||||
# Fallback to CPU
|
||||
_, _, Vh = torch.linalg.svd(score_matrix.cpu(), full_matrices=False)
|
||||
|
||||
# Drop the tangential components
|
||||
v1 = Vh[:, 0:1, :].to(uncond_score_flat.device) # (B, 1, ...)
|
||||
uncond_score_td = (uncond_score_flat @ v1.transpose(-2, -1)) * v1
|
||||
return uncond_score_td.reshape_as(uncond_score).to(uncond_score.dtype)
|
||||
|
||||
|
||||
class TCFG(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="TCFG_V3",
|
||||
display_name="Tangential Damping CFG _V3",
|
||||
category="advanced/guidance",
|
||||
description="TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
],
|
||||
outputs=[
|
||||
io.Model.Output(display_name="patched_model"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, model):
|
||||
m = model.clone()
|
||||
|
||||
def tangential_damping_cfg(args):
|
||||
# Assume [cond, uncond, ...]
|
||||
x = args["input"]
|
||||
conds_out = args["conds_out"]
|
||||
if len(conds_out) <= 1 or None in args["conds"][:2]:
|
||||
# Skip when either cond or uncond is None
|
||||
return conds_out
|
||||
cond_pred = conds_out[0]
|
||||
uncond_pred = conds_out[1]
|
||||
uncond_td = score_tangential_damping(x - cond_pred, x - uncond_pred)
|
||||
uncond_pred_td = x - uncond_td
|
||||
return [cond_pred, uncond_pred_td] + conds_out[2:]
|
||||
|
||||
m.set_model_sampler_pre_cfg_function(tangential_damping_cfg)
|
||||
return io.NodeOutput(m)
|
||||
|
||||
|
||||
NODES_LIST: list[type[io.ComfyNode]] = [
|
||||
TCFG,
|
||||
]
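
The projection at the heart of score_tangential_damping() above can be exercised on tiny random tensors; the damped uncond score ends up parallel to the dominant right-singular vector shared by both scores:

import torch

cond = torch.randn(2, 1, 32)    # flattened cond scores, (B, 1, N)
uncond = torch.randn(2, 1, 32)  # flattened uncond scores

score_matrix = torch.cat((uncond, cond), dim=1)  # (B, 2, N)
_, _, Vh = torch.linalg.svd(score_matrix, full_matrices=False)
v1 = Vh[:, 0:1, :]                               # dominant direction
uncond_td = (uncond @ v1.transpose(-2, -1)) * v1 # project uncond onto v1
print(uncond_td.shape)  # torch.Size([2, 1, 32])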

comfy_extras/v3/nodes_tomesd.py (new file, 190 lines)
@@ -0,0 +1,190 @@
"""Taken from: https://github.com/dbolya/tomesd"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from typing import Callable, Tuple
|
||||
|
||||
import torch
|
||||
|
||||
from comfy_api.latest import io
|
||||
|
||||
|
||||
def do_nothing(x: torch.Tensor, mode:str=None):
|
||||
return x
|
||||
|
||||
|
||||
def mps_gather_workaround(input, dim, index):
|
||||
if input.shape[-1] == 1:
|
||||
return torch.gather(
|
||||
input.unsqueeze(-1),
|
||||
dim - 1 if dim < 0 else dim,
|
||||
index.unsqueeze(-1)
|
||||
).squeeze(-1)
|
||||
return torch.gather(input, dim, index)
|
||||
|
||||
|
||||
def bipartite_soft_matching_random2d(
|
||||
metric: torch.Tensor,w: int, h: int, sx: int, sy: int, r: int, no_rand: bool = False
|
||||
) -> Tuple[Callable, Callable]:
|
||||
"""
|
||||
Partitions the tokens into src and dst and merges r tokens from src to dst.
|
||||
Dst tokens are partitioned by choosing one randomy in each (sx, sy) region.
|
||||
Args:
|
||||
- metric [B, N, C]: metric to use for similarity
|
||||
- w: image width in tokens
|
||||
- h: image height in tokens
|
||||
- sx: stride in the x dimension for dst, must divide w
|
||||
- sy: stride in the y dimension for dst, must divide h
|
||||
- r: number of tokens to remove (by merging)
|
||||
- no_rand: if true, disable randomness (use top left corner only)
|
||||
"""
|
||||
B, N, _ = metric.shape
|
||||
|
||||
if r <= 0 or w == 1 or h == 1:
|
||||
return do_nothing, do_nothing
|
||||
|
||||
gather = mps_gather_workaround if metric.device.type == "mps" else torch.gather
|
||||
|
||||
with torch.no_grad():
|
||||
hsy, wsx = h // sy, w // sx
|
||||
|
||||
# For each sy by sx kernel, randomly assign one token to be dst and the rest src
|
||||
if no_rand:
|
||||
rand_idx = torch.zeros(hsy, wsx, 1, device=metric.device, dtype=torch.int64)
|
||||
else:
|
||||
rand_idx = torch.randint(sy*sx, size=(hsy, wsx, 1), device=metric.device)
|
||||
|
||||
# The image might not divide sx and sy, so we need to work on a view of the top left if the idx buffer instead
|
||||
idx_buffer_view = torch.zeros(hsy, wsx, sy*sx, device=metric.device, dtype=torch.int64)
|
||||
idx_buffer_view.scatter_(dim=2, index=rand_idx, src=-torch.ones_like(rand_idx, dtype=rand_idx.dtype))
|
||||
idx_buffer_view = idx_buffer_view.view(hsy, wsx, sy, sx).transpose(1, 2).reshape(hsy * sy, wsx * sx)
|
||||
|
||||
# Image is not divisible by sx or sy so we need to move it into a new buffer
|
||||
if (hsy * sy) < h or (wsx * sx) < w:
|
||||
idx_buffer = torch.zeros(h, w, device=metric.device, dtype=torch.int64)
|
||||
idx_buffer[:(hsy * sy), :(wsx * sx)] = idx_buffer_view
|
||||
else:
|
||||
idx_buffer = idx_buffer_view
|
||||
|
||||
# We set dst tokens to be -1 and src to be 0, so an argsort gives us dst|src indices
|
||||
rand_idx = idx_buffer.reshape(1, -1, 1).argsort(dim=1)
|
||||
|
||||
# We're finished with these
|
||||
del idx_buffer, idx_buffer_view
|
||||
|
||||
# rand_idx is currently dst|src, so split them
|
||||
num_dst = hsy * wsx
|
||||
a_idx = rand_idx[:, num_dst:, :] # src
|
||||
b_idx = rand_idx[:, :num_dst, :] # dst
|
||||
|
||||
def split(x):
|
||||
C = x.shape[-1]
|
||||
src = gather(x, dim=1, index=a_idx.expand(B, N - num_dst, C))
|
||||
dst = gather(x, dim=1, index=b_idx.expand(B, num_dst, C))
|
||||
return src, dst
|
||||
|
||||
# Cosine similarity between A and B
|
||||
metric = metric / metric.norm(dim=-1, keepdim=True)
|
||||
a, b = split(metric)
|
||||
scores = a @ b.transpose(-1, -2)
|
||||
|
||||
# Can't reduce more than the # tokens in src
|
||||
r = min(a.shape[1], r)
|
||||
|
||||
# Find the most similar greedily
|
||||
node_max, node_idx = scores.max(dim=-1)
|
||||
edge_idx = node_max.argsort(dim=-1, descending=True)[..., None]
|
||||
|
||||
unm_idx = edge_idx[..., r:, :] # Unmerged Tokens
|
||||
src_idx = edge_idx[..., :r, :] # Merged Tokens
|
||||
dst_idx = gather(node_idx[..., None], dim=-2, index=src_idx)
|
||||
|
||||
def merge(x: torch.Tensor, mode="mean") -> torch.Tensor:
|
||||
src, dst = split(x)
|
||||
n, t1, c = src.shape
|
||||
|
||||
unm = gather(src, dim=-2, index=unm_idx.expand(n, t1 - r, c))
|
||||
src = gather(src, dim=-2, index=src_idx.expand(n, r, c))
|
||||
dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode)
|
||||
|
||||
return torch.cat([unm, dst], dim=1)
|
||||
|
||||
def unmerge(x: torch.Tensor) -> torch.Tensor:
|
||||
unm_len = unm_idx.shape[1]
|
||||
unm, dst = x[..., :unm_len, :], x[..., unm_len:, :]
|
||||
_, _, c = unm.shape
|
||||
|
||||
src = gather(dst, dim=-2, index=dst_idx.expand(B, r, c))
|
||||
|
||||
# Combine back to the original shape
|
||||
out = torch.zeros(B, N, c, device=x.device, dtype=x.dtype)
|
||||
out.scatter_(dim=-2, index=b_idx.expand(B, num_dst, c), src=dst)
|
||||
out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 1), dim=1, index=unm_idx).expand(B, unm_len, c), src=unm)
|
||||
out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 1), dim=1, index=src_idx).expand(B, r, c), src=src)
|
||||
|
||||
return out
|
||||
|
||||
return merge, unmerge
|
||||
|
||||
|
||||
def get_functions(x, ratio, original_shape):
|
||||
b, c, original_h, original_w = original_shape
|
||||
original_tokens = original_h * original_w
|
||||
downsample = int(math.ceil(math.sqrt(original_tokens // x.shape[1])))
|
||||
stride_x = 2
|
||||
stride_y = 2
|
||||
max_downsample = 1
|
||||
|
||||
if downsample <= max_downsample:
|
||||
w = int(math.ceil(original_w / downsample))
|
||||
h = int(math.ceil(original_h / downsample))
|
||||
r = int(x.shape[1] * ratio)
|
||||
no_rand = False
|
||||
m, u = bipartite_soft_matching_random2d(x, w, h, stride_x, stride_y, r, no_rand)
|
||||
return m, u
|
||||
|
||||
def nothing(y):
|
||||
return y
|
||||
|
||||
return nothing, nothing
|
||||
|
||||
|
||||
class TomePatchModel(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="TomePatchModel_V3",
|
||||
category="model_patches/unet",
|
||||
inputs=[
|
||||
io.Model.Input("model"),
|
||||
io.Float.Input("ratio", default=0.3, min=0.0, max=1.0, step=0.01),
|
||||
],
|
||||
outputs=[
|
||||
io.Model.Output(),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, model, ratio):
|
||||
u = None
|
||||
|
||||
def tomesd_m(q, k, v, extra_options):
|
||||
nonlocal u
|
||||
#NOTE: In the reference code get_functions takes x (input of the transformer block) as the argument instead of q
|
||||
#however from my basic testing it seems that using q instead gives better results
|
||||
m, u = get_functions(q, ratio, extra_options["original_shape"])
|
||||
return m(q), k, v
|
||||
|
||||
def tomesd_u(n, extra_options):
|
||||
return u(n)
|
||||
|
||||
m = model.clone()
|
||||
m.set_model_attn1_patch(tomesd_m)
|
||||
m.set_model_attn1_output_patch(tomesd_u)
|
||||
return io.NodeOutput(m)
|
||||
|
||||
|
||||
NODES_LIST: list[type[io.ComfyNode]] = [
|
||||
TomePatchModel,
|
||||
]
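
The gating arithmetic in get_functions() above is worth seeing with numbers: token merging only activates at the highest-resolution attention layers (downsample <= 1), and r is the number of tokens merged away. Evaluated for an assumed 64x64 latent:

import math

ratio = 0.3
original_h, original_w = 64, 64  # latent spatial size (illustrative)
num_tokens = 64 * 64             # tokens seen by this attention layer

downsample = int(math.ceil(math.sqrt(original_h * original_w // num_tokens)))
if downsample <= 1:
    r = int(num_tokens * ratio)
    print(f"merging r={r} of {num_tokens} tokens")  # merging r=1228 of 4096 tokens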

comfy_extras/v3/nodes_torch_compile.py (new file, 32 lines)
@@ -0,0 +1,32 @@
from __future__ import annotations

from comfy_api.latest import io
from comfy_api.torch_helpers import set_torch_compile_wrapper


class TorchCompileModel(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TorchCompileModel_V3",
            category="_for_testing",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.Combo.Input("backend", options=["inductor", "cudagraphs"]),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, backend):
        m = model.clone()
        set_torch_compile_wrapper(model=m, backend=backend)
        return io.NodeOutput(m)


NODES_LIST: list[type[io.ComfyNode]] = [
    TorchCompileModel,
]
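
Conceptually, set_torch_compile_wrapper boils down to torch.compile with the chosen backend; the wrapper additionally integrates with ComfyUI's model-patching machinery. A toy sketch of the underlying call:

import torch

net = torch.nn.Linear(8, 8)
compiled = torch.compile(net, backend="inductor")  # or backend="cudagraphs"
print(compiled(torch.randn(1, 8)).shape)  # torch.Size([1, 8])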

comfy_extras/v3/nodes_train.py (new file, 666 lines)
@@ -0,0 +1,666 @@
from __future__ import annotations

import logging
import os

import numpy as np
import safetensors
import torch
import torch.utils.checkpoint
import tqdm
from PIL import Image, ImageDraw, ImageFont

import comfy.model_management
import comfy.samplers
import comfy.sd
import comfy.utils
import comfy_extras.nodes_custom_sampler
import folder_paths
import node_helpers
from comfy.weight_adapter import adapter_maps, adapters
from comfy_api.latest import io, ui


def make_batch_extra_option_dict(d, indices, full_size=None):
    new_dict = {}
    for k, v in d.items():
        newv = v
        if isinstance(v, dict):
            newv = make_batch_extra_option_dict(v, indices, full_size=full_size)
        elif isinstance(v, torch.Tensor):
            if full_size is None or v.size(0) == full_size:
                newv = v[indices]
        elif isinstance(v, (list, tuple)) and len(v) == full_size:
            newv = [v[i] for i in indices]
        new_dict[k] = newv
    return new_dict


class TrainSampler(comfy.samplers.Sampler):
    def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, grad_acc=1, total_steps=1, seed=0, training_dtype=torch.bfloat16):
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.loss_callback = loss_callback
        self.batch_size = batch_size
        self.total_steps = total_steps
        self.grad_acc = grad_acc
        self.seed = seed
        self.training_dtype = training_dtype

    def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
        cond = model_wrap.conds["positive"]
        dataset_size = sigmas.size(0)
        torch.cuda.empty_cache()
        for i in (pbar := tqdm.trange(self.total_steps, desc="Training LoRA", smoothing=0.01, disable=not comfy.utils.PROGRESS_BAR_ENABLED)):
            noisegen = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(self.seed + i * 1000)
            indices = torch.randperm(dataset_size)[:self.batch_size].tolist()

            batch_latent = torch.stack([latent_image[i] for i in indices])
            batch_noise = noisegen.generate_noise({"samples": batch_latent}).to(batch_latent.device)
            # Sample one random timestep (as a sigma) per batch element.
            batch_sigmas = [
                model_wrap.inner_model.model_sampling.percent_to_sigma(
                    torch.rand((1,)).item()
                ) for _ in range(min(self.batch_size, dataset_size))
            ]
            batch_sigmas = torch.tensor(batch_sigmas).to(batch_latent.device)

            xt = model_wrap.inner_model.model_sampling.noise_scaling(
                batch_sigmas,
                batch_noise,
                batch_latent,
                False
            )
            x0 = model_wrap.inner_model.model_sampling.noise_scaling(
                torch.zeros_like(batch_sigmas),
                torch.zeros_like(batch_noise),
                batch_latent,
                False
            )

            model_wrap.conds["positive"] = [
                cond[i] for i in indices
            ]
            batch_extra_args = make_batch_extra_option_dict(extra_args, indices, full_size=dataset_size)

            with torch.autocast(xt.device.type, dtype=self.training_dtype):
                x0_pred = model_wrap(xt, batch_sigmas, **batch_extra_args)
                loss = self.loss_fn(x0_pred, x0)
            loss.backward()
            if self.loss_callback:
                self.loss_callback(loss.item())
            pbar.set_postfix({"loss": f"{loss.item():.4f}"})

            if (i + 1) % self.grad_acc == 0:
                self.optimizer.step()
                self.optimizer.zero_grad()
        torch.cuda.empty_cache()
        return torch.zeros_like(latent_image)
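
The shape of one TrainSampler step above, reduced to dummy tensors: draw a random mini-batch, noise it at a random sigma, predict x0, and take an optimizer step. The model and the noise scaling here are toy stand-ins, not ComfyUI APIs:

import torch

latents = torch.randn(8, 4, 16, 16)          # dataset of 8 latents
model = torch.nn.Conv2d(4, 4, 3, padding=1)  # toy x0-predictor
opt = torch.optim.AdamW(model.parameters(), lr=5e-4)
loss_fn = torch.nn.MSELoss()

idx = torch.randperm(latents.size(0))[:2]    # batch_size = 2
x0 = latents[idx]
sigma = torch.rand(x0.size(0), 1, 1, 1)      # stand-in for percent_to_sigma(...)
xt = x0 + sigma * torch.randn_like(x0)       # stand-in noise scaling

loss = loss_fn(model(xt), x0)
loss.backward()
opt.step()
opt.zero_grad()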


class BiasDiff(torch.nn.Module):
    def __init__(self, bias):
        super().__init__()
        self.bias = bias

    def __call__(self, b):
        org_dtype = b.dtype
        return (b.to(self.bias) + self.bias).to(org_dtype)

    def passive_memory_usage(self):
        return self.bias.nelement() * self.bias.element_size()

    def move_to(self, device):
        self.to(device=device)
        return self.passive_memory_usage()


def load_and_process_images(image_files, input_dir, resize_method="None", w=None, h=None):
    """Utility function to load and process a list of images.

    Args:
        image_files: List of image filenames
        input_dir: Base directory containing the images
        resize_method: How to handle images of different sizes ("None", "Stretch", "Crop", "Pad")

    Returns:
        torch.Tensor: Batch of processed images
    """
    if not image_files:
        raise ValueError("No valid images found in input")

    output_images = []

    for file in image_files:
        image_path = os.path.join(input_dir, file)
        img = node_helpers.pillow(Image.open, image_path)

        if img.mode == "I":
            img = img.point(lambda i: i * (1 / 255))
        img = img.convert("RGB")

        if w is None and h is None:
            w, h = img.size[0], img.size[1]

        # Resize image to match the first image in the dataset
        if img.size[0] != w or img.size[1] != h:
            if resize_method == "Stretch":
                img = img.resize((w, h), Image.Resampling.LANCZOS)
            elif resize_method == "Crop":
                img = img.crop((0, 0, w, h))
            elif resize_method == "Pad":
                img = img.resize((w, h), Image.Resampling.LANCZOS)
            elif resize_method == "None":
                raise ValueError(
                    "Your input image size does not match the first image in the dataset. Either select a valid resize method or use the same size for all images."
                )

        img_array = np.array(img).astype(np.float32) / 255.0
        img_tensor = torch.from_numpy(img_array)[None,]
        output_images.append(img_tensor)

    return torch.cat(output_images, dim=0)


class LoadImageSetFromFolderNode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoadImageSetFromFolderNode_V3",
            display_name="Load Image Dataset from Folder _V3",
            category="loaders",
            description="Loads a batch of images from a directory for training.",
            is_experimental=True,
            inputs=[
                io.Combo.Input(
                    "folder", options=folder_paths.get_input_subfolders(), tooltip="The folder to load images from."
                ),
                io.Combo.Input(
                    "resize_method", options=["None", "Stretch", "Crop", "Pad"], default="None", optional=True
                ),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, folder, resize_method="None"):
        sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder)
        valid_extensions = [".png", ".jpg", ".jpeg", ".webp"]
        image_files = [
            f
            for f in os.listdir(sub_input_dir)
            if any(f.lower().endswith(ext) for ext in valid_extensions)
        ]
        return io.NodeOutput(load_and_process_images(image_files, sub_input_dir, resize_method))


class LoadImageTextSetFromFolderNode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoadImageTextSetFromFolderNode_V3",
            display_name="Load Image and Text Dataset from Folder _V3",
            category="loaders",
            description="Loads a batch of images and captions from a directory for training.",
            is_experimental=True,
            inputs=[
                io.Combo.Input("folder", options=folder_paths.get_input_subfolders(), tooltip="The folder to load images from."),
                io.Clip.Input("clip", tooltip="The CLIP model used for encoding the text."),
                io.Combo.Input("resize_method", options=["None", "Stretch", "Crop", "Pad"], default="None", optional=True),
                io.Int.Input("width", default=-1, min=-1, max=10000, step=1, tooltip="The width to resize the images to. -1 means use the original width.", optional=True),
                io.Int.Input("height", default=-1, min=-1, max=10000, step=1, tooltip="The height to resize the images to. -1 means use the original height.", optional=True),
            ],
            outputs=[
                io.Image.Output(),
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, folder, clip, resize_method="None", width=None, height=None):
        if clip is None:
            raise RuntimeError(
                "ERROR: clip input is invalid: None\n\n"
                "If the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model."
            )

        logging.info(f"Loading images from folder: {folder}")

        sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder)
        valid_extensions = [".png", ".jpg", ".jpeg", ".webp"]

        image_files = []
        for item in os.listdir(sub_input_dir):
            path = os.path.join(sub_input_dir, item)
            if any(item.lower().endswith(ext) for ext in valid_extensions):
                image_files.append(path)
            elif os.path.isdir(path):
                # Support kohya-ss/sd-scripts folder structure
                repeat = 1
                if item.split("_")[0].isdigit():
                    repeat = int(item.split("_")[0])
                image_files.extend([
                    os.path.join(path, f) for f in os.listdir(path) if any(f.lower().endswith(ext) for ext in valid_extensions)
                ] * repeat)

        caption_file_path = [
            f.replace(os.path.splitext(f)[1], ".txt")
            for f in image_files
        ]
        captions = []
        for caption_file in caption_file_path:
            caption_path = os.path.join(sub_input_dir, caption_file)
            if os.path.exists(caption_path):
                with open(caption_path, "r", encoding="utf-8") as f:
                    caption = f.read().strip()
                    captions.append(caption)
            else:
                captions.append("")

        width = width if width != -1 else None
        height = height if height != -1 else None
        output_tensor = load_and_process_images(image_files, sub_input_dir, resize_method, width, height)

        logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.")

        logging.info(f"Encoding captions from {sub_input_dir}.")
        conditions = []
        empty_cond = clip.encode_from_tokens_scheduled(clip.tokenize(""))
        for text in captions:
            if text == "":
                conditions.append(empty_cond)
                continue
            tokens = clip.tokenize(text)
            conditions.extend(clip.encode_from_tokens_scheduled(tokens))
        logging.info(f"Encoded {len(conditions)} captions from {sub_input_dir}.")
        return io.NodeOutput(output_tensor, conditions)


def draw_loss_graph(loss_map, steps):
    width, height = 500, 300
    img = Image.new("RGB", (width, height), "white")
    draw = ImageDraw.Draw(img)

    min_loss, max_loss = min(loss_map.values()), max(loss_map.values())
    scaled_loss = [(l_v - min_loss) / (max_loss - min_loss) for l_v in loss_map.values()]

    prev_point = (0, height - int(scaled_loss[0] * height))
    for i, l_v in enumerate(scaled_loss[1:], start=1):
        x = int(i / (steps - 1) * width)
        y = height - int(l_v * height)
        draw.line([prev_point, (x, y)], fill="blue", width=2)
        prev_point = (x, y)

    return img


def find_all_highest_child_module_with_forward(model: torch.nn.Module, result=None, name=None):
    if result is None:
        result = []
    elif hasattr(model, "forward") and not isinstance(model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)):
        result.append(model)
        logging.debug(f"Found module with forward: {name} ({model.__class__.__name__})")
        return result
    name = name or "root"
    for next_name, child in model.named_children():
        find_all_highest_child_module_with_forward(child, result, f"{name}.{next_name}")
    return result


def patch(m):
    if not hasattr(m, "forward"):
        return
    org_forward = m.forward

    def fwd(args, kwargs):
        return org_forward(*args, **kwargs)

    def checkpointing_fwd(*args, **kwargs):
        return torch.utils.checkpoint.checkpoint(
            fwd, args, kwargs, use_reentrant=False
        )

    m.org_forward = org_forward
    m.forward = checkpointing_fwd


def unpatch(m):
    if hasattr(m, "org_forward"):
        m.forward = m.org_forward
        del m.org_forward
|
||||
|
||||
|
||||
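# Illustrative sketch (hypothetical, not part of this diff): patch()/unpatch()
# trade compute for memory -- activations are recomputed during backward
# instead of being stored. A minimal self-contained usage example:
def _checkpointing_demo():
    lin = torch.nn.Linear(4, 4)
    patch(lin)                                   # forward now runs under checkpointing
    out = lin(torch.randn(2, 4, requires_grad=True)).sum()
    out.backward()                               # re-runs the wrapped forward internally
    unpatch(lin)                                 # restores the original forward
    return lin
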
class TrainLoraNode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TrainLoraNode_V3",
            display_name="Train LoRA _V3",
            category="training",
            is_experimental=True,
            inputs=[
                io.Model.Input("model", tooltip="The model to train the LoRA on."),
                io.Latent.Input("latents", tooltip="The latents to use for training, serving as the dataset/input of the model."),
                io.Conditioning.Input("positive", tooltip="The positive conditioning to use for training."),
                io.Int.Input("batch_size", default=1, min=1, max=10000, step=1, tooltip="The batch size to use for training."),
                io.Int.Input("grad_accumulation_steps", default=1, min=1, max=1024, step=1, tooltip="The number of gradient accumulation steps to use for training."),
                io.Int.Input("steps", default=16, min=1, max=100000, tooltip="The number of steps to train the LoRA for."),
                io.Float.Input("learning_rate", default=0.0005, min=0.0000001, max=1.0, step=0.000001, tooltip="The learning rate to use for training."),
                io.Int.Input("rank", default=8, min=1, max=128, tooltip="The rank of the LoRA layers."),
                io.Combo.Input("optimizer", options=["AdamW", "Adam", "SGD", "RMSprop"], default="AdamW", tooltip="The optimizer to use for training."),
                io.Combo.Input("loss_function", options=["MSE", "L1", "Huber", "SmoothL1"], default="MSE", tooltip="The loss function to use for training."),
                io.Int.Input("seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="The seed to use for training (used in the generator for LoRA weight initialization and noise sampling)."),
                io.Combo.Input("training_dtype", options=["bf16", "fp32"], default="bf16", tooltip="The dtype to use for training."),
                io.Combo.Input("lora_dtype", options=["bf16", "fp32"], default="bf16", tooltip="The dtype to use for the LoRA."),
                io.Combo.Input("algorithm", options=list(adapter_maps.keys()), default=list(adapter_maps.keys())[0], tooltip="The algorithm to use for training."),
                io.Boolean.Input("gradient_checkpointing", default=True, tooltip="Use gradient checkpointing for training."),
                io.Combo.Input("existing_lora", options=folder_paths.get_filename_list("loras") + ["[None]"], default="[None]", tooltip="The existing LoRA to append to. Set to [None] for a new LoRA."),
            ],
            outputs=[
                io.Model.Output(display_name="model_with_lora"),
                io.LoraModel.Output(display_name="lora"),
                io.LossMap.Output(display_name="loss"),
                io.Int.Output(display_name="steps"),
            ],
        )

    @classmethod
    def execute(
        cls,
        model,
        latents,
        positive,
        batch_size,
        steps,
        grad_accumulation_steps,
        learning_rate,
        rank,
        optimizer,
        loss_function,
        seed,
        training_dtype,
        lora_dtype,
        algorithm,
        gradient_checkpointing,
        existing_lora,
    ):
        mp = model.clone()
        dtype = node_helpers.string_to_torch_dtype(training_dtype)
        lora_dtype = node_helpers.string_to_torch_dtype(lora_dtype)
        mp.set_model_compute_dtype(dtype)

        latents = latents["samples"].to(dtype)
        num_images = latents.shape[0]
        logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}")
        if len(positive) == 1 and num_images > 1:
            positive = positive * num_images
        elif len(positive) != num_images:
            raise ValueError(
                f"Number of positive conditions ({len(positive)}) does not match number of images ({num_images})."
            )

        with torch.inference_mode(False):
            lora_sd = {}
            generator = torch.Generator()
            generator.manual_seed(seed)

            # Load existing LoRA weights if provided
            existing_weights = {}
            existing_steps = 0
            if existing_lora != "[None]":
                lora_path = folder_paths.get_full_path_or_raise("loras", existing_lora)
                # Extract steps from filename like "trained_lora_10_steps_20250225_203716"
                existing_steps = int(existing_lora.split("_steps_")[0].split("_")[-1])
                if lora_path:
                    existing_weights = comfy.utils.load_torch_file(lora_path)

            all_weight_adapters = []
            for n, m in mp.model.named_modules():
                if hasattr(m, "weight_function"):
                    if m.weight is not None:
                        key = "{}.weight".format(n)
                        shape = m.weight.shape
                        if len(shape) >= 2:
                            alpha = float(existing_weights.get(f"{key}.alpha", 1.0))
                            dora_scale = existing_weights.get(
                                f"{key}.dora_scale", None
                            )
                            for adapter_cls in adapters:
                                existing_adapter = adapter_cls.load(
                                    n, existing_weights, alpha, dora_scale
                                )
                                if existing_adapter is not None:
                                    break
                            else:
                                existing_adapter = None
                                adapter_cls = adapter_maps[algorithm]

                            if existing_adapter is not None:
                                train_adapter = existing_adapter.to_train().to(lora_dtype)
                            else:
                                # Use LoRA with alpha=1.0 by default
                                train_adapter = adapter_cls.create_train(
                                    m.weight, rank=rank, alpha=1.0
                                ).to(lora_dtype)
                            for name, parameter in train_adapter.named_parameters():
                                lora_sd[f"{n}.{name}"] = parameter

                            mp.add_weight_wrapper(key, train_adapter)
                            all_weight_adapters.append(train_adapter)
                        else:
                            diff = torch.nn.Parameter(
                                torch.zeros(
                                    m.weight.shape, dtype=lora_dtype, requires_grad=True
                                )
                            )
                            diff_module = BiasDiff(diff)
                            mp.add_weight_wrapper(key, BiasDiff(diff))
                            all_weight_adapters.append(diff_module)
                            lora_sd["{}.diff".format(n)] = diff
                    if hasattr(m, "bias") and m.bias is not None:
                        key = "{}.bias".format(n)
                        bias = torch.nn.Parameter(
                            torch.zeros(m.bias.shape, dtype=lora_dtype, requires_grad=True)
                        )
                        bias_module = BiasDiff(bias)
                        lora_sd["{}.diff_b".format(n)] = bias
                        mp.add_weight_wrapper(key, BiasDiff(bias))
                        all_weight_adapters.append(bias_module)

            if optimizer == "Adam":
                optimizer = torch.optim.Adam(lora_sd.values(), lr=learning_rate)
            elif optimizer == "AdamW":
                optimizer = torch.optim.AdamW(lora_sd.values(), lr=learning_rate)
            elif optimizer == "SGD":
                optimizer = torch.optim.SGD(lora_sd.values(), lr=learning_rate)
            elif optimizer == "RMSprop":
                optimizer = torch.optim.RMSprop(lora_sd.values(), lr=learning_rate)

            # Setup loss function based on selection
            if loss_function == "MSE":
                criterion = torch.nn.MSELoss()
            elif loss_function == "L1":
                criterion = torch.nn.L1Loss()
            elif loss_function == "Huber":
                criterion = torch.nn.HuberLoss()
            elif loss_function == "SmoothL1":
                criterion = torch.nn.SmoothL1Loss()

            # Set up the models
            if gradient_checkpointing:
                for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model):
                    patch(m)
            mp.model.requires_grad_(False)
            comfy.model_management.load_models_gpu([mp], memory_required=1e20, force_full_load=True)

            # Set up the sampler and guider
            loss_map = {"loss": []}
            def loss_callback(loss):
                loss_map["loss"].append(loss)
            train_sampler = TrainSampler(
                criterion,
                optimizer,
                loss_callback=loss_callback,
                batch_size=batch_size,
                grad_acc=grad_accumulation_steps,
                total_steps=steps * grad_accumulation_steps,
                seed=seed,
                training_dtype=dtype
            )
            guider = comfy_extras.nodes_custom_sampler.Guider_Basic(mp)
            guider.set_conds(positive)  # Set conditioning from input

            # Training loop
            try:
                # Generate dummy sigmas and noise
                sigmas = torch.tensor(range(num_images))
                noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(seed)
                guider.sample(
                    noise.generate_noise({"samples": latents}),
                    latents,
                    train_sampler,
                    sigmas,
                    seed=noise.seed
                )
            finally:
                for m in mp.model.modules():
                    unpatch(m)
                del train_sampler, optimizer

            for adapter in all_weight_adapters:
                adapter.requires_grad_(False)

            for param in lora_sd:
                lora_sd[param] = lora_sd[param].to(lora_dtype)

            return io.NodeOutput(mp, lora_sd, loss_map, steps + existing_steps)

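# Illustrative sketch (hypothetical, not part of this diff): TrainSampler above
# is driven for steps * grad_accumulation_steps micro-batches; conceptually only
# `steps` optimizer updates are applied. A standalone sketch of that loop shape:
def _grad_accumulation_shape(steps=4, grad_acc=2):
    updates = 0
    for micro_step in range(steps * grad_acc):
        # gradients would accumulate here; step()/zero_grad() only every grad_acc batches
        if (micro_step + 1) % grad_acc == 0:
            updates += 1
    return updates  # == steps
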
class LoraModelLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LoraModelLoader_V3",
            display_name="Load LoRA Model _V3",
            category="loaders",
            description="Load trained LoRA weights from the Train LoRA node.",
            is_experimental=True,
            inputs=[
                io.Model.Input("model", tooltip="The diffusion model the LoRA will be applied to."),
                io.LoraModel.Input("lora", tooltip="The LoRA model to apply to the diffusion model."),
                io.Float.Input("strength_model", default=1.0, min=-100.0, max=100.0, step=0.01, tooltip="How strongly to modify the diffusion model. This value can be negative."),
            ],
            outputs=[
                io.Model.Output(tooltip="The modified diffusion model."),
            ],
        )

    @classmethod
    def execute(cls, model, lora, strength_model):
        if strength_model == 0:
            return io.NodeOutput(model)

        model_lora, _ = comfy.sd.load_lora_for_models(model, None, lora, strength_model, 0)
        return io.NodeOutput(model_lora)

class SaveLoRA(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SaveLoRA_V3",
            display_name="Save LoRA Weights _V3",
            category="loaders",
            is_experimental=True,
            is_output_node=True,
            inputs=[
                io.LoraModel.Input("lora", tooltip="The LoRA model to save. Do not use the model with LoRA layers."),
                io.String.Input("prefix", default="loras/ComfyUI_trained_lora", tooltip="The prefix to use for the saved LoRA file."),
                io.Int.Input("steps", tooltip="Optional: The number of steps the LoRA has been trained for, used to name the saved file.", optional=True),
            ],
            outputs=[],
        )

    @classmethod
    def execute(cls, lora, prefix, steps=None):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
            prefix, folder_paths.get_output_directory()
        )
        if steps is None:
            output_checkpoint = f"{filename}_{counter:05}_.safetensors"
        else:
            output_checkpoint = f"{filename}_{steps}_steps_{counter:05}_.safetensors"
        output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
        safetensors.torch.save_file(lora, output_checkpoint)
        return io.NodeOutput()

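# Illustrative sketch (hypothetical, not part of this diff): the filename built
# above ("<prefix>_<steps>_steps_<counter>_.safetensors") is exactly what
# TrainLoraNode's existing_lora handling parses the step count back out of:
def _steps_from_lora_filename(name):
    # mirrors the parse in TrainLoraNode.execute
    return int(name.split("_steps_")[0].split("_")[-1])
# _steps_from_lora_filename("ComfyUI_trained_lora_160_steps_00001_.safetensors") == 160
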
class LossGraphNode(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="LossGraphNode_V3",
            display_name="Plot Loss Graph _V3",
            category="training",
            description="Plots the loss graph and saves it to the output directory.",
            is_experimental=True,
            is_output_node=True,
            inputs=[
                io.LossMap.Input("loss"),  # TODO: the original V1 node also has a `default={}` parameter
                io.String.Input("filename_prefix", default="loss_graph"),
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
        )

    @classmethod
    def execute(cls, loss, filename_prefix):
        loss_values = loss["loss"]
        width, height = 800, 480
        margin = 40

        img = Image.new(
            "RGB", (width + margin, height + margin), "white"
        )  # Extend canvas
        draw = ImageDraw.Draw(img)

        min_loss, max_loss = min(loss_values), max(loss_values)
        scaled_loss = [(l_v - min_loss) / (max_loss - min_loss) for l_v in loss_values]

        steps = len(loss_values)

        prev_point = (margin, height - int(scaled_loss[0] * height))
        for i, l_v in enumerate(scaled_loss[1:], start=1):
            x = margin + int(i / steps * width)  # Scale X properly
            y = height - int(l_v * height)
            draw.line([prev_point, (x, y)], fill="blue", width=2)
            prev_point = (x, y)

        draw.line([(margin, 0), (margin, height)], fill="black", width=2)  # Y-axis
        draw.line(
            [(margin, height), (width + margin, height)], fill="black", width=2
        )  # X-axis

        try:
            font = ImageFont.truetype("arial.ttf", 12)
        except IOError:
            font = ImageFont.load_default()

        # Add axis labels
        draw.text((5, height // 2), "Loss", font=font, fill="black")
        draw.text((width // 2, height + 10), "Steps", font=font, fill="black")

        # Add min/max loss values
        draw.text((margin - 30, 0), f"{max_loss:.2f}", font=font, fill="black")
        draw.text(
            (margin - 30, height - 10), f"{min_loss:.2f}", font=font, fill="black"
        )
        return io.NodeOutput(ui=ui.PreviewImage(img, cls=cls))

NODES_LIST: list[type[io.ComfyNode]] = [
    LoadImageSetFromFolderNode,
    LoadImageTextSetFromFolderNode,
    LoraModelLoader,
    LossGraphNode,
    SaveLoRA,
    TrainLoraNode,
]
comfy_extras/v3/nodes_upscale_model.py | 106 lines (new file)
@@ -0,0 +1,106 @@
from __future__ import annotations

import logging

import torch
from spandrel import ImageModelDescriptor, ModelLoader

import comfy.utils
import folder_paths
from comfy import model_management
from comfy_api.latest import io

try:
    from spandrel import MAIN_REGISTRY
    from spandrel_extra_arches import EXTRA_REGISTRY
    MAIN_REGISTRY.add(*EXTRA_REGISTRY)
    logging.info("Successfully imported spandrel_extra_arches: support for non commercial upscale models.")
except Exception:
    pass


class UpscaleModelLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="UpscaleModelLoader_V3",
            display_name="Load Upscale Model _V3",
            category="loaders",
            inputs=[
                io.Combo.Input("model_name", options=folder_paths.get_filename_list("upscale_models")),
            ],
            outputs=[
                io.UpscaleModel.Output(),
            ],
        )

    @classmethod
    def execute(cls, model_name):
        model_path = folder_paths.get_full_path_or_raise("upscale_models", model_name)
        sd = comfy.utils.load_torch_file(model_path, safe_load=True)
        if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
            sd = comfy.utils.state_dict_prefix_replace(sd, {"module.": ""})
        out = ModelLoader().load_from_state_dict(sd).eval()

        if not isinstance(out, ImageModelDescriptor):
            raise Exception("Upscale model must be a single-image model.")

        return io.NodeOutput(out)

class ImageUpscaleWithModel(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageUpscaleWithModel_V3",
            display_name="Upscale Image (using Model) _V3",
            category="image/upscaling",
            inputs=[
                io.UpscaleModel.Input("upscale_model"),
                io.Image.Input("image"),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, upscale_model, image):
        device = model_management.get_torch_device()

        memory_required = model_management.module_size(upscale_model.model)
        memory_required += (512 * 512 * 3) * image.element_size() * max(upscale_model.scale, 1.0) * 384.0  # The 384.0 is an estimate of how much some of these models take, TODO: make it more accurate
        memory_required += image.nelement() * image.element_size()
        model_management.free_memory(memory_required, device)

        upscale_model.to(device)
        in_img = image.movedim(-1, -3).to(device)

        tile = 512
        overlap = 32

        oom = True
        while oom:
            try:
                steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(
                    in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap
                )
                pbar = comfy.utils.ProgressBar(steps)
                s = comfy.utils.tiled_scale(
                    in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar
                )
                oom = False
            except model_management.OOM_EXCEPTION as e:
                tile //= 2
                if tile < 128:
                    raise e

        upscale_model.to("cpu")
        s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0)
        return io.NodeOutput(s)

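# Illustrative sketch (hypothetical, not part of this diff): the loop above is
# a halve-on-OOM backoff -- retry tiled upscaling with smaller tiles until it
# fits in memory or the tile size drops below 128px. The strategy in isolation:
def _tile_backoff(run, tile=512, floor=128):
    while True:
        try:
            return run(tile)
        except MemoryError:  # stand-in for model_management.OOM_EXCEPTION
            tile //= 2
            if tile < floor:
                raise
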
NODES_LIST: list[type[io.ComfyNode]] = [
    ImageUpscaleWithModel,
    UpscaleModelLoader,
]
comfy_extras/v3/nodes_video.py | 212 lines (new file)
@@ -0,0 +1,212 @@
from __future__ import annotations

import json
import os
from fractions import Fraction

import av
import torch

import folder_paths
from comfy.cli_args import args
from comfy_api.input import AudioInput, ImageInput, VideoInput
from comfy_api.input_impl import VideoFromComponents, VideoFromFile
from comfy_api.latest import io, ui
from comfy_api.util import VideoCodec, VideoComponents, VideoContainer

class SaveWEBM(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SaveWEBM_V3",
            category="image/video",
            is_experimental=True,
            inputs=[
                io.Image.Input("images"),
                io.String.Input("filename_prefix", default="ComfyUI"),
                io.Combo.Input("codec", options=["vp9", "av1"]),
                io.Float.Input("fps", default=24.0, min=0.01, max=1000.0, step=0.01),
                io.Float.Input("crf", default=32.0, min=0, max=63.0, step=1, tooltip="Higher crf means lower quality with a smaller file size; lower crf means higher quality with a larger file size."),
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, images, codec, fps, filename_prefix, crf):
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
            filename_prefix, folder_paths.get_output_directory(), images[0].shape[1], images[0].shape[0]
        )

        file = f"{filename}_{counter:05}_.webm"
        container = av.open(os.path.join(full_output_folder, file), mode="w")

        if cls.hidden.prompt is not None:
            container.metadata["prompt"] = json.dumps(cls.hidden.prompt)

        if cls.hidden.extra_pnginfo is not None:
            for x in cls.hidden.extra_pnginfo:
                container.metadata[x] = json.dumps(cls.hidden.extra_pnginfo[x])

        codec_map = {"vp9": "libvpx-vp9", "av1": "libsvtav1"}
        stream = container.add_stream(codec_map[codec], rate=Fraction(round(fps * 1000), 1000))
        stream.width = images.shape[-2]
        stream.height = images.shape[-3]
        stream.pix_fmt = "yuv420p10le" if codec == "av1" else "yuv420p"
        stream.bit_rate = 0
        stream.options = {"crf": str(crf)}
        if codec == "av1":
            stream.options["preset"] = "6"

        for frame in images:
            frame = av.VideoFrame.from_ndarray(torch.clamp(frame[..., :3] * 255, min=0, max=255).to(device=torch.device("cpu"), dtype=torch.uint8).numpy(), format="rgb24")
            for packet in stream.encode(frame):
                container.mux(packet)
        container.mux(stream.encode())
        container.close()

        return io.NodeOutput(ui=ui.PreviewVideo([ui.SavedResult(file, subfolder, io.FolderType.output)]))

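# Illustrative sketch (hypothetical, not part of this diff): PyAV expects uint8
# RGB ndarrays, while ComfyUI images are float tensors in [0, 1] with a trailing
# channel dim; the clamp/scale in the encode loop above amounts to:
def _to_rgb24(frame):
    return torch.clamp(frame[..., :3] * 255, min=0, max=255).to(device=torch.device("cpu"), dtype=torch.uint8).numpy()
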
class SaveVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SaveVideo_V3",
            display_name="Save Video _V3",
            category="image/video",
            description="Saves the input video to your ComfyUI output directory.",
            inputs=[
                io.Video.Input("video", tooltip="The video to save."),
                io.String.Input("filename_prefix", default="video/ComfyUI", tooltip="The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."),
                io.Combo.Input("format", options=VideoContainer.as_input(), default="auto", tooltip="The format to save the video as."),
                io.Combo.Input("codec", options=VideoCodec.as_input(), default="auto", tooltip="The codec to use for the video."),
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
            is_output_node=True,
        )

    @classmethod
    def execute(cls, video: VideoInput, filename_prefix, format, codec):
        width, height = video.get_dimensions()
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
            filename_prefix,
            folder_paths.get_output_directory(),
            width,
            height
        )
        saved_metadata = None
        if not args.disable_metadata:
            metadata = {}
            if cls.hidden.extra_pnginfo is not None:
                metadata.update(cls.hidden.extra_pnginfo)
            if cls.hidden.prompt is not None:
                metadata["prompt"] = cls.hidden.prompt
            if len(metadata) > 0:
                saved_metadata = metadata
        file = f"{filename}_{counter:05}_.{VideoContainer.get_extension(format)}"
        video.save_to(
            os.path.join(full_output_folder, file),
            format=format,
            codec=codec,
            metadata=saved_metadata
        )
        return io.NodeOutput(ui=ui.PreviewVideo([ui.SavedResult(file, subfolder, io.FolderType.output)]))

class CreateVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CreateVideo_V3",
            display_name="Create Video _V3",
            category="image/video",
            description="Create a video from images.",
            inputs=[
                io.Image.Input("images", tooltip="The images to create a video from."),
                io.Float.Input("fps", default=30.0, min=1.0, max=120.0, step=1.0),
                io.Audio.Input("audio", optional=True, tooltip="The audio to add to the video."),
            ],
            outputs=[
                io.Video.Output(),
            ],
        )

    @classmethod
    def execute(cls, images: ImageInput, fps: float, audio: AudioInput = None):
        return io.NodeOutput(
            VideoFromComponents(VideoComponents(images=images, audio=audio, frame_rate=Fraction(fps)))
        )


class GetVideoComponents(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="GetVideoComponents_V3",
            display_name="Get Video Components _V3",
            category="image/video",
            description="Extracts all components from a video: frames, audio, and framerate.",
            inputs=[
                io.Video.Input("video", tooltip="The video to extract components from."),
            ],
            outputs=[
                io.Image.Output(display_name="images"),
                io.Audio.Output(display_name="audio"),
                io.Float.Output(display_name="fps"),
            ],
        )

    @classmethod
    def execute(cls, video: VideoInput):
        components = video.get_components()
        return io.NodeOutput(components.images, components.audio, float(components.frame_rate))

class LoadVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        files = folder_paths.filter_files_content_types(files, ["video"])
        return io.Schema(
            node_id="LoadVideo_V3",
            display_name="Load Video _V3",
            category="image/video",
            inputs=[
                io.Combo.Input("file", options=sorted(files), upload=io.UploadType.video),
            ],
            outputs=[
                io.Video.Output(),
            ],
        )

    @classmethod
    def execute(cls, file):
        video_path = folder_paths.get_annotated_filepath(file)
        return io.NodeOutput(VideoFromFile(video_path))

    @classmethod
    def fingerprint_inputs(s, file):
        video_path = folder_paths.get_annotated_filepath(file)
        # Instead of hashing the file, use the modification time to avoid rehashing large files.
        mod_time = os.path.getmtime(video_path)
        return mod_time

    @classmethod
    def validate_inputs(s, file):
        if not folder_paths.exists_annotated_filepath(file):
            return "Invalid video file: {}".format(file)
        return True


NODES_LIST: list[type[io.ComfyNode]] = [
    CreateVideo,
    GetVideoComponents,
    LoadVideo,
    SaveVideo,
    SaveWEBM,
]
comfy_extras/v3/nodes_video_model.py | 232 lines (new file)
@@ -0,0 +1,232 @@
from __future__ import annotations

import torch

import comfy.sd
import comfy.utils
import comfy_extras.nodes_model_merging
import folder_paths
import node_helpers
import nodes
from comfy_api.latest import io


class ImageOnlyCheckpointLoader(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageOnlyCheckpointLoader_V3",
            display_name="Image Only Checkpoint Loader (img2vid model) _V3",
            category="loaders/video_models",
            inputs=[
                io.Combo.Input("ckpt_name", options=folder_paths.get_filename_list("checkpoints")),
            ],
            outputs=[
                io.Model.Output(),
                io.ClipVision.Output(),
                io.Vae.Output(),
            ],
        )

    @classmethod
    def execute(cls, ckpt_name):
        ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(
            ckpt_path,
            output_vae=True,
            output_clip=False,
            output_clipvision=True,
            embedding_directory=folder_paths.get_folder_paths("embeddings"),
        )
        return io.NodeOutput(out[0], out[3], out[2])

class SVD_img2vid_Conditioning(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="SVD_img2vid_Conditioning_V3",
            category="conditioning/video_models",
            inputs=[
                io.ClipVision.Input("clip_vision"),
                io.Image.Input("init_image"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("height", default=576, min=16, max=nodes.MAX_RESOLUTION, step=8),
                io.Int.Input("video_frames", default=14, min=1, max=4096),
                io.Int.Input("motion_bucket_id", default=127, min=1, max=1023),
                io.Int.Input("fps", default=6, min=1, max=1024),
                io.Float.Input("augmentation_level", default=0.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id, fps, augmentation_level):
        output = clip_vision.encode_image(init_image)
        pooled = output.image_embeds.unsqueeze(0)
        pixels = comfy.utils.common_upscale(
            init_image.movedim(-1, 1), width, height, "bilinear", "center"
        ).movedim(1, -1)
        encode_pixels = pixels[:, :, :, :3]
        if augmentation_level > 0:
            encode_pixels += torch.randn_like(pixels) * augmentation_level
        t = vae.encode(encode_pixels)
        positive = [
            [
                pooled,
                {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t},
            ]
        ]
        negative = [
            [
                torch.zeros_like(pooled),
                {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)},
            ]
        ]
        latent = torch.zeros([video_frames, 4, height // 8, width // 8])
        return io.NodeOutput(positive, negative, {"samples": latent})

class VideoLinearCFGGuidance(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VideoLinearCFGGuidance_V3",
            category="sampling/video_models",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("min_cfg", default=1.0, min=0.0, max=100.0, step=0.5, round=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, min_cfg):
        def linear_cfg(args):
            cond = args["cond"]
            uncond = args["uncond"]
            cond_scale = args["cond_scale"]

            scale = torch.linspace(
                min_cfg, cond_scale, cond.shape[0], device=cond.device
            ).reshape((cond.shape[0], 1, 1, 1))
            return uncond + scale * (cond - uncond)

        m = model.clone()
        m.set_model_sampler_cfg_function(linear_cfg)
        return io.NodeOutput(m)

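# Illustrative sketch (hypothetical, not part of this diff): the linear
# guidance above ramps CFG across the frame (batch) dimension, so the first
# frame gets min_cfg and the last gets the full cond_scale:
def _cfg_ramp(min_cfg, cond_scale, frames):
    return torch.linspace(min_cfg, cond_scale, frames).reshape((frames, 1, 1, 1))
# _cfg_ramp(1.0, 3.0, 5).flatten() -> tensor([1.0, 1.5, 2.0, 2.5, 3.0])
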
class VideoTriangleCFGGuidance(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="VideoTriangleCFGGuidance_V3",
            category="sampling/video_models",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("min_cfg", default=1.0, min=0.0, max=100.0, step=0.5, round=0.01),
            ],
            outputs=[
                io.Model.Output(),
            ],
        )

    @classmethod
    def execute(cls, model, min_cfg):
        def linear_cfg(args):
            cond = args["cond"]
            uncond = args["uncond"]
            cond_scale = args["cond_scale"]
            period = 1.0
            values = torch.linspace(0, 1, cond.shape[0], device=cond.device)
            values = 2 * (values / period - torch.floor(values / period + 0.5)).abs()
            scale = (values * (cond_scale - min_cfg) + min_cfg).reshape((cond.shape[0], 1, 1, 1))

            return uncond + scale * (cond - uncond)

        m = model.clone()
        m.set_model_sampler_cfg_function(linear_cfg)
        return io.NodeOutput(m)

class ImageOnlyCheckpointSave(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ImageOnlyCheckpointSave_V3",
            category="advanced/model_merging",
            inputs=[
                io.Model.Input("model"),
                io.ClipVision.Input("clip_vision"),
                io.Vae.Input("vae"),
                io.String.Input("filename_prefix", default="checkpoints/ComfyUI"),
            ],
            outputs=[],
            hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo],
        )

    @classmethod
    def execute(cls, model, clip_vision, vae, filename_prefix):
        output_dir = folder_paths.get_output_directory()
        comfy_extras.nodes_model_merging.save_checkpoint(
            model,
            clip_vision=clip_vision,
            vae=vae,
            filename_prefix=filename_prefix,
            output_dir=output_dir,
            prompt=cls.hidden.prompt,
            extra_pnginfo=cls.hidden.extra_pnginfo,
        )
        return io.NodeOutput()

class ConditioningSetAreaPercentageVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ConditioningSetAreaPercentageVideo_V3",
            category="conditioning",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Float.Input("width", default=1.0, min=0, max=1.0, step=0.01),
                io.Float.Input("height", default=1.0, min=0, max=1.0, step=0.01),
                io.Float.Input("temporal", default=1.0, min=0, max=1.0, step=0.01),
                io.Float.Input("x", default=0, min=0, max=1.0, step=0.01),
                io.Float.Input("y", default=0, min=0, max=1.0, step=0.01),
                io.Float.Input("z", default=0, min=0, max=1.0, step=0.01),
                io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, conditioning, width, height, temporal, x, y, z, strength):
        c = node_helpers.conditioning_set_values(
            conditioning,
            {
                "area": ("percentage", temporal, height, width, z, y, x),
                "strength": strength,
                "set_area_to_bounds": False,
            }
        )
        return io.NodeOutput(c)


NODES_LIST: list[type[io.ComfyNode]] = [
    ConditioningSetAreaPercentageVideo,
    ImageOnlyCheckpointLoader,
    ImageOnlyCheckpointSave,
    SVD_img2vid_Conditioning,
    VideoLinearCFGGuidance,
    VideoTriangleCFGGuidance,
]
comfy_extras/v3/nodes_wan.py | 437 lines (new file)
@@ -0,0 +1,437 @@
from __future__ import annotations

import torch

import comfy.clip_vision
import comfy.latent_formats
import comfy.model_management
import comfy.utils
import node_helpers
import nodes
from comfy_api.latest import io


class TrimVideoLatent(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TrimVideoLatent_V3",
            category="latent/video",
            is_experimental=True,
            inputs=[
                io.Latent.Input("samples"),
                io.Int.Input("trim_amount", default=0, min=0, max=99999),
            ],
            outputs=[
                io.Latent.Output(),
            ],
        )

    @classmethod
    def execute(cls, samples, trim_amount):
        samples_out = samples.copy()

        s1 = samples["samples"]
        samples_out["samples"] = s1[:, :, trim_amount:]
        return io.NodeOutput(samples_out)

class WanCameraImageToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanCameraImageToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
                io.Image.Input("start_image", optional=True),
                io.WanCameraEmbedding.Input("camera_conditions", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None, camera_conditions=None):
        latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent)

        if start_image is not None:
            start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            concat_latent_image = vae.encode(start_image[:, :, :, :3])
            concat_latent[:, :, :concat_latent_image.shape[2]] = concat_latent_image[:, :, :concat_latent.shape[2]]

        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent})
        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent})

        if camera_conditions is not None:
            positive = node_helpers.conditioning_set_values(positive, {"camera_conditions": camera_conditions})
            negative = node_helpers.conditioning_set_values(negative, {"camera_conditions": camera_conditions})

        if clip_vision_output is not None:
            positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output})
            negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output})

        out_latent = {}
        out_latent["samples"] = latent
        return io.NodeOutput(positive, negative, out_latent)

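# Illustrative sketch (hypothetical, not part of this diff): my understanding is
# that the Wan video VAE packs the first pixel frame plus groups of 4 subsequent
# frames into latent frames, which is where the ((length - 1) // 4) + 1 used
# throughout this file comes from:
def _wan_latent_frames(length):
    return ((length - 1) // 4) + 1
# _wan_latent_frames(81) == 21, matching the default length=81 inputs above
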
class WanFirstLastFrameToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanFirstLastFrameToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.ClipVisionOutput.Input("clip_vision_start_image", optional=True),
                io.ClipVisionOutput.Input("clip_vision_end_image", optional=True),
                io.Image.Input("start_image", optional=True),
                io.Image.Input("end_image", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_start_image=None, clip_vision_end_image=None):
        latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        if start_image is not None:
            start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
        if end_image is not None:
            end_image = comfy.utils.common_upscale(end_image[-length:].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)

        image = torch.ones((length, height, width, 3)) * 0.5
        mask = torch.ones((1, 1, latent.shape[2] * 4, latent.shape[-2], latent.shape[-1]))

        if start_image is not None:
            image[:start_image.shape[0]] = start_image
            mask[:, :, :start_image.shape[0] + 3] = 0.0

        if end_image is not None:
            image[-end_image.shape[0]:] = end_image
            mask[:, :, -end_image.shape[0]:] = 0.0

        concat_latent_image = vae.encode(image[:, :, :, :3])
        mask = mask.view(1, mask.shape[2] // 4, 4, mask.shape[3], mask.shape[4]).transpose(1, 2)
        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask})
        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask})

        clip_vision_output = None
        if clip_vision_start_image is not None:
            clip_vision_output = clip_vision_start_image

        if clip_vision_end_image is not None:
            if clip_vision_output is not None:
                states = torch.cat([clip_vision_output.penultimate_hidden_states, clip_vision_end_image.penultimate_hidden_states], dim=-2)
                clip_vision_output = comfy.clip_vision.Output()
                clip_vision_output.penultimate_hidden_states = states
            else:
                clip_vision_output = clip_vision_end_image

        if clip_vision_output is not None:
            positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output})
            negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output})

        out_latent = {}
        out_latent["samples"] = latent
        return io.NodeOutput(positive, negative, out_latent)

class WanFunControlToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanFunControlToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
                io.Image.Input("start_image", optional=True),
                io.Image.Input("control_video", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None, control_video=None):
        latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        concat_latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        concat_latent = comfy.latent_formats.Wan21().process_out(concat_latent)
        concat_latent = concat_latent.repeat(1, 2, 1, 1, 1)

        if start_image is not None:
            start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            concat_latent_image = vae.encode(start_image[:, :, :, :3])
            concat_latent[:, 16:, :concat_latent_image.shape[2]] = concat_latent_image[:, :, :concat_latent.shape[2]]

        if control_video is not None:
            control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            concat_latent_image = vae.encode(control_video[:, :, :, :3])
            concat_latent[:, :16, :concat_latent_image.shape[2]] = concat_latent_image[:, :, :concat_latent.shape[2]]

        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent})
        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent})

        if clip_vision_output is not None:
            positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output})
            negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output})

        out_latent = {}
        out_latent["samples"] = latent
        return io.NodeOutput(positive, negative, out_latent)

class WanFunInpaintToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanFunInpaintToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
                io.Image.Input("start_image", optional=True),
                io.Image.Input("end_image", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, end_image=None, clip_vision_output=None):
        flfv = WanFirstLastFrameToVideo()
        return flfv.execute(positive, negative, vae, width, height, length, batch_size, start_image=start_image, end_image=end_image, clip_vision_start_image=clip_vision_output)

class WanImageToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanImageToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.ClipVisionOutput.Input("clip_vision_output", optional=True),
                io.Image.Input("start_image", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, batch_size, start_image=None, clip_vision_output=None):
        latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        if start_image is not None:
            start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            image = torch.ones((length, height, width, start_image.shape[-1]), device=start_image.device, dtype=start_image.dtype) * 0.5
            image[:start_image.shape[0]] = start_image

            concat_latent_image = vae.encode(image[:, :, :, :3])
            mask = torch.ones((1, 1, latent.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=start_image.device, dtype=start_image.dtype)
            mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0

            positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": mask})
            negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": mask})

        if clip_vision_output is not None:
            positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output})
            negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output})

        out_latent = {}
        out_latent["samples"] = latent
        return io.NodeOutput(positive, negative, out_latent)

class WanPhantomSubjectToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanPhantomSubjectToVideo_V3",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Image.Input("images", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative_text"),
                io.Conditioning.Output(display_name="negative_img_text"),
                io.Latent.Output(display_name="latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, batch_size, images):
        latent = torch.zeros([batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        cond2 = negative
        if images is not None:
            images = comfy.utils.common_upscale(images[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            latent_images = []
            for i in images:
                latent_images += [vae.encode(i.unsqueeze(0)[:, :, :, :3])]
            concat_latent_image = torch.cat(latent_images, dim=2)

            positive = node_helpers.conditioning_set_values(positive, {"time_dim_concat": concat_latent_image})
            cond2 = node_helpers.conditioning_set_values(negative, {"time_dim_concat": concat_latent_image})
            negative = node_helpers.conditioning_set_values(negative, {"time_dim_concat": comfy.latent_formats.Wan21().process_out(torch.zeros_like(concat_latent_image))})

        out_latent = {}
        out_latent["samples"] = latent
        return io.NodeOutput(positive, cond2, negative, out_latent)

class WanVaceToVideo(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanVaceToVideo_V3",
            category="conditioning/video_models",
            is_experimental=True,
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=81, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Float.Input("strength", default=1.0, min=0.0, max=1000.0, step=0.01),
                io.Image.Input("control_video", optional=True),
                io.Mask.Input("control_masks", optional=True),
                io.Image.Input("reference_image", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent"),
                io.Int.Output(display_name="trim_latent"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, batch_size, strength, control_video=None, control_masks=None, reference_image=None):
        latent_length = ((length - 1) // 4) + 1
        if control_video is not None:
            control_video = comfy.utils.common_upscale(control_video[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            if control_video.shape[0] < length:
                control_video = torch.nn.functional.pad(control_video, (0, 0, 0, 0, 0, 0, 0, length - control_video.shape[0]), value=0.5)
        else:
            control_video = torch.ones((length, height, width, 3)) * 0.5

        if reference_image is not None:
            reference_image = comfy.utils.common_upscale(reference_image[:1].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            reference_image = vae.encode(reference_image[:, :, :, :3])
            reference_image = torch.cat([reference_image, comfy.latent_formats.Wan21().process_out(torch.zeros_like(reference_image))], dim=1)

        if control_masks is None:
            mask = torch.ones((length, height, width, 1))
        else:
            mask = control_masks
            if mask.ndim == 3:
                mask = mask.unsqueeze(1)
            mask = comfy.utils.common_upscale(mask[:length], width, height, "bilinear", "center").movedim(1, -1)
            if mask.shape[0] < length:
                mask = torch.nn.functional.pad(mask, (0, 0, 0, 0, 0, 0, 0, length - mask.shape[0]), value=1.0)

        control_video = control_video - 0.5
        inactive = (control_video * (1 - mask)) + 0.5
        reactive = (control_video * mask) + 0.5

        inactive = vae.encode(inactive[:, :, :, :3])
        reactive = vae.encode(reactive[:, :, :, :3])
        control_video_latent = torch.cat((inactive, reactive), dim=1)
        if reference_image is not None:
            control_video_latent = torch.cat((reference_image, control_video_latent), dim=2)

        vae_stride = 8
        height_mask = height // vae_stride
        width_mask = width // vae_stride
        mask = mask.view(length, height_mask, vae_stride, width_mask, vae_stride)
        mask = mask.permute(2, 4, 0, 1, 3)
        mask = mask.reshape(vae_stride * vae_stride, length, height_mask, width_mask)
        mask = torch.nn.functional.interpolate(mask.unsqueeze(0), size=(latent_length, height_mask, width_mask), mode='nearest-exact').squeeze(0)

        trim_latent = 0
        if reference_image is not None:
            mask_pad = torch.zeros_like(mask[:, :reference_image.shape[2], :, :])
            mask = torch.cat((mask_pad, mask), dim=1)
            latent_length += reference_image.shape[2]
            trim_latent = reference_image.shape[2]

        mask = mask.unsqueeze(0)

        positive = node_helpers.conditioning_set_values(positive, {"vace_frames": [control_video_latent], "vace_mask": [mask], "vace_strength": [strength]}, append=True)
        negative = node_helpers.conditioning_set_values(negative, {"vace_frames": [control_video_latent], "vace_mask": [mask], "vace_strength": [strength]}, append=True)

        latent = torch.zeros([batch_size, 16, latent_length, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        out_latent = {}
        out_latent["samples"] = latent
        return io.NodeOutput(positive, negative, out_latent, trim_latent)

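# Illustrative sketch (hypothetical, not part of this diff): the mask reshape
# above folds each 8x8 VAE-stride block into a channel axis so the mask can be
# interpolated at latent resolution. A standalone shape check:
def _vace_mask_shape(length=81, height=480, width=832, vae_stride=8):
    hm, wm = height // vae_stride, width // vae_stride
    mask = torch.ones((length, hm, vae_stride, wm, vae_stride))
    mask = mask.permute(2, 4, 0, 1, 3).reshape(vae_stride * vae_stride, length, hm, wm)
    return mask.shape  # torch.Size([64, 81, 60, 104])
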

NODES_LIST: list[type[io.ComfyNode]] = [
    TrimVideoLatent,
    WanCameraImageToVideo,
    WanFirstLastFrameToVideo,
    WanFunControlToVideo,
    WanFunInpaintToVideo,
    WanImageToVideo,
    WanPhantomSubjectToVideo,
    WanVaceToVideo,
]
comfy_extras/v3/nodes_webcam.py | 92 lines (new file)
@@ -0,0 +1,92 @@
import hashlib
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from PIL import Image, ImageOps, ImageSequence
|
||||
|
||||
import folder_paths
|
||||
import node_helpers
|
||||
import nodes
|
||||
from comfy_api.latest import io
|
||||
|
||||
|
||||
class WebcamCapture(io.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
node_id="WebcamCapture_V3",
|
||||
display_name="Webcam Capture _V3",
|
||||
category="image",
|
||||
inputs=[
|
||||
io.Webcam.Input("image"),
|
||||
io.Int.Input("width", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
|
||||
io.Int.Input("height", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
|
||||
io.Boolean.Input("capture_on_queue", default=True),
|
||||
],
|
||||
outputs=[
|
||||
io.Image.Output(),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, image, **kwargs) -> io.NodeOutput:
|
||||
img = node_helpers.pillow(Image.open, folder_paths.get_annotated_filepath(image))
|
||||
|
||||
output_images = []
|
||||
output_masks = []
|
||||
w, h = None, None
|
||||
|
||||
excluded_formats = ["MPO"]
|
||||
|
||||
for i in ImageSequence.Iterator(img):
|
||||
i = node_helpers.pillow(ImageOps.exif_transpose, i)
|
||||
|
||||
if i.mode == "I":
|
||||
i = i.point(lambda i: i * (1 / 255))
|
||||
image = i.convert("RGB")
|
||||
|
||||
if len(output_images) == 0:
|
||||
w = image.size[0]
|
||||
h = image.size[1]
|
||||
|
||||
if image.size[0] != w or image.size[1] != h:
|
||||
continue
|
||||
|
||||
image = np.array(image).astype(np.float32) / 255.0
|
||||
image = torch.from_numpy(image)[None,]
|
||||
if "A" in i.getbands():
|
||||
mask = np.array(i.getchannel("A")).astype(np.float32) / 255.0
|
||||
mask = 1.0 - torch.from_numpy(mask)
|
||||
elif i.mode == "P" and "transparency" in i.info:
|
||||
mask = np.array(i.convert("RGBA").getchannel("A")).astype(np.float32) / 255.0
|
||||
mask = 1.0 - torch.from_numpy(mask)
|
||||
else:
|
||||
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
|
||||
output_images.append(image)
|
||||
output_masks.append(mask.unsqueeze(0))
|
||||
|
||||
if len(output_images) > 1 and img.format not in excluded_formats:
|
||||
output_image = torch.cat(output_images, dim=0)
|
||||
output_mask = torch.cat(output_masks, dim=0)
|
||||
else:
|
||||
output_image = output_images[0]
|
||||
output_mask = output_masks[0]
|
||||
|
||||
return io.NodeOutput(output_image, output_mask)
|
||||
|
||||
@classmethod
|
||||
def fingerprint_inputs(s, image, width, height, capture_on_queue):
|
||||
image_path = folder_paths.get_annotated_filepath(image)
|
||||
m = hashlib.sha256()
|
||||
with open(image_path, "rb") as f:
|
||||
m.update(f.read())
|
||||
return m.digest().hex()
|
||||
|
||||
@classmethod
|
||||
def validate_inputs(s, image):
|
||||
if not folder_paths.exists_annotated_filepath(image):
|
||||
return "Invalid image file: {}".format(image)
|
||||
return True
|
||||
|
||||
|
||||
NODES_LIST: list[type[io.ComfyNode]] = [WebcamCapture]
|
||||
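fingerprint_inputs is the V3 counterpart of IS_CHANGED (see the execution.py changes below): because it hashes the captured file's bytes, any new capture produces a new digest and forces re-execution on the next queue. A standalone sketch of that invalidation mechanic, with hypothetical file contents:

# Minimal sketch of content-hash fingerprinting as used by fingerprint_inputs above.
# The file and its contents are hypothetical; the real node hashes the annotated path.
import hashlib
import tempfile

def fingerprint(path: str) -> str:
    m = hashlib.sha256()
    with open(path, "rb") as f:
        m.update(f.read())
    return m.digest().hex()

with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
    f.write(b"frame-1")
    path = f.name

before = fingerprint(path)
with open(path, "wb") as f:   # simulate the webcam overwriting the capture
    f.write(b"frame-2")
after = fingerprint(path)

# A changed digest invalidates the cache entry, so the node runs again.
assert before != after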
145  execution.py
@@ -32,6 +32,8 @@ from comfy_execution.graph_utils import GraphBuilder, is_link
 from comfy_execution.validation import validate_node_input
 from comfy_execution.progress import get_progress_state, reset_progress_state, add_progress_handler, WebUIProgressHandler
 from comfy_execution.utils import CurrentNodeContext
+from comfy_api.internal import _ComfyNodeInternal, _NodeOutputInternal, first_real_override, is_class, make_locked_method_func
+from comfy_api.latest import io, resources


 class ExecutionResult(Enum):
@@ -56,7 +58,15 @@ class IsChangedCache:
         node = self.dynprompt.get_node(node_id)
         class_type = node["class_type"]
         class_def = nodes.NODE_CLASS_MAPPINGS[class_type]
-        if not hasattr(class_def, "IS_CHANGED"):
+        has_is_changed = False
+        is_changed_name = None
+        if issubclass(class_def, _ComfyNodeInternal) and first_real_override(class_def, "fingerprint_inputs") is not None:
+            has_is_changed = True
+            is_changed_name = "fingerprint_inputs"
+        elif hasattr(class_def, "IS_CHANGED"):
+            has_is_changed = True
+            is_changed_name = "IS_CHANGED"
+        if not has_is_changed:
             self.is_changed[node_id] = False
             return self.is_changed[node_id]

@@ -65,9 +75,9 @@ class IsChangedCache:
             return self.is_changed[node_id]

         # Intentionally do not use cached outputs here. We only want constants in IS_CHANGED
-        input_data_all, _ = get_input_data(node["inputs"], class_def, node_id, None)
+        input_data_all, _, hidden_inputs = get_input_data(node["inputs"], class_def, node_id, None)
         try:
-            is_changed = await _async_map_node_over_list(self.prompt_id, node_id, class_def, input_data_all, "IS_CHANGED")
+            is_changed = await _async_map_node_over_list(self.prompt_id, node_id, class_def, input_data_all, is_changed_name)
             is_changed = await resolve_map_node_over_list_results(is_changed)
             node["is_changed"] = [None if isinstance(x, ExecutionBlocker) else x for x in is_changed]
         except Exception as e:
@@ -126,9 +136,14 @@ class CacheSet:
 SENSITIVE_EXTRA_DATA_KEYS = ("auth_token_comfy_org", "api_key_comfy_org")

 def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}):
-    valid_inputs = class_def.INPUT_TYPES()
+    is_v3 = issubclass(class_def, _ComfyNodeInternal)
+    if is_v3:
+        valid_inputs, schema = class_def.INPUT_TYPES(include_hidden=False, return_schema=True)
+    else:
+        valid_inputs = class_def.INPUT_TYPES()
     input_data_all = {}
     missing_keys = {}
+    hidden_inputs_v3 = {}
     for x in inputs:
         input_data = inputs[x]
         _, input_category, input_info = get_input_info(class_def, x, valid_inputs)
@@ -153,22 +168,37 @@ def get_input_data(inputs, class_def, unique_id, outputs=None, dynprompt=None, extra_data={}):
         elif input_category is not None:
             input_data_all[x] = [input_data]

-    if "hidden" in valid_inputs:
-        h = valid_inputs["hidden"]
-        for x in h:
-            if h[x] == "PROMPT":
-                input_data_all[x] = [dynprompt.get_original_prompt() if dynprompt is not None else {}]
-            if h[x] == "DYNPROMPT":
-                input_data_all[x] = [dynprompt]
-            if h[x] == "EXTRA_PNGINFO":
-                input_data_all[x] = [extra_data.get('extra_pnginfo', None)]
-            if h[x] == "UNIQUE_ID":
-                input_data_all[x] = [unique_id]
-            if h[x] == "AUTH_TOKEN_COMFY_ORG":
-                input_data_all[x] = [extra_data.get("auth_token_comfy_org", None)]
-            if h[x] == "API_KEY_COMFY_ORG":
-                input_data_all[x] = [extra_data.get("api_key_comfy_org", None)]
-    return input_data_all, missing_keys
+    if is_v3:
+        if schema.hidden:
+            if io.Hidden.prompt in schema.hidden:
+                hidden_inputs_v3[io.Hidden.prompt] = dynprompt.get_original_prompt() if dynprompt is not None else {}
+            if io.Hidden.dynprompt in schema.hidden:
+                hidden_inputs_v3[io.Hidden.dynprompt] = dynprompt
+            if io.Hidden.extra_pnginfo in schema.hidden:
+                hidden_inputs_v3[io.Hidden.extra_pnginfo] = extra_data.get('extra_pnginfo', None)
+            if io.Hidden.unique_id in schema.hidden:
+                hidden_inputs_v3[io.Hidden.unique_id] = unique_id
+            if io.Hidden.auth_token_comfy_org in schema.hidden:
+                hidden_inputs_v3[io.Hidden.auth_token_comfy_org] = extra_data.get("auth_token_comfy_org", None)
+            if io.Hidden.api_key_comfy_org in schema.hidden:
+                hidden_inputs_v3[io.Hidden.api_key_comfy_org] = extra_data.get("api_key_comfy_org", None)
+    else:
+        if "hidden" in valid_inputs:
+            h = valid_inputs["hidden"]
+            for x in h:
+                if h[x] == "PROMPT":
+                    input_data_all[x] = [dynprompt.get_original_prompt() if dynprompt is not None else {}]
+                if h[x] == "DYNPROMPT":
+                    input_data_all[x] = [dynprompt]
+                if h[x] == "EXTRA_PNGINFO":
+                    input_data_all[x] = [extra_data.get('extra_pnginfo', None)]
+                if h[x] == "UNIQUE_ID":
+                    input_data_all[x] = [unique_id]
+                if h[x] == "AUTH_TOKEN_COMFY_ORG":
+                    input_data_all[x] = [extra_data.get("auth_token_comfy_org", None)]
+                if h[x] == "API_KEY_COMFY_ORG":
+                    input_data_all[x] = [extra_data.get("api_key_comfy_org", None)]
+    return input_data_all, missing_keys, hidden_inputs_v3

 map_node_over_list = None #Don't hook this please

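The V3 branch above keys hidden values off the node's schema rather than a "hidden" dict in INPUT_TYPES. A minimal sketch of a node that would exercise it; the node itself (ShowUniqueID_V3) and the cls.hidden access pattern are assumptions, while io.Schema, io.Hidden, and PREPARE_CLASS_CLONE(hidden_inputs) come from the diff:

# Hedged sketch: declaring hidden inputs in a V3 schema. The executor resolves
# them into hidden_inputs_v3 (branch above) and hands them to the class clone.
from comfy_api.latest import io

class ShowUniqueID(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ShowUniqueID_V3",      # hypothetical node
            category="utils",
            inputs=[],
            outputs=[io.String.Output()],
            hidden=[io.Hidden.unique_id, io.Hidden.prompt],
        )

    @classmethod
    def execute(cls) -> io.NodeOutput:
        # Assumption: hidden values injected via PREPARE_CLASS_CLONE(hidden_inputs)
        # are reachable on the clone, e.g. as cls.hidden.unique_id.
        return io.NodeOutput(str(cls.hidden.unique_id))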
@@ -184,7 +214,7 @@ async def resolve_map_node_over_list_results(results):
             raise exc
     return [x.result() if isinstance(x, asyncio.Task) else x for x in results]

-async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
+async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None, hidden_inputs=None):
     # check if node wants the lists
     input_is_list = getattr(obj, "INPUT_IS_LIST", False)

@@ -214,7 +244,27 @@ async def _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, func, allow_interrupt=False, execution_block_cb=None, pre_execute_cb=None):
         if execution_block is None:
             if pre_execute_cb is not None and index is not None:
                 pre_execute_cb(index)
-            f = getattr(obj, func)
+            # V3
+            if isinstance(obj, _ComfyNodeInternal) or (is_class(obj) and issubclass(obj, _ComfyNodeInternal)):
+                # if is just a class, then assign no resources or state, just create clone
+                if is_class(obj):
+                    type_obj = obj
+                    obj.VALIDATE_CLASS()
+                    class_clone = obj.PREPARE_CLASS_CLONE(hidden_inputs)
+                # otherwise, use class instance to populate/reuse some fields
+                else:
+                    type_obj = type(obj)
+                    type_obj.VALIDATE_CLASS()
+                    class_clone = type_obj.PREPARE_CLASS_CLONE(hidden_inputs)
+                    # NOTE: this is a mock of resource management; for local, just stores ResourcesLocal on node instance
+                    if hasattr(obj, "local_resources"):
+                        if obj.local_resources is None:
+                            obj.local_resources = resources.ResourcesLocal()
+                        class_clone.resources = obj.local_resources
+                f = make_locked_method_func(type_obj, func, class_clone)
+            # V1
+            else:
+                f = getattr(obj, func)
             if inspect.iscoroutinefunction(f):
                 async def async_wrapper(f, prompt_id, unique_id, list_index, args):
                     with CurrentNodeContext(prompt_id, unique_id, list_index):
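Instead of calling the method straight off the registered class, the V3 path above validates, clones, attaches hidden inputs and resources, and binds func to the clone, so per-execution state never touches the shared class. A generic, self-contained sketch of that clone-then-bind idea (plain Python, not the ComfyUI helpers):

# Generic sketch of the clone-then-bind pattern (not the ComfyUI API itself):
# state for one execution lives on a throwaway copy, so the registered class
# stays stateless and safe to share across the whole queue.
class Node:
    hidden = None

    @classmethod
    def execute(cls):
        return f"ran with hidden={cls.hidden}"

def prepare_clone(node_cls, hidden_inputs):
    # type() with a fresh dict makes an independent class object; mutating it
    # cannot leak state back into node_cls.
    clone = type(node_cls.__name__ + "Clone", (node_cls,), {})
    clone.hidden = hidden_inputs
    return clone

clone = prepare_clone(Node, {"unique_id": "42"})
print(clone.execute())   # ran with hidden={'unique_id': '42'}
print(Node.hidden)       # None: the original class is untouched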
@@ -266,8 +316,8 @@ def merge_result_data(results, obj):
             output.append([o[i] for o in results])
     return output

-async def get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=None, pre_execute_cb=None):
-    return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
+async def get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=None, pre_execute_cb=None, hidden_inputs=None):
+    return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
     has_pending_task = any(isinstance(r, asyncio.Task) and not r.done() for r in return_values)
     if has_pending_task:
         return return_values, {}, False, has_pending_task
@@ -298,6 +348,26 @@ def get_output_from_returns(return_values, obj):
                 result = tuple([result] * len(obj.RETURN_TYPES))
             results.append(result)
             subgraph_results.append((None, result))
+        elif isinstance(r, _NodeOutputInternal):
+            # V3
+            if r.ui is not None:
+                if isinstance(r.ui, dict):
+                    uis.append(r.ui)
+                else:
+                    uis.append(r.ui.as_dict())
+            if r.expand is not None:
+                has_subgraph = True
+                new_graph = r.expand
+                result = r.result
+                if r.block_execution is not None:
+                    result = tuple([ExecutionBlocker(r.block_execution)] * len(obj.RETURN_TYPES))
+                subgraph_results.append((new_graph, result))
+            elif r.result is not None:
+                result = r.result
+                if r.block_execution is not None:
+                    result = tuple([ExecutionBlocker(r.block_execution)] * len(obj.RETURN_TYPES))
+                results.append(result)
+                subgraph_results.append((None, result))
         else:
             if isinstance(r, ExecutionBlocker):
                 r = tuple([r] * len(obj.RETURN_TYPES))
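The branch above inspects four attributes of a V3 NodeOutput: result, ui, expand, and block_execution. A short catalogue of the return shapes it distinguishes; the attribute names are from the diff, while the keyword-argument spellings in the commented lines are assumptions:

from comfy_api.latest import io

# 1) Plain results: r.result is the tuple of output values.
out = io.NodeOutput("value_a", "value_b")

# 2) Results plus UI payload: r.ui lands in `uis` (dicts are taken as-is,
#    anything else via .as_dict()).
# out = io.NodeOutput(images, ui={"images": previews})

# 3) Graph expansion: r.expand sets has_subgraph and carries the new graph,
#    while r.result holds placeholder outputs for the expanded subgraph.
# out = io.NodeOutput(expand=new_graph, result=(placeholder,))

# 4) Blocked execution: block_execution replaces every declared output with
#    an ExecutionBlocker carrying the given message.
# out = io.NodeOutput(block_execution="reason shown to the user")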
@@ -381,7 +451,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
         has_subgraph = False
     else:
         get_progress_state().start_progress(unique_id)
-        input_data_all, missing_keys = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data)
+        input_data_all, missing_keys, hidden_inputs = get_input_data(inputs, class_def, unique_id, caches.outputs, dynprompt, extra_data)
         if server.client_id is not None:
             server.last_node_id = display_node_id
             server.send_sync("executing", { "node": unique_id, "display_node": display_node_id, "prompt_id": prompt_id }, server.client_id)
@@ -391,8 +461,12 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
             obj = class_def()
             caches.objects.set(unique_id, obj)

-        if hasattr(obj, "check_lazy_status"):
-            required_inputs = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, "check_lazy_status", allow_interrupt=True)
+        if issubclass(class_def, _ComfyNodeInternal):
+            lazy_status_present = first_real_override(class_def, "check_lazy_status") is not None
+        else:
+            lazy_status_present = getattr(obj, "check_lazy_status", None) is not None
+        if lazy_status_present:
+            required_inputs = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, "check_lazy_status", allow_interrupt=True, hidden_inputs=hidden_inputs)
             required_inputs = await resolve_map_node_over_list_results(required_inputs)
             required_inputs = set(sum([r for r in required_inputs if isinstance(r,list)], []))
             required_inputs = [x for x in required_inputs if isinstance(x,str) and (
@@ -424,7 +498,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
             def pre_execute_cb(call_index):
                 # TODO - How to handle this with async functions without contextvars (which requires Python 3.12)?
                 GraphBuilder.set_default_prefix(unique_id, call_index, 0)
-            output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
+            output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
             if has_pending_tasks:
                 pending_async_nodes[unique_id] = output_data
                 unblock = execution_list.add_external_block(unique_id)
@@ -672,8 +746,14 @@ async def validate_inputs(prompt_id, prompt, item, validated):

     validate_function_inputs = []
     validate_has_kwargs = False
-    if hasattr(obj_class, "VALIDATE_INPUTS"):
-        argspec = inspect.getfullargspec(obj_class.VALIDATE_INPUTS)
+    if issubclass(obj_class, _ComfyNodeInternal):
+        validate_function_name = "validate_inputs"
+        validate_function = first_real_override(obj_class, validate_function_name)
+    else:
+        validate_function_name = "VALIDATE_INPUTS"
+        validate_function = getattr(obj_class, validate_function_name, None)
+    if validate_function is not None:
+        argspec = inspect.getfullargspec(validate_function)
         validate_function_inputs = argspec.args
         validate_has_kwargs = argspec.varkw is not None
     received_types = {}
@@ -848,7 +928,7 @@ async def validate_inputs(prompt_id, prompt, item, validated):
                 continue

         if len(validate_function_inputs) > 0 or validate_has_kwargs:
-            input_data_all, _ = get_input_data(inputs, obj_class, unique_id)
+            input_data_all, _, hidden_inputs = get_input_data(inputs, obj_class, unique_id)
             input_filtered = {}
             for x in input_data_all:
                 if x in validate_function_inputs or validate_has_kwargs:
@@ -856,8 +936,7 @@ async def validate_inputs(prompt_id, prompt, item, validated):
             if 'input_types' in validate_function_inputs:
                 input_filtered['input_types'] = [received_types]

-            #ret = obj_class.VALIDATE_INPUTS(**input_filtered)
-            ret = await _async_map_node_over_list(prompt_id, unique_id, obj_class, input_filtered, "VALIDATE_INPUTS")
+            ret = await _async_map_node_over_list(prompt_id, unique_id, obj_class, input_filtered, validate_function_name, hidden_inputs=hidden_inputs)
             ret = await resolve_map_node_over_list_results(ret)
             for x in input_filtered:
                 for i, r in enumerate(ret):
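For V3 nodes the validator resolved above is validate_inputs (compare WebcamCapture.validate_inputs earlier in this diff); only arguments named in its signature are forwarded, and any non-True return is treated as the error message. A standalone sketch with a hypothetical node and check:

# Sketch of the V3 validation contract: the executor inspects the function's
# argspec, forwards only the named inputs, and reports any non-True return.
# ExampleNode and its .png constraint are hypothetical.
import inspect

class ExampleNode:
    @classmethod
    def validate_inputs(cls, image):
        if not str(image).endswith(".png"):   # hypothetical constraint
            return "Invalid image file: {}".format(image)
        return True

argspec = inspect.getfullargspec(ExampleNode.validate_inputs)
print(argspec.args)   # ['cls', 'image'] -> only the "image" input is forwarded
print(ExampleNode.validate_inputs("photo.jpg"))  # 'Invalid image file: photo.jpg'
print(ExampleNode.validate_inputs("photo.png"))  # True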
87  nodes.py
@@ -29,6 +29,7 @@ import comfy.controlnet
 from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict, FileLocator
 from comfy_api.internal import register_versions, ComfyAPIWithVersion
 from comfy_api.version_list import supported_versions
+from comfy_api.latest import io

 import comfy.clip_vision

@@ -2152,6 +2153,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes"):
         if os.path.isdir(web_dir):
             EXTENSION_WEB_DIRS[module_name] = web_dir

+        # V1 node definition
         if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
             for name, node_cls in module.NODE_CLASS_MAPPINGS.items():
                 if name not in ignore:
@@ -2160,8 +2162,19 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes"):
             if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                 NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
             return True
+        # V3 node definition
+        elif getattr(module, "NODES_LIST", None) is not None:
+            for node_cls in module.NODES_LIST:
+                node_cls: io.ComfyNode
+                schema = node_cls.GET_SCHEMA()
+                if schema.node_id not in ignore:
+                    NODE_CLASS_MAPPINGS[schema.node_id] = node_cls
+                    node_cls.RELATIVE_PYTHON_MODULE = "{}.{}".format(module_parent, get_module_name(module_path))
+                if schema.display_name is not None:
+                    NODE_DISPLAY_NAME_MAPPINGS[schema.node_id] = schema.display_name
+            return True
         else:
-            logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
+            logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).")
             return False
     except Exception as e:
         logging.warning(traceback.format_exc())
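Grounded in the loader change above, here is a hypothetical minimal custom-node module that the NODES_LIST branch would register; everything except the io API names and the registration flow shown in the diff is made up:

# Hedged sketch: the smallest V3 custom-node file, exposing NODES_LIST instead
# of NODE_CLASS_MAPPINGS. load_custom_node() calls GET_SCHEMA() on each class
# and registers node_id / display_name exactly as in the diff above.
from comfy_api.latest import io


class HelloString(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="HelloString_V3",          # becomes the NODE_CLASS_MAPPINGS key
            display_name="Hello String _V3",   # becomes the display-name entry
            category="utils",
            inputs=[],
            outputs=[io.String.Output()],
        )

    @classmethod
    def execute(cls) -> io.NodeOutput:
        return io.NodeOutput("hello")


NODES_LIST: list[type[io.ComfyNode]] = [HelloString]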
@@ -2286,7 +2299,77 @@ async def init_builtin_extra_nodes():
         "nodes_string.py",
         "nodes_camera_trajectory.py",
         "nodes_edit_model.py",
-        "nodes_tcfg.py"
+        "nodes_tcfg.py",
+        "nodes_v3_test.py",
+        "nodes_v1_test.py",
+        "v3/nodes_ace.py",
+        "v3/nodes_advanced_samplers.py",
+        "v3/nodes_align_your_steps.py",
+        "v3/nodes_apg.py",
+        "v3/nodes_attention_multiply.py",
+        "v3/nodes_audio.py",
+        "v3/nodes_camera_trajectory.py",
+        "v3/nodes_canny.py",
+        "v3/nodes_cfg.py",
+        "v3/nodes_clip_sdxl.py",
+        "v3/nodes_compositing.py",
+        "v3/nodes_cond.py",
+        "v3/nodes_controlnet.py",
+        "v3/nodes_cosmos.py",
+        "v3/nodes_custom_sampler.py",
+        "v3/nodes_differential_diffusion.py",
+        "v3/nodes_edit_model.py",
+        "v3/nodes_flux.py",
+        "v3/nodes_freelunch.py",
+        "v3/nodes_fresca.py",
+        "v3/nodes_gits.py",
+        "v3/nodes_hidream.py",
+        # "v3/nodes_hooks.py",
+        "v3/nodes_hunyuan.py",
+        "v3/nodes_hunyuan3d.py",
+        "v3/nodes_hypernetwork.py",
+        "v3/nodes_hypertile.py",
+        "v3/nodes_images.py",
+        "v3/nodes_ip2p.py",
+        "v3/nodes_latent.py",
+        "v3/nodes_load_3d.py",
+        "v3/nodes_lora_extract.py",
+        "v3/nodes_lotus.py",
+        "v3/nodes_lt.py",
+        "v3/nodes_lumina2.py",
+        "v3/nodes_mahiro.py",
+        "v3/nodes_mask.py",
+        "v3/nodes_mochi.py",
+        "v3/nodes_model_advanced.py",
+        "v3/nodes_model_downscale.py",
+        "v3/nodes_model_merging.py",
+        "v3/nodes_model_merging_model_specific.py",
+        "v3/nodes_morphology.py",
+        "v3/nodes_optimalsteps.py",
+        "v3/nodes_pag.py",
+        "v3/nodes_perpneg.py",
+        "v3/nodes_photomaker.py",
+        "v3/nodes_pixart.py",
+        "v3/nodes_post_processing.py",
+        "v3/nodes_preview_any.py",
+        "v3/nodes_primitive.py",
+        "v3/nodes_rebatch.py",
+        "v3/nodes_sag.py",
+        "v3/nodes_sd3.py",
+        "v3/nodes_sdupscale.py",
+        "v3/nodes_slg.py",
+        "v3/nodes_stable3d.py",
+        "v3/nodes_stable_cascade.py",
+        "v3/nodes_string.py",
+        "v3/nodes_tcfg.py",
+        "v3/nodes_tomesd.py",
+        "v3/nodes_torch_compile.py",
+        "v3/nodes_train.py",
+        "v3/nodes_upscale_model.py",
+        "v3/nodes_video.py",
+        "v3/nodes_video_model.py",
+        "v3/nodes_wan.py",
+        "v3/nodes_webcam.py",
     ]

     import_failed = []
pyproject.toml
@@ -12,6 +12,8 @@ documentation = "https://docs.comfy.org/"

 [tool.ruff]
 lint.select = [
+    "E", # pycodestyle errors
+    "I", # isort
     "N805", # invalid-first-argument-name-for-method
     "S307", # suspicious-eval-usage
     "S102", # exec
@@ -21,4 +23,8 @@ lint.select = [
     # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
     "F",
 ]
+lint.ignore = ["E501"] # disable line-length checking
 exclude = ["*.ipynb", "**/generated/*.pyi"]
+
+[tool.ruff.lint.per-file-ignores]
+"!comfy_extras/v3/*" = ["E", "I"] # enable these rules only for V3 nodes
server.py
@@ -30,6 +30,7 @@ from comfy_api import feature_flags
 import node_helpers
 from comfyui_version import __version__
 from app.frontend_management import FrontendManager
+from comfy_api.internal import _ComfyNodeInternal

 from app.user_manager import UserManager
 from app.model_manager import ModelFileManager
@@ -591,6 +592,8 @@ class PromptServer():

         def node_info(node_class):
             obj_class = nodes.NODE_CLASS_MAPPINGS[node_class]
+            if issubclass(obj_class, _ComfyNodeInternal):
+                return obj_class.GET_NODE_INFO_V1()
             info = {}
             info['input'] = obj_class.INPUT_TYPES()
             info['input_order'] = {key: list(value.keys()) for (key, value) in obj_class.INPUT_TYPES().items()}