Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-05-10 20:26:07 +08:00)

Compare commits: feature/lo ... feat/gpt-i (40 commits)

e9f8aa9346, 43a1263b60, 102773cd2c, 1e1d4f1254, eb22225387, b38dd0ff23, ad94d47221, e75f775ae8,
c514890325, 543e9fba64, fc5f4a996b, 138571da95, 3d816db07f, b9dedea57d, 3086026401, 9635c2ec9b,
f8d92cf313, 4f48be4138, 541fd10bbe, 05f7531148, c033bbf516, 1391579c33, d0c53c50c2, b41ab53b6f,
e9a2d1e4cc, 1de83f91c3, 8f374716ee, cb0bbde402, 7ce3f64c78, c5569e8627, c16db7fd69, fed4ac031a,
35dfcbbb28, 722bc73319, 402ff1cdb7, acd718598e, 559501e4b8, ee2db7488d, c2657d5fb9, 971932346a
@ -139,9 +139,9 @@ Example:
"_quantization_metadata": {
  "format_version": "1.0",
  "layers": {
    "model.layers.0.mlp.up_proj": "float8_e4m3fn",
    "model.layers.0.mlp.down_proj": "float8_e4m3fn",
    "model.layers.1.mlp.up_proj": "float8_e4m3fn"
    "model.layers.0.mlp.up_proj": {"format": "float8_e4m3fn"},
    "model.layers.0.mlp.down_proj": {"format": "float8_e4m3fn"},
    "model.layers.1.mlp.up_proj": {"format": "float8_e4m3fn"}
  }
}
}
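The change above switches each `layers` entry from a bare format string to an object keyed by `"format"`, leaving room for extra per-layer fields later. A minimal sketch of reading the metadata that tolerates both shapes (the helper name is illustrative and it assumes the metadata has already been decoded into a dict):

def layer_formats(metadata: dict) -> dict:
    """Map layer name -> quantization format string, accepting old and new layouts."""
    layers = metadata.get("_quantization_metadata", {}).get("layers", {})
    formats = {}
    for name, entry in layers.items():
        # Old layout: "float8_e4m3fn"; new layout: {"format": "float8_e4m3fn", ...}
        formats[name] = entry if isinstance(entry, str) else entry["format"]
    return formats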
@ -165,4 +165,4 @@ Activation quantization (e.g., for FP8 Tensor Core operations) requires `input_scale`
3. **Compute scales**: Derive `input_scale` from collected statistics
4. **Store in checkpoint**: Save `input_scale` parameters alongside weights

The calibration dataset should be representative of your target use case. For diffusion models, this typically means a diverse set of prompts and generation parameters.
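Steps 2–3 amount to tracking the largest activation magnitude each quantized layer sees during calibration and turning it into a scale that maps that range onto the FP8 grid. A rough sketch, assuming per-tensor scaling and the float8_e4m3fn maximum of 448 (hook-based amax tracking is one possible approach, not necessarily what ComfyUI does internally):

import torch

FP8_E4M3_MAX = 448.0

def collect_input_scales(model, calibration_batches, layer_names):
    amax = {name: torch.tensor(0.0) for name in layer_names}
    hooks = []
    for name, module in model.named_modules():
        if name in layer_names:
            def hook(mod, inputs, _out, name=name):
                # Track the largest absolute activation value seen by this layer.
                amax[name] = torch.maximum(amax[name], inputs[0].detach().abs().max().cpu())
            hooks.append(module.register_forward_hook(hook))
    with torch.no_grad():
        for batch in calibration_batches:  # step 2: run representative inputs
            model(batch)
    for h in hooks:
        h.remove()
    # step 3: input_scale maps the observed range onto the FP8 representable range
    return {name: (a / FP8_E4M3_MAX).clamp(min=1e-12) for name, a in amax.items()}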
@ -195,7 +195,9 @@ The portable above currently comes with python 3.13 and pytorch cuda 13.0. Updat

#### Alternative Downloads:

[Experimental portable for AMD GPUs](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_amd.7z)
[Portable for AMD GPUs](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_amd.7z)

[Experimental portable for Intel GPUs](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_intel.7z)

[Portable with pytorch cuda 12.6 and python 3.12](https://github.com/comfyanonymous/ComfyUI/releases/latest/download/ComfyUI_windows_portable_nvidia_cu126.7z) (Supports Nvidia 10 series and older GPUs).
@ -67,7 +67,7 @@ class InternalRoutes:
            (entry for entry in os.scandir(directory) if is_visible_file(entry)),
            key=lambda entry: -entry.stat().st_mtime
        )
        return web.json_response([entry.name for entry in sorted_files], status=200)
        return web.json_response([f"{entry.name} [{directory_type}]" for entry in sorted_files], status=200)

    def get_app(self):

@ -15,7 +15,7 @@ def rope(pos: torch.Tensor, dim: int, theta: int) -> torch.Tensor:

    scale = torch.arange(0, dim, 2, dtype=torch.float64, device=device) / dim
    omega = 1.0 / (theta**scale)
    out = torch.einsum("...n,d->...nd", pos, omega)
    out = torch.einsum("...n,d->...nd", pos.to(device), omega)
    out = torch.stack([torch.cos(out), torch.sin(out)], dim=0)
    return out.to(dtype=torch.float32, device=pos.device)
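The change only moves `pos` onto the same device as `omega` before the einsum; the frequency schedule itself is unchanged. As a quick standalone check of that schedule (plain PyTorch, outside ComfyUI):

import torch
dim, theta = 8, 10000
scale = torch.arange(0, dim, 2, dtype=torch.float64) / dim   # [0.00, 0.25, 0.50, 0.75]
omega = 1.0 / (theta ** scale)                               # [1.0, 0.1, 0.01, 0.001]
pos = torch.arange(4, dtype=torch.float64)
print(torch.einsum("...n,d->...nd", pos, omega).shape)       # torch.Size([4, 4])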
@ -118,8 +118,6 @@ class ErnieImageAttention(nn.Module):
        query = apply_rotary_emb(query, image_rotary_emb)
        key = apply_rotary_emb(key, image_rotary_emb)

        query, key = query.to(x.dtype), key.to(x.dtype)

        q_flat = query.reshape(B, S, -1)
        k_flat = key.reshape(B, S, -1)

@ -161,16 +159,16 @@ class ErnieImageSharedAdaLNBlock(nn.Module):

        residual = x
        x_norm = self.adaLN_sa_ln(x)
        x_norm = (x_norm.float() * (1 + scale_msa.float()) + shift_msa.float()).to(x.dtype)
        x_norm = x_norm * (1 + scale_msa) + shift_msa

        attn_out = self.self_attention(x_norm, attention_mask=attention_mask, image_rotary_emb=rotary_pos_emb)
        x = residual + (gate_msa.float() * attn_out.float()).to(x.dtype)
        x = residual + gate_msa * attn_out

        residual = x
        x_norm = self.adaLN_mlp_ln(x)
        x_norm = (x_norm.float() * (1 + scale_mlp.float()) + shift_mlp.float()).to(x.dtype)
        x_norm = x_norm * (1 + scale_mlp) + shift_mlp

        return residual + (gate_mlp.float() * self.mlp(x_norm).float()).to(x.dtype)
        return residual + gate_mlp * self.mlp(x_norm)

class ErnieImageAdaLNContinuous(nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6, operations=None, device=None, dtype=None):

@ -183,7 +181,7 @@ class ErnieImageAdaLNContinuous(nn.Module):
    def forward(self, x: torch.Tensor, conditioning: torch.Tensor) -> torch.Tensor:
        scale, shift = self.linear(conditioning).chunk(2, dim=-1)
        x = self.norm(x)
        x = x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
        x = torch.addcmul(shift.unsqueeze(1), x, 1 + scale.unsqueeze(1))
        return x

class ErnieImageModel(nn.Module):

@ -279,7 +277,7 @@ class ErnieImageModel(nn.Module):
        rotary_pos_emb = self.pos_embed(torch.cat([image_ids, text_ids], dim=1)).to(x.dtype)
        del image_ids, text_ids

        sample = self.time_proj(timesteps.to(dtype)).to(self.time_embedding.linear_1.weight.dtype)
        sample = self.time_proj(timesteps).to(dtype)
        c = self.time_embedding(sample)

        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = [
@ -4,9 +4,6 @@ import math
import torch
import torchaudio

import comfy.model_management
import comfy.model_patcher
import comfy.utils as utils
from comfy.ldm.mmaudio.vae.distributions import DiagonalGaussianDistribution
from comfy.ldm.lightricks.symmetric_patchifier import AudioPatchifier
from comfy.ldm.lightricks.vae.causal_audio_autoencoder import (

@ -43,30 +40,6 @@ class AudioVAEComponentConfig:

        return cls(autoencoder=audio_config, vocoder=vocoder_config)


class ModelDeviceManager:
    """Manages device placement and GPU residency for the composed model."""

    def __init__(self, module: torch.nn.Module):
        load_device = comfy.model_management.get_torch_device()
        offload_device = comfy.model_management.vae_offload_device()
        self.patcher = comfy.model_patcher.ModelPatcher(module, load_device, offload_device)

    def ensure_model_loaded(self) -> None:
        comfy.model_management.free_memory(
            self.patcher.model_size(),
            self.patcher.load_device,
        )
        comfy.model_management.load_model_gpu(self.patcher)

    def move_to_load_device(self, tensor: torch.Tensor) -> torch.Tensor:
        return tensor.to(self.patcher.load_device)

    @property
    def load_device(self):
        return self.patcher.load_device


class AudioLatentNormalizer:
    """Applies per-channel statistics in patch space and restores original layout."""

@ -132,23 +105,17 @@ class AudioPreprocessor:
class AudioVAE(torch.nn.Module):
    """High-level Audio VAE wrapper exposing encode and decode entry points."""

    def __init__(self, state_dict: dict, metadata: dict):
    def __init__(self, metadata: dict):
        super().__init__()

        component_config = AudioVAEComponentConfig.from_metadata(metadata)

        vae_sd = utils.state_dict_prefix_replace(state_dict, {"audio_vae.": ""}, filter_keys=True)
        vocoder_sd = utils.state_dict_prefix_replace(state_dict, {"vocoder.": ""}, filter_keys=True)

        self.autoencoder = CausalAudioAutoencoder(config=component_config.autoencoder)
        if "bwe" in component_config.vocoder:
            self.vocoder = VocoderWithBWE(config=component_config.vocoder)
        else:
            self.vocoder = Vocoder(config=component_config.vocoder)

        self.autoencoder.load_state_dict(vae_sd, strict=False)
        self.vocoder.load_state_dict(vocoder_sd, strict=False)

        autoencoder_config = self.autoencoder.get_config()
        self.normalizer = AudioLatentNormalizer(
            AudioPatchifier(

@ -168,18 +135,12 @@ class AudioVAE(torch.nn.Module):
            n_fft=autoencoder_config["n_fft"],
        )

        self.device_manager = ModelDeviceManager(self)

    def encode(self, audio: dict) -> torch.Tensor:
    def encode(self, audio, sample_rate=44100) -> torch.Tensor:
        """Encode a waveform dictionary into normalized latent tensors."""

        waveform = audio["waveform"]
        waveform_sample_rate = audio["sample_rate"]
        waveform = audio
        waveform_sample_rate = sample_rate
        input_device = waveform.device
        # Ensure that Audio VAE is loaded on the correct device.
        self.device_manager.ensure_model_loaded()

        waveform = self.device_manager.move_to_load_device(waveform)
        expected_channels = self.autoencoder.encoder.in_channels
        if waveform.shape[1] != expected_channels:
            if waveform.shape[1] == 1:
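With this change the encoder no longer unpacks ComfyUI's audio dict or manages devices itself; callers pass the waveform tensor and its sample rate directly, and loading/offloading is left to the surrounding VAE wrapper. A minimal usage sketch under that assumption (the [batch, channels, samples] layout and the 44.1 kHz value are illustrative):

import torch

# audio_vae: an already-constructed and loaded AudioVAE instance
audio = {"waveform": torch.randn(1, 2, 44100), "sample_rate": 44100}

# Old call site:  latents = audio_vae.encode(audio)
latents = audio_vae.encode(audio["waveform"], sample_rate=audio["sample_rate"])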
@ -190,7 +151,7 @@ class AudioVAE(torch.nn.Module):
            )

        mel_spec = self.preprocessor.waveform_to_mel(
            waveform, waveform_sample_rate, device=self.device_manager.load_device
            waveform, waveform_sample_rate, device=waveform.device
        )

        latents = self.autoencoder.encode(mel_spec)

@ -204,17 +165,13 @@ class AudioVAE(torch.nn.Module):
        """Decode normalized latent tensors into an audio waveform."""
        original_shape = latents.shape

        # Ensure that Audio VAE is loaded on the correct device.
        self.device_manager.ensure_model_loaded()

        latents = self.device_manager.move_to_load_device(latents)
        latents = self.normalizer.denormalize(latents)

        target_shape = self.target_shape_from_latents(original_shape)
        mel_spec = self.autoencoder.decode(latents, target_shape=target_shape)

        waveform = self.run_vocoder(mel_spec)
        return self.device_manager.move_to_load_device(waveform)
        return waveform

    def target_shape_from_latents(self, latents_shape):
        batch, _, time, _ = latents_shape
@ -34,6 +34,16 @@ class TimestepBlock(nn.Module):
#This is needed because accelerate makes a copy of transformer_options which breaks "transformer_index"
def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None, time_context=None, num_video_frames=None, image_only_indicator=None):
    for layer in ts:
        if "patches" in transformer_options and "forward_timestep_embed_patch" in transformer_options["patches"]:
            found_patched = False
            for class_type, handler in transformer_options["patches"]["forward_timestep_embed_patch"]:
                if isinstance(layer, class_type):
                    x = handler(layer, x, emb, context, transformer_options, output_shape, time_context, num_video_frames, image_only_indicator)
                    found_patched = True
                    break
            if found_patched:
                continue

        if isinstance(layer, VideoResBlock):
            x = layer(x, emb, num_video_frames, image_only_indicator)
        elif isinstance(layer, TimestepBlock):

@ -49,15 +59,6 @@ def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, out
        elif isinstance(layer, Upsample):
            x = layer(x, output_shape=output_shape)
        else:
            if "patches" in transformer_options and "forward_timestep_embed_patch" in transformer_options["patches"]:
                found_patched = False
                for class_type, handler in transformer_options["patches"]["forward_timestep_embed_patch"]:
                    if isinstance(layer, class_type):
                        x = handler(layer, x, emb, context, transformer_options, output_shape, time_context, num_video_frames, image_only_indicator)
                        found_patched = True
                        break
                if found_patched:
                    continue
            x = layer(x)
    return x
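Each entry in transformer_options["patches"]["forward_timestep_embed_patch"] is a (layer_class, handler) pair; with this change the check now runs for every layer, and the first handler whose class matches the current layer is called instead of the default dispatch and must return the new hidden state. A minimal sketch of registering one such handler (the logging handler itself is illustrative; registration through ModelPatcher.set_model_patch mirrors what SUPIRPatch.register does further down):

from comfy.ldm.modules.diffusionmodules.openaimodel import Upsample

def log_and_upsample(layer, x, emb, context, transformer_options,
                     output_shape, time_context, num_video_frames, image_only_indicator):
    # Run the Upsample layer as usual, just observing the tensor shape first.
    print("upsample input:", tuple(x.shape))
    return layer(x, output_shape=output_shape)

patched = model_patcher.clone()  # model_patcher: an existing ModelPatcher for the UNet
patched.set_model_patch((Upsample, log_and_upsample), "forward_timestep_embed_patch")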
@ -894,6 +895,12 @@ class UNetModel(nn.Module):
        h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
        h = apply_control(h, control, 'middle')

        if "middle_block_after_patch" in transformer_patches:
            patch = transformer_patches["middle_block_after_patch"]
            for p in patch:
                out = p({"h": h, "x": x, "emb": emb, "context": context, "y": y,
                         "timesteps": timesteps, "transformer_options": transformer_options})
                h = out["h"]

        for id, module in enumerate(self.output_blocks):
            transformer_options["block"] = ("output", id)

@ -905,8 +912,9 @@ class UNetModel(nn.Module):
                for p in patch:
                    h, hsp = p(h, hsp, transformer_options)

            h = th.cat([h, hsp], dim=1)
            del hsp
            if hsp is not None:
                h = th.cat([h, hsp], dim=1)
                del hsp
            if len(hs) > 0:
                output_shape = hs[-1].shape
            else:
comfy/ldm/supir/__init__.py (new file, 0 lines)
comfy/ldm/supir/supir_modules.py (new file, 226 lines)
@ -0,0 +1,226 @@
import torch
import torch.nn as nn

from comfy.ldm.modules.diffusionmodules.util import timestep_embedding
from comfy.ldm.modules.diffusionmodules.openaimodel import Downsample, TimestepEmbedSequential, ResBlock, SpatialTransformer
from comfy.ldm.modules.attention import optimized_attention


class ZeroSFT(nn.Module):
    def __init__(self, label_nc, norm_nc, concat_channels=0, dtype=None, device=None, operations=None):
        super().__init__()

        ks = 3
        pw = ks // 2

        self.param_free_norm = operations.GroupNorm(32, norm_nc + concat_channels, dtype=dtype, device=device)

        nhidden = 128

        self.mlp_shared = nn.Sequential(
            operations.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw, dtype=dtype, device=device),
            nn.SiLU()
        )
        self.zero_mul = operations.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw, dtype=dtype, device=device)
        self.zero_add = operations.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw, dtype=dtype, device=device)

        self.zero_conv = operations.Conv2d(label_nc, norm_nc, 1, 1, 0, dtype=dtype, device=device)
        self.pre_concat = bool(concat_channels != 0)

    def forward(self, c, h, h_ori=None, control_scale=1):
        if h_ori is not None and self.pre_concat:
            h_raw = torch.cat([h_ori, h], dim=1)
        else:
            h_raw = h

        h = h + self.zero_conv(c)
        if h_ori is not None and self.pre_concat:
            h = torch.cat([h_ori, h], dim=1)
        actv = self.mlp_shared(c)
        gamma = self.zero_mul(actv)
        beta = self.zero_add(actv)
        h = self.param_free_norm(h)
        h = torch.addcmul(h + beta, h, gamma)
        if h_ori is not None and not self.pre_concat:
            h = torch.cat([h_ori, h], dim=1)
        return torch.lerp(h_raw, h, control_scale)


class _CrossAttnInner(nn.Module):
    """Inner cross-attention module matching the state_dict layout of the original CrossAttention."""
    def __init__(self, query_dim, context_dim, heads, dim_head, dtype=None, device=None, operations=None):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_out = nn.Sequential(
            operations.Linear(inner_dim, query_dim, dtype=dtype, device=device),
        )

    def forward(self, x, context):
        q = self.to_q(x)
        k = self.to_k(context)
        v = self.to_v(context)
        return self.to_out(optimized_attention(q, k, v, self.heads))


class ZeroCrossAttn(nn.Module):
    def __init__(self, context_dim, query_dim, dtype=None, device=None, operations=None):
        super().__init__()
        heads = query_dim // 64
        dim_head = 64
        self.attn = _CrossAttnInner(query_dim, context_dim, heads, dim_head, dtype=dtype, device=device, operations=operations)
        self.norm1 = operations.GroupNorm(32, query_dim, dtype=dtype, device=device)
        self.norm2 = operations.GroupNorm(32, context_dim, dtype=dtype, device=device)

    def forward(self, context, x, control_scale=1):
        b, c, h, w = x.shape
        x_in = x

        x = self.attn(
            self.norm1(x).flatten(2).transpose(1, 2),
            self.norm2(context).flatten(2).transpose(1, 2),
        ).transpose(1, 2).unflatten(2, (h, w))

        return x_in + x * control_scale


class GLVControl(nn.Module):
    """SUPIR's Guided Latent Vector control encoder. Truncated UNet (input + middle blocks only)."""
    def __init__(
        self,
        in_channels=4,
        model_channels=320,
        num_res_blocks=2,
        attention_resolutions=(4, 2),
        channel_mult=(1, 2, 4),
        num_head_channels=64,
        transformer_depth=(1, 2, 10),
        context_dim=2048,
        adm_in_channels=2816,
        use_linear_in_transformer=True,
        use_checkpoint=False,
        dtype=None,
        device=None,
        operations=None,
        **kwargs,
    ):
        super().__init__()
        self.model_channels = model_channels
        time_embed_dim = model_channels * 4

        self.time_embed = nn.Sequential(
            operations.Linear(model_channels, time_embed_dim, dtype=dtype, device=device),
            nn.SiLU(),
            operations.Linear(time_embed_dim, time_embed_dim, dtype=dtype, device=device),
        )

        self.label_emb = nn.Sequential(
            nn.Sequential(
                operations.Linear(adm_in_channels, time_embed_dim, dtype=dtype, device=device),
                nn.SiLU(),
                operations.Linear(time_embed_dim, time_embed_dim, dtype=dtype, device=device),
            )
        )

        self.input_blocks = nn.ModuleList([
            TimestepEmbedSequential(
                operations.Conv2d(in_channels, model_channels, 3, padding=1, dtype=dtype, device=device)
            )
        ])
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for nr in range(num_res_blocks):
                layers = [
                    ResBlock(ch, time_embed_dim, 0, out_channels=mult * model_channels,
                             dtype=dtype, device=device, operations=operations)
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    num_heads = ch // num_head_channels
                    layers.append(
                        SpatialTransformer(ch, num_heads, num_head_channels,
                                           depth=transformer_depth[level], context_dim=context_dim,
                                           use_linear=use_linear_in_transformer,
                                           use_checkpoint=use_checkpoint,
                                           dtype=dtype, device=device, operations=operations)
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
            if level != len(channel_mult) - 1:
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        Downsample(ch, True, out_channels=ch, dtype=dtype, device=device, operations=operations)
                    )
                )
                ds *= 2

        num_heads = ch // num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(ch, time_embed_dim, 0, dtype=dtype, device=device, operations=operations),
            SpatialTransformer(ch, num_heads, num_head_channels,
                               depth=transformer_depth[-1], context_dim=context_dim,
                               use_linear=use_linear_in_transformer,
                               use_checkpoint=use_checkpoint,
                               dtype=dtype, device=device, operations=operations),
            ResBlock(ch, time_embed_dim, 0, dtype=dtype, device=device, operations=operations),
        )

        self.input_hint_block = TimestepEmbedSequential(
            operations.Conv2d(in_channels, model_channels, 3, padding=1, dtype=dtype, device=device)
        )

    def forward(self, x, timesteps, xt, context=None, y=None, **kwargs):
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)
        emb = self.time_embed(t_emb) + self.label_emb(y)

        guided_hint = self.input_hint_block(x, emb, context)

        hs = []
        h = xt
        for module in self.input_blocks:
            if guided_hint is not None:
                h = module(h, emb, context)
                h += guided_hint
                guided_hint = None
            else:
                h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        hs.append(h)
        return hs


class SUPIR(nn.Module):
    """
    SUPIR model containing GLVControl (control encoder) and project_modules (adapters).
    State dict keys match the original SUPIR checkpoint layout:
        control_model.*    -> GLVControl
        project_modules.*  -> nn.ModuleList of ZeroSFT/ZeroCrossAttn
    """
    def __init__(self, device=None, dtype=None, operations=None):
        super().__init__()

        self.control_model = GLVControl(dtype=dtype, device=device, operations=operations)

        project_channel_scale = 2
        cond_output_channels = [320] * 4 + [640] * 3 + [1280] * 3
        project_channels = [int(c * project_channel_scale) for c in [160] * 4 + [320] * 3 + [640] * 3]
        concat_channels = [320] * 2 + [640] * 3 + [1280] * 4 + [0]
        cross_attn_insert_idx = [6, 3]

        self.project_modules = nn.ModuleList()
        for i in range(len(cond_output_channels)):
            self.project_modules.append(ZeroSFT(
                project_channels[i], cond_output_channels[i],
                concat_channels=concat_channels[i],
                dtype=dtype, device=device, operations=operations,
            ))

        for i in cross_attn_insert_idx:
            self.project_modules.insert(i, ZeroCrossAttn(
                cond_output_channels[i], concat_channels[i],
                dtype=dtype, device=device, operations=operations,
            ))
comfy/ldm/supir/supir_patch.py (new file, 103 lines)
@ -0,0 +1,103 @@
import torch
from comfy.ldm.modules.diffusionmodules.openaimodel import Upsample


class SUPIRPatch:
    """
    Holds GLVControl (control encoder) + project_modules (ZeroSFT/ZeroCrossAttn adapters).
    Runs GLVControl lazily on first patch invocation per step, applies adapters through
    middle_block_after_patch, output_block_merge_patch, and forward_timestep_embed_patch.
    """
    SIGMA_MAX = 14.6146

    def __init__(self, model_patch, project_modules, hint_latent, strength_start, strength_end):
        self.model_patch = model_patch  # CoreModelPatcher wrapping GLVControl
        self.project_modules = project_modules  # nn.ModuleList of ZeroSFT/ZeroCrossAttn
        self.hint_latent = hint_latent  # encoded LQ image latent
        self.strength_start = strength_start
        self.strength_end = strength_end
        self.cached_features = None
        self.adapter_idx = 0
        self.control_idx = 0
        self.current_control_idx = 0
        self.active = True

    def _ensure_features(self, kwargs):
        """Run GLVControl on first call per step, cache results."""
        if self.cached_features is not None:
            return
        x = kwargs["x"]
        b = x.shape[0]
        hint = self.hint_latent.to(device=x.device, dtype=x.dtype)
        if hint.shape[0] != b:
            hint = hint.expand(b, -1, -1, -1) if hint.shape[0] == 1 else hint.repeat((b + hint.shape[0] - 1) // hint.shape[0], 1, 1, 1)[:b]
        self.cached_features = self.model_patch.model.control_model(
            hint, kwargs["timesteps"], x,
            kwargs["context"], kwargs["y"]
        )
        self.adapter_idx = len(self.project_modules) - 1
        self.control_idx = len(self.cached_features) - 1

    def _get_control_scale(self, kwargs):
        if self.strength_start == self.strength_end:
            return self.strength_end
        sigma = kwargs["transformer_options"].get("sigmas")
        if sigma is None:
            return self.strength_end
        s = sigma[0].item() if sigma.dim() > 0 else sigma.item()
        t = min(s / self.SIGMA_MAX, 1.0)
        return t * (self.strength_start - self.strength_end) + self.strength_end

    def middle_after(self, kwargs):
        """middle_block_after_patch: run GLVControl lazily, apply last adapter after middle block."""
        self.cached_features = None  # reset from previous step
        self.current_scale = self._get_control_scale(kwargs)
        self.active = self.current_scale > 0
        if not self.active:
            return {"h": kwargs["h"]}
        self._ensure_features(kwargs)
        h = kwargs["h"]
        h = self.project_modules[self.adapter_idx](
            self.cached_features[self.control_idx], h, control_scale=self.current_scale
        )
        self.adapter_idx -= 1
        self.control_idx -= 1
        return {"h": h}

    def output_block(self, h, hsp, transformer_options):
        """output_block_patch: ZeroSFT adapter fusion replaces cat([h, hsp]). Returns (h, None) to skip cat."""
        if not self.active:
            return h, hsp
        self.current_control_idx = self.control_idx
        h = self.project_modules[self.adapter_idx](
            self.cached_features[self.control_idx], hsp, h, control_scale=self.current_scale
        )
        self.adapter_idx -= 1
        self.control_idx -= 1
        return h, None

    def pre_upsample(self, layer, x, emb, context, transformer_options, output_shape, *args, **kw):
        """forward_timestep_embed_patch for Upsample: extra cross-attn adapter before upsample."""
        block_type, _ = transformer_options["block"]
        if block_type == "output" and self.active and self.cached_features is not None:
            x = self.project_modules[self.adapter_idx](
                self.cached_features[self.current_control_idx], x, control_scale=self.current_scale
            )
            self.adapter_idx -= 1
        return layer(x, output_shape=output_shape)

    def to(self, device_or_dtype):
        if isinstance(device_or_dtype, torch.device):
            self.cached_features = None
        if self.hint_latent is not None:
            self.hint_latent = self.hint_latent.to(device_or_dtype)
        return self

    def models(self):
        return [self.model_patch]

    def register(self, model_patcher):
        """Register all patches on a cloned model patcher."""
        model_patcher.set_model_patch(self.middle_after, "middle_block_after_patch")
        model_patcher.set_model_output_block_patch(self.output_block)
        model_patcher.set_model_patch((Upsample, self.pre_upsample), "forward_timestep_embed_patch")
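Putting the pieces together, a node that applies SUPIR to a model would clone its ModelPatcher, build a SUPIRPatch from the loaded SUPIR module and the encoded low-quality latent, and call register() on the clone. The sketch below is illustrative only; the objects named `model`, `supir_patcher`, and `hint_latent`, and the particular strength values, are assumptions rather than part of this diff:

# model: a ModelPatcher for the SDXL UNet; supir_patcher: a patcher wrapping a loaded SUPIR module
# hint_latent: VAE-encoded low-quality image latent
patched = model.clone()
supir_patch = SUPIRPatch(
    model_patch=supir_patcher,
    project_modules=supir_patcher.model.project_modules,
    hint_latent=hint_latent,
    strength_start=1.0,
    strength_end=0.5,
)
supir_patch.register(patched)
# `patched` is then returned from the node and sampled as usual.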
@ -506,6 +506,10 @@ class ModelPatcher:
    def set_model_noise_refiner_patch(self, patch):
        self.set_model_patch(patch, "noise_refiner")

    def set_model_middle_block_after_patch(self, patch):
        self.set_model_patch(patch, "middle_block_after_patch")


    def set_model_rope_options(self, scale_x, shift_x, scale_y, shift_y, scale_t, shift_t, **kwargs):
        rope_options = self.model_options["transformer_options"].get("rope_options", {})
        rope_options["scale_x"] = scale_x
@ -1151,7 +1151,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
                if param is None:
                    continue
                p = fn(param)
                if p.is_inference():
                if (not torch.is_inference_mode_enabled()) and p.is_inference():
                    p = p.clone()
                self.register_parameter(key, torch.nn.Parameter(p, requires_grad=False))
            for key, buf in self._buffers.items():
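The new guard skips the clone when the surrounding code is already running under torch.inference_mode(), where cloning cannot produce a regular tensor anyway; outside inference mode the clone still converts a lingering inference tensor into one that can back a Parameter. A standalone illustration of the underlying PyTorch behaviour (not ComfyUI code):

import torch

with torch.inference_mode():
    t = torch.ones(3)                      # created as an inference tensor
print(t.is_inference())                    # True

# Outside inference mode, cloning yields a normal tensor usable as a Parameter.
print(t.clone().is_inference())            # False

# Inside inference mode, a clone is still an inference tensor, so cloning there gains nothing.
with torch.inference_mode():
    print(t.clone().is_inference())        # True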
comfy/sd.py (19 changed lines)
@ -12,6 +12,7 @@ from .ldm.cascade.stage_c_coder import StageC_coder
from .ldm.audio.autoencoder import AudioOobleckVAE
import comfy.ldm.genmo.vae.model
import comfy.ldm.lightricks.vae.causal_video_autoencoder
import comfy.ldm.lightricks.vae.audio_vae
import comfy.ldm.cosmos.vae
import comfy.ldm.wan.vae
import comfy.ldm.wan.vae2_2
@ -805,6 +806,24 @@ class VAE:
            self.downscale_index_formula = (4, 8, 8)
            self.memory_used_encode = lambda shape, dtype: (700 * (max(1, (shape[-3] ** 0.66 * 0.11)) * shape[-2] * shape[-1]) * model_management.dtype_size(dtype))
            self.memory_used_decode = lambda shape, dtype: (50 * (max(1, (shape[-3] ** 0.65 * 0.26)) * shape[-2] * shape[-1] * 32 * 32) * model_management.dtype_size(dtype))
        elif "vocoder.resblocks.0.convs1.0.weight" in sd or "vocoder.vocoder.resblocks.0.convs1.0.weight" in sd:  # LTX Audio
            sd = comfy.utils.state_dict_prefix_replace(sd, {"audio_vae.": "autoencoder."})
            self.first_stage_model = comfy.ldm.lightricks.vae.audio_vae.AudioVAE(metadata=metadata)
            self.memory_used_encode = lambda shape, dtype: (shape[2] * 330) * model_management.dtype_size(dtype)
            self.memory_used_decode = lambda shape, dtype: (shape[2] * shape[3] * 87000) * model_management.dtype_size(dtype)
            self.latent_channels = self.first_stage_model.latent_channels
            self.audio_sample_rate_output = self.first_stage_model.output_sample_rate
            self.autoencoder = self.first_stage_model.autoencoder  # TODO: remove hack for ltxv custom nodes
            self.output_channels = 2
            self.pad_channel_value = "replicate"
            self.upscale_ratio = 4096
            self.downscale_ratio = 4096
            self.latent_dim = 2
            self.process_output = lambda audio: audio
            self.process_input = lambda audio: audio
            self.working_dtypes = [torch.float32]
            self.disable_offload = True
            self.extra_1d_channel = 16
        else:
            logging.warning("WARNING: No VAE weights detected, VAE not initalized.")
            self.first_stage_model = None
@ -3,7 +3,7 @@ from comfy import sd1_clip
import comfy.text_encoders.llama

class Ministral3_3BTokenizer(Mistral3Tokenizer):
    def __init__(self, embedding_directory=None, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_data={}):
    def __init__(self, embedding_directory=None, embedding_size=5120, embedding_key='ministral3_3b', tokenizer_data={}):
        return super().__init__(embedding_directory=embedding_directory, embedding_size=embedding_size, embedding_key=embedding_key, tokenizer_data=tokenizer_data)

class ErnieTokenizer(sd1_clip.SD1Tokenizer):

@ -35,4 +35,4 @@ def te(dtype_llama=None, llama_quantization_metadata=None):
                model_options = model_options.copy()
                model_options["quantization_metadata"] = llama_quantization_metadata
            super().__init__(device=device, dtype=dtype, model_options=model_options)
    return ErnieTEModel
    return ErnieTEModel_

@ -82,6 +82,7 @@ class Ministral3_3BConfig:
    rope_scale = None
    final_norm: bool = True
    lm_head: bool = False
    stop_tokens = [2]

@dataclass
class Qwen25_3BConfig:

@ -969,7 +970,7 @@ class Mistral3Small24B(BaseLlama, torch.nn.Module):
        self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
        self.dtype = dtype

class Ministral3_3B(BaseLlama, torch.nn.Module):
class Ministral3_3B(BaseLlama, BaseQwen3, BaseGenerate, torch.nn.Module):
    def __init__(self, config_dict, dtype, device, operations):
        super().__init__()
        config = Ministral3_3BConfig(**config_dict)
@ -52,6 +52,26 @@ class TaskImageContent(BaseModel):
    role: Literal["first_frame", "last_frame", "reference_image"] | None = Field(None)


class TaskVideoContentUrl(BaseModel):
    url: str = Field(...)


class TaskVideoContent(BaseModel):
    type: str = Field("video_url")
    video_url: TaskVideoContentUrl = Field(...)
    role: str = Field("reference_video")


class TaskAudioContentUrl(BaseModel):
    url: str = Field(...)


class TaskAudioContent(BaseModel):
    type: str = Field("audio_url")
    audio_url: TaskAudioContentUrl = Field(...)
    role: str = Field("reference_audio")


class Text2VideoTaskCreationRequest(BaseModel):
    model: str = Field(...)
    content: list[TaskTextContent] = Field(..., min_length=1)

@ -64,6 +84,17 @@ class Image2VideoTaskCreationRequest(BaseModel):
    generate_audio: bool | None = Field(...)


class Seedance2TaskCreationRequest(BaseModel):
    model: str = Field(...)
    content: list[TaskTextContent | TaskImageContent | TaskVideoContent | TaskAudioContent] = Field(..., min_length=1)
    generate_audio: bool | None = Field(None)
    resolution: str | None = Field(None)
    ratio: str | None = Field(None)
    duration: int | None = Field(None, ge=4, le=15)
    seed: int | None = Field(None, ge=0, le=2147483647)
    watermark: bool | None = Field(None)


class TaskCreationResponse(BaseModel):
    id: str = Field(...)

@ -77,12 +108,27 @@ class TaskStatusResult(BaseModel):
    video_url: str = Field(...)


class TaskStatusUsage(BaseModel):
    completion_tokens: int = Field(0)
    total_tokens: int = Field(0)


class TaskStatusResponse(BaseModel):
    id: str = Field(...)
    model: str = Field(...)
    status: Literal["queued", "running", "cancelled", "succeeded", "failed"] = Field(...)
    error: TaskStatusError | None = Field(None)
    content: TaskStatusResult | None = Field(None)
    usage: TaskStatusUsage | None = Field(None)


# Dollars per 1K tokens, keyed by (model_id, has_video_input).
SEEDANCE2_PRICE_PER_1K_TOKENS = {
    ("dreamina-seedance-2-0-260128", False): 0.007,
    ("dreamina-seedance-2-0-260128", True): 0.0043,
    ("dreamina-seedance-2-0-fast-260128", False): 0.0056,
    ("dreamina-seedance-2-0-fast-260128", True): 0.0033,
}


RECOMMENDED_PRESETS = [

@ -112,6 +158,19 @@ RECOMMENDED_PRESETS_SEEDREAM_4 = [
    ("Custom", None, None),
]

# Seedance 2.0 reference video pixel count limits per model and output resolution.
SEEDANCE2_REF_VIDEO_PIXEL_LIMITS = {
    "dreamina-seedance-2-0-260128": {
        "480p": {"min": 409_600, "max": 927_408},
        "720p": {"min": 409_600, "max": 927_408},
        "1080p": {"min": 409_600, "max": 2_073_600},
    },
    "dreamina-seedance-2-0-fast-260128": {
        "480p": {"min": 409_600, "max": 927_408},
        "720p": {"min": 409_600, "max": 927_408},
    },
}

# The time in this dictionary are given for 10 seconds duration.
VIDEO_TASKS_EXECUTION_TIME = {
    "seedance-1-0-lite-t2v-250428": {
@ -8,16 +8,23 @@ from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.bytedance import (
    RECOMMENDED_PRESETS,
    RECOMMENDED_PRESETS_SEEDREAM_4,
    SEEDANCE2_PRICE_PER_1K_TOKENS,
    SEEDANCE2_REF_VIDEO_PIXEL_LIMITS,
    VIDEO_TASKS_EXECUTION_TIME,
    Image2VideoTaskCreationRequest,
    ImageTaskCreationResponse,
    Seedance2TaskCreationRequest,
    Seedream4Options,
    Seedream4TaskCreationRequest,
    TaskAudioContent,
    TaskAudioContentUrl,
    TaskCreationResponse,
    TaskImageContent,
    TaskImageContentUrl,
    TaskStatusResponse,
    TaskTextContent,
    TaskVideoContent,
    TaskVideoContentUrl,
    Text2ImageTaskCreationRequest,
    Text2VideoTaskCreationRequest,
)

@ -28,8 +35,12 @@ from comfy_api_nodes.util import (
    get_number_of_images,
    image_tensor_pair_to_batch,
    poll_op,
    resize_video_to_pixel_budget,
    sync_op,
    upload_audio_to_comfyapi,
    upload_image_to_comfyapi,
    upload_images_to_comfyapi,
    upload_video_to_comfyapi,
    validate_image_aspect_ratio,
    validate_image_dimensions,
    validate_string,

@ -46,12 +57,59 @@ SEEDREAM_MODELS = {
# Long-running tasks endpoints(e.g., video)
BYTEPLUS_TASK_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks"
BYTEPLUS_TASK_STATUS_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks"  # + /{task_id}
BYTEPLUS_SEEDANCE2_TASK_STATUS_ENDPOINT = "/proxy/byteplus-seedance2/api/v3/contents/generations/tasks"  # + /{task_id}
SEEDANCE_MODELS = {
    "Seedance 2.0": "dreamina-seedance-2-0-260128",
    "Seedance 2.0 Fast": "dreamina-seedance-2-0-fast-260128",
}

DEPRECATED_MODELS = {"seedance-1-0-lite-t2v-250428", "seedance-1-0-lite-i2v-250428"}


logger = logging.getLogger(__name__)


def _validate_ref_video_pixels(video: Input.Video, model_id: str, resolution: str, index: int) -> None:
    """Validate reference video pixel count against Seedance 2.0 model limits for the selected resolution."""
    model_limits = SEEDANCE2_REF_VIDEO_PIXEL_LIMITS.get(model_id)
    if not model_limits:
        return
    limits = model_limits.get(resolution)
    if not limits:
        return
    try:
        w, h = video.get_dimensions()
    except Exception:
        return
    pixels = w * h
    min_px = limits.get("min")
    max_px = limits.get("max")
    if min_px and pixels < min_px:
        raise ValueError(
            f"Reference video {index} is too small: {w}x{h} = {pixels:,}px. " f"Minimum is {min_px:,}px for this model."
        )
    if max_px and pixels > max_px:
        raise ValueError(
            f"Reference video {index} is too large: {w}x{h} = {pixels:,}px. "
            f"Maximum is {max_px:,}px for this model. Try downscaling the video."
        )
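# Worked example of the limits above (illustrative numbers): a 1280x720 reference clip is
# 921,600 px, inside the 409,600-927,408 px window both models accept for 480p/720p output,
# while a 1920x1080 clip (2,073,600 px) only passes when Seedance 2.0 (not Fast) is used
# at 1080p output; resolutions without an entry in the table are not validated at all.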
def _seedance2_price_extractor(model_id: str, has_video_input: bool):
    """Returns a price_extractor closure for Seedance 2.0 poll_op."""
    rate = SEEDANCE2_PRICE_PER_1K_TOKENS.get((model_id, has_video_input))
    if rate is None:
        return None

    def extractor(response: TaskStatusResponse) -> float | None:
        if response.usage is None:
            return None
        return response.usage.total_tokens * 1.43 * rate / 1_000.0

    return extractor
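# Worked example of the pricing math above (illustrative numbers): Seedance 2.0 without
# video input uses the 0.007 USD per 1K tokens rate, so a task reporting
# total_tokens=200_000 yields 200_000 * 1.43 * 0.007 / 1000 = 2.002 USD. The fixed 1.43
# multiplier is applied to the reported token count before the per-1K rate.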
def get_image_url_from_response(response: ImageTaskCreationResponse) -> str:
    if response.error:
        error_msg = f"ByteDance request failed. Code: {response.error['code']}, message: {response.error['message']}"

@ -335,8 +393,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
        mp_provided = out_num_pixels / 1_000_000.0
        if ("seedream-4-5" in model or "seedream-5-0" in model) and out_num_pixels < 3686400:
            raise ValueError(
                f"Minimum image resolution for the selected model is 3.68MP, "
                f"but {mp_provided:.2f}MP provided."
                f"Minimum image resolution for the selected model is 3.68MP, " f"but {mp_provided:.2f}MP provided."
            )
        if "seedream-4-0" in model and out_num_pixels < 921600:
            raise ValueError(

@ -952,33 +1009,6 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
    )


async def process_video_task(
    cls: type[IO.ComfyNode],
    payload: Text2VideoTaskCreationRequest | Image2VideoTaskCreationRequest,
    estimated_duration: int | None,
) -> IO.NodeOutput:
    if payload.model in DEPRECATED_MODELS:
        logger.warning(
            "Model '%s' is deprecated and will be deactivated on May 13, 2026. "
            "Please switch to a newer model. Recommended: seedance-1-0-pro-fast-251015.",
            payload.model,
        )
    initial_response = await sync_op(
        cls,
        ApiEndpoint(path=BYTEPLUS_TASK_ENDPOINT, method="POST"),
        data=payload,
        response_model=TaskCreationResponse,
    )
    response = await poll_op(
        cls,
        ApiEndpoint(path=f"{BYTEPLUS_TASK_STATUS_ENDPOINT}/{initial_response.id}"),
        status_extractor=lambda r: r.status,
        estimated_duration=estimated_duration,
        response_model=TaskStatusResponse,
    )
    return IO.NodeOutput(await download_url_to_video_output(response.content.video_url))


def raise_if_text_params(prompt: str, text_params: list[str]) -> None:
    for i in text_params:
        if f"--{i} " in prompt:

@ -1040,6 +1070,563 @@ PRICE_BADGE_VIDEO = IO.PriceBadge(
)

def _seedance2_text_inputs(resolutions: list[str]):
    return [
        IO.String.Input(
            "prompt",
            multiline=True,
            default="",
            tooltip="Text prompt for video generation.",
        ),
        IO.Combo.Input(
            "resolution",
            options=resolutions,
            tooltip="Resolution of the output video.",
        ),
        IO.Combo.Input(
            "ratio",
            options=["16:9", "4:3", "1:1", "3:4", "9:16", "21:9", "adaptive"],
            tooltip="Aspect ratio of the output video.",
        ),
        IO.Int.Input(
            "duration",
            default=7,
            min=4,
            max=15,
            step=1,
            tooltip="Duration of the output video in seconds (4-15).",
            display_mode=IO.NumberDisplay.slider,
        ),
        IO.Boolean.Input(
            "generate_audio",
            default=True,
            tooltip="Enable audio generation for the output video.",
        ),
    ]
class ByteDance2TextToVideoNode(IO.ComfyNode):

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="ByteDance2TextToVideoNode",
            display_name="ByteDance Seedance 2.0 Text to Video",
            category="api node/video/ByteDance",
            description="Generate video using Seedance 2.0 models based on a text prompt.",
            inputs=[
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option("Seedance 2.0", _seedance2_text_inputs(["480p", "720p", "1080p"])),
                        IO.DynamicCombo.Option("Seedance 2.0 Fast", _seedance2_text_inputs(["480p", "720p"])),
                    ],
                    tooltip="Seedance 2.0 for maximum quality; Seedance 2.0 Fast for speed optimization.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                            "results are non-deterministic regardless of seed.",
                ),
                IO.Boolean.Input(
                    "watermark",
                    default=False,
                    tooltip="Whether to add a watermark to the video.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
                expr="""
                (
                    $rate480 := 10044;
                    $rate720 := 21600;
                    $rate1080 := 48800;
                    $m := widgets.model;
                    $pricePer1K := $contains($m, "fast") ? 0.008008 : 0.01001;
                    $res := $lookup(widgets, "model.resolution");
                    $dur := $lookup(widgets, "model.duration");
                    $rate := $res = "1080p" ? $rate1080 :
                             $res = "720p" ? $rate720 :
                             $rate480;
                    $cost := $dur * $rate * $pricePer1K / 1000;
                    {"type": "usd", "usd": $cost, "format": {"approximate": true}}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: dict,
        seed: int,
        watermark: bool,
    ) -> IO.NodeOutput:
        validate_string(model["prompt"], strip_whitespace=True, min_length=1)
        model_id = SEEDANCE_MODELS[model["model"]]
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path=BYTEPLUS_TASK_ENDPOINT, method="POST"),
            data=Seedance2TaskCreationRequest(
                model=model_id,
                content=[TaskTextContent(text=model["prompt"])],
                generate_audio=model["generate_audio"],
                resolution=model["resolution"],
                ratio=model["ratio"],
                duration=model["duration"],
                seed=seed,
                watermark=watermark,
            ),
            response_model=TaskCreationResponse,
        )
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"{BYTEPLUS_SEEDANCE2_TASK_STATUS_ENDPOINT}/{initial_response.id}"),
            response_model=TaskStatusResponse,
            status_extractor=lambda r: r.status,
            price_extractor=_seedance2_price_extractor(model_id, has_video_input=False),
            poll_interval=9,
            max_poll_attempts=180,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.content.video_url))
class ByteDance2FirstLastFrameNode(IO.ComfyNode):

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="ByteDance2FirstLastFrameNode",
            display_name="ByteDance Seedance 2.0 First-Last-Frame to Video",
            category="api node/video/ByteDance",
            description="Generate video using Seedance 2.0 from a first frame image and optional last frame image.",
            inputs=[
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option("Seedance 2.0", _seedance2_text_inputs(["480p", "720p", "1080p"])),
                        IO.DynamicCombo.Option("Seedance 2.0 Fast", _seedance2_text_inputs(["480p", "720p"])),
                    ],
                    tooltip="Seedance 2.0 for maximum quality; Seedance 2.0 Fast for speed optimization.",
                ),
                IO.Image.Input(
                    "first_frame",
                    tooltip="First frame image for the video.",
                ),
                IO.Image.Input(
                    "last_frame",
                    tooltip="Last frame image for the video.",
                    optional=True,
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                            "results are non-deterministic regardless of seed.",
                ),
                IO.Boolean.Input(
                    "watermark",
                    default=False,
                    tooltip="Whether to add a watermark to the video.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
                expr="""
                (
                    $rate480 := 10044;
                    $rate720 := 21600;
                    $rate1080 := 48800;
                    $m := widgets.model;
                    $pricePer1K := $contains($m, "fast") ? 0.008008 : 0.01001;
                    $res := $lookup(widgets, "model.resolution");
                    $dur := $lookup(widgets, "model.duration");
                    $rate := $res = "1080p" ? $rate1080 :
                             $res = "720p" ? $rate720 :
                             $rate480;
                    $cost := $dur * $rate * $pricePer1K / 1000;
                    {"type": "usd", "usd": $cost, "format": {"approximate": true}}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: dict,
        first_frame: Input.Image,
        seed: int,
        watermark: bool,
        last_frame: Input.Image | None = None,
    ) -> IO.NodeOutput:
        validate_string(model["prompt"], strip_whitespace=True, min_length=1)
        model_id = SEEDANCE_MODELS[model["model"]]

        content: list[TaskTextContent | TaskImageContent] = [
            TaskTextContent(text=model["prompt"]),
            TaskImageContent(
                image_url=TaskImageContentUrl(
                    url=await upload_image_to_comfyapi(cls, first_frame, wait_label="Uploading first frame.")
                ),
                role="first_frame",
            ),
        ]
        if last_frame is not None:
            content.append(
                TaskImageContent(
                    image_url=TaskImageContentUrl(
                        url=await upload_image_to_comfyapi(cls, last_frame, wait_label="Uploading last frame.")
                    ),
                    role="last_frame",
                ),
            )

        initial_response = await sync_op(
            cls,
            ApiEndpoint(path=BYTEPLUS_TASK_ENDPOINT, method="POST"),
            data=Seedance2TaskCreationRequest(
                model=model_id,
                content=content,
                generate_audio=model["generate_audio"],
                resolution=model["resolution"],
                ratio=model["ratio"],
                duration=model["duration"],
                seed=seed,
                watermark=watermark,
            ),
            response_model=TaskCreationResponse,
        )
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"{BYTEPLUS_SEEDANCE2_TASK_STATUS_ENDPOINT}/{initial_response.id}"),
            response_model=TaskStatusResponse,
            status_extractor=lambda r: r.status,
            price_extractor=_seedance2_price_extractor(model_id, has_video_input=False),
            poll_interval=9,
            max_poll_attempts=180,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.content.video_url))
def _seedance2_reference_inputs(resolutions: list[str]):
    return [
        *_seedance2_text_inputs(resolutions),
        IO.Autogrow.Input(
            "reference_images",
            template=IO.Autogrow.TemplateNames(
                IO.Image.Input("reference_image"),
                names=[
                    "image_1",
                    "image_2",
                    "image_3",
                    "image_4",
                    "image_5",
                    "image_6",
                    "image_7",
                    "image_8",
                    "image_9",
                ],
                min=0,
            ),
        ),
        IO.Autogrow.Input(
            "reference_videos",
            template=IO.Autogrow.TemplateNames(
                IO.Video.Input("reference_video"),
                names=["video_1", "video_2", "video_3"],
                min=0,
            ),
        ),
        IO.Autogrow.Input(
            "reference_audios",
            template=IO.Autogrow.TemplateNames(
                IO.Audio.Input("reference_audio"),
                names=["audio_1", "audio_2", "audio_3"],
                min=0,
            ),
        ),
        IO.Boolean.Input(
            "auto_downscale",
            default=False,
            advanced=True,
            optional=True,
            tooltip="Automatically downscale reference videos that exceed the model's pixel budget "
                    "for the selected resolution. Aspect ratio is preserved; videos already within limits are untouched.",
        ),
    ]
class ByteDance2ReferenceNode(IO.ComfyNode):
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="ByteDance2ReferenceNode",
|
||||
display_name="ByteDance Seedance 2.0 Reference to Video",
|
||||
category="api node/video/ByteDance",
|
||||
description="Generate, edit, or extend video using Seedance 2.0 with reference images, "
|
||||
"videos, and audio. Supports multimodal reference, video editing, and video extension.",
|
||||
inputs=[
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
options=[
|
||||
IO.DynamicCombo.Option("Seedance 2.0", _seedance2_reference_inputs(["480p", "720p", "1080p"])),
|
||||
IO.DynamicCombo.Option("Seedance 2.0 Fast", _seedance2_reference_inputs(["480p", "720p"])),
|
||||
],
|
||||
tooltip="Seedance 2.0 for maximum quality; Seedance 2.0 Fast for speed optimization.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"seed",
|
||||
default=0,
|
||||
min=0,
|
||||
max=2147483647,
|
||||
step=1,
|
||||
display_mode=IO.NumberDisplay.number,
|
||||
control_after_generate=True,
|
||||
tooltip="Seed controls whether the node should re-run; "
|
||||
"results are non-deterministic regardless of seed.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"watermark",
|
||||
default=False,
|
||||
tooltip="Whether to add a watermark to the video.",
|
||||
advanced=True,
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Video.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(
|
||||
widgets=["model", "model.resolution", "model.duration"],
|
||||
input_groups=["model.reference_videos"],
|
||||
),
|
||||
expr="""
|
||||
(
|
||||
$rate480 := 10044;
|
||||
$rate720 := 21600;
|
||||
$rate1080 := 48800;
|
||||
$m := widgets.model;
|
||||
$hasVideo := $lookup(inputGroups, "model.reference_videos") > 0;
|
||||
$noVideoPricePer1K := $contains($m, "fast") ? 0.008008 : 0.01001;
|
||||
$videoPricePer1K := $contains($m, "fast") ? 0.004719 : 0.006149;
|
||||
$res := $lookup(widgets, "model.resolution");
|
||||
$dur := $lookup(widgets, "model.duration");
|
||||
$rate := $res = "1080p" ? $rate1080 :
|
||||
$res = "720p" ? $rate720 :
|
||||
$rate480;
|
||||
$noVideoCost := $dur * $rate * $noVideoPricePer1K / 1000;
|
||||
$minVideoFactor := $ceil($dur * 5 / 3);
|
||||
$minVideoCost := $minVideoFactor * $rate * $videoPricePer1K / 1000;
|
||||
$maxVideoCost := (15 + $dur) * $rate * $videoPricePer1K / 1000;
|
||||
$hasVideo
|
||||
? {
|
||||
"type": "range_usd",
|
||||
"min_usd": $minVideoCost,
|
||||
"max_usd": $maxVideoCost,
|
||||
"format": {"approximate": true}
|
||||
}
|
||||
: {
|
||||
"type": "usd",
|
||||
"usd": $noVideoCost,
|
||||
"format": {"approximate": true}
|
||||
}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
model: dict,
|
||||
seed: int,
|
||||
watermark: bool,
|
||||
) -> IO.NodeOutput:
|
||||
validate_string(model["prompt"], strip_whitespace=True, min_length=1)
|
||||
|
||||
reference_images = model.get("reference_images", {})
|
||||
reference_videos = model.get("reference_videos", {})
|
||||
reference_audios = model.get("reference_audios", {})
|
||||
|
||||
if not reference_images and not reference_videos:
|
||||
raise ValueError("At least one reference image or video is required.")
|
||||
|
||||
model_id = SEEDANCE_MODELS[model["model"]]
|
||||
has_video_input = len(reference_videos) > 0
|
||||
|
||||
if model.get("auto_downscale") and reference_videos:
|
||||
max_px = (
|
||||
SEEDANCE2_REF_VIDEO_PIXEL_LIMITS.get(model_id, {})
|
||||
.get(model["resolution"], {})
|
||||
.get("max")
|
||||
)
|
||||
if max_px:
|
||||
for key in reference_videos:
|
||||
reference_videos[key] = resize_video_to_pixel_budget(
|
||||
reference_videos[key], max_px
|
||||
)
|
||||
|
||||
total_video_duration = 0.0
|
||||
for i, key in enumerate(reference_videos, 1):
|
||||
video = reference_videos[key]
|
||||
_validate_ref_video_pixels(video, model_id, model["resolution"], i)
|
||||
try:
|
||||
dur = video.get_duration()
|
||||
if dur < 1.8:
|
||||
raise ValueError(f"Reference video {i} is too short: {dur:.1f}s. Minimum duration is 1.8 seconds.")
|
||||
total_video_duration += dur
|
||||
except ValueError:
|
||||
raise
|
||||
except Exception:
|
||||
pass
|
||||
if total_video_duration > 15.1:
|
||||
raise ValueError(f"Total reference video duration is {total_video_duration:.1f}s. Maximum is 15.1 seconds.")
|
||||
|
||||
total_audio_duration = 0.0
|
||||
for i, key in enumerate(reference_audios, 1):
|
||||
audio = reference_audios[key]
|
||||
dur = int(audio["waveform"].shape[-1]) / int(audio["sample_rate"])
|
||||
if dur < 1.8:
|
||||
raise ValueError(f"Reference audio {i} is too short: {dur:.1f}s. Minimum duration is 1.8 seconds.")
|
||||
total_audio_duration += dur
|
||||
if total_audio_duration > 15.1:
|
||||
raise ValueError(f"Total reference audio duration is {total_audio_duration:.1f}s. Maximum is 15.1 seconds.")
|
||||
|
||||
content: list[TaskTextContent | TaskImageContent | TaskVideoContent | TaskAudioContent] = [
|
||||
TaskTextContent(text=model["prompt"]),
|
||||
]
|
||||
for i, key in enumerate(reference_images, 1):
|
||||
content.append(
|
||||
TaskImageContent(
|
||||
image_url=TaskImageContentUrl(
|
||||
url=await upload_image_to_comfyapi(
|
||||
cls,
|
||||
image=reference_images[key],
|
||||
wait_label=f"Uploading image {i}",
|
||||
),
|
||||
),
|
||||
role="reference_image",
|
||||
),
|
||||
)
|
||||
for i, key in enumerate(reference_videos, 1):
|
||||
content.append(
|
||||
TaskVideoContent(
|
||||
video_url=TaskVideoContentUrl(
|
||||
url=await upload_video_to_comfyapi(
|
||||
cls,
|
||||
reference_videos[key],
|
||||
wait_label=f"Uploading video {i}",
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
for key in reference_audios:
|
||||
content.append(
|
||||
TaskAudioContent(
|
||||
audio_url=TaskAudioContentUrl(
|
||||
url=await upload_audio_to_comfyapi(
|
||||
cls,
|
||||
reference_audios[key],
|
||||
container_format="mp3",
|
||||
codec_name="libmp3lame",
|
||||
mime_type="audio/mpeg",
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
initial_response = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path=BYTEPLUS_TASK_ENDPOINT, method="POST"),
|
||||
data=Seedance2TaskCreationRequest(
|
||||
model=model_id,
|
||||
content=content,
|
||||
generate_audio=model["generate_audio"],
|
||||
resolution=model["resolution"],
|
||||
ratio=model["ratio"],
|
||||
duration=model["duration"],
|
||||
seed=seed,
|
||||
watermark=watermark,
|
||||
),
|
||||
response_model=TaskCreationResponse,
|
||||
)
|
||||
response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"{BYTEPLUS_SEEDANCE2_TASK_STATUS_ENDPOINT}/{initial_response.id}"),
|
||||
response_model=TaskStatusResponse,
|
||||
status_extractor=lambda r: r.status,
|
||||
price_extractor=_seedance2_price_extractor(model_id, has_video_input=has_video_input),
|
||||
poll_interval=9,
|
||||
max_poll_attempts=180,
|
||||
)
|
||||
return IO.NodeOutput(await download_url_to_video_output(response.content.video_url))
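# Illustrative sketch (not part of the node): the price-badge JSONata above, mirrored
# in plain Python so the range arithmetic is easier to follow. The helper name and its
# parameters are hypothetical; the per-resolution rate and the per-1K prices are passed
# in because their concrete values are defined elsewhere in this file.
import math

def _estimate_seedance2_cost(duration_s: float, rate: float, video_price_per_1k: float,
                             no_video_price_per_1k: float, has_video: bool) -> tuple[float, float]:
    """Return (min_usd, max_usd); min equals max when there is no reference video."""
    if not has_video:
        cost = duration_s * rate * no_video_price_per_1k / 1000
        return cost, cost
    min_factor = math.ceil(duration_s * 5 / 3)  # mirrors $minVideoFactor
    min_cost = min_factor * rate * video_price_per_1k / 1000
    max_cost = (15 + duration_s) * rate * video_price_per_1k / 1000
    return min_cost, max_cost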
|
||||
|
||||
|
||||
async def process_video_task(
|
||||
cls: type[IO.ComfyNode],
|
||||
payload: Text2VideoTaskCreationRequest | Image2VideoTaskCreationRequest,
|
||||
estimated_duration: int | None,
|
||||
) -> IO.NodeOutput:
|
||||
if payload.model in DEPRECATED_MODELS:
|
||||
logger.warning(
|
||||
"Model '%s' is deprecated and will be deactivated on May 13, 2026. "
|
||||
"Please switch to a newer model. Recommended: seedance-1-0-pro-fast-251015.",
|
||||
payload.model,
|
||||
)
|
||||
initial_response = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path=BYTEPLUS_TASK_ENDPOINT, method="POST"),
|
||||
data=payload,
|
||||
response_model=TaskCreationResponse,
|
||||
)
|
||||
response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"{BYTEPLUS_TASK_STATUS_ENDPOINT}/{initial_response.id}"),
|
||||
status_extractor=lambda r: r.status,
|
||||
estimated_duration=estimated_duration,
|
||||
response_model=TaskStatusResponse,
|
||||
)
|
||||
return IO.NodeOutput(await download_url_to_video_output(response.content.video_url))
|
||||
|
||||
|
||||
class ByteDanceExtension(ComfyExtension):
|
||||
@override
|
||||
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
|
||||
@ -1050,6 +1637,9 @@ class ByteDanceExtension(ComfyExtension):
|
||||
ByteDanceImageToVideoNode,
|
||||
ByteDanceFirstLastFrameNode,
|
||||
ByteDanceImageReferenceNode,
|
||||
ByteDance2TextToVideoNode,
|
||||
ByteDance2FirstLastFrameNode,
|
||||
ByteDance2ReferenceNode,
|
||||
]
|
||||
|
||||
|
||||
|
||||
@ -221,14 +221,17 @@ class TencentTextToModelNode(IO.ComfyNode):
|
||||
response_model=To3DProTaskResultResponse,
|
||||
status_extractor=lambda r: r.Status,
|
||||
)
|
||||
obj_result = await download_and_extract_obj_zip(get_file_from_response(result.ResultFile3Ds, "obj").Url)
|
||||
obj_file_response = get_file_from_response(result.ResultFile3Ds, "obj", raise_if_not_found=False)
|
||||
obj_result = None
|
||||
if obj_file_response:
|
||||
obj_result = await download_and_extract_obj_zip(obj_file_response.Url)
|
||||
return IO.NodeOutput(
|
||||
f"{task_id}.glb",
|
||||
await download_url_to_file_3d(
|
||||
get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
|
||||
),
|
||||
obj_result.obj,
|
||||
obj_result.texture,
|
||||
obj_result.obj if obj_result else None,
|
||||
obj_result.texture if obj_result else None,
|
||||
)
|
||||
|
||||
|
||||
@ -378,17 +381,30 @@ class TencentImageToModelNode(IO.ComfyNode):
|
||||
response_model=To3DProTaskResultResponse,
|
||||
status_extractor=lambda r: r.Status,
|
||||
)
|
||||
obj_result = await download_and_extract_obj_zip(get_file_from_response(result.ResultFile3Ds, "obj").Url)
|
||||
obj_file_response = get_file_from_response(result.ResultFile3Ds, "obj", raise_if_not_found=False)
|
||||
if obj_file_response:
|
||||
obj_result = await download_and_extract_obj_zip(obj_file_response.Url)
|
||||
return IO.NodeOutput(
|
||||
f"{task_id}.glb",
|
||||
await download_url_to_file_3d(
|
||||
get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
|
||||
),
|
||||
obj_result.obj,
|
||||
obj_result.texture,
|
||||
obj_result.metallic if obj_result.metallic is not None else torch.zeros(1, 1, 1, 3),
|
||||
obj_result.normal if obj_result.normal is not None else torch.zeros(1, 1, 1, 3),
|
||||
obj_result.roughness if obj_result.roughness is not None else torch.zeros(1, 1, 1, 3),
|
||||
)
|
||||
return IO.NodeOutput(
|
||||
f"{task_id}.glb",
|
||||
await download_url_to_file_3d(
|
||||
get_file_from_response(result.ResultFile3Ds, "glb").Url, "glb", task_id=task_id
|
||||
),
|
||||
obj_result.obj,
|
||||
obj_result.texture,
|
||||
obj_result.metallic if obj_result.metallic is not None else torch.zeros(1, 1, 1, 3),
|
||||
obj_result.normal if obj_result.normal is not None else torch.zeros(1, 1, 1, 3),
|
||||
obj_result.roughness if obj_result.roughness is not None else torch.zeros(1, 1, 1, 3),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
|
||||
|
||||
|
||||
@ -357,13 +357,18 @@ def calculate_tokens_price_image_1_5(response: OpenAIImageGenerationResponse) ->
|
||||
return ((response.usage.input_tokens * 8.0) + (response.usage.output_tokens * 32.0)) / 1_000_000.0
|
||||
|
||||
|
||||
def calculate_tokens_price_image_2(response: OpenAIImageGenerationResponse) -> float | None:
|
||||
# https://platform.openai.com/docs/pricing - gpt-image-2: input $8/1M, output $30/1M
|
||||
return ((response.usage.input_tokens * 8.0) + (response.usage.output_tokens * 30.0)) / 1_000_000.0
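# Worked example for the two token-price helpers above (illustrative only): gpt-image-1.5
# bills output tokens at $32/1M and gpt-image-2 at $30/1M, both with $8/1M input tokens,
# so identical usage is slightly cheaper on image-2.
#   usage: 1_000 input tokens, 10_000 output tokens
#   image 1.5: (1_000 * 8.0 + 10_000 * 32.0) / 1_000_000 = $0.328
#   image 2:   (1_000 * 8.0 + 10_000 * 30.0) / 1_000_000 = $0.308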
|
||||
|
||||
|
||||
class OpenAIGPTImage1(IO.ComfyNode):
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="OpenAIGPTImage1",
|
||||
display_name="OpenAI GPT Image 1.5",
|
||||
display_name="OpenAI GPT Image 1 & 1.5",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's GPT Image endpoint.",
|
||||
inputs=[
|
||||
@ -442,14 +447,22 @@ class OpenAIGPTImage1(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["quality", "n"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["quality", "n", "model"]),
|
||||
expr="""
|
||||
(
|
||||
$ranges := {
|
||||
"low": [0.011, 0.02],
|
||||
"medium": [0.046, 0.07],
|
||||
"high": [0.167, 0.3]
|
||||
};
|
||||
$m := widgets.model;
|
||||
$ranges :=
|
||||
$contains($m, "gpt-image-1.5")
|
||||
? {
|
||||
"low": [0.009, 0.016],
|
||||
"medium": [0.037, 0.056],
|
||||
"high": [0.134, 0.240]
|
||||
}
|
||||
: {
|
||||
"low": [0.011, 0.020],
|
||||
"medium": [0.046, 0.070],
|
||||
"high": [0.167, 0.300]
|
||||
};
|
||||
$range := $lookup($ranges, widgets.quality);
|
||||
$n := widgets.n;
|
||||
($n = 1)
|
||||
@ -564,6 +577,261 @@ class OpenAIGPTImage1(IO.ComfyNode):
|
||||
return IO.NodeOutput(await validate_and_cast_response(response))
|
||||
|
||||
|
||||
_GPT_IMAGE_2_SIZES = [
|
||||
"auto",
|
||||
"1024x1024",
|
||||
"1536x1024",
|
||||
"1024x1536",
|
||||
"2048x2048",
|
||||
"2048x1152",
|
||||
"3840x2160",
|
||||
"2160x3840",
|
||||
]
|
||||
|
||||
|
||||
def _resolve_gpt_image_2_size(size: str, custom_width: int, custom_height: int) -> str:
|
||||
if custom_width <= 0 or custom_height <= 0:
|
||||
return size
|
||||
w, h = custom_width, custom_height
|
||||
if max(w, h) > 3840:
|
||||
raise ValueError(f"Maximum edge length must be ≤ 3840px, got {max(w, h)}")
|
||||
if w % 16 != 0 or h % 16 != 0:
|
||||
raise ValueError(f"Both edges must be multiples of 16px, got {w}x{h}")
|
||||
if max(w, h) / min(w, h) > 3:
|
||||
raise ValueError(f"Long-to-short edge ratio must not exceed 3:1, got {max(w, h) / min(w, h):.2f}:1")
|
||||
total = w * h
|
||||
if total < 655_360 or total > 8_294_400:
|
||||
raise ValueError(f"Total pixels must be between 655,360 and 8,294,400, got {total:,}")
|
||||
return f"{w}x{h}"
|
||||
|
||||
|
||||
class OpenAIGPTImage2(IO.ComfyNode):
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="OpenAIGPTImage2",
|
||||
display_name="OpenAI GPT Image 2",
|
||||
category="api node/image/OpenAI",
|
||||
description="Generates images synchronously via OpenAI's GPT-Image-2 endpoint.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
default="",
|
||||
multiline=True,
|
||||
tooltip="Text prompt for GPT Image 2",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"seed",
|
||||
default=0,
|
||||
min=0,
|
||||
max=2**31 - 1,
|
||||
step=1,
|
||||
display_mode=IO.NumberDisplay.number,
|
||||
control_after_generate=True,
|
||||
tooltip="not implemented yet in backend",
|
||||
optional=True,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"quality",
|
||||
default="auto",
|
||||
options=["auto", "low", "medium", "high"],
|
||||
tooltip="Image quality. 'auto' lets the model decide based on the prompt. Square images are typically fastest.",
|
||||
optional=True,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"background",
|
||||
default="auto",
|
||||
options=["auto", "opaque"],
|
||||
tooltip="Background style. GPT-Image-2 does not support transparent backgrounds.",
|
||||
optional=True,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"size",
|
||||
default="auto",
|
||||
options=_GPT_IMAGE_2_SIZES,
|
||||
tooltip="Output image dimensions. Ignored when custom_width and custom_height are both non-zero.",
|
||||
optional=True,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"custom_width",
|
||||
default=0,
|
||||
min=0,
|
||||
max=3840,
|
||||
step=16,
|
||||
display_mode=IO.NumberDisplay.number,
|
||||
tooltip="Custom output width in pixels. Set to 0 (default) to use the size preset. When both width and height are non-zero, they override the size preset. Slider enforces multiples of 16 and max edge 3840px. Additional constraints checked at generation: ratio ≤ 3:1, total pixels 655,360–8,294,400.",
|
||||
optional=True,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"custom_height",
|
||||
default=0,
|
||||
min=0,
|
||||
max=3840,
|
||||
step=16,
|
||||
display_mode=IO.NumberDisplay.number,
|
||||
tooltip="Custom output height in pixels. Set to 0 (default) to use the size preset. When both width and height are non-zero, they override the size preset. Slider enforces multiples of 16 and max edge 3840px. Additional constraints checked at generation: ratio ≤ 3:1, total pixels 655,360–8,294,400.",
|
||||
optional=True,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"num_images",
|
||||
default=1,
|
||||
min=1,
|
||||
max=8,
|
||||
step=1,
|
||||
tooltip="Number of images to generate per run.",
|
||||
display_mode=IO.NumberDisplay.number,
|
||||
optional=True,
|
||||
),
|
||||
IO.Image.Input(
|
||||
"image",
|
||||
tooltip="Optional reference image for image editing.",
|
||||
optional=True,
|
||||
),
|
||||
IO.Mask.Input(
|
||||
"mask",
|
||||
tooltip="Optional mask for inpainting (white areas will be replaced).",
|
||||
optional=True,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
options=["gpt-image-2"],
|
||||
default="gpt-image-2",
|
||||
tooltip="Model used for image generation.",
|
||||
optional=True,
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Image.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["quality", "num_images"]),
|
||||
expr="""
|
||||
(
|
||||
$ranges := {
|
||||
"low": [0.005, 0.010],
|
||||
"medium": [0.041, 0.060],
|
||||
"high": [0.165, 0.250]
|
||||
};
|
||||
$q := widgets.quality;
|
||||
$n := widgets.num_images;
|
||||
$n := ($n != null and $n != 0) ? $n : 1;
|
||||
$range := $lookup($ranges, $q);
|
||||
$lo := $range ? $range[0] : 0.005;
|
||||
$hi := $range ? $range[1] : 0.250;
|
||||
($n = 1)
|
||||
? {"type":"range_usd","min_usd": $lo, "max_usd": $hi, "format": {"approximate": ($range ? false : true)}}
|
||||
: {
|
||||
"type":"range_usd",
|
||||
"min_usd": $lo,
|
||||
"max_usd": $hi,
|
||||
"format": {"approximate": ($range ? false : true), "suffix": " x " & $string($n) & "/Run"}
|
||||
}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
prompt: str,
|
||||
seed: int = 0,
|
||||
quality: str = "auto",
|
||||
background: str = "auto",
|
||||
image: Input.Image | None = None,
|
||||
mask: Input.Image | None = None,
|
||||
num_images: int = 1,
|
||||
size: str = "auto",
|
||||
custom_width: int = 0,
|
||||
custom_height: int = 0,
|
||||
model: str = "gpt-image-2",
|
||||
) -> IO.NodeOutput:
|
||||
validate_string(prompt, strip_whitespace=False)
|
||||
|
||||
if mask is not None and image is None:
|
||||
raise ValueError("Cannot use a mask without an input image")
|
||||
|
||||
resolved_size = _resolve_gpt_image_2_size(size, custom_width, custom_height)
|
||||
|
||||
if image is not None:
|
||||
files = []
|
||||
batch_size = image.shape[0]
|
||||
for i in range(batch_size):
|
||||
single_image = image[i : i + 1]
|
||||
scaled_image = downscale_image_tensor(single_image, total_pixels=2048 * 2048).squeeze()
|
||||
|
||||
image_np = (scaled_image.numpy() * 255).astype(np.uint8)
|
||||
img = Image.fromarray(image_np)
|
||||
img_byte_arr = BytesIO()
|
||||
img.save(img_byte_arr, format="PNG")
|
||||
img_byte_arr.seek(0)
|
||||
|
||||
if batch_size == 1:
|
||||
files.append(("image", (f"image_{i}.png", img_byte_arr, "image/png")))
|
||||
else:
|
||||
files.append(("image[]", (f"image_{i}.png", img_byte_arr, "image/png")))
|
||||
|
||||
if mask is not None:
|
||||
if image.shape[0] != 1:
|
||||
raise Exception("Cannot use a mask with multiple image")
|
||||
if mask.shape[1:] != image.shape[1:-1]:
|
||||
raise Exception("Mask and Image must be the same size")
|
||||
_, height, width = mask.shape
|
||||
rgba_mask = torch.zeros(height, width, 4, device="cpu")
|
||||
rgba_mask[:, :, 3] = 1 - mask.squeeze().cpu()
|
||||
|
||||
scaled_mask = downscale_image_tensor(rgba_mask.unsqueeze(0), total_pixels=2048 * 2048).squeeze()
|
||||
|
||||
mask_np = (scaled_mask.numpy() * 255).astype(np.uint8)
|
||||
mask_img = Image.fromarray(mask_np)
|
||||
mask_img_byte_arr = BytesIO()
|
||||
mask_img.save(mask_img_byte_arr, format="PNG")
|
||||
mask_img_byte_arr.seek(0)
|
||||
files.append(("mask", ("mask.png", mask_img_byte_arr, "image/png")))
|
||||
|
||||
response = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/openai/images/edits", method="POST"),
|
||||
response_model=OpenAIImageGenerationResponse,
|
||||
data=OpenAIImageEditRequest(
|
||||
model=model,
|
||||
prompt=prompt,
|
||||
quality=quality,
|
||||
background=background,
|
||||
n=num_images,
|
||||
size=resolved_size,
|
||||
moderation="low",
|
||||
),
|
||||
content_type="multipart/form-data",
|
||||
files=files,
|
||||
price_extractor=calculate_tokens_price_image_2,
|
||||
)
|
||||
else:
|
||||
response = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/openai/images/generations", method="POST"),
|
||||
response_model=OpenAIImageGenerationResponse,
|
||||
data=OpenAIImageGenerationRequest(
|
||||
model=model,
|
||||
prompt=prompt,
|
||||
quality=quality,
|
||||
background=background,
|
||||
n=num_images,
|
||||
size=resolved_size,
|
||||
moderation="low",
|
||||
),
|
||||
price_extractor=calculate_tokens_price_image_2,
|
||||
)
|
||||
return IO.NodeOutput(await validate_and_cast_response(response))
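# Illustrative sketch (helper name hypothetical, not part of the node) of the mask
# handling in execute() above: the ComfyUI mask (1.0 = region to regenerate) becomes
# the inverted alpha channel of an RGBA image, because the OpenAI edits endpoint
# repaints the transparent pixels of the supplied mask.
import torch

def _mask_to_rgba(mask: torch.Tensor) -> torch.Tensor:
    """mask: [1, H, W] with 1.0 marking the region to regenerate."""
    _, height, width = mask.shape
    rgba = torch.zeros(height, width, 4)
    rgba[:, :, 3] = 1 - mask.squeeze()  # editable area -> alpha 0 (transparent)
    return rgba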
|
||||
|
||||
|
||||
class OpenAIChatNode(IO.ComfyNode):
|
||||
"""
|
||||
Node to generate text responses from an OpenAI model.
|
||||
@ -913,6 +1181,7 @@ class OpenAIExtension(ComfyExtension):
|
||||
OpenAIDalle2,
|
||||
OpenAIDalle3,
|
||||
OpenAIGPTImage1,
|
||||
OpenAIGPTImage2,
|
||||
OpenAIChatNode,
|
||||
OpenAIInputFiles,
|
||||
OpenAIChatConfig,
|
||||
|
||||
@ -17,6 +17,44 @@ from comfy_api_nodes.util import (
|
||||
)
|
||||
from comfy_extras.nodes_images import SVG
|
||||
|
||||
_ARROW_MODELS = ["arrow-1.1", "arrow-1.1-max", "arrow-preview"]
|
||||
|
||||
|
||||
def _arrow_sampling_inputs():
|
||||
"""Shared sampling inputs for all Arrow model variants."""
|
||||
return [
|
||||
IO.Float.Input(
|
||||
"temperature",
|
||||
default=1.0,
|
||||
min=0.0,
|
||||
max=2.0,
|
||||
step=0.1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Randomness control. Higher values increase randomness.",
|
||||
advanced=True,
|
||||
),
|
||||
IO.Float.Input(
|
||||
"top_p",
|
||||
default=1.0,
|
||||
min=0.05,
|
||||
max=1.0,
|
||||
step=0.05,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Nucleus sampling parameter.",
|
||||
advanced=True,
|
||||
),
|
||||
IO.Float.Input(
|
||||
"presence_penalty",
|
||||
default=0.0,
|
||||
min=-2.0,
|
||||
max=2.0,
|
||||
step=0.1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Token presence penalty.",
|
||||
advanced=True,
|
||||
),
|
||||
]
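# Note: the list comprehension used in the schemas below,
#   options=[IO.DynamicCombo.Option(m, _arrow_sampling_inputs()) for m in _ARROW_MODELS]
# expands to one Option per model ("arrow-1.1", "arrow-1.1-max", "arrow-preview"), each
# carrying the same temperature / top_p / presence_penalty inputs that the previous
# schema spelled out by hand for "arrow-preview" only.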
|
||||
|
||||
|
||||
class QuiverTextToSVGNode(IO.ComfyNode):
|
||||
@classmethod
|
||||
@ -39,6 +77,7 @@ class QuiverTextToSVGNode(IO.ComfyNode):
|
||||
default="",
|
||||
tooltip="Additional style or formatting guidance.",
|
||||
optional=True,
|
||||
advanced=True,
|
||||
),
|
||||
IO.Autogrow.Input(
|
||||
"reference_images",
|
||||
@ -53,43 +92,7 @@ class QuiverTextToSVGNode(IO.ComfyNode):
|
||||
),
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
options=[
|
||||
IO.DynamicCombo.Option(
|
||||
"arrow-preview",
|
||||
[
|
||||
IO.Float.Input(
|
||||
"temperature",
|
||||
default=1.0,
|
||||
min=0.0,
|
||||
max=2.0,
|
||||
step=0.1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Randomness control. Higher values increase randomness.",
|
||||
advanced=True,
|
||||
),
|
||||
IO.Float.Input(
|
||||
"top_p",
|
||||
default=1.0,
|
||||
min=0.05,
|
||||
max=1.0,
|
||||
step=0.05,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Nucleus sampling parameter.",
|
||||
advanced=True,
|
||||
),
|
||||
IO.Float.Input(
|
||||
"presence_penalty",
|
||||
default=0.0,
|
||||
min=-2.0,
|
||||
max=2.0,
|
||||
step=0.1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Token presence penalty.",
|
||||
advanced=True,
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
options=[IO.DynamicCombo.Option(m, _arrow_sampling_inputs()) for m in _ARROW_MODELS],
|
||||
tooltip="Model to use for SVG generation.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
@ -112,7 +115,16 @@ class QuiverTextToSVGNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr="""{"type":"usd","usd":0.429}""",
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model"]),
|
||||
expr="""
|
||||
(
|
||||
$contains(widgets.model, "max")
|
||||
? {"type":"usd","usd":0.3575}
|
||||
: $contains(widgets.model, "preview")
|
||||
? {"type":"usd","usd":0.429}
|
||||
: {"type":"usd","usd":0.286}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
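# Illustrative sketch of the tiered Arrow pricing used by both price badges in this file
# (helper name hypothetical; values mirror the JSONata expression above):
def _arrow_price_usd(model: str) -> float:
    if "max" in model:        # arrow-1.1-max
        return 0.3575
    if "preview" in model:    # arrow-preview
        return 0.429
    return 0.286              # arrow-1.1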
|
||||
|
||||
@ -176,12 +188,13 @@ class QuiverImageToSVGNode(IO.ComfyNode):
|
||||
"auto_crop",
|
||||
default=False,
|
||||
tooltip="Automatically crop to the dominant subject.",
|
||||
advanced=True,
|
||||
),
|
||||
IO.DynamicCombo.Input(
|
||||
"model",
|
||||
options=[
|
||||
IO.DynamicCombo.Option(
|
||||
"arrow-preview",
|
||||
m,
|
||||
[
|
||||
IO.Int.Input(
|
||||
"target_size",
|
||||
@ -189,39 +202,12 @@ class QuiverImageToSVGNode(IO.ComfyNode):
|
||||
min=128,
|
||||
max=4096,
|
||||
tooltip="Square resize target in pixels.",
|
||||
),
|
||||
IO.Float.Input(
|
||||
"temperature",
|
||||
default=1.0,
|
||||
min=0.0,
|
||||
max=2.0,
|
||||
step=0.1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Randomness control. Higher values increase randomness.",
|
||||
advanced=True,
|
||||
),
|
||||
IO.Float.Input(
|
||||
"top_p",
|
||||
default=1.0,
|
||||
min=0.05,
|
||||
max=1.0,
|
||||
step=0.05,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Nucleus sampling parameter.",
|
||||
advanced=True,
|
||||
),
|
||||
IO.Float.Input(
|
||||
"presence_penalty",
|
||||
default=0.0,
|
||||
min=-2.0,
|
||||
max=2.0,
|
||||
step=0.1,
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
tooltip="Token presence penalty.",
|
||||
advanced=True,
|
||||
),
|
||||
*_arrow_sampling_inputs(),
|
||||
],
|
||||
),
|
||||
)
|
||||
for m in _ARROW_MODELS
|
||||
],
|
||||
tooltip="Model to use for SVG vectorization.",
|
||||
),
|
||||
@ -245,7 +231,16 @@ class QuiverImageToSVGNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr="""{"type":"usd","usd":0.429}""",
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model"]),
|
||||
expr="""
|
||||
(
|
||||
$contains(widgets.model, "max")
|
||||
? {"type":"usd","usd":0.3575}
|
||||
: $contains(widgets.model, "preview")
|
||||
? {"type":"usd","usd":0.429}
|
||||
: {"type":"usd","usd":0.286}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
comfy_api_nodes/nodes_sonilo.py (new file, 287 lines)
@ -0,0 +1,287 @@
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import aiohttp
|
||||
from typing_extensions import override
|
||||
|
||||
from comfy_api.latest import IO, ComfyExtension, Input
|
||||
from comfy_api_nodes.util import (
|
||||
ApiEndpoint,
|
||||
audio_bytes_to_audio_input,
|
||||
upload_video_to_comfyapi,
|
||||
validate_string,
|
||||
)
|
||||
from comfy_api_nodes.util._helpers import (
|
||||
default_base_url,
|
||||
get_auth_header,
|
||||
get_node_id,
|
||||
is_processing_interrupted,
|
||||
)
|
||||
from comfy_api_nodes.util.common_exceptions import ProcessingInterrupted
|
||||
from server import PromptServer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SoniloVideoToMusic(IO.ComfyNode):
|
||||
"""Generate music from video using Sonilo's AI model."""
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls) -> IO.Schema:
|
||||
return IO.Schema(
|
||||
node_id="SoniloVideoToMusic",
|
||||
display_name="Sonilo Video to Music",
|
||||
category="api node/audio/Sonilo",
|
||||
description="Generate music from video content using Sonilo's AI model. "
|
||||
"Analyzes the video and creates matching music.",
|
||||
inputs=[
|
||||
IO.Video.Input(
|
||||
"video",
|
||||
tooltip="Input video to generate music from. Maximum duration: 6 minutes.",
|
||||
),
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
default="",
|
||||
multiline=True,
|
||||
tooltip="Optional text prompt to guide music generation. "
|
||||
"Leave empty for best quality - the model will fully analyze the video content.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"seed",
|
||||
default=0,
|
||||
min=0,
|
||||
max=0xFFFFFFFFFFFFFFFF,
|
||||
control_after_generate=True,
|
||||
tooltip="Seed for reproducibility. Currently ignored by the Sonilo "
|
||||
"service but kept for graph consistency.",
|
||||
),
|
||||
],
|
||||
outputs=[IO.Audio.Output()],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr='{"type":"usd","usd":0.009,"format":{"suffix":"/second"}}',
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
video: Input.Video,
|
||||
prompt: str = "",
|
||||
seed: int = 0,
|
||||
) -> IO.NodeOutput:
|
||||
video_url = await upload_video_to_comfyapi(cls, video, max_duration=360)
|
||||
form = aiohttp.FormData()
|
||||
form.add_field("video_url", video_url)
|
||||
if prompt.strip():
|
||||
form.add_field("prompt", prompt.strip())
|
||||
audio_bytes = await _stream_sonilo_music(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/sonilo/v2m/generate", method="POST"),
|
||||
form,
|
||||
)
|
||||
return IO.NodeOutput(audio_bytes_to_audio_input(audio_bytes))
|
||||
|
||||
|
||||
class SoniloTextToMusic(IO.ComfyNode):
|
||||
"""Generate music from a text prompt using Sonilo's AI model."""
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls) -> IO.Schema:
|
||||
return IO.Schema(
|
||||
node_id="SoniloTextToMusic",
|
||||
display_name="Sonilo Text to Music",
|
||||
category="api node/audio/Sonilo",
|
||||
description="Generate music from a text prompt using Sonilo's AI model. "
|
||||
"Leave duration at 0 to let the model infer it from the prompt.",
|
||||
inputs=[
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
default="",
|
||||
multiline=True,
|
||||
tooltip="Text prompt describing the music to generate.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"duration",
|
||||
default=0,
|
||||
min=0,
|
||||
max=360,
|
||||
tooltip="Target duration in seconds. Set to 0 to let the model "
|
||||
"infer the duration from the prompt. Maximum: 6 minutes.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"seed",
|
||||
default=0,
|
||||
min=0,
|
||||
max=0xFFFFFFFFFFFFFFFF,
|
||||
control_after_generate=True,
|
||||
tooltip="Seed for reproducibility. Currently ignored by the Sonilo "
|
||||
"service but kept for graph consistency.",
|
||||
),
|
||||
],
|
||||
outputs=[IO.Audio.Output()],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["duration"]),
|
||||
expr="""
|
||||
(
|
||||
widgets.duration > 0
|
||||
? {"type":"usd","usd": 0.005 * widgets.duration}
|
||||
: {"type":"usd","usd": 0.005, "format":{"suffix":"/second"}}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
prompt: str,
|
||||
duration: int = 0,
|
||||
seed: int = 0,
|
||||
) -> IO.NodeOutput:
|
||||
validate_string(prompt, strip_whitespace=True, min_length=1)
|
||||
form = aiohttp.FormData()
|
||||
form.add_field("prompt", prompt)
|
||||
if duration > 0:
|
||||
form.add_field("duration", str(duration))
|
||||
audio_bytes = await _stream_sonilo_music(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/sonilo/t2m/generate", method="POST"),
|
||||
form,
|
||||
)
|
||||
return IO.NodeOutput(audio_bytes_to_audio_input(audio_bytes))
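# Illustrative note on the price badge above (not executed by the node): an explicit
# duration is billed at $0.005/second, while duration 0 lets the model pick the length,
# so the badge can only show the per-second rate.
#   duration=120 -> 120 * 0.005 = $0.60
#   duration=0   -> "$0.005/second" (final cost depends on the generated length)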
|
||||
|
||||
|
||||
async def _stream_sonilo_music(
|
||||
cls: type[IO.ComfyNode],
|
||||
endpoint: ApiEndpoint,
|
||||
form: aiohttp.FormData,
|
||||
) -> bytes:
|
||||
"""POST ``form`` to Sonilo, read the NDJSON stream, and return the first stream's audio bytes."""
|
||||
url = urljoin(default_base_url().rstrip("/") + "/", endpoint.path.lstrip("/"))
|
||||
|
||||
headers: dict[str, str] = {}
|
||||
headers.update(get_auth_header(cls))
|
||||
headers.update(endpoint.headers)
|
||||
|
||||
node_id = get_node_id(cls)
|
||||
start_ts = time.monotonic()
|
||||
last_chunk_status_ts = 0.0
|
||||
audio_streams: dict[int, list[bytes]] = {}
|
||||
title: str | None = None
|
||||
|
||||
timeout = aiohttp.ClientTimeout(total=1200.0, sock_read=300.0)
|
||||
async with aiohttp.ClientSession(timeout=timeout) as session:
|
||||
PromptServer.instance.send_progress_text("Status: Queued", node_id)
|
||||
async with session.post(url, data=form, headers=headers) as resp:
|
||||
if resp.status >= 400:
|
||||
msg = await _extract_error_message(resp)
|
||||
raise Exception(f"Sonilo API error ({resp.status}): {msg}")
|
||||
|
||||
while True:
|
||||
if is_processing_interrupted():
|
||||
raise ProcessingInterrupted("Task cancelled")
|
||||
|
||||
raw_line = await resp.content.readline()
|
||||
if not raw_line:
|
||||
break
|
||||
|
||||
line = raw_line.decode("utf-8").strip()
|
||||
if not line:
|
||||
continue
|
||||
|
||||
try:
|
||||
evt = json.loads(line)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("Sonilo: skipping malformed NDJSON line")
|
||||
continue
|
||||
|
||||
evt_type = evt.get("type")
|
||||
if evt_type == "error":
|
||||
code = evt.get("code", "UNKNOWN")
|
||||
message = evt.get("message", "Unknown error")
|
||||
raise Exception(f"Sonilo generation error ({code}): {message}")
|
||||
if evt_type == "duration":
|
||||
duration_sec = evt.get("duration_sec")
|
||||
if duration_sec is not None:
|
||||
PromptServer.instance.send_progress_text(
|
||||
f"Status: Generating\nVideo duration: {duration_sec:.1f}s",
|
||||
node_id,
|
||||
)
|
||||
elif evt_type in ("titles", "title"):
|
||||
# v2m sends a "titles" list, t2m sends a scalar "title"
|
||||
if evt_type == "titles":
|
||||
titles = evt.get("titles", [])
|
||||
if titles:
|
||||
title = titles[0]
|
||||
else:
|
||||
title = evt.get("title") or title
|
||||
if title:
|
||||
PromptServer.instance.send_progress_text(
|
||||
f"Status: Generating\nTitle: {title}",
|
||||
node_id,
|
||||
)
|
||||
elif evt_type == "audio_chunk":
|
||||
stream_idx = evt.get("stream_index", 0)
|
||||
chunk_data = base64.b64decode(evt["data"])
|
||||
|
||||
if stream_idx not in audio_streams:
|
||||
audio_streams[stream_idx] = []
|
||||
audio_streams[stream_idx].append(chunk_data)
|
||||
|
||||
now = time.monotonic()
|
||||
if now - last_chunk_status_ts >= 1.0:
|
||||
total_chunks = sum(len(chunks) for chunks in audio_streams.values())
|
||||
elapsed = int(now - start_ts)
|
||||
status_lines = ["Status: Receiving audio"]
|
||||
if title:
|
||||
status_lines.append(f"Title: {title}")
|
||||
status_lines.append(f"Chunks received: {total_chunks}")
|
||||
status_lines.append(f"Time elapsed: {elapsed}s")
|
||||
PromptServer.instance.send_progress_text("\n".join(status_lines), node_id)
|
||||
last_chunk_status_ts = now
|
||||
elif evt_type == "complete":
|
||||
break
|
||||
|
||||
if not audio_streams:
|
||||
raise Exception("Sonilo API returned no audio data.")
|
||||
|
||||
PromptServer.instance.send_progress_text("Status: Completed", node_id)
|
||||
selected_stream = 0 if 0 in audio_streams else min(audio_streams)
|
||||
return b"".join(audio_streams[selected_stream])
|
||||
|
||||
|
||||
async def _extract_error_message(resp: aiohttp.ClientResponse) -> str:
|
||||
"""Extract a human-readable error message from an HTTP error response."""
|
||||
try:
|
||||
error_body = await resp.json()
|
||||
detail = error_body.get("detail", {})
|
||||
if isinstance(detail, dict):
|
||||
return detail.get("message", str(detail))
|
||||
return str(detail)
|
||||
except Exception:
|
||||
return await resp.text()
|
||||
|
||||
|
||||
class SoniloExtension(ComfyExtension):
|
||||
@override
|
||||
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
|
||||
return [SoniloVideoToMusic, SoniloTextToMusic]
|
||||
|
||||
|
||||
async def comfy_entrypoint() -> SoniloExtension:
|
||||
return SoniloExtension()
|
||||
@ -401,7 +401,7 @@ class StabilityUpscaleConservativeNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr="""{"type":"usd","usd":0.25}""",
|
||||
expr="""{"type":"usd","usd":0.4}""",
|
||||
),
|
||||
)
|
||||
|
||||
@ -510,7 +510,7 @@ class StabilityUpscaleCreativeNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr="""{"type":"usd","usd":0.25}""",
|
||||
expr="""{"type":"usd","usd":0.6}""",
|
||||
),
|
||||
)
|
||||
|
||||
@ -593,7 +593,7 @@ class StabilityUpscaleFastNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr="""{"type":"usd","usd":0.01}""",
|
||||
expr="""{"type":"usd","usd":0.02}""",
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
@ -24,8 +24,9 @@ from comfy_api_nodes.util import (
|
||||
AVERAGE_DURATION_VIDEO_GEN = 32
|
||||
MODELS_MAP = {
|
||||
"veo-2.0-generate-001": "veo-2.0-generate-001",
|
||||
"veo-3.1-generate": "veo-3.1-generate-preview",
|
||||
"veo-3.1-fast-generate": "veo-3.1-fast-generate-preview",
|
||||
"veo-3.1-generate": "veo-3.1-generate-001",
|
||||
"veo-3.1-fast-generate": "veo-3.1-fast-generate-001",
|
||||
"veo-3.1-lite": "veo-3.1-lite-generate-001",
|
||||
"veo-3.0-generate-001": "veo-3.0-generate-001",
|
||||
"veo-3.0-fast-generate-001": "veo-3.0-fast-generate-001",
|
||||
}
|
||||
@ -247,17 +248,8 @@ class VeoVideoGenerationNode(IO.ComfyNode):
|
||||
raise Exception("Video generation completed but no video was returned")
|
||||
|
||||
|
||||
class Veo3VideoGenerationNode(VeoVideoGenerationNode):
|
||||
"""
|
||||
Generates videos from text prompts using Google's Veo 3 API.
|
||||
|
||||
Supported models:
|
||||
- veo-3.0-generate-001
|
||||
- veo-3.0-fast-generate-001
|
||||
|
||||
This node extends the base Veo node with Veo 3 specific features including
|
||||
audio generation and fixed 8-second duration.
|
||||
"""
|
||||
class Veo3VideoGenerationNode(IO.ComfyNode):
|
||||
"""Generates videos from text prompts using Google's Veo 3 API."""
|
||||
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
@ -279,6 +271,13 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
|
||||
default="16:9",
|
||||
tooltip="Aspect ratio of the output video",
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"resolution",
|
||||
options=["720p", "1080p", "4k"],
|
||||
default="720p",
|
||||
tooltip="Output video resolution. 4K is not available for veo-3.1-lite and veo-3.0 models.",
|
||||
optional=True,
|
||||
),
|
||||
IO.String.Input(
|
||||
"negative_prompt",
|
||||
multiline=True,
|
||||
@ -289,11 +288,11 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
|
||||
IO.Int.Input(
|
||||
"duration_seconds",
|
||||
default=8,
|
||||
min=8,
|
||||
min=4,
|
||||
max=8,
|
||||
step=1,
|
||||
step=2,
|
||||
display_mode=IO.NumberDisplay.number,
|
||||
tooltip="Duration of the output video in seconds (Veo 3 only supports 8 seconds)",
|
||||
tooltip="Duration of the output video in seconds",
|
||||
optional=True,
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
@ -332,10 +331,10 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
|
||||
options=[
|
||||
"veo-3.1-generate",
|
||||
"veo-3.1-fast-generate",
|
||||
"veo-3.1-lite",
|
||||
"veo-3.0-generate-001",
|
||||
"veo-3.0-fast-generate-001",
|
||||
],
|
||||
default="veo-3.0-generate-001",
|
||||
tooltip="Veo 3 model to use for video generation",
|
||||
optional=True,
|
||||
),
|
||||
@ -356,21 +355,111 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio", "resolution", "duration_seconds"]),
|
||||
expr="""
|
||||
(
|
||||
$m := widgets.model;
|
||||
$r := widgets.resolution;
|
||||
$a := widgets.generate_audio;
|
||||
($contains($m,"veo-3.0-fast-generate-001") or $contains($m,"veo-3.1-fast-generate"))
|
||||
? {"type":"usd","usd": ($a ? 1.2 : 0.8)}
|
||||
: ($contains($m,"veo-3.0-generate-001") or $contains($m,"veo-3.1-generate"))
|
||||
? {"type":"usd","usd": ($a ? 3.2 : 1.6)}
|
||||
: {"type":"range_usd","min_usd":0.8,"max_usd":3.2}
|
||||
$seconds := widgets.duration_seconds;
|
||||
$pps :=
|
||||
$contains($m, "lite")
|
||||
? ($r = "1080p" ? ($a ? 0.08 : 0.05) : ($a ? 0.05 : 0.03))
|
||||
: $contains($m, "3.1-fast")
|
||||
? ($r = "4k" ? ($a ? 0.30 : 0.25) : $r = "1080p" ? ($a ? 0.12 : 0.10) : ($a ? 0.10 : 0.08))
|
||||
: $contains($m, "3.1-generate")
|
||||
? ($r = "4k" ? ($a ? 0.60 : 0.40) : ($a ? 0.40 : 0.20))
|
||||
: $contains($m, "3.0-fast")
|
||||
? ($a ? 0.15 : 0.10)
|
||||
: ($a ? 0.40 : 0.20);
|
||||
{"type":"usd","usd": $pps * $seconds}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
prompt,
|
||||
aspect_ratio="16:9",
|
||||
resolution="720p",
|
||||
negative_prompt="",
|
||||
duration_seconds=8,
|
||||
enhance_prompt=True,
|
||||
person_generation="ALLOW",
|
||||
seed=0,
|
||||
image=None,
|
||||
model="veo-3.0-generate-001",
|
||||
generate_audio=False,
|
||||
):
|
||||
if "lite" in model and resolution == "4k":
|
||||
raise Exception("4K resolution is not supported by the veo-3.1-lite model.")
|
||||
|
||||
model = MODELS_MAP[model]
|
||||
|
||||
instances = [{"prompt": prompt}]
|
||||
if image is not None:
|
||||
image_base64 = tensor_to_base64_string(image)
|
||||
if image_base64:
|
||||
instances[0]["image"] = {"bytesBase64Encoded": image_base64, "mimeType": "image/png"}
|
||||
|
||||
parameters = {
|
||||
"aspectRatio": aspect_ratio,
|
||||
"personGeneration": person_generation,
|
||||
"durationSeconds": duration_seconds,
|
||||
"enhancePrompt": True,
|
||||
"generateAudio": generate_audio,
|
||||
}
|
||||
if negative_prompt:
|
||||
parameters["negativePrompt"] = negative_prompt
|
||||
if seed > 0:
|
||||
parameters["seed"] = seed
|
||||
if "veo-3.1" in model:
|
||||
parameters["resolution"] = resolution
|
||||
|
||||
initial_response = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/veo/{model}/generate", method="POST"),
|
||||
response_model=VeoGenVidResponse,
|
||||
data=VeoGenVidRequest(
|
||||
instances=instances,
|
||||
parameters=parameters,
|
||||
),
|
||||
)
|
||||
|
||||
poll_response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/veo/{model}/poll", method="POST"),
|
||||
response_model=VeoGenVidPollResponse,
|
||||
status_extractor=lambda r: "completed" if r.done else "pending",
|
||||
data=VeoGenVidPollRequest(operationName=initial_response.name),
|
||||
poll_interval=9.0,
|
||||
estimated_duration=AVERAGE_DURATION_VIDEO_GEN,
|
||||
)
|
||||
|
||||
if poll_response.error:
|
||||
raise Exception(f"Veo API error: {poll_response.error.message} (code: {poll_response.error.code})")
|
||||
|
||||
response = poll_response.response
|
||||
filtered_count = response.raiMediaFilteredCount
|
||||
if filtered_count:
|
||||
reasons = response.raiMediaFilteredReasons or []
|
||||
reason_part = f": {reasons[0]}" if reasons else ""
|
||||
raise Exception(
|
||||
f"Content blocked by Google's Responsible AI filters{reason_part} "
|
||||
f"({filtered_count} video{'s' if filtered_count != 1 else ''} filtered)."
|
||||
)
|
||||
|
||||
if response.videos:
|
||||
video = response.videos[0]
|
||||
if video.bytesBase64Encoded:
|
||||
return IO.NodeOutput(InputImpl.VideoFromFile(BytesIO(base64.b64decode(video.bytesBase64Encoded))))
|
||||
if video.gcsUri:
|
||||
return IO.NodeOutput(await download_url_to_video_output(video.gcsUri))
|
||||
raise Exception("Video returned but no data or URL was provided")
|
||||
raise Exception("Video generation completed but no video was returned")
|
||||
|
||||
|
||||
class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
|
||||
@ -394,7 +483,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
default="",
|
||||
tooltip="Negative text prompt to guide what to avoid in the video",
|
||||
),
|
||||
IO.Combo.Input("resolution", options=["720p", "1080p"]),
|
||||
IO.Combo.Input("resolution", options=["720p", "1080p", "4k"]),
|
||||
IO.Combo.Input(
|
||||
"aspect_ratio",
|
||||
options=["16:9", "9:16"],
|
||||
@ -424,8 +513,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
IO.Image.Input("last_frame", tooltip="End frame"),
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
options=["veo-3.1-generate", "veo-3.1-fast-generate"],
|
||||
default="veo-3.1-fast-generate",
|
||||
options=["veo-3.1-generate", "veo-3.1-fast-generate", "veo-3.1-lite"],
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"generate_audio",
|
||||
@ -443,26 +531,20 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio", "duration"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "generate_audio", "duration", "resolution"]),
|
||||
expr="""
|
||||
(
|
||||
$prices := {
|
||||
"veo-3.1-fast-generate": { "audio": 0.15, "no_audio": 0.10 },
|
||||
"veo-3.1-generate": { "audio": 0.40, "no_audio": 0.20 }
|
||||
};
|
||||
$m := widgets.model;
|
||||
$ga := (widgets.generate_audio = "true");
|
||||
$r := widgets.resolution;
|
||||
$ga := widgets.generate_audio;
|
||||
$seconds := widgets.duration;
|
||||
$modelKey :=
|
||||
$contains($m, "veo-3.1-fast-generate") ? "veo-3.1-fast-generate" :
|
||||
$contains($m, "veo-3.1-generate") ? "veo-3.1-generate" :
|
||||
"";
|
||||
$audioKey := $ga ? "audio" : "no_audio";
|
||||
$modelPrices := $lookup($prices, $modelKey);
|
||||
$pps := $lookup($modelPrices, $audioKey);
|
||||
($pps != null)
|
||||
? {"type":"usd","usd": $pps * $seconds}
|
||||
: {"type":"range_usd","min_usd": 0.4, "max_usd": 3.2}
|
||||
$pps :=
|
||||
$contains($m, "lite")
|
||||
? ($r = "1080p" ? ($ga ? 0.08 : 0.05) : ($ga ? 0.05 : 0.03))
|
||||
: $contains($m, "fast")
|
||||
? ($r = "4k" ? ($ga ? 0.30 : 0.25) : $r = "1080p" ? ($ga ? 0.12 : 0.10) : ($ga ? 0.10 : 0.08))
|
||||
: ($r = "4k" ? ($ga ? 0.60 : 0.40) : ($ga ? 0.40 : 0.20));
|
||||
{"type":"usd","usd": $pps * $seconds}
|
||||
)
|
||||
""",
|
||||
),
|
||||
@ -482,6 +564,9 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
model: str,
|
||||
generate_audio: bool,
|
||||
):
|
||||
if "lite" in model and resolution == "4k":
|
||||
raise Exception("4K resolution is not supported by the veo-3.1-lite model.")
|
||||
|
||||
model = MODELS_MAP[model]
|
||||
initial_response = await sync_op(
|
||||
cls,
|
||||
@ -519,7 +604,7 @@ class Veo3FirstLastFrameNode(IO.ComfyNode):
|
||||
data=VeoGenVidPollRequest(
|
||||
operationName=initial_response.name,
|
||||
),
|
||||
poll_interval=5.0,
|
||||
poll_interval=9.0,
|
||||
estimated_duration=AVERAGE_DURATION_VIDEO_GEN,
|
||||
)
|
||||
|
||||
|
||||
@ -19,6 +19,7 @@ from .conversions import (
|
||||
image_tensor_pair_to_batch,
|
||||
pil_to_bytesio,
|
||||
resize_mask_to_image,
|
||||
resize_video_to_pixel_budget,
|
||||
tensor_to_base64_string,
|
||||
tensor_to_bytesio,
|
||||
tensor_to_pil,
|
||||
@ -90,6 +91,7 @@ __all__ = [
|
||||
"image_tensor_pair_to_batch",
|
||||
"pil_to_bytesio",
|
||||
"resize_mask_to_image",
|
||||
"resize_video_to_pixel_budget",
|
||||
"tensor_to_base64_string",
|
||||
"tensor_to_bytesio",
|
||||
"tensor_to_pil",
|
||||
|
||||
@ -129,22 +129,38 @@ def pil_to_bytesio(img: Image.Image, mime_type: str = "image/png") -> BytesIO:
|
||||
return img_byte_arr
|
||||
|
||||
|
||||
def _compute_downscale_dims(src_w: int, src_h: int, total_pixels: int) -> tuple[int, int] | None:
|
||||
"""Return downscaled (w, h) with even dims fitting ``total_pixels``, or None if already fits.
|
||||
|
||||
Source aspect ratio is preserved; output may drift by a fraction of a percent because both dimensions
|
||||
are rounded down to even values (many codecs require divisible-by-2).
|
||||
"""
|
||||
pixels = src_w * src_h
|
||||
if pixels <= total_pixels:
|
||||
return None
|
||||
scale = math.sqrt(total_pixels / pixels)
|
||||
new_w = max(2, int(src_w * scale))
|
||||
new_h = max(2, int(src_h * scale))
|
||||
new_w -= new_w % 2
|
||||
new_h -= new_h % 2
|
||||
return new_w, new_h
|
||||
|
||||
|
||||
def downscale_image_tensor(image: torch.Tensor, total_pixels: int = 1536 * 1024) -> torch.Tensor:
|
||||
"""Downscale input image tensor to roughly the specified total pixels."""
|
||||
"""Downscale input image tensor to roughly the specified total pixels.
|
||||
|
||||
Output dimensions are rounded down to even values so that the result is guaranteed to fit within ``total_pixels``
|
||||
and is compatible with codecs that require even dimensions (e.g. yuv420p).
|
||||
"""
|
||||
samples = image.movedim(-1, 1)
|
||||
total = int(total_pixels)
|
||||
scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
|
||||
if scale_by >= 1:
|
||||
dims = _compute_downscale_dims(samples.shape[3], samples.shape[2], int(total_pixels))
|
||||
if dims is None:
|
||||
return image
|
||||
width = round(samples.shape[3] * scale_by)
|
||||
height = round(samples.shape[2] * scale_by)
|
||||
|
||||
s = common_upscale(samples, width, height, "lanczos", "disabled")
|
||||
s = s.movedim(1, -1)
|
||||
return s
|
||||
new_w, new_h = dims
|
||||
return common_upscale(samples, new_w, new_h, "lanczos", "disabled").movedim(1, -1)
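# Usage sketch (illustrative): ComfyUI image tensors are [batch, height, width, channels],
# so a 3000x4000 RGB frame is shrunk to fit a 2048*2048 pixel budget, while a frame that
# already fits is returned unchanged.
#   img = torch.rand(1, 3000, 4000, 3)
#   small = downscale_image_tensor(img, total_pixels=2048 * 2048)   # ~[1, 1772, 2364, 3]
#   same = downscale_image_tensor(torch.rand(1, 512, 512, 3))       # returned as-is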
|
||||
|
||||
|
||||
def downscale_image_tensor_by_max_side(image: torch.Tensor, *, max_side: int) -> torch.Tensor:
|
||||
"""Downscale input image tensor so the largest dimension is at most max_side pixels."""
|
||||
samples = image.movedim(-1, 1)
|
||||
height, width = samples.shape[2], samples.shape[3]
|
||||
@ -399,6 +415,72 @@ def trim_video(video: Input.Video, duration_sec: float) -> Input.Video:
|
||||
raise RuntimeError(f"Failed to trim video: {str(e)}") from e
|
||||
|
||||
|
||||
def resize_video_to_pixel_budget(video: Input.Video, total_pixels: int) -> Input.Video:
|
||||
"""Downscale a video to fit within ``total_pixels`` (w * h), preserving aspect ratio.
|
||||
|
||||
Returns the original video object untouched when it already fits. Preserves frame rate, duration, and audio.
|
||||
Aspect ratio is preserved up to a fraction of a percent (even-dim rounding).
|
||||
"""
|
||||
src_w, src_h = video.get_dimensions()
|
||||
scale_dims = _compute_downscale_dims(src_w, src_h, total_pixels)
|
||||
if scale_dims is None:
|
||||
return video
|
||||
return _apply_video_scale(video, scale_dims)
|
||||
|
||||
|
||||
def _apply_video_scale(video: Input.Video, scale_dims: tuple[int, int]) -> Input.Video:
|
||||
"""Re-encode ``video`` scaled to ``scale_dims`` with a single decode/encode pass."""
|
||||
out_w, out_h = scale_dims
|
||||
output_buffer = BytesIO()
|
||||
input_container = None
|
||||
output_container = None
|
||||
|
||||
try:
|
||||
input_source = video.get_stream_source()
|
||||
input_container = av.open(input_source, mode="r")
|
||||
output_container = av.open(output_buffer, mode="w", format="mp4")
|
||||
|
||||
video_stream = output_container.add_stream("h264", rate=video.get_frame_rate())
|
||||
video_stream.width = out_w
|
||||
video_stream.height = out_h
|
||||
video_stream.pix_fmt = "yuv420p"
|
||||
|
||||
audio_stream = None
|
||||
for stream in input_container.streams:
|
||||
if isinstance(stream, av.AudioStream):
|
||||
audio_stream = output_container.add_stream("aac", rate=stream.sample_rate)
|
||||
audio_stream.sample_rate = stream.sample_rate
|
||||
audio_stream.layout = stream.layout
|
||||
break
|
||||
|
||||
for frame in input_container.decode(video=0):
|
||||
frame = frame.reformat(width=out_w, height=out_h, format="yuv420p")
|
||||
for packet in video_stream.encode(frame):
|
||||
output_container.mux(packet)
|
||||
for packet in video_stream.encode():
|
||||
output_container.mux(packet)
|
||||
|
||||
if audio_stream is not None:
|
||||
input_container.seek(0)
|
||||
for audio_frame in input_container.decode(audio=0):
|
||||
for packet in audio_stream.encode(audio_frame):
|
||||
output_container.mux(packet)
|
||||
for packet in audio_stream.encode():
|
||||
output_container.mux(packet)
|
||||
|
||||
output_container.close()
|
||||
input_container.close()
|
||||
output_buffer.seek(0)
|
||||
return InputImpl.VideoFromFile(output_buffer)
|
||||
|
||||
except Exception as e:
|
||||
if input_container is not None:
|
||||
input_container.close()
|
||||
if output_container is not None:
|
||||
output_container.close()
|
||||
raise RuntimeError(f"Failed to resize video: {str(e)}") from e
|
||||
|
||||
|
||||
def _f32_pcm(wav: torch.Tensor) -> torch.Tensor:
|
||||
"""Convert audio to float 32 bits PCM format. Copy-paste from nodes_audio.py file."""
|
||||
if wav.dtype.is_floating_point:
|
||||
|
||||
@ -3,136 +3,136 @@ from typing_extensions import override
|
||||
|
||||
import comfy.model_management
|
||||
import node_helpers
|
||||
from comfy_api.latest import ComfyExtension, io
|
||||
from comfy_api.latest import ComfyExtension, IO
|
||||
|
||||
|
||||
class TextEncodeAceStepAudio(io.ComfyNode):
|
||||
class TextEncodeAceStepAudio(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
return IO.Schema(
|
||||
node_id="TextEncodeAceStepAudio",
|
||||
category="conditioning",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
io.String.Input("lyrics", multiline=True, dynamic_prompts=True),
|
||||
io.Float.Input("lyrics_strength", default=1.0, min=0.0, max=10.0, step=0.01),
|
||||
IO.Clip.Input("clip"),
|
||||
IO.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
IO.String.Input("lyrics", multiline=True, dynamic_prompts=True),
|
||||
IO.Float.Input("lyrics_strength", default=1.0, min=0.0, max=10.0, step=0.01),
|
||||
],
|
||||
outputs=[io.Conditioning.Output()],
|
||||
outputs=[IO.Conditioning.Output()],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, clip, tags, lyrics, lyrics_strength) -> io.NodeOutput:
|
||||
def execute(cls, clip, tags, lyrics, lyrics_strength) -> IO.NodeOutput:
|
||||
tokens = clip.tokenize(tags, lyrics=lyrics)
|
||||
conditioning = clip.encode_from_tokens_scheduled(tokens)
|
||||
conditioning = node_helpers.conditioning_set_values(conditioning, {"lyrics_strength": lyrics_strength})
|
||||
return io.NodeOutput(conditioning)
|
||||
return IO.NodeOutput(conditioning)
|
||||
|
||||
class TextEncodeAceStepAudio15(io.ComfyNode):
|
||||
class TextEncodeAceStepAudio15(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
return IO.Schema(
|
||||
node_id="TextEncodeAceStepAudio1.5",
|
||||
category="conditioning",
|
||||
inputs=[
|
||||
io.Clip.Input("clip"),
|
||||
io.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
io.String.Input("lyrics", multiline=True, dynamic_prompts=True),
|
||||
io.Int.Input("seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True),
|
||||
io.Int.Input("bpm", default=120, min=10, max=300),
|
||||
io.Float.Input("duration", default=120.0, min=0.0, max=2000.0, step=0.1),
|
||||
io.Combo.Input("timesignature", options=['2', '3', '4', '6']),
|
||||
io.Combo.Input("language", options=["en", "ja", "zh", "es", "de", "fr", "pt", "ru", "it", "nl", "pl", "tr", "vi", "cs", "fa", "id", "ko", "uk", "hu", "ar", "sv", "ro", "el"]),
|
||||
io.Combo.Input("keyscale", options=[f"{root} {quality}" for quality in ["major", "minor"] for root in ["C", "C#", "Db", "D", "D#", "Eb", "E", "F", "F#", "Gb", "G", "G#", "Ab", "A", "A#", "Bb", "B"]]),
|
||||
io.Boolean.Input("generate_audio_codes", default=True, tooltip="Enable the LLM that generates audio codes. This can be slow but will increase the quality of the generated audio. Turn this off if you are giving the model an audio reference.", advanced=True),
|
||||
io.Float.Input("cfg_scale", default=2.0, min=0.0, max=100.0, step=0.1, advanced=True),
|
||||
io.Float.Input("temperature", default=0.85, min=0.0, max=2.0, step=0.01, advanced=True),
|
||||
io.Float.Input("top_p", default=0.9, min=0.0, max=2000.0, step=0.01, advanced=True),
|
||||
io.Int.Input("top_k", default=0, min=0, max=100, advanced=True),
|
||||
io.Float.Input("min_p", default=0.000, min=0.0, max=1.0, step=0.001, advanced=True),
|
||||
IO.Clip.Input("clip"),
|
||||
IO.String.Input("tags", multiline=True, dynamic_prompts=True),
|
||||
IO.String.Input("lyrics", multiline=True, dynamic_prompts=True),
|
||||
IO.Int.Input("seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True),
|
||||
IO.Int.Input("bpm", default=120, min=10, max=300),
|
||||
IO.Float.Input("duration", default=120.0, min=0.0, max=2000.0, step=0.1),
|
||||
IO.Combo.Input("timesignature", options=['2', '3', '4', '6']),
|
||||
IO.Combo.Input("language", options=["en", "ja", "zh", "es", "de", "fr", "pt", "ru", "it", "nl", "pl", "tr", "vi", "cs", "fa", "id", "ko", "uk", "hu", "ar", "sv", "ro", "el"]),
|
||||
IO.Combo.Input("keyscale", options=[f"{root} {quality}" for quality in ["major", "minor"] for root in ["C", "C#", "Db", "D", "D#", "Eb", "E", "F", "F#", "Gb", "G", "G#", "Ab", "A", "A#", "Bb", "B"]]),
|
||||
IO.Boolean.Input("generate_audio_codes", default=True, tooltip="Enable the LLM that generates audio codes. This can be slow but will increase the quality of the generated audio. Turn this off if you are giving the model an audio reference.", advanced=True),
|
||||
IO.Float.Input("cfg_scale", default=2.0, min=0.0, max=100.0, step=0.1, advanced=True),
|
||||
IO.Float.Input("temperature", default=0.85, min=0.0, max=2.0, step=0.01, advanced=True),
|
||||
IO.Float.Input("top_p", default=0.9, min=0.0, max=2000.0, step=0.01, advanced=True),
|
||||
IO.Int.Input("top_k", default=0, min=0, max=100, advanced=True),
|
||||
IO.Float.Input("min_p", default=0.000, min=0.0, max=1.0, step=0.001, advanced=True),
|
||||
],
|
||||
outputs=[io.Conditioning.Output()],
|
||||
outputs=[IO.Conditioning.Output()],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, clip, tags, lyrics, seed, bpm, duration, timesignature, language, keyscale, generate_audio_codes, cfg_scale, temperature, top_p, top_k, min_p) -> io.NodeOutput:
|
||||
def execute(cls, clip, tags, lyrics, seed, bpm, duration, timesignature, language, keyscale, generate_audio_codes, cfg_scale, temperature, top_p, top_k, min_p) -> IO.NodeOutput:
|
||||
tokens = clip.tokenize(tags, lyrics=lyrics, bpm=bpm, duration=duration, timesignature=int(timesignature), language=language, keyscale=keyscale, seed=seed, generate_audio_codes=generate_audio_codes, cfg_scale=cfg_scale, temperature=temperature, top_p=top_p, top_k=top_k, min_p=min_p)
|
||||
conditioning = clip.encode_from_tokens_scheduled(tokens)
|
||||
return io.NodeOutput(conditioning)
|
||||
return IO.NodeOutput(conditioning)
|
||||
|
||||
|
||||
class EmptyAceStepLatentAudio(io.ComfyNode):
|
||||
class EmptyAceStepLatentAudio(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
return IO.Schema(
|
||||
node_id="EmptyAceStepLatentAudio",
|
||||
display_name="Empty Ace Step 1.0 Latent Audio",
|
||||
category="latent/audio",
|
||||
inputs=[
|
||||
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
|
||||
io.Int.Input(
|
||||
IO.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
|
||||
IO.Int.Input(
|
||||
"batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."
|
||||
),
|
||||
],
|
||||
outputs=[io.Latent.Output()],
|
||||
outputs=[IO.Latent.Output()],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, seconds, batch_size) -> io.NodeOutput:
|
||||
def execute(cls, seconds, batch_size) -> IO.NodeOutput:
|
||||
length = int(seconds * 44100 / 512 / 8)
|
||||
latent = torch.zeros([batch_size, 8, 16, length], device=comfy.model_management.intermediate_device(), dtype=comfy.model_management.intermediate_dtype())
|
||||
return io.NodeOutput({"samples": latent, "type": "audio"})
|
||||
return IO.NodeOutput({"samples": latent, "type": "audio"})
|
||||
|
||||
|
||||
class EmptyAceStep15LatentAudio(io.ComfyNode):
|
||||
class EmptyAceStep15LatentAudio(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return io.Schema(
|
||||
return IO.Schema(
|
||||
node_id="EmptyAceStep1.5LatentAudio",
|
||||
display_name="Empty Ace Step 1.5 Latent Audio",
|
||||
category="latent/audio",
|
||||
inputs=[
|
||||
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01),
|
||||
io.Int.Input(
|
||||
IO.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.01),
|
||||
IO.Int.Input(
|
||||
"batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."
|
||||
),
|
||||
],
|
||||
outputs=[io.Latent.Output()],
|
||||
outputs=[IO.Latent.Output()],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, seconds, batch_size) -> io.NodeOutput:
|
||||
def execute(cls, seconds, batch_size) -> IO.NodeOutput:
|
||||
length = round((seconds * 48000 / 1920))
|
||||
latent = torch.zeros([batch_size, 64, length], device=comfy.model_management.intermediate_device(), dtype=comfy.model_management.intermediate_dtype())
|
||||
return io.NodeOutput({"samples": latent, "type": "audio"})
|
||||
return IO.NodeOutput({"samples": latent, "type": "audio"})
|
||||
|
||||
class ReferenceAudio(io.ComfyNode):
class ReferenceAudio(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
        return IO.Schema(
            node_id="ReferenceTimbreAudio",
            display_name="Reference Audio",
            category="advanced/conditioning/audio",
            is_experimental=True,
            description="This node sets the reference audio for ace step 1.5",
            inputs=[
                io.Conditioning.Input("conditioning"),
                io.Latent.Input("latent", optional=True),
                IO.Conditioning.Input("conditioning"),
                IO.Latent.Input("latent", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(),
                IO.Conditioning.Output(),
            ]
        )

    @classmethod
    def execute(cls, conditioning, latent=None) -> io.NodeOutput:
    def execute(cls, conditioning, latent=None) -> IO.NodeOutput:
        if latent is not None:
            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_audio_timbre_latents": [latent["samples"]]}, append=True)
        return io.NodeOutput(conditioning)
        return IO.NodeOutput(conditioning)


class AceExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            TextEncodeAceStepAudio,
            EmptyAceStepLatentAudio,

@ -104,7 +104,7 @@ def vae_decode_audio(vae, samples, tile=None, overlap=None):
    std = torch.std(audio, dim=[1, 2], keepdim=True) * 5.0
    std[std < 1.0] = 1.0
    audio /= std
    vae_sample_rate = getattr(vae, "audio_sample_rate", 44100)
    vae_sample_rate = getattr(vae, "audio_sample_rate_output", getattr(vae, "audio_sample_rate", 44100))
    return {"waveform": audio, "sample_rate": vae_sample_rate if "sample_rate" not in samples else samples["sample_rate"]}

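Aside (not part of the diff): the context lines in this hunk scale the decoded waveform by five times its per-batch standard deviation, with the divisor clamped to at least 1.0 so quiet audio is never amplified, only loud audio is attenuated; the change itself only makes the reported sample rate prefer an "audio_sample_rate_output" attribute and fall back to the old "audio_sample_rate" (and finally 44100). A standalone sketch of the clamp-then-divide normalization, with an illustrative helper name:

# Sketch only: same normalization idea as the context lines above.
import torch

def soft_normalize(audio: torch.Tensor) -> torch.Tensor:
    # audio: [batch, channels, samples]
    std = torch.std(audio, dim=[1, 2], keepdim=True) * 5.0
    std = std.clamp_min(1.0)  # never boost quiet audio
    return audio / std
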
@ -34,7 +34,7 @@ class Load3D(IO.ComfyNode):
            essentials_category="Basics",
            is_experimental=True,
            inputs=[
                IO.Combo.Input("model_file", options=["none"] + sorted(files), upload=IO.UploadType.model),
                IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model),
                IO.Load3D.Input("image"),
                IO.Int.Input("width", default=1024, min=1, max=4096, step=1),
                IO.Int.Input("height", default=1024, min=1, max=4096, step=1),
@ -68,12 +68,8 @@ class Load3D(IO.ComfyNode):

        video = InputImpl.VideoFromFile(recording_video_path)

        file_3d = None
        mesh_path = ""
        if model_file and model_file != "none":
            file_3d = Types.File3D(folder_paths.get_annotated_filepath(model_file))
            mesh_path = model_file
        return IO.NodeOutput(output_image, output_mask, mesh_path, normal_image, image['camera_info'], video, file_3d)
        file_3d = Types.File3D(folder_paths.get_annotated_filepath(model_file))
        return IO.NodeOutput(output_image, output_mask, model_file, normal_image, image['camera_info'], video, file_3d)

    process = execute # TODO: remove

@ -3,9 +3,8 @@ import comfy.utils
import comfy.model_management
import torch

from comfy.ldm.lightricks.vae.audio_vae import AudioVAE
from comfy_api.latest import ComfyExtension, io

from comfy_extras.nodes_audio import VAEEncodeAudio

class LTXVAudioVAELoader(io.ComfyNode):
    @classmethod
@ -28,10 +27,14 @@ class LTXVAudioVAELoader(io.ComfyNode):
    def execute(cls, ckpt_name: str) -> io.NodeOutput:
        ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
        sd, metadata = comfy.utils.load_torch_file(ckpt_path, return_metadata=True)
        return io.NodeOutput(AudioVAE(sd, metadata))
        sd = comfy.utils.state_dict_prefix_replace(sd, {"audio_vae.": "autoencoder.", "vocoder.": "vocoder."}, filter_keys=True)
        vae = comfy.sd.VAE(sd=sd, metadata=metadata)
        vae.throw_exception_if_invalid()

        return io.NodeOutput(vae)


class LTXVAudioVAEEncode(io.ComfyNode):
class LTXVAudioVAEEncode(VAEEncodeAudio):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
@ -50,15 +53,8 @@ class LTXVAudioVAEEncode(io.ComfyNode):
        )

    @classmethod
    def execute(cls, audio, audio_vae: AudioVAE) -> io.NodeOutput:
        audio_latents = audio_vae.encode(audio)
        return io.NodeOutput(
            {
                "samples": audio_latents,
                "sample_rate": int(audio_vae.sample_rate),
                "type": "audio",
            }
        )
    def execute(cls, audio, audio_vae) -> io.NodeOutput:
        return super().execute(audio_vae, audio)


class LTXVAudioVAEDecode(io.ComfyNode):
@ -80,12 +76,12 @@ class LTXVAudioVAEDecode(io.ComfyNode):
        )

    @classmethod
    def execute(cls, samples, audio_vae: AudioVAE) -> io.NodeOutput:
    def execute(cls, samples, audio_vae) -> io.NodeOutput:
        audio_latent = samples["samples"]
        if audio_latent.is_nested:
            audio_latent = audio_latent.unbind()[-1]
        audio = audio_vae.decode(audio_latent).to(audio_latent.device)
        output_audio_sample_rate = audio_vae.output_sample_rate
        audio = audio_vae.decode(audio_latent).movedim(-1, 1).to(audio_latent.device)
        output_audio_sample_rate = audio_vae.first_stage_model.output_sample_rate
        return io.NodeOutput(
            {
                "waveform": audio,
@ -143,17 +139,17 @@ class LTXVEmptyLatentAudio(io.ComfyNode):
        frames_number: int,
        frame_rate: int,
        batch_size: int,
        audio_vae: AudioVAE,
        audio_vae,
    ) -> io.NodeOutput:
        """Generate empty audio latents matching the reference pipeline structure."""

        assert audio_vae is not None, "Audio VAE model is required"

        z_channels = audio_vae.latent_channels
        audio_freq = audio_vae.latent_frequency_bins
        sampling_rate = int(audio_vae.sample_rate)
        audio_freq = audio_vae.first_stage_model.latent_frequency_bins
        sampling_rate = int(audio_vae.first_stage_model.sample_rate)

        num_audio_latents = audio_vae.num_of_latents_from_frames(frames_number, frame_rate)
        num_audio_latents = audio_vae.first_stage_model.num_of_latents_from_frames(frames_number, frame_rate)

        audio_latents = torch.zeros(
            (batch_size, z_channels, num_audio_latents, audio_freq),

@ -7,7 +7,10 @@ import comfy.model_management
import comfy.ldm.common_dit
import comfy.latent_formats
import comfy.ldm.lumina.controlnet
import comfy.ldm.supir.supir_modules
from comfy.ldm.wan.model_multitalk import WanMultiTalkAttentionBlock, MultiTalkAudioProjModel
from comfy_api.latest import io
from comfy.ldm.supir.supir_patch import SUPIRPatch


class BlockWiseControlBlock(torch.nn.Module):
@ -266,6 +269,27 @@ class ModelPatchLoader:
                out_dim=sd["audio_proj.norm.weight"].shape[0],
                device=comfy.model_management.unet_offload_device(),
                operations=comfy.ops.manual_cast)
        elif 'model.control_model.input_hint_block.0.weight' in sd or 'control_model.input_hint_block.0.weight' in sd:
            prefix_replace = {}
            if 'model.control_model.input_hint_block.0.weight' in sd:
                prefix_replace["model.control_model."] = "control_model."
                prefix_replace["model.diffusion_model.project_modules."] = "project_modules."
            else:
                prefix_replace["control_model."] = "control_model."
                prefix_replace["project_modules."] = "project_modules."

            # Extract denoise_encoder weights before filter_keys discards them
            de_prefix = "first_stage_model.denoise_encoder."
            denoise_encoder_sd = {}
            for k in list(sd.keys()):
                if k.startswith(de_prefix):
                    denoise_encoder_sd[k[len(de_prefix):]] = sd.pop(k)

            sd = comfy.utils.state_dict_prefix_replace(sd, prefix_replace, filter_keys=True)
            sd.pop("control_model.mask_LQ", None)
            model = comfy.ldm.supir.supir_modules.SUPIR(device=comfy.model_management.unet_offload_device(), dtype=dtype, operations=comfy.ops.manual_cast)
            if denoise_encoder_sd:
                model.denoise_encoder_sd = denoise_encoder_sd

        model_patcher = comfy.model_patcher.CoreModelPatcher(model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device())
        model.load_state_dict(sd, assign=model_patcher.is_dynamic())
@ -565,9 +589,89 @@ class MultiTalkModelPatch(torch.nn.Module):
        )


class SUPIRApply(io.ComfyNode):
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="SUPIRApply",
            category="model_patches/supir",
            is_experimental=True,
            inputs=[
                io.Model.Input("model"),
                io.ModelPatch.Input("model_patch"),
                io.Vae.Input("vae"),
                io.Image.Input("image"),
                io.Float.Input("strength_start", default=1.0, min=0.0, max=10.0, step=0.01,
                               tooltip="Control strength at the start of sampling (high sigma)."),
                io.Float.Input("strength_end", default=1.0, min=0.0, max=10.0, step=0.01,
                               tooltip="Control strength at the end of sampling (low sigma). Linearly interpolated from start."),
                io.Float.Input("restore_cfg", default=4.0, min=0.0, max=20.0, step=0.1, advanced=True,
                               tooltip="Pulls denoised output toward the input latent. Higher = stronger fidelity to input. 0 to disable."),
                io.Float.Input("restore_cfg_s_tmin", default=0.05, min=0.0, max=1.0, step=0.01, advanced=True,
                               tooltip="Sigma threshold below which restore_cfg is disabled."),
            ],
            outputs=[io.Model.Output()],
        )

    @classmethod
    def _encode_with_denoise_encoder(cls, vae, model_patch, image):
        """Encode using denoise_encoder weights from SUPIR checkpoint if available."""
        denoise_sd = getattr(model_patch.model, 'denoise_encoder_sd', None)
        if not denoise_sd:
            return vae.encode(image)

        # Clone VAE patcher, apply denoise_encoder weights to clone, encode
        orig_patcher = vae.patcher
        vae.patcher = orig_patcher.clone()
        patches = {f"encoder.{k}": (v,) for k, v in denoise_sd.items()}
        vae.patcher.add_patches(patches, strength_patch=1.0, strength_model=0.0)
        try:
            return vae.encode(image)
        finally:
            vae.patcher = orig_patcher

    @classmethod
    def execute(cls, *, model: io.Model.Type, model_patch: io.ModelPatch.Type, vae: io.Vae.Type, image: io.Image.Type,
                strength_start: float, strength_end: float, restore_cfg: float, restore_cfg_s_tmin: float) -> io.NodeOutput:
        model_patched = model.clone()
        hint_latent = model.get_model_object("latent_format").process_in(
            cls._encode_with_denoise_encoder(vae, model_patch, image[:, :, :, :3]))
        patch = SUPIRPatch(model_patch, model_patch.model.project_modules, hint_latent, strength_start, strength_end)
        patch.register(model_patched)

        if restore_cfg > 0.0:
            # Round-trip to match original pipeline: decode hint, re-encode with regular VAE
            latent_format = model.get_model_object("latent_format")
            decoded = vae.decode(latent_format.process_out(hint_latent))
            x_center = latent_format.process_in(vae.encode(decoded[:, :, :, :3]))
            sigma_max = 14.6146

            def restore_cfg_function(args):
                denoised = args["denoised"]
                sigma = args["sigma"]
                if sigma.dim() > 0:
                    s = sigma[0].item()
                else:
                    s = sigma.item()
                if s > restore_cfg_s_tmin:
                    ref = x_center.to(device=denoised.device, dtype=denoised.dtype)
                    b = denoised.shape[0]
                    if ref.shape[0] != b:
                        ref = ref.expand(b, -1, -1, -1) if ref.shape[0] == 1 else ref.repeat((b + ref.shape[0] - 1) // ref.shape[0], 1, 1, 1)[:b]
                    sigma_val = sigma.view(-1, 1, 1, 1) if sigma.dim() > 0 else sigma
                    d_center = denoised - ref
                    denoised = denoised - d_center * ((sigma_val / sigma_max) ** restore_cfg)
                return denoised

            model_patched.set_model_sampler_post_cfg_function(restore_cfg_function)

        return io.NodeOutput(model_patched)

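Review note (not part of the diff): restore_cfg pulls each denoised estimate toward the re-encoded input latent by the factor (sigma / sigma_max) ** restore_cfg, so fidelity to the input dominates early in sampling and the correction vanishes as sigma falls toward restore_cfg_s_tmin. A quick numeric illustration with the default restore_cfg = 4.0:

# Sketch only: shows how the pull toward the input latent decays over sampling.
sigma_max, restore_cfg = 14.6146, 4.0
for sigma in (14.0, 7.0, 1.0, 0.1):
    weight = (sigma / sigma_max) ** restore_cfg
    print(f"sigma={sigma:5.2f}  blend toward input = {weight:.2e}")
# sigma=14.00 -> 8.42e-01, sigma=7.00 -> 5.26e-02, sigma=1.00 -> 2.19e-05, sigma=0.10 -> 2.19e-09
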
NODE_CLASS_MAPPINGS = {
    "ModelPatchLoader": ModelPatchLoader,
    "QwenImageDiffsynthControlnet": QwenImageDiffsynthControlnet,
    "ZImageFunControlnet": ZImageFunControlnet,
    "USOStyleReference": USOStyleReference,
    "SUPIRApply": SUPIRApply,
}

@ -6,6 +6,7 @@ from PIL import Image
import math
from enum import Enum
from typing import TypedDict, Literal
import kornia

import comfy.utils
import comfy.model_management
@ -660,6 +661,228 @@ class BatchImagesMasksLatentsNode(io.ComfyNode):
        return io.NodeOutput(batched)


class ColorTransfer(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ColorTransfer",
            category="image/postprocessing",
            description="Match the colors of one image to another using various algorithms.",
            search_aliases=["color match", "color grading", "color correction", "match colors", "color transform", "mkl", "reinhard", "histogram"],
            inputs=[
                io.Image.Input("image_target", tooltip="Image(s) to apply the color transform to."),
                io.Image.Input("image_ref", optional=True, tooltip="Reference image(s) to match colors to. If not provided, processing is skipped"),
                io.Combo.Input("method", options=['reinhard_lab', 'mkl_lab', 'histogram'],),
                io.DynamicCombo.Input("source_stats",
                    tooltip="per_frame: each frame matched to image_ref individually. uniform: pool stats across all source frames as baseline, match to image_ref. target_frame: use one chosen frame as the baseline for the transform to image_ref, applied uniformly to all frames (preserves relative differences)",
                    options=[
                        io.DynamicCombo.Option("per_frame", []),
                        io.DynamicCombo.Option("uniform", []),
                        io.DynamicCombo.Option("target_frame", [
                            io.Int.Input("target_index", default=0, min=0, max=10000,
                                tooltip="Frame index used as the source baseline for computing the transform to image_ref"),
                        ]),
                ]),
                io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[
                io.Image.Output(display_name="image"),
            ],
        )

    @staticmethod
    def _to_lab(images, i, device):
        return kornia.color.rgb_to_lab(
            images[i:i+1].to(device, dtype=torch.float32).permute(0, 3, 1, 2))

    @staticmethod
    def _pool_stats(images, device, is_reinhard, eps):
        """Two-pass pooled mean + std/cov across all frames."""
        N, C = images.shape[0], images.shape[3]
        HW = images.shape[1] * images.shape[2]
        mean = torch.zeros(C, 1, device=device, dtype=torch.float32)
        for i in range(N):
            mean += ColorTransfer._to_lab(images, i, device).view(C, -1).mean(dim=-1, keepdim=True)
        mean /= N
        acc = torch.zeros(C, 1 if is_reinhard else C, device=device, dtype=torch.float32)
        for i in range(N):
            centered = ColorTransfer._to_lab(images, i, device).view(C, -1) - mean
            if is_reinhard:
                acc += (centered * centered).mean(dim=-1, keepdim=True)
            else:
                acc += centered @ centered.T / HW
        if is_reinhard:
            return mean, torch.sqrt(acc / N).clamp_min_(eps)
        return mean, acc / N

    @staticmethod
    def _frame_stats(lab_flat, hw, is_reinhard, eps):
        """Per-frame mean + std/cov."""
        mean = lab_flat.mean(dim=-1, keepdim=True)
        if is_reinhard:
            return mean, lab_flat.std(dim=-1, keepdim=True, unbiased=False).clamp_min_(eps)
        centered = lab_flat - mean
        return mean, centered @ centered.T / hw

    @staticmethod
    def _mkl_matrix(cov_s, cov_r, eps):
        """Compute MKL 3x3 transform matrix from source and ref covariances."""
        eig_val_s, eig_vec_s = torch.linalg.eigh(cov_s)
        sqrt_val_s = torch.sqrt(eig_val_s.clamp_min(0)).clamp_min_(eps)

        scaled_V = eig_vec_s * sqrt_val_s.unsqueeze(0)
        mid = scaled_V.T @ cov_r @ scaled_V
        eig_val_m, eig_vec_m = torch.linalg.eigh(mid)
        sqrt_m = torch.sqrt(eig_val_m.clamp_min(0))

        inv_sqrt_s = 1.0 / sqrt_val_s
        inv_scaled_V = eig_vec_s * inv_sqrt_s.unsqueeze(0)
        M_half = (eig_vec_m * sqrt_m.unsqueeze(0)) @ eig_vec_m.T
        return inv_scaled_V @ M_half @ inv_scaled_V.T

    @staticmethod
    def _histogram_lut(src, ref, bins=256):
        """Build per-channel LUT from source and ref histograms. src/ref: (C, HW) in [0,1]."""
        s_bins = (src * (bins - 1)).long().clamp(0, bins - 1)
        r_bins = (ref * (bins - 1)).long().clamp(0, bins - 1)
        s_hist = torch.zeros(src.shape[0], bins, device=src.device, dtype=src.dtype)
        r_hist = torch.zeros(src.shape[0], bins, device=src.device, dtype=src.dtype)
        ones_s = torch.ones_like(src)
        ones_r = torch.ones_like(ref)
        s_hist.scatter_add_(1, s_bins, ones_s)
        r_hist.scatter_add_(1, r_bins, ones_r)
        s_cdf = s_hist.cumsum(1)
        s_cdf = s_cdf / s_cdf[:, -1:]
        r_cdf = r_hist.cumsum(1)
        r_cdf = r_cdf / r_cdf[:, -1:]
        return torch.searchsorted(r_cdf, s_cdf).clamp_max_(bins - 1).float() / (bins - 1)

    @classmethod
    def _pooled_cdf(cls, images, device, num_bins=256):
        """Build pooled CDF across all frames, one frame at a time."""
        C = images.shape[3]
        hist = torch.zeros(C, num_bins, device=device, dtype=torch.float32)
        for i in range(images.shape[0]):
            frame = images[i].to(device, dtype=torch.float32).permute(2, 0, 1).reshape(C, -1)
            bins = (frame * (num_bins - 1)).long().clamp(0, num_bins - 1)
            hist.scatter_add_(1, bins, torch.ones_like(frame))
        cdf = hist.cumsum(1)
        return cdf / cdf[:, -1:]

    @classmethod
    def _build_histogram_transform(cls, image_target, image_ref, device, stats_mode, target_index, B):
        """Build per-frame or uniform LUT transform for histogram mode."""
        if stats_mode == 'per_frame':
            return None # LUT computed per-frame in the apply loop

        r_cdf = cls._pooled_cdf(image_ref, device)
        if stats_mode == 'target_frame':
            ti = min(target_index, B - 1)
            s_cdf = cls._pooled_cdf(image_target[ti:ti+1], device)
        else:
            s_cdf = cls._pooled_cdf(image_target, device)
        return torch.searchsorted(r_cdf, s_cdf).clamp_max_(255).float() / 255.0

    @classmethod
    def _build_lab_transform(cls, image_target, image_ref, device, stats_mode, target_index, is_reinhard):
        """Build transform parameters for Lab-based methods. Returns a transform function."""
        eps = 1e-6
        B, H, W, C = image_target.shape
        B_ref = image_ref.shape[0]
        single_ref = B_ref == 1
        HW = H * W
        HW_ref = image_ref.shape[1] * image_ref.shape[2]

        # Precompute ref stats
        if single_ref or stats_mode in ('uniform', 'target_frame'):
            ref_mean, ref_sc = cls._pool_stats(image_ref, device, is_reinhard, eps)

        # Uniform/target_frame: precompute single affine transform
        if stats_mode in ('uniform', 'target_frame'):
            if stats_mode == 'target_frame':
                ti = min(target_index, B - 1)
                s_lab = cls._to_lab(image_target, ti, device).view(C, -1)
                s_mean, s_sc = cls._frame_stats(s_lab, HW, is_reinhard, eps)
            else:
                s_mean, s_sc = cls._pool_stats(image_target, device, is_reinhard, eps)

            if is_reinhard:
                scale = ref_sc / s_sc
                offset = ref_mean - scale * s_mean
                return lambda src_flat, **_: src_flat * scale + offset
            T = cls._mkl_matrix(s_sc, ref_sc, eps)
            offset = ref_mean - T @ s_mean
            return lambda src_flat, **_: T @ src_flat + offset

        # per_frame
        def per_frame_transform(src_flat, frame_idx):
            s_mean, s_sc = cls._frame_stats(src_flat, HW, is_reinhard, eps)

            if single_ref:
                r_mean, r_sc = ref_mean, ref_sc
            else:
                ri = min(frame_idx, B_ref - 1)
                r_mean, r_sc = cls._frame_stats(cls._to_lab(image_ref, ri, device).view(C, -1), HW_ref, is_reinhard, eps)

            centered = src_flat - s_mean
            if is_reinhard:
                return centered * (r_sc / s_sc) + r_mean
            T = cls._mkl_matrix(centered @ centered.T / HW, r_sc, eps)
            return T @ centered + r_mean

        return per_frame_transform

    @classmethod
    def execute(cls, image_target, image_ref, method, source_stats, strength=1.0) -> io.NodeOutput:
        stats_mode = source_stats["source_stats"]
        target_index = source_stats.get("target_index", 0)

        if strength == 0 or image_ref is None:
            return io.NodeOutput(image_target)

        device = comfy.model_management.get_torch_device()
        intermediate_device = comfy.model_management.intermediate_device()
        intermediate_dtype = comfy.model_management.intermediate_dtype()

        B, H, W, C = image_target.shape
        B_ref = image_ref.shape[0]
        pbar = comfy.utils.ProgressBar(B)
        out = torch.empty(B, H, W, C, device=intermediate_device, dtype=intermediate_dtype)

        if method == 'histogram':
            uniform_lut = cls._build_histogram_transform(
                image_target, image_ref, device, stats_mode, target_index, B)

            for i in range(B):
                src = image_target[i].to(device, dtype=torch.float32).permute(2, 0, 1)
                src_flat = src.reshape(C, -1)
                if uniform_lut is not None:
                    lut = uniform_lut
                else:
                    ri = min(i, B_ref - 1)
                    ref = image_ref[ri].to(device, dtype=torch.float32).permute(2, 0, 1).reshape(C, -1)
                    lut = cls._histogram_lut(src_flat, ref)
                bin_idx = (src_flat * 255).long().clamp(0, 255)
                matched = lut.gather(1, bin_idx).view(C, H, W)
                result = matched if strength == 1.0 else torch.lerp(src, matched, strength)
                out[i] = result.permute(1, 2, 0).clamp_(0, 1).to(device=intermediate_device, dtype=intermediate_dtype)
                pbar.update(1)
        else:
            transform = cls._build_lab_transform(image_target, image_ref, device, stats_mode, target_index, is_reinhard=method == "reinhard_lab")

            for i in range(B):
                src_frame = cls._to_lab(image_target, i, device)
                corrected = transform(src_frame.view(C, -1), frame_idx=i)
                if strength == 1.0:
                    result = kornia.color.lab_to_rgb(corrected.view(1, C, H, W))
                else:
                    result = kornia.color.lab_to_rgb(torch.lerp(src_frame, corrected.view(1, C, H, W), strength))
                out[i] = result.squeeze(0).permute(1, 2, 0).clamp_(0, 1).to(device=intermediate_device, dtype=intermediate_dtype)
                pbar.update(1)

        return io.NodeOutput(out)

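Review note (not part of the diff): reinhard_lab is the classic per-channel affine match in Lab space, out = (x - mean_src) * (std_ref / std_src) + mean_ref; mkl_lab generalizes it to a full 3x3 covariance match (the Monge-Kantorovich linear mapping built in _mkl_matrix); histogram matches per-channel CDFs through the LUT helpers. A minimal standalone sketch of the Reinhard-style step, with illustrative names:

# Sketch only: per-channel affine color match on flattened Lab channels (C, N).
import torch

def reinhard_match(src: torch.Tensor, ref: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    s_mean, s_std = src.mean(dim=1, keepdim=True), src.std(dim=1, keepdim=True).clamp_min(eps)
    r_mean, r_std = ref.mean(dim=1, keepdim=True), ref.std(dim=1, keepdim=True).clamp_min(eps)
    return (src - s_mean) * (r_std / s_std) + r_mean
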
class PostProcessingExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
@ -673,6 +896,7 @@ class PostProcessingExtension(ComfyExtension):
            BatchImagesNode,
            BatchMasksNode,
            BatchLatentsNode,
            ColorTransfer,
            # BatchImagesMasksLatentsNode,
        ]

@ -11,7 +11,7 @@ class PreviewAny():
            "required": {"source": (IO.ANY, {})},
        }

    RETURN_TYPES = ()
    RETURN_TYPES = (IO.STRING,)
    FUNCTION = "main"
    OUTPUT_NODE = True

@ -33,7 +33,7 @@ class PreviewAny():
        except Exception:
            value = 'source exists, but could not be serialized.'

        return {"ui": {"text": (value,)}}
        return {"ui": {"text": (value,)}, "result": (value,)}

NODE_CLASS_MAPPINGS = {
    "PreviewAny": PreviewAny,

@ -1,4 +1,5 @@
import re
import json
from typing_extensions import override

from comfy_api.latest import ComfyExtension, io
@ -375,6 +376,39 @@ class RegexReplace(io.ComfyNode):
        return io.NodeOutput(result)


class JsonExtractString(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="JsonExtractString",
            display_name="Extract String from JSON",
            category="utils/string",
            search_aliases=["json", "extract json", "parse json", "json value", "read json"],
            inputs=[
                io.String.Input("json_string", multiline=True),
                io.String.Input("key", multiline=False),
            ],
            outputs=[
                io.String.Output(),
            ]
        )

    @classmethod
    def execute(cls, json_string, key):
        try:
            data = json.loads(json_string)
            if isinstance(data, dict) and key in data:
                value = data[key]
                if value is None:
                    return io.NodeOutput("")

                return io.NodeOutput(str(value))

            return io.NodeOutput("")

        except (json.JSONDecodeError, TypeError):
            return io.NodeOutput("")

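Illustrative calls against the execute() classmethod above (example values; results shown as the string wrapped by io.NodeOutput): a present key is stringified, while a missing key, a null value, or unparseable JSON all fall back to an empty string.

JsonExtractString.execute('{"prompt": "a cat in the rain", "steps": 20}', "prompt")  # -> "a cat in the rain"
JsonExtractString.execute('{"prompt": "a cat in the rain", "steps": 20}', "steps")   # -> "20"
JsonExtractString.execute('{"seed": null}', "seed")                                  # -> ""
JsonExtractString.execute('not json', "key")                                         # -> ""
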
class StringExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
@ -390,6 +424,7 @@ class StringExtension(ComfyExtension):
            RegexMatch,
            RegexExtract,
            RegexReplace,
            JsonExtractString,
        ]

async def comfy_entrypoint() -> StringExtension:

@ -35,6 +35,7 @@ class TextGenerate(io.ComfyNode):
                io.Int.Input("max_length", default=256, min=1, max=2048),
                io.DynamicCombo.Input("sampling_mode", options=sampling_options, display_name="Sampling Mode"),
                io.Boolean.Input("thinking", optional=True, default=False, tooltip="Operate in thinking mode if the model supports it."),
                io.Boolean.Input("use_default_template", optional=True, default=True, tooltip="Use the built in system prompt/template if the model has one.", advanced=True),
            ],
            outputs=[
                io.String.Output(display_name="generated_text"),
@ -42,9 +43,9 @@ class TextGenerate(io.ComfyNode):
        )

    @classmethod
    def execute(cls, clip, prompt, max_length, sampling_mode, image=None, thinking=False) -> io.NodeOutput:
    def execute(cls, clip, prompt, max_length, sampling_mode, image=None, thinking=False, use_default_template=True) -> io.NodeOutput:

        tokens = clip.tokenize(prompt, image=image, skip_template=False, min_length=1, thinking=thinking)
        tokens = clip.tokenize(prompt, image=image, skip_template=not use_default_template, min_length=1, thinking=thinking)

        # Get sampling parameters from dynamic combo
        do_sample = sampling_mode.get("sampling_mode") == "on"
@ -160,12 +161,12 @@ class TextGenerateLTX2Prompt(TextGenerate):
        )

    @classmethod
    def execute(cls, clip, prompt, max_length, sampling_mode, image=None, thinking=False) -> io.NodeOutput:
    def execute(cls, clip, prompt, max_length, sampling_mode, image=None, thinking=False, use_default_template=True) -> io.NodeOutput:
        if image is None:
            formatted_prompt = f"<start_of_turn>system\n{LTX2_T2V_SYSTEM_PROMPT.strip()}<end_of_turn>\n<start_of_turn>user\nUser Raw Input Prompt: {prompt}.<end_of_turn>\n<start_of_turn>model\n"
        else:
            formatted_prompt = f"<start_of_turn>system\n{LTX2_I2V_SYSTEM_PROMPT.strip()}<end_of_turn>\n<start_of_turn>user\n\n<image_soft_token>\n\nUser Raw Input Prompt: {prompt}.<end_of_turn>\n<start_of_turn>model\n"
        return super().execute(clip, formatted_prompt, max_length, sampling_mode, image, thinking)
        return super().execute(clip, formatted_prompt, max_length, sampling_mode, image, thinking, use_default_template)


class TextgenExtension(ComfyExtension):

@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.18.1"
__version__ = "0.19.3"

@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.18.1"
version = "0.19.3"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

@ -1,5 +1,5 @@
comfyui-frontend-package==1.42.10
comfyui-workflow-templates==0.9.45
comfyui-frontend-package==1.42.14
comfyui-workflow-templates==0.9.57
comfyui-embedded-docs==0.4.3
torch
torchsde
@ -19,7 +19,7 @@ scipy
tqdm
psutil
alembic
SQLAlchemy
SQLAlchemy>=2.0
filelock
av>=14.2.0
comfy-kitchen>=0.2.8

tests-unit/comfy_api_test/openai_nodes_test.py (new file, 246 lines)
@ -0,0 +1,246 @@
import pytest

from comfy_api_nodes.nodes_openai import (
    OpenAIGPTImage1,
    OpenAIGPTImage2,
    _GPT_IMAGE_2_SIZES,
    _resolve_gpt_image_2_size,
    calculate_tokens_price_image_1,
    calculate_tokens_price_image_1_5,
    calculate_tokens_price_image_2,
)
from comfy_api_nodes.apis.openai import OpenAIImageGenerationResponse, Usage


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

def _make_response(input_tokens: int, output_tokens: int) -> OpenAIImageGenerationResponse:
    return OpenAIImageGenerationResponse(
        data=[],
        usage=Usage(input_tokens=input_tokens, output_tokens=output_tokens),
    )


# ---------------------------------------------------------------------------
# Price extractor tests
# ---------------------------------------------------------------------------

def test_price_image_1_formula():
    response = _make_response(input_tokens=1_000_000, output_tokens=1_000_000)
    assert calculate_tokens_price_image_1(response) == pytest.approx(50.0)


def test_price_image_1_5_formula():
    response = _make_response(input_tokens=1_000_000, output_tokens=1_000_000)
    assert calculate_tokens_price_image_1_5(response) == pytest.approx(40.0)


def test_price_image_2_formula():
    response = _make_response(input_tokens=1_000_000, output_tokens=1_000_000)
    assert calculate_tokens_price_image_2(response) == pytest.approx(38.0)


def test_price_image_2_cheaper_than_1():
    response = _make_response(input_tokens=500, output_tokens=196)
    assert calculate_tokens_price_image_2(response) < calculate_tokens_price_image_1(response)


def test_price_image_2_cheaper_output_than_1_5():
    # gpt-image-2 output rate ($30/1M) is lower than gpt-image-1.5 ($32/1M)
    response = _make_response(input_tokens=0, output_tokens=1_000_000)
    assert calculate_tokens_price_image_2(response) < calculate_tokens_price_image_1_5(response)

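Aside (not asserted anywhere in this file): the 1M/1M totals above are consistent with a flat per-million-token split. The output rates for gpt-image-1.5 ($32/1M) and gpt-image-2 ($30/1M) come from the comment in the last test; the input rates below are inferred only to make the totals work and should be treated as assumptions, not the nodes' actual pricing table.

# Sketch only: back-of-the-envelope check of the asserted totals.
RATES = {  # (input $/1M tokens, output $/1M tokens) -- inferred, see note above
    "gpt-image-1": (10.0, 40.0),    # 10 + 40 = 50.0
    "gpt-image-1.5": (8.0, 32.0),   #  8 + 32 = 40.0
    "gpt-image-2": (8.0, 30.0),     #  8 + 30 = 38.0
}

def price(model: str, input_tokens: int, output_tokens: int) -> float:
    i, o = RATES[model]
    return input_tokens / 1_000_000 * i + output_tokens / 1_000_000 * o
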
# ---------------------------------------------------------------------------
# _resolve_gpt_image_2_size tests
# ---------------------------------------------------------------------------

def test_resolve_preset_passthrough_when_custom_zero():
    # 0/0 means "use size preset"
    assert _resolve_gpt_image_2_size("1024x1024", 0, 0) == "1024x1024"
    assert _resolve_gpt_image_2_size("auto", 0, 0) == "auto"
    assert _resolve_gpt_image_2_size("3840x2160", 0, 0) == "3840x2160"


def test_resolve_preset_passthrough_when_only_one_dim_set():
    # only one dimension set → still use preset
    assert _resolve_gpt_image_2_size("auto", 1024, 0) == "auto"
    assert _resolve_gpt_image_2_size("auto", 0, 1024) == "auto"


def test_resolve_custom_overrides_preset():
    assert _resolve_gpt_image_2_size("auto", 1024, 1024) == "1024x1024"
    assert _resolve_gpt_image_2_size("1024x1024", 2048, 1152) == "2048x1152"
    assert _resolve_gpt_image_2_size("auto", 3840, 2160) == "3840x2160"


def test_resolve_custom_rejects_edge_too_large():
    with pytest.raises(ValueError, match="3840"):
        _resolve_gpt_image_2_size("auto", 4096, 1024)


def test_resolve_custom_rejects_non_multiple_of_16():
    with pytest.raises(ValueError, match="multiple of 16"):
        _resolve_gpt_image_2_size("auto", 1025, 1024)


def test_resolve_custom_rejects_bad_ratio():
    with pytest.raises(ValueError, match="ratio"):
        _resolve_gpt_image_2_size("auto", 3840, 1024) # 3.75:1 > 3:1


def test_resolve_custom_rejects_too_few_pixels():
    with pytest.raises(ValueError, match="Total pixels"):
        _resolve_gpt_image_2_size("auto", 16, 16)


def test_resolve_custom_rejects_too_many_pixels():
    # 3840x2176 exceeds 8,294,400
    with pytest.raises(ValueError, match="Total pixels"):
        _resolve_gpt_image_2_size("auto", 3840, 2176)

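Taken together, these tests pin down the custom-size rules for gpt-image-2: both custom dimensions must be set (otherwise the preset wins), each edge must be a multiple of 16 and at most 3840, the aspect ratio may not exceed 3:1, and the total pixel count is bounded above by 8,294,400 (3840x2160). Below is a validator sketch that would satisfy every assertion above; it is not the real _resolve_gpt_image_2_size, and the minimum-pixel threshold is an assumption the tests do not fix precisely.

def resolve_size_sketch(size_preset: str, width: int, height: int) -> str:
    if width <= 0 or height <= 0:
        return size_preset  # 0/0 or a single set dimension -> keep the preset
    for edge in (width, height):
        if edge > 3840:
            raise ValueError(f"Edge {edge} exceeds the 3840 px limit")
        if edge % 16 != 0:
            raise ValueError(f"Edge {edge} is not a multiple of 16")
    if max(width, height) / min(width, height) > 3.0:
        raise ValueError("Aspect ratio exceeds 3:1")
    if not (1024 * 1024 <= width * height <= 8_294_400):  # lower bound assumed
        raise ValueError(f"Total pixels {width * height} out of range")
    return f"{width}x{height}"
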
# ---------------------------------------------------------------------------
# OpenAIGPTImage1 schema tests
# ---------------------------------------------------------------------------

class TestOpenAIGPTImage1Schema:
    def setup_method(self):
        self.schema = OpenAIGPTImage1.define_schema()

    def test_node_id(self):
        assert self.schema.node_id == "OpenAIGPTImage1"

    def test_display_name(self):
        assert self.schema.display_name == "OpenAI GPT Image 1 & 1.5"

    def test_model_options_exclude_gpt_image_2(self):
        model_input = next(i for i in self.schema.inputs if i.name == "model")
        assert "gpt-image-2" not in model_input.options

    def test_model_options_include_legacy_models(self):
        model_input = next(i for i in self.schema.inputs if i.name == "model")
        assert "gpt-image-1" in model_input.options
        assert "gpt-image-1.5" in model_input.options

    def test_has_background_with_transparent(self):
        bg_input = next(i for i in self.schema.inputs if i.name == "background")
        assert "transparent" in bg_input.options


# ---------------------------------------------------------------------------
# OpenAIGPTImage2 schema tests
# ---------------------------------------------------------------------------

class TestOpenAIGPTImage2Schema:
    def setup_method(self):
        self.schema = OpenAIGPTImage2.define_schema()

    def test_node_id(self):
        assert self.schema.node_id == "OpenAIGPTImage2"

    def test_display_name(self):
        assert self.schema.display_name == "OpenAI GPT Image 2"

    def test_category(self):
        assert "OpenAI" in self.schema.category

    def test_no_transparent_background(self):
        bg_input = next(i for i in self.schema.inputs if i.name == "background")
        assert "transparent" not in bg_input.options

    def test_background_options(self):
        bg_input = next(i for i in self.schema.inputs if i.name == "background")
        assert set(bg_input.options) == {"auto", "opaque"}

    def test_quality_options(self):
        quality_input = next(i for i in self.schema.inputs if i.name == "quality")
        assert set(quality_input.options) == {"auto", "low", "medium", "high"}

    def test_quality_default_is_auto(self):
        quality_input = next(i for i in self.schema.inputs if i.name == "quality")
        assert quality_input.default == "auto"

    def test_all_popular_sizes_present(self):
        size_input = next(i for i in self.schema.inputs if i.name == "size")
        for size in ["1024x1024", "1536x1024", "1024x1536", "2048x2048", "2048x1152", "3840x2160", "2160x3840"]:
            assert size in size_input.options, f"Missing size: {size}"

    def test_no_custom_size_option(self):
        size_input = next(i for i in self.schema.inputs if i.name == "size")
        assert "custom" not in size_input.options

    def test_size_default_is_auto(self):
        size_input = next(i for i in self.schema.inputs if i.name == "size")
        assert size_input.default == "auto"

    def test_custom_width_and_height_inputs_exist(self):
        input_names = [i.name for i in self.schema.inputs]
        assert "custom_width" in input_names
        assert "custom_height" in input_names

    def test_custom_width_height_default_zero(self):
        width_input = next(i for i in self.schema.inputs if i.name == "custom_width")
        height_input = next(i for i in self.schema.inputs if i.name == "custom_height")
        assert width_input.default == 0
        assert height_input.default == 0

    def test_custom_width_height_step_is_16(self):
        width_input = next(i for i in self.schema.inputs if i.name == "custom_width")
        height_input = next(i for i in self.schema.inputs if i.name == "custom_height")
        assert width_input.step == 16
        assert height_input.step == 16

    def test_custom_width_height_max_is_3840(self):
        width_input = next(i for i in self.schema.inputs if i.name == "custom_width")
        height_input = next(i for i in self.schema.inputs if i.name == "custom_height")
        assert width_input.max == 3840
        assert height_input.max == 3840

    def test_uses_num_images_not_n(self):
        input_names = [i.name for i in self.schema.inputs]
        assert "num_images" in input_names
        assert "n" not in input_names

    def test_model_input_shows_gpt_image_2(self):
        model_input = next(i for i in self.schema.inputs if i.name == "model")
        assert model_input.options == ["gpt-image-2"]
        assert model_input.default == "gpt-image-2"

    def test_has_image_and_mask_inputs(self):
        input_names = [i.name for i in self.schema.inputs]
        assert "image" in input_names
        assert "mask" in input_names

    def test_is_api_node(self):
        assert self.schema.is_api_node is True

    def test_sizes_match_constant(self):
        size_input = next(i for i in self.schema.inputs if i.name == "size")
        assert size_input.options == _GPT_IMAGE_2_SIZES


# ---------------------------------------------------------------------------
# OpenAIGPTImage2 execute validation tests
# ---------------------------------------------------------------------------

@pytest.mark.asyncio
async def test_execute_raises_on_empty_prompt():
    with pytest.raises(Exception):
        await OpenAIGPTImage2.execute(prompt=" ")


@pytest.mark.asyncio
async def test_execute_raises_mask_without_image():
    import torch
    mask = torch.ones(1, 64, 64)
    with pytest.raises(ValueError, match="mask without an input image"):
        await OpenAIGPTImage2.execute(prompt="test", mask=mask)


@pytest.mark.asyncio
async def test_execute_raises_invalid_custom_size():
    with pytest.raises(ValueError):
        await OpenAIGPTImage2.execute(prompt="test", custom_width=4096, custom_height=1024)