Compare commits

...

3 Commits

Author SHA1 Message Date
1f842ab43c chore: update workflow templates to v0.9.65 (#13644) 2026-05-01 00:31:22 -07:00
19699767fd [Partner Nodes] ByteDance: virtual portrait library for regular images (#13638)
* feat(api-nodes-bytedance): use the virtual portrait library for regular images

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* fix: include shape in image dedup hash

Signed-off-by: bigcat88 <bigcat88@icloud.com>

---------

Signed-off-by: bigcat88 <bigcat88@icloud.com>
2026-05-01 00:31:21 -07:00
0a01ce5e16 feat(api-nodes): allow custom resolutions for GPTImage2 node (#13631)
Signed-off-by: bigcat88 <bigcat88@icloud.com>
2026-05-01 00:31:21 -07:00
4 changed files with 86 additions and 10 deletions

View File

@ -157,6 +157,11 @@ class SeedanceCreateAssetResponse(BaseModel):
asset_id: str = Field(...)
class SeedanceVirtualLibraryCreateAssetRequest(BaseModel):
    """Request body for the Seedance virtual-library asset-creation endpoint.

    Submitting the same ``hash`` again returns the already-created asset id
    instead of uploading a duplicate (server-side dedup).
    """

    url: str = Field(..., description="Publicly accessible URL of the image asset to upload.")
    hash: str = Field(..., description="Dedup key. Re-submitting the same hash returns the existing asset id.")
# Dollars per 1K tokens, keyed by (model_id, has_video_input).
SEEDANCE2_PRICE_PER_1K_TOKENS = {
("dreamina-seedance-2-0-260128", False): 0.007,

View File

@ -1,3 +1,4 @@
import hashlib
import logging
import math
import re
@ -20,6 +21,7 @@ from comfy_api_nodes.apis.bytedance import (
SeedanceCreateAssetResponse,
SeedanceCreateVisualValidateSessionResponse,
SeedanceGetVisualValidateSessionResponse,
SeedanceVirtualLibraryCreateAssetRequest,
Seedream4Options,
Seedream4TaskCreationRequest,
TaskAudioContent,
@ -271,6 +273,30 @@ async def _wait_for_asset_active(cls: type[IO.ComfyNode], asset_id: str, group_i
)
def _image_dedup_hash(image: torch.Tensor) -> str:
    """Return a stable SHA-256 dedup key for *image*.

    The tensor is normalized (detached, moved to CPU, made contiguous, cast to
    float32) so that identical pixel data always produces the same digest
    regardless of device, gradient state, or memory layout.
    """
    normalized = image.detach().cpu().contiguous().to(torch.float32)
    digest = hashlib.sha256()
    # Mix the shape in (NUL-separated from the payload) so tensors with the
    # same raw bytes but different shapes do not collide.
    digest.update(str(tuple(normalized.shape)).encode("utf-8"))
    digest.update(b"\0")
    digest.update(normalized.numpy().tobytes())
    return digest.hexdigest()


async def _seedance_virtual_library_upload_image_asset(
    cls: type[IO.ComfyNode],
    image: torch.Tensor,
    *,
    wait_label: str = "Uploading image",
) -> str:
    """Upload an image into the caller's per-customer Seedance virtual library.

    Args:
        cls: Node class providing the API/session context for the upload ops.
        image: Image tensor to upload.
        wait_label: Progress label shown while the raw image is uploading.

    Returns:
        An ``asset://<id>`` URL referencing the created (or, when the dedup
        hash matches an earlier upload, the pre-existing) asset.
    """
    public_url = await upload_image_to_comfyapi(cls, image, wait_label=wait_label)
    # The server dedups on this hash: re-submitting the same hash returns the
    # existing asset id instead of creating a duplicate.
    create_resp = await sync_op(
        cls,
        ApiEndpoint(path="/proxy/seedance/virtual-library/assets", method="POST"),
        response_model=SeedanceCreateAssetResponse,
        data=SeedanceVirtualLibraryCreateAssetRequest(
            url=public_url, hash=_image_dedup_hash(image)
        ),
    )
    # Block until the backend reports the asset usable before handing the
    # reference to a task request.
    await _wait_for_asset_active(cls, create_resp.asset_id, group_id="virtual-library")
    return f"asset://{create_resp.asset_id}"
def _seedance2_price_extractor(model_id: str, has_video_input: bool):
"""Returns a price_extractor closure for Seedance 2.0 poll_op."""
rate = SEEDANCE2_PRICE_PER_1K_TOKENS.get((model_id, has_video_input))
@ -1507,7 +1533,9 @@ class ByteDance2FirstLastFrameNode(IO.ComfyNode):
if first_frame_asset_id:
first_frame_url = image_assets[first_frame_asset_id]
else:
first_frame_url = await upload_image_to_comfyapi(cls, first_frame, wait_label="Uploading first frame.")
first_frame_url = await _seedance_virtual_library_upload_image_asset(
cls, first_frame, wait_label="Uploading first frame."
)
content: list[TaskTextContent | TaskImageContent] = [
TaskTextContent(text=model["prompt"]),
@ -1527,7 +1555,9 @@ class ByteDance2FirstLastFrameNode(IO.ComfyNode):
content.append(
TaskImageContent(
image_url=TaskImageContentUrl(
url=await upload_image_to_comfyapi(cls, last_frame, wait_label="Uploading last frame.")
url=await _seedance_virtual_library_upload_image_asset(
cls, last_frame, wait_label="Uploading last frame."
)
),
role="last_frame",
),
@ -1805,9 +1835,9 @@ class ByteDance2ReferenceNode(IO.ComfyNode):
content.append(
TaskImageContent(
image_url=TaskImageContentUrl(
url=await upload_image_to_comfyapi(
url=await _seedance_virtual_library_upload_image_asset(
cls,
image=reference_images[key],
reference_images[key],
wait_label=f"Uploading image {i}",
),
),

View File

@ -415,8 +415,9 @@ class OpenAIGPTImage1(IO.ComfyNode):
"1152x2048",
"3840x2160",
"2160x3840",
"Custom",
],
tooltip="Image size",
tooltip="Image size. Select 'Custom' to use the custom width and height (GPT Image 2 only).",
optional=True,
),
IO.Int.Input(
@ -445,6 +446,26 @@ class OpenAIGPTImage1(IO.ComfyNode):
default="gpt-image-2",
optional=True,
),
IO.Int.Input(
"custom_width",
default=1024,
min=1024,
max=3840,
step=16,
tooltip="Used only when `size` is 'Custom'. Must be a multiple of 16 (GPT Image 2 only).",
optional=True,
advanced=True,
),
IO.Int.Input(
"custom_height",
default=1024,
min=1024,
max=3840,
step=16,
tooltip="Used only when `size` is 'Custom'. Must be a multiple of 16 (GPT Image 2 only).",
optional=True,
advanced=True,
),
],
outputs=[
IO.Image.Output(),
@ -471,9 +492,9 @@ class OpenAIGPTImage1(IO.ComfyNode):
"high": [0.133, 0.22]
},
"gpt-image-2": {
"low": [0.0048, 0.012],
"medium": [0.041, 0.112],
"high": [0.165, 0.43]
"low": [0.0048, 0.019],
"medium": [0.041, 0.168],
"high": [0.165, 0.67]
}
};
$range := $lookup($lookup($ranges, widgets.model), widgets.quality);
@ -503,6 +524,8 @@ class OpenAIGPTImage1(IO.ComfyNode):
mask: Input.Image | None = None,
n: int = 1,
size: str = "1024x1024",
custom_width: int = 1024,
custom_height: int = 1024,
model: str = "gpt-image-1",
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=False)
@ -510,7 +533,25 @@ class OpenAIGPTImage1(IO.ComfyNode):
if mask is not None and image is None:
raise ValueError("Cannot use a mask without an input image")
if model in ("gpt-image-1", "gpt-image-1.5"):
if size == "Custom":
if model != "gpt-image-2":
raise ValueError("Custom resolution is only supported by GPT Image 2 model")
if custom_width % 16 != 0 or custom_height % 16 != 0:
raise ValueError(f"Custom width and height must be multiples of 16, got {custom_width}x{custom_height}")
if max(custom_width, custom_height) > 3840:
raise ValueError(f"Custom resolution max edge must be <= 3840, got {custom_width}x{custom_height}")
ratio = max(custom_width, custom_height) / min(custom_width, custom_height)
if ratio > 3:
raise ValueError(
f"Custom resolution aspect ratio must not exceed 3:1, got {custom_width}x{custom_height}"
)
total_pixels = custom_width * custom_height
if not 655_360 <= total_pixels <= 8_294_400:
raise ValueError(
f"Custom resolution total pixels must be between 655,360 and 8,294,400, got {total_pixels}"
)
size = f"{custom_width}x{custom_height}"
elif model in ("gpt-image-1", "gpt-image-1.5"):
if size not in ("auto", "1024x1024", "1024x1536", "1536x1024"):
raise ValueError(f"Resolution {size} is only supported by GPT Image 2 model")

View File

@ -1,5 +1,5 @@
comfyui-frontend-package==1.42.15
comfyui-workflow-templates==0.9.63
comfyui-workflow-templates==0.9.65
comfyui-embedded-docs==0.4.4
torch
torchsde