Compare commits


9 Commits

Author SHA1 Message Date
75143eeb06 ComfyUI v0.20.0 2026-04-27 13:24:36 -04:00
1233f077b1 chore: update workflow templates to v0.9.63 (#13586)
Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>
2026-04-27 10:06:03 -07:00
6968a70e60 [Partner Nodes] HappyHorse model (#13582)
* feat(api-nodes): add nodes for HappyHorse model

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* fix price badges

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* fix: allow durations up to 15 s

Signed-off-by: bigcat88 <bigcat88@icloud.com>

---------

Signed-off-by: bigcat88 <bigcat88@icloud.com>
2026-04-27 09:53:08 -07:00
115f418b64 Make EmptySD3LatentImage node use intermediate dtype. (#13577) 2026-04-26 23:23:57 -04:00
7385eb2800 Add new ComfyUI blueprints and fix subgraph naming (#13371)
* Remove local tag from subgraph name

* New Subgraph blueprints

* Remove duplicate blueprint

* Update Subgraph size

* Update subgraph

* Update Blueprint

* Remove local tag from subgraph name

* New Subgraph blueprints

* Remove duplicate blueprint

* Update Subgraph size

* Update subgraph

* Update Blueprint

* Update LTX 2.0 Pose to Video

* Fix crop blueprint split coverage

Made-with: Cursor

* Clean up image edit blueprint metadata

Made-with: Cursor

* Update subgraph blueprints

---------

Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>
2026-04-26 22:59:16 +08:00
df22bcd5e1 Support loading the alpha channel of videos. (#13564)
Not exposed in nodes yet.
2026-04-25 21:02:58 -04:00
5e3f15a830 Bump comfyui-frontend-package to 1.42.15 (#13556) 2026-04-24 17:21:39 -07:00
4304c15e9b Properly load higher bit depth videos. (#13542) 2026-04-24 16:46:10 -04:00
7636599389 chore(api-nodes): add upcoming-deprecation notice to Sora nodes (#13549) 2026-04-24 06:54:10 -07:00
30 changed files with 30583 additions and 1129 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -160,7 +160,7 @@
},
"revision": 0,
"config": {},
"name": "local-Depth to Image (Z-Image-Turbo)",
"name": "Depth to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@@ -2482,4 +2482,4 @@
"VHS_KeepIntermediate": true
},
"version": 0.4
}
}

View File

@@ -261,7 +261,7 @@
},
"revision": 0,
"config": {},
"name": "local-Depth to Video (LTX 2.0)",
"name": "Depth to Video (LTX 2.0)",
"inputNode": {
"id": -10,
"bounding": [
@@ -5208,4 +5208,4 @@
"workflowRendererVersion": "LG"
},
"version": 0.4
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -128,7 +128,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Edit (Flux.2 Klein 4B)",
"name": "Image Edit (Flux.2 Klein 4B)",
"inputNode": {
"id": -10,
"bounding": [
@@ -1837,4 +1837,4 @@
}
},
"version": 0.4
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -124,7 +124,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Inpainting (Qwen-image)",
"name": "Image Inpainting (Qwen-image)",
"inputNode": {
"id": -10,
"bounding": [
@@ -1923,4 +1923,4 @@
"workflowRendererVersion": "LG"
},
"version": 0.4
}
}

View File

@@ -204,7 +204,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Outpainting (Qwen-Image)",
"name": "Image Outpainting (Qwen-Image)",
"inputNode": {
"id": -10,
"bounding": [
@@ -2749,4 +2749,4 @@
}
},
"version": 0.4
}
}

View File

@@ -1,15 +1,14 @@
{
"id": "1a761372-7c82-4016-b9bf-fa285967e1e9",
"revision": 0,
"last_node_id": 83,
"last_node_id": 176,
"last_link_id": 0,
"nodes": [
{
"id": 83,
"type": "f754a936-daaf-4b6e-9658-41fdc54d301d",
"id": 176,
"type": "2d2e3c8e-53b3-4618-be52-6d1d99382f0e",
"pos": [
61.999827823554256,
153.3332507624185
-1150,
200
],
"size": [
400,
@@ -56,6 +55,38 @@
"name": "layers"
},
"link": null
},
{
"name": "seed",
"type": "INT",
"widget": {
"name": "seed"
},
"link": null
},
{
"name": "unet_name",
"type": "COMBO",
"widget": {
"name": "unet_name"
},
"link": null
},
{
"name": "clip_name",
"type": "COMBO",
"widget": {
"name": "clip_name"
},
"link": null
},
{
"name": "vae_name",
"type": "COMBO",
"widget": {
"name": "vae_name"
},
"link": null
}
],
"outputs": [
@@ -66,28 +97,41 @@
"links": []
}
],
"title": "Image to Layers (Qwen-Image-Layered)",
"properties": {
"proxyWidgets": [
[
"-1",
"6",
"text"
],
[
"-1",
"3",
"steps"
],
[
"-1",
"3",
"cfg"
],
[
"-1",
"83",
"layers"
],
[
"3",
"seed"
],
[
"37",
"unet_name"
],
[
"38",
"clip_name"
],
[
"39",
"vae_name"
],
[
"3",
"control_after_generate"
@@ -95,6 +139,11 @@
],
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -103,25 +152,20 @@
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
"",
20,
2.5,
2
]
"widgets_values": []
}
],
"links": [],
"groups": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "f754a936-daaf-4b6e-9658-41fdc54d301d",
"id": "2d2e3c8e-53b3-4618-be52-6d1d99382f0e",
"version": 1,
"state": {
"lastGroupId": 3,
"lastNodeId": 83,
"lastLinkId": 159,
"lastGroupId": 8,
"lastNodeId": 176,
"lastLinkId": 380,
"lastRerouteId": 0
},
"revision": 0,
@@ -130,10 +174,10 @@
"inputNode": {
"id": -10,
"bounding": [
-510,
523,
-720,
720,
120,
140
220
]
},
"outputNode": {
@@ -156,8 +200,8 @@
],
"localized_name": "image",
"pos": [
-410,
543
-620,
740
]
},
{
@@ -168,8 +212,8 @@
150
],
"pos": [
-410,
563
-620,
760
]
},
{
@@ -180,8 +224,8 @@
153
],
"pos": [
-410,
583
-620,
780
]
},
{
@@ -192,8 +236,8 @@
154
],
"pos": [
-410,
603
-620,
800
]
},
{
@@ -204,8 +248,56 @@
159
],
"pos": [
-410,
623
-620,
820
]
},
{
"id": "9f76338b-f4ca-4bb3-b61a-57b3f233061e",
"name": "seed",
"type": "INT",
"linkIds": [
377
],
"pos": [
-620,
840
]
},
{
"id": "8d0422d5-5eee-4f7e-9817-dc613cc62eca",
"name": "unet_name",
"type": "COMBO",
"linkIds": [
378
],
"pos": [
-620,
860
]
},
{
"id": "552eece2-a735-4d00-ae78-ded454622bc1",
"name": "clip_name",
"type": "COMBO",
"linkIds": [
379
],
"pos": [
-620,
880
]
},
{
"id": "1e6d141c-d0f9-4a2b-895c-b6780e57cfa0",
"name": "vae_name",
"type": "COMBO",
"linkIds": [
380
],
"pos": [
-620,
900
]
}
],
@@ -231,14 +323,14 @@
"type": "CLIPLoader",
"pos": [
-320,
310
360
],
"size": [
346.7470703125,
106
350,
150
],
"flags": {},
"order": 0,
"order": 5,
"mode": 0,
"inputs": [
{
@@ -248,7 +340,7 @@
"widget": {
"name": "clip_name"
},
"link": null
"link": 379
},
{
"localized_name": "type",
@@ -283,9 +375,14 @@
}
],
"properties": {
"Node name for S&R": "CLIPLoader",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "CLIPLoader",
"models": [
{
"name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
@@ -312,14 +409,14 @@
"type": "VAELoader",
"pos": [
-320,
460
580
],
"size": [
346.7470703125,
58
350,
110
],
"flags": {},
"order": 1,
"order": 6,
"mode": 0,
"inputs": [
{
@@ -329,7 +426,7 @@
"widget": {
"name": "vae_name"
},
"link": null
"link": 380
}
],
"outputs": [
@@ -345,9 +442,14 @@
}
],
"properties": {
"Node name for S&R": "VAELoader",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "VAELoader",
"models": [
{
"name": "qwen_image_layered_vae.safetensors",
@@ -375,11 +477,11 @@
420
],
"size": [
425.27801513671875,
180.6060791015625
430,
190
],
"flags": {},
"order": 3,
"order": 2,
"mode": 0,
"inputs": [
{
@@ -411,9 +513,14 @@
],
"title": "CLIP Text Encode (Negative Prompt)",
"properties": {
"Node name for S&R": "CLIPTextEncode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "CLIPTextEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -432,12 +539,12 @@
"id": 70,
"type": "ReferenceLatent",
"pos": [
330,
670
140,
700
],
"size": [
204.1666717529297,
46
210,
50
],
"flags": {
"collapsed": true
@@ -470,9 +577,14 @@
}
],
"properties": {
"Node name for S&R": "ReferenceLatent",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "ReferenceLatent",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -480,19 +592,18 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 69,
"type": "ReferenceLatent",
"pos": [
330,
710
160,
820
],
"size": [
204.1666717529297,
46
210,
50
],
"flags": {
"collapsed": true
@@ -525,9 +636,14 @@
}
],
"properties": {
"Node name for S&R": "ReferenceLatent",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "ReferenceLatent",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -535,8 +651,7 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 66,
@@ -547,10 +662,10 @@
],
"size": [
270,
58
110
],
"flags": {},
"order": 4,
"order": 7,
"mode": 0,
"inputs": [
{
@@ -580,9 +695,14 @@
}
],
"properties": {
"Node name for S&R": "ModelSamplingAuraFlow",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "ModelSamplingAuraFlow",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -600,11 +720,11 @@
"type": "LatentCutToBatch",
"pos": [
830,
160
140
],
"size": [
270,
82
140
],
"flags": {},
"order": 11,
@@ -646,9 +766,14 @@
}
],
"properties": {
"Node name for S&R": "LatentCutToBatch",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "LatentCutToBatch",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -666,12 +791,12 @@
"id": 71,
"type": "VAEEncode",
"pos": [
100,
690
-280,
780
],
"size": [
140,
46
230,
100
],
"flags": {
"collapsed": false
@@ -704,9 +829,14 @@
}
],
"properties": {
"Node name for S&R": "VAEEncode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "VAEEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -714,24 +844,23 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
850,
310
370
],
"size": [
210,
46
50
],
"flags": {
"collapsed": true
},
"order": 7,
"order": 3,
"mode": 0,
"inputs": [
{
@@ -759,9 +888,14 @@
}
],
"properties": {
"Node name for S&R": "VAEDecode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "VAEDecode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -769,8 +903,7 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 6,
@@ -780,11 +913,11 @@
180
],
"size": [
422.84503173828125,
164.31304931640625
430,
170
],
"flags": {},
"order": 6,
"order": 1,
"mode": 0,
"inputs": [
{
@@ -816,9 +949,14 @@
],
"title": "CLIP Text Encode (Positive Prompt)",
"properties": {
"Node name for S&R": "CLIPTextEncode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "CLIPTextEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -838,14 +976,14 @@
"type": "KSampler",
"pos": [
530,
280
340
],
"size": [
270,
400
],
"flags": {},
"order": 5,
"order": 0,
"mode": 0,
"inputs": [
{
@@ -879,7 +1017,7 @@
"widget": {
"name": "seed"
},
"link": null
"link": 377
},
{
"localized_name": "steps",
@@ -939,9 +1077,14 @@
}
],
"properties": {
"Node name for S&R": "KSampler",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "KSampler",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -964,12 +1107,12 @@
"id": 78,
"type": "GetImageSize",
"pos": [
80,
790
-280,
930
],
"size": [
210,
136
230,
140
],
"flags": {},
"order": 12,
@@ -1007,9 +1150,14 @@
}
],
"properties": {
"Node name for S&R": "GetImageSize",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "GetImageSize",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -1017,23 +1165,23 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 83,
"type": "EmptyQwenImageLayeredLatentImage",
"pos": [
320,
790
-280,
1120
],
"size": [
330.9341796875,
130
340,
200
],
"flags": {},
"order": 13,
"mode": 0,
"showAdvanced": true,
"inputs": [
{
"localized_name": "width",
@@ -1083,9 +1231,14 @@
}
],
"properties": {
"Node name for S&R": "EmptyQwenImageLayeredLatentImage",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "EmptyQwenImageLayeredLatentImage",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@@ -1109,11 +1262,11 @@
180
],
"size": [
346.7470703125,
82
350,
110
],
"flags": {},
"order": 2,
"order": 4,
"mode": 0,
"inputs": [
{
@@ -1123,7 +1276,7 @@
"widget": {
"name": "unet_name"
},
"link": null
"link": 378
},
{
"localized_name": "weight_dtype",
@@ -1147,9 +1300,14 @@
}
],
"properties": {
"Node name for S&R": "UNETLoader",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "UNETLoader",
"models": [
{
"name": "qwen_image_layered_bf16.safetensors",
@@ -1191,8 +1349,8 @@
"bounding": [
-330,
110,
366.7470703125,
421.6
370,
610
],
"color": "#3f789e",
"font_size": 24,
@@ -1391,6 +1549,38 @@
"target_id": 83,
"target_slot": 2,
"type": "INT"
},
{
"id": 377,
"origin_id": -10,
"origin_slot": 5,
"target_id": 3,
"target_slot": 4,
"type": "INT"
},
{
"id": 378,
"origin_id": -10,
"origin_slot": 6,
"target_id": 37,
"target_slot": 0,
"type": "COMBO"
},
{
"id": 379,
"origin_id": -10,
"origin_slot": 7,
"target_id": 38,
"target_slot": 0,
"type": "COMBO"
},
{
"id": 380,
"origin_id": -10,
"origin_slot": 8,
"target_id": 39,
"target_slot": 0,
"type": "COMBO"
}
],
"extra": {
@@ -1400,7 +1590,6 @@
}
]
},
"config": {},
"extra": {
"ds": {
"scale": 1.14,
@@ -1409,7 +1598,6 @@
6.855893974423647
]
},
"workflowRendererVersion": "LG"
},
"version": 0.4
}
"ue_links": []
}
}
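The bulk of this diff promotes more of the subgraph's inner widgets (seed, unet_name, clip_name, vae_name) onto the wrapping node via proxyWidgets, where each entry names an inner node id and one of its widgets. A minimal sketch that lists those promotions from an exported workflow, assuming the JSON layout shown above (the file name is hypothetical):

import json

with open("image_to_layers_subgraph.json") as f:  # hypothetical export
    workflow = json.load(f)

subgraph_node = workflow["nodes"][0]  # the subgraph instance node
for entry in subgraph_node["properties"]["proxyWidgets"]:
    # Entries vary in arity: [node_id..., widget_name], as seen above.
    *node_path, widget_name = entry
    print(f"widget {widget_name!r} proxied from inner node {node_path}")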

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -240,19 +240,34 @@ class VideoFromFile(VideoInput):
start_time = self.__start_time
# Get video frames
frames = []
alphas = None
start_pts = int(start_time / video_stream.time_base)
end_pts = int((start_time + self.__duration) / video_stream.time_base)
container.seek(start_pts, stream=video_stream)
image_format = 'gbrpf32le'
for frame in container.decode(video_stream):
if alphas is None:
for comp in frame.format.components:
if comp.is_alpha:
alphas = []
image_format = 'gbrapf32le'
break
if frame.pts < start_pts:
continue
if self.__duration and frame.pts >= end_pts:
break
img = frame.to_ndarray(format='rgb24') # shape: (H, W, 3)
img = torch.from_numpy(img) / 255.0 # shape: (H, W, 3)
frames.append(img)
images = torch.stack(frames) if len(frames) > 0 else torch.zeros(0, 3, 0, 0)
img = frame.to_ndarray(format=image_format) # shape: (H, W, 3) or (H, W, 4)
if alphas is None:
frames.append(torch.from_numpy(img))
else:
frames.append(torch.from_numpy(img[..., :-1]))
alphas.append(torch.from_numpy(img[..., -1:]))
images = torch.stack(frames) if len(frames) > 0 else torch.zeros(0, 0, 0, 3)
if alphas is not None:
alphas = torch.stack(alphas) if len(alphas) > 0 else torch.zeros(0, 0, 0, 1)
# Get frame rate
frame_rate = Fraction(video_stream.average_rate) if video_stream.average_rate else Fraction(1)
@@ -295,7 +310,7 @@
})
metadata = container.metadata
return VideoComponents(images=images, audio=audio, frame_rate=frame_rate, metadata=metadata)
return VideoComponents(images=images, alpha=alphas, audio=audio, frame_rate=frame_rate, metadata=metadata)
def get_components(self) -> VideoComponents:
if isinstance(self.__file, io.BytesIO):
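The loader now inspects decoded frames for an alpha component before choosing the pixel format. The same check in isolation, as a standalone sketch assuming PyAV is installed ("input.mov" is a hypothetical path):

import av  # PyAV, the library the loader above decodes with

with av.open("input.mov") as container:  # hypothetical input file
    stream = container.streams.video[0]
    for frame in container.decode(stream):
        # Mirrors the loop above: switch to the 4-plane float format only
        # when the source pixel format actually carries an alpha component.
        has_alpha = any(comp.is_alpha for comp in frame.format.components)
        pixel_format = "gbrapf32le" if has_alpha else "gbrpf32le"
        print(frame.to_ndarray(format=pixel_format).shape)
        break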

View File

@@ -3,7 +3,7 @@ from dataclasses import dataclass
from enum import Enum
from fractions import Fraction
from typing import Optional
from .._input import ImageInput, AudioInput
from .._input import ImageInput, AudioInput, MaskInput
class VideoCodec(str, Enum):
AUTO = "auto"
@@ -48,5 +48,4 @@
frame_rate: Fraction
audio: Optional[AudioInput] = None
metadata: Optional[dict] = None
alpha: Optional[MaskInput] = None
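With alpha optional on the dataclass, consumers can branch on its presence. A minimal usage sketch (join_alpha is a hypothetical helper; tensor shapes follow the loader above):

import torch

def join_alpha(components) -> torch.Tensor:
    """Return RGBA frames when alpha was decoded, else the RGB frames."""
    if components.alpha is None:
        return components.images  # (N, H, W, 3)
    # Alpha is stacked as (N, H, W, 1), so concatenation yields (N, H, W, 4).
    return torch.cat([components.images, components.alpha], dim=-1)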

View File

@@ -118,7 +118,7 @@ class Wan27ReferenceVideoInputField(BaseModel):
class Wan27ReferenceVideoParametersField(BaseModel):
resolution: str = Field(...)
ratio: str | None = Field(None)
duration: int = Field(5, ge=2, le=10)
duration: int = Field(5, ge=2, le=15)
watermark: bool = Field(False)
seed: int = Field(..., ge=0, le=2147483647)
@@ -157,7 +157,7 @@ class Wan27VideoEditInputField(BaseModel):
class Wan27VideoEditParametersField(BaseModel):
resolution: str = Field(...)
ratio: str | None = Field(None)
duration: int = Field(0)
duration: int | None = Field(0)
audio_setting: str = Field("auto")
watermark: bool = Field(False)
seed: int = Field(..., ge=0, le=2147483647)
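Pydantic enforces these ge/le bounds at model construction, so widening the cap from 10 to 15 seconds is a one-line change. A minimal sketch of the constraint, assuming pydantic v2:

from pydantic import BaseModel, Field, ValidationError

class Params(BaseModel):
    duration: int = Field(5, ge=2, le=15)

Params(duration=15)      # accepted under the new bound
try:
    Params(duration=20)  # rejected: exceeds le=15
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # 'less_than_equal'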

View File

@@ -33,9 +33,13 @@ class OpenAIVideoSora2(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="OpenAIVideoSora2",
display_name="OpenAI Sora - Video",
display_name="OpenAI Sora - Video (Deprecated)",
category="api node/video/Sora",
description="OpenAI video and audio generation.",
description=(
"OpenAI video and audio generation.\n\n"
"DEPRECATION NOTICE: OpenAI will stop serving the Sora v2 API in September 2026. "
"This node will be removed from ComfyUI at that time."
),
inputs=[
IO.Combo.Input(
"model",

View File

@@ -1646,6 +1646,557 @@ class Wan2ReferenceVideoApi(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseTextToVideoApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseTextToVideoApi",
display_name="HappyHorse Text to Video",
category="api node/video/Wan",
description="Generates a video based on a text prompt using the HappyHorse model.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-t2v",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt describing the elements and visual features. "
"Supports English and Chinese.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Combo.Input(
"ratio",
options=["16:9", "9:16", "1:1", "4:3", "3:4"],
),
IO.Int.Input(
"duration",
default=5,
min=3,
max=15,
step=1,
display_mode=IO.NumberDisplay.number,
),
],
),
],
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$dur := $lookup(widgets, "model.duration");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps * $dur }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
seed: int,
watermark: bool,
):
validate_string(model["prompt"], strip_whitespace=False, min_length=1)
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27Text2VideoTaskCreationRequest(
model=model["model"],
input=Text2VideoInputField(
prompt=model["prompt"],
negative_prompt=None,
),
parameters=Wan27Text2VideoParametersField(
resolution=model["resolution"],
ratio=model["ratio"],
duration=model["duration"],
seed=seed,
watermark=watermark,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseImageToVideoApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseImageToVideoApi",
display_name="HappyHorse Image to Video",
category="api node/video/Wan",
description="Generate a video from a first-frame image using the HappyHorse model.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-i2v",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt describing the elements and visual features. "
"Supports English and Chinese.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Int.Input(
"duration",
default=5,
min=3,
max=15,
step=1,
display_mode=IO.NumberDisplay.number,
),
],
),
],
),
IO.Image.Input(
"first_frame",
tooltip="First frame image. The output aspect ratio is derived from this image.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$dur := $lookup(widgets, "model.duration");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps * $dur }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
first_frame: Input.Image,
seed: int,
watermark: bool,
):
media = [
Wan27MediaItem(
type="first_frame",
url=await upload_image_to_comfyapi(cls, image=first_frame),
)
]
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27ImageToVideoTaskCreationRequest(
model=model["model"],
input=Wan27ImageToVideoInputField(
prompt=model["prompt"] or None,
negative_prompt=None,
media=media,
),
parameters=Wan27ImageToVideoParametersField(
resolution=model["resolution"],
duration=model["duration"],
seed=seed,
watermark=watermark,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseVideoEditApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseVideoEditApi",
display_name="HappyHorse Video Edit",
category="api node/video/Wan",
description="Edit a video using text instructions or reference images with the HappyHorse model. "
"Output duration is 3-15s and matches the input video; inputs longer than 15s are truncated.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-video-edit",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Editing instructions or style transfer requirements.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Combo.Input(
"ratio",
options=["16:9", "9:16", "1:1", "4:3", "3:4"],
tooltip="Aspect ratio. If not changed, approximates the input video ratio.",
),
IO.Autogrow.Input(
"reference_images",
template=IO.Autogrow.TemplateNames(
IO.Image.Input("reference_image"),
names=[
"image1",
"image2",
"image3",
"image4",
"image5",
],
min=0,
),
),
],
),
],
),
IO.Video.Input(
"video",
tooltip="The video to edit.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps, "format": { "suffix": "/second" } }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
video: Input.Video,
seed: int,
watermark: bool,
):
validate_string(model["prompt"], strip_whitespace=False, min_length=1)
validate_video_duration(video, min_duration=3, max_duration=60)
media = [Wan27MediaItem(type="video", url=await upload_video_to_comfyapi(cls, video))]
reference_images = model.get("reference_images", {})
for key in reference_images:
media.append(
Wan27MediaItem(
type="reference_image", url=await upload_image_to_comfyapi(cls, image=reference_images[key])
)
)
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27VideoEditTaskCreationRequest(
model=model["model"],
input=Wan27VideoEditInputField(prompt=model["prompt"], media=media),
parameters=Wan27VideoEditParametersField(
resolution=model["resolution"],
ratio=model["ratio"],
duration=None,
watermark=watermark,
seed=seed,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseReferenceVideoApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseReferenceVideoApi",
display_name="HappyHorse Reference to Video",
category="api node/video/Wan",
description="Generate a video featuring a person or object from reference materials with the HappyHorse "
"model. Supports single-character performances and multi-character interactions.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-r2v",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt describing the video. Use identifiers such as 'character1' and "
"'character2' to refer to the reference characters.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Combo.Input(
"ratio",
options=["16:9", "9:16", "1:1", "4:3", "3:4"],
),
IO.Int.Input(
"duration",
default=5,
min=3,
max=15,
step=1,
display_mode=IO.NumberDisplay.number,
),
IO.Autogrow.Input(
"reference_images",
template=IO.Autogrow.TemplateNames(
IO.Image.Input("reference_image"),
names=[
"image1",
"image2",
"image3",
"image4",
"image5",
"image6",
"image7",
"image8",
"image9",
],
min=1,
),
),
],
),
],
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$dur := $lookup(widgets, "model.duration");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps * $dur }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
seed: int,
watermark: bool,
):
validate_string(model["prompt"], strip_whitespace=False, min_length=1)
media = []
reference_images = model.get("reference_images", {})
for key in reference_images:
media.append(
Wan27MediaItem(
type="reference_image",
url=await upload_image_to_comfyapi(cls, image=reference_images[key]),
)
)
if not media:
raise ValueError("At least one reference image must be provided.")
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27ReferenceVideoTaskCreationRequest(
model=model["model"],
input=Wan27ReferenceVideoInputField(
prompt=model["prompt"],
negative_prompt=None,
media=media,
),
parameters=Wan27ReferenceVideoParametersField(
resolution=model["resolution"],
ratio=model["ratio"],
duration=model["duration"],
watermark=watermark,
seed=seed,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class WanApiExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@@ -1660,6 +2211,10 @@ class WanApiExtension(ComfyExtension):
Wan2VideoContinuationApi,
Wan2VideoEditApi,
Wan2ReferenceVideoApi,
HappyHorseTextToVideoApi,
HappyHorseImageToVideoApi,
HappyHorseVideoEditApi,
HappyHorseReferenceVideoApi,
]
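All four HappyHorse nodes share the same create-then-poll flow: a POST to the video-synthesis endpoint, then polling the task until it resolves. A simplified sketch of that flow; create_task, get_task, and the status strings are illustrative stand-ins, not the actual sync_op/poll_op API:

import asyncio

async def create_then_poll(create_task, get_task, poll_interval: float = 7.0) -> str:
    """Submit a generation task, then poll until it leaves the in-progress states."""
    created = await create_task()  # POST .../video-generation/video-synthesis
    if created.get("output") is None:
        raise RuntimeError(f"{created.get('code')} - {created.get('message')}")
    task_id = created["output"]["task_id"]
    while True:
        status = await get_task(task_id)  # GET .../tasks/{task_id}
        state = status["output"]["task_status"]
        if state not in ("PENDING", "RUNNING"):  # assumed status values
            return status["output"]["video_url"]
        await asyncio.sleep(poll_interval)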

View File

@@ -185,36 +185,6 @@ class SplitImageWithAlpha(io.ComfyNode):
return io.NodeOutput(torch.stack(out_images), 1.0 - torch.stack(out_alphas))
class SplitImageChannels(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="SplitImageChannels",
search_aliases=["convert to grayscale", "extract channels", "grayscale", "split channels", "RGB channels"],
display_name="Split Image Channels",
category="mask/compositing",
inputs=[
io.Image.Input("image"),
],
outputs=[
io.Image.Output(display_name="red"),
io.Image.Output(display_name="green"),
io.Image.Output(display_name="blue"),
io.Mask.Output(display_name="alpha")
],
)
@classmethod
def execute(cls, image: torch.Tensor) -> io.NodeOutput:
images = [i[:,:,:3] for i in image]
stacked = torch.stack(images)
reds = stacked[:, :, :, 0:1].repeat(1, 1, 1, 3)
greens = stacked[:, :, :, 1:2].repeat(1, 1, 1, 3)
blues = stacked[:, :, :, 2:3].repeat(1, 1, 1, 3)
alphas = [i[:,:,3] if i.shape[2] > 3 else torch.ones_like(i[:,:,0]) for i in image]
return io.NodeOutput(reds, greens, blues, 1.0 - torch.stack(alphas))
class JoinImageWithAlpha(io.ComfyNode):
@classmethod
def define_schema(cls):
@@ -247,7 +217,6 @@ class CompositingExtension(ComfyExtension):
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
PorterDuffImageComposite,
SplitImageChannels,
SplitImageWithAlpha,
JoinImageWithAlpha,
]
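For workflows that relied on the removed SplitImageChannels node, the deleted execute method above is only a few tensor ops. A torch sketch reproducing its behavior, including the inverted alpha it returned:

import torch

def split_image_channels(image: torch.Tensor):
    """image: (N, H, W, C) in [0, 1] with C of 3 or 4, as in the deleted node."""
    rgb = image[..., :3]
    red = rgb[..., 0:1].repeat(1, 1, 1, 3)
    green = rgb[..., 1:2].repeat(1, 1, 1, 3)
    blue = rgb[..., 2:3].repeat(1, 1, 1, 3)
    if image.shape[-1] > 3:
        alpha = image[..., 3]
    else:
        alpha = torch.ones_like(image[..., 0])  # opaque when no alpha channel
    return red, green, blue, 1.0 - alpha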

View File

@@ -54,7 +54,7 @@ class EmptySD3LatentImage(io.ComfyNode):
@classmethod
def execute(cls, width, height, batch_size=1) -> io.NodeOutput:
latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=comfy.model_management.intermediate_device())
latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=comfy.model_management.intermediate_device(), dtype=comfy.model_management.intermediate_dtype())
return io.NodeOutput({"samples": latent, "downscale_ratio_spacial": 8})
generate = execute # TODO: remove
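The only change is allocating the empty latent in the configured intermediate dtype rather than the torch default, which avoids a later cast. The shape logic in isolation (a generic torch sketch; the real node takes both device and dtype from comfy.model_management):

import torch

# SD3-style latent: 16 channels, spatial dims downscaled by 8.
width, height, batch_size = 1024, 1024, 1
latent = torch.zeros(
    [batch_size, 16, height // 8, width // 8],
    dtype=torch.float32,  # stand-in for the configured intermediate dtype
)
print(latent.shape)  # torch.Size([1, 16, 128, 128])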

View File

@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.19.3"
__version__ = "0.20.0"

View File

@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.19.3"
version = "0.20.0"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

View File

@@ -1,5 +1,5 @@
comfyui-frontend-package==1.42.14
comfyui-workflow-templates==0.9.62
comfyui-frontend-package==1.42.15
comfyui-workflow-templates==0.9.63
comfyui-embedded-docs==0.4.4
torch
torchsde