Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-01-25 22:36:10 +08:00

Compare commits
2 Commits: feat/advan ... si/sync-te

| Author | SHA1 | Date |
|---|---|---|
|  | f2c9889fcb |  |
|  | 7b8cb55c39 |  |
```diff
@@ -66,7 +66,6 @@ class ClipVisionModel():
         outputs = Output()
         outputs["last_hidden_state"] = out[0].to(comfy.model_management.intermediate_device())
         outputs["image_embeds"] = out[2].to(comfy.model_management.intermediate_device())
-        outputs["image_sizes"] = [pixel_values.shape[1:]] * pixel_values.shape[0]
         if self.return_all_hidden_states:
             all_hs = out[1].to(comfy.model_management.intermediate_device())
             outputs["penultimate_hidden_states"] = all_hs[:, -2]
```
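The removed `image_sizes` entry is plain Python built from the batch tensor's shape: the per-image size is repeated once per batch element. A minimal sketch of what that expression evaluates to (the tensor shape here is illustrative, not taken from the diff):

```python
import torch

# Hypothetical preprocessed batch: 2 images, 3 channels, 224x224 (illustrative values).
pixel_values = torch.zeros(2, 3, 224, 224)

# Same expression as the removed line: one per-image size, replicated across the batch.
image_sizes = [pixel_values.shape[1:]] * pixel_values.shape[0]
print(image_sizes)  # [torch.Size([3, 224, 224]), torch.Size([3, 224, 224])]
```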
```diff
@@ -763,7 +763,7 @@ class Flux2(Flux):

     def __init__(self, unet_config):
         super().__init__(unet_config)
-        self.memory_usage_factor = self.memory_usage_factor * (2.0 * 2.0) * (unet_config['hidden_size'] / 2604)
+        self.memory_usage_factor = self.memory_usage_factor * (2.0 * 2.0) * 2.36

     def get_model(self, state_dict, prefix="", device=None):
         out = model_base.Flux2(self, device=device)
```
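The two replaced lines differ only in the final multiplier: one side scales the base factor by `unet_config['hidden_size'] / 2604`, the other by a fixed `2.36`. A quick arithmetic sketch of how the resulting estimates compare (the base factor and hidden size are made-up values, not taken from the diff):

```python
base_factor = 1.0    # illustrative starting memory_usage_factor
hidden_size = 3072   # illustrative hidden_size value

per_width = base_factor * (2.0 * 2.0) * (hidden_size / 2604)  # ~4.72 for hidden_size=3072
constant = base_factor * (2.0 * 2.0) * 2.36                   # 9.44 regardless of hidden_size
print(per_width, constant)
```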
```diff
@@ -153,7 +153,7 @@ class Input(_IO_V3):
     '''
     Base class for a V3 Input.
     '''
-    def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
+    def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None, raw_link: bool=None):
         super().__init__()
         self.id = id
         self.display_name = display_name
@@ -162,7 +162,6 @@ class Input(_IO_V3):
         self.lazy = lazy
         self.extra_dict = extra_dict if extra_dict is not None else {}
         self.rawLink = raw_link
-        self.advanced = advanced

     def as_dict(self):
         return prune_dict({
@@ -171,7 +170,6 @@ class Input(_IO_V3):
             "tooltip": self.tooltip,
             "lazy": self.lazy,
             "rawLink": self.rawLink,
-            "advanced": self.advanced,
         }) | prune_dict(self.extra_dict)

     def get_io_type(self):
```
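In `as_dict`, the `"advanced"` entry removed above passes through `prune_dict` together with the other attributes before being merged (`|`) with the extra dict. Assuming `prune_dict` simply drops `None`-valued entries (an assumption; its definition is not part of this diff), the flag would only appear in the serialized input when it was explicitly set:

```python
def prune_dict(d: dict) -> dict:
    # Assumed behaviour: keep only entries whose value is not None.
    return {k: v for k, v in d.items() if v is not None}

serialized = prune_dict({
    "tooltip": None,
    "lazy": None,
    "rawLink": None,
    "advanced": True,   # survives pruning only because it is not None
}) | prune_dict({})     # empty extra_dict
print(serialized)       # {'advanced': True}
```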
```diff
@@ -186,8 +184,8 @@ class WidgetInput(Input):
     '''
     def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
                  default: Any=None,
-                 socketless: bool=None, widget_type: str=None, force_input: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
-        super().__init__(id, display_name, optional, tooltip, lazy, extra_dict, raw_link, advanced)
+                 socketless: bool=None, widget_type: str=None, force_input: bool=None, extra_dict=None, raw_link: bool=None):
+        super().__init__(id, display_name, optional, tooltip, lazy, extra_dict, raw_link)
         self.default = default
         self.socketless = socketless
         self.widget_type = widget_type
@@ -244,8 +242,8 @@ class Boolean(ComfyTypeIO):
         '''Boolean input.'''
         def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
                      default: bool=None, label_on: str=None, label_off: str=None,
-                     socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
-            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link, advanced)
+                     socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None):
+            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link)
             self.label_on = label_on
             self.label_off = label_off
             self.default: bool
@@ -264,8 +262,8 @@ class Int(ComfyTypeIO):
         '''Integer input.'''
         def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
                      default: int=None, min: int=None, max: int=None, step: int=None, control_after_generate: bool=None,
-                     display_mode: NumberDisplay=None, socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
-            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link, advanced)
+                     display_mode: NumberDisplay=None, socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None):
+            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link)
             self.min = min
             self.max = max
             self.step = step
@@ -290,8 +288,8 @@ class Float(ComfyTypeIO):
         '''Float input.'''
        def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
                     default: float=None, min: float=None, max: float=None, step: float=None, round: float=None,
-                    display_mode: NumberDisplay=None, socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
-            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link, advanced)
+                    display_mode: NumberDisplay=None, socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None):
+            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link)
             self.min = min
             self.max = max
             self.step = step
@@ -316,8 +314,8 @@ class String(ComfyTypeIO):
         '''String input.'''
         def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
                      multiline=False, placeholder: str=None, default: str=None, dynamic_prompts: bool=None,
-                     socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
-            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link, advanced)
+                     socketless: bool=None, force_input: bool=None, extra_dict=None, raw_link: bool=None):
+            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input, extra_dict, raw_link)
             self.multiline = multiline
             self.placeholder = placeholder
             self.dynamic_prompts = dynamic_prompts
```
```diff
@@ -352,13 +350,12 @@ class Combo(ComfyTypeIO):
             socketless: bool=None,
             extra_dict=None,
             raw_link: bool=None,
-            advanced: bool=None,
         ):
             if isinstance(options, type) and issubclass(options, Enum):
                 options = [v.value for v in options]
             if isinstance(default, Enum):
                 default = default.value
-            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, None, extra_dict, raw_link, advanced)
+            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, None, extra_dict, raw_link)
             self.multiselect = False
             self.options = options
             self.control_after_generate = control_after_generate
@@ -390,8 +387,8 @@ class MultiCombo(ComfyTypeI):
     class Input(Combo.Input):
         def __init__(self, id: str, options: list[str], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
                      default: list[str]=None, placeholder: str=None, chip: bool=None, control_after_generate: bool=None,
-                     socketless: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
-            super().__init__(id, options, display_name, optional, tooltip, lazy, default, control_after_generate, socketless=socketless, extra_dict=extra_dict, raw_link=raw_link, advanced=advanced)
+                     socketless: bool=None, extra_dict=None, raw_link: bool=None):
+            super().__init__(id, options, display_name, optional, tooltip, lazy, default, control_after_generate, socketless=socketless, extra_dict=extra_dict, raw_link=raw_link)
             self.multiselect = True
             self.placeholder = placeholder
             self.chip = chip
@@ -424,9 +421,9 @@ class Webcam(ComfyTypeIO):
         Type = str
         def __init__(
             self, id: str, display_name: str=None, optional=False,
-            tooltip: str=None, lazy: bool=None, default: str=None, socketless: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None
+            tooltip: str=None, lazy: bool=None, default: str=None, socketless: bool=None, extra_dict=None, raw_link: bool=None
         ):
-            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, None, extra_dict, raw_link, advanced)
+            super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, None, extra_dict, raw_link)


 @comfytype(io_type="MASK")
```
```diff
@@ -779,7 +776,7 @@ class MultiType:
         '''
         Input that permits more than one input type; if `id` is an instance of `ComfyType.Input`, then that input will be used to create a widget (if applicable) with overridden values.
         '''
-        def __init__(self, id: str | Input, types: list[type[_ComfyType] | _ComfyType], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
+        def __init__(self, id: str | Input, types: list[type[_ComfyType] | _ComfyType], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None, raw_link: bool=None):
             # if id is an Input, then use that Input with overridden values
             self.input_override = None
             if isinstance(id, Input):
@@ -792,7 +789,7 @@ class MultiType:
             # if is a widget input, make sure widget_type is set appropriately
             if isinstance(self.input_override, WidgetInput):
                 self.input_override.widget_type = self.input_override.get_io_type()
-            super().__init__(id, display_name, optional, tooltip, lazy, extra_dict, raw_link, advanced)
+            super().__init__(id, display_name, optional, tooltip, lazy, extra_dict, raw_link)
             self._io_types = types

         @property
@@ -846,8 +843,8 @@ class MatchType(ComfyTypeIO):

     class Input(Input):
         def __init__(self, id: str, template: MatchType.Template,
-                     display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None, raw_link: bool=None, advanced: bool=None):
-            super().__init__(id, display_name, optional, tooltip, lazy, extra_dict, raw_link, advanced)
+                     display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, extra_dict=None, raw_link: bool=None):
+            super().__init__(id, display_name, optional, tooltip, lazy, extra_dict, raw_link)
             self.template = template

         def as_dict(self):
@@ -1122,8 +1119,8 @@ class ImageCompare(ComfyTypeI):

     class Input(WidgetInput):
         def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None,
-                     socketless: bool=True, advanced: bool=None):
-            super().__init__(id, display_name, optional, tooltip, None, None, socketless, None, None, None, None, advanced)
+                     socketless: bool=True):
+            super().__init__(id, display_name, optional, tooltip, None, None, socketless)

         def as_dict(self):
             return super().as_dict()
```
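Every hunk above makes the same mechanical change: on one side of the compare the optional `advanced` flag is threaded from each concrete input class down to the base `Input`/`WidgetInput` constructors, on the other it is absent. A minimal sketch of what the flag-bearing signatures let a node author write (the input ids, values, and the `IO` import are hypothetical, assuming the usual `comfy_api.latest` module layout):

```python
from comfy_api.latest import io as IO  # assumed import path; adjust to the actual module

# Hypothetical inputs using the signatures on the `-` side of the diff.
inputs = [
    IO.Int.Input("steps", default=20, min=1, max=100),
    IO.Float.Input("noise_offset", default=0.0, optional=True, advanced=True),
    IO.Boolean.Input("debug_logging", default=False, optional=True, advanced=True),
]

# With the flag present, as_dict() emits "advanced": True for the last two inputs,
# which a frontend could presumably use to tuck them behind an "advanced" section.
for inp in inputs:
    print(inp.as_dict())
```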
```diff
@@ -65,13 +65,11 @@ class TaskImageContent(BaseModel):
 class Text2VideoTaskCreationRequest(BaseModel):
     model: str = Field(...)
     content: list[TaskTextContent] = Field(..., min_length=1)
-    generate_audio: bool | None = Field(...)


 class Image2VideoTaskCreationRequest(BaseModel):
     model: str = Field(...)
     content: list[TaskTextContent | TaskImageContent] = Field(..., min_length=2)
-    generate_audio: bool | None = Field(...)


 class TaskCreationResponse(BaseModel):
@@ -143,9 +141,4 @@ VIDEO_TASKS_EXECUTION_TIME = {
         "720p": 65,
         "1080p": 100,
     },
-    "seedance-1-5-pro-251215": {
-        "480p": 80,
-        "720p": 100,
-        "1080p": 150,
-    },
 }
```
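The per-model, per-resolution seconds in `VIDEO_TASKS_EXECUTION_TIME` feed the `estimated_duration` expression that appears later in this diff, `max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0)))`, i.e. the tabulated 10-second baseline scaled by the requested clip length. A worked example using the entry removed above (the 5-second duration is illustrative):

```python
import math

VIDEO_TASKS_EXECUTION_TIME = {
    # Entry removed in this diff, copied verbatim from the hunk above.
    "seedance-1-5-pro-251215": {"480p": 80, "720p": 100, "1080p": 150},
}

model, resolution, duration = "seedance-1-5-pro-251215", "720p", 5
estimated = max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0)))
print(estimated)  # ceil(100 * 0.5) -> 50 seconds
```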
```diff
@@ -477,12 +477,7 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
             inputs=[
                 IO.Combo.Input(
                     "model",
-                    options=[
-                        "seedance-1-5-pro-251215",
-                        "seedance-1-0-pro-250528",
-                        "seedance-1-0-lite-t2v-250428",
-                        "seedance-1-0-pro-fast-251015",
-                    ],
+                    options=["seedance-1-0-pro-250528", "seedance-1-0-lite-t2v-250428", "seedance-1-0-pro-fast-251015"],
                     default="seedance-1-0-pro-fast-251015",
                 ),
                 IO.String.Input(
@@ -533,12 +528,6 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
                     tooltip='Whether to add an "AI generated" watermark to the video.',
                     optional=True,
                 ),
-                IO.Boolean.Input(
-                    "generate_audio",
-                    default=False,
-                    tooltip="This parameter is ignored for any model except seedance-1-5-pro.",
-                    optional=True,
-                ),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -563,10 +552,7 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
-        generate_audio: bool = False,
     ) -> IO.NodeOutput:
-        if model == "seedance-1-5-pro-251215" and duration < 4:
-            raise ValueError("Minimum supported duration for Seedance 1.5 Pro is 4 seconds.")
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])

@@ -581,11 +567,7 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
         )
         return await process_video_task(
             cls,
-            payload=Text2VideoTaskCreationRequest(
-                model=model,
-                content=[TaskTextContent(text=prompt)],
-                generate_audio=generate_audio if model == "seedance-1-5-pro-251215" else None,
-            ),
+            payload=Text2VideoTaskCreationRequest(model=model, content=[TaskTextContent(text=prompt)]),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )

```
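On the `-` side the request carries `generate_audio` only when the 1.5 Pro model is selected; every other model passes `None`, which presumably keeps the field out of the serialized payload (exclude-None handling is not shown in this diff). The gating pattern in isolation:

```python
def gated_generate_audio(model: str, generate_audio: bool) -> bool | None:
    # Mirrors the inline conditional from the removed payload construction:
    # forward the flag for Seedance 1.5 Pro, leave it unset (None) otherwise.
    return generate_audio if model == "seedance-1-5-pro-251215" else None

print(gated_generate_audio("seedance-1-5-pro-251215", True))  # True
print(gated_generate_audio("seedance-1-0-pro-250528", True))  # None
```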
```diff
@@ -602,12 +584,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
             inputs=[
                 IO.Combo.Input(
                     "model",
-                    options=[
-                        "seedance-1-5-pro-251215",
-                        "seedance-1-0-pro-250528",
-                        "seedance-1-0-lite-i2v-250428",
-                        "seedance-1-0-pro-fast-251015",
-                    ],
+                    options=["seedance-1-0-pro-250528", "seedance-1-0-lite-t2v-250428", "seedance-1-0-pro-fast-251015"],
                     default="seedance-1-0-pro-fast-251015",
                 ),
                 IO.String.Input(
@@ -662,12 +639,6 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
                     tooltip='Whether to add an "AI generated" watermark to the video.',
                     optional=True,
                 ),
-                IO.Boolean.Input(
-                    "generate_audio",
-                    default=False,
-                    tooltip="This parameter is ignored for any model except seedance-1-5-pro.",
-                    optional=True,
-                ),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -693,10 +664,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
-        generate_audio: bool = False,
     ) -> IO.NodeOutput:
-        if model == "seedance-1-5-pro-251215" and duration < 4:
-            raise ValueError("Minimum supported duration for Seedance 1.5 Pro is 4 seconds.")
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])
         validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000)
@@ -718,7 +686,6 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
             payload=Image2VideoTaskCreationRequest(
                 model=model,
                 content=[TaskTextContent(text=prompt), TaskImageContent(image_url=TaskImageContentUrl(url=image_url))],
-                generate_audio=generate_audio if model == "seedance-1-5-pro-251215" else None,
             ),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
```
```diff
@@ -736,7 +703,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
             inputs=[
                 IO.Combo.Input(
                     "model",
-                    options=["seedance-1-5-pro-251215", "seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"],
+                    options=["seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"],
                     default="seedance-1-0-lite-i2v-250428",
                 ),
                 IO.String.Input(
@@ -795,12 +762,6 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
                     tooltip='Whether to add an "AI generated" watermark to the video.',
                     optional=True,
                 ),
-                IO.Boolean.Input(
-                    "generate_audio",
-                    default=False,
-                    tooltip="This parameter is ignored for any model except seedance-1-5-pro.",
-                    optional=True,
-                ),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -827,10 +788,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
-        generate_audio: bool = False,
     ) -> IO.NodeOutput:
-        if model == "seedance-1-5-pro-251215" and duration < 4:
-            raise ValueError("Minimum supported duration for Seedance 1.5 Pro is 4 seconds.")
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])
         for i in (first_frame, last_frame):
@@ -863,7 +821,6 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
                     TaskImageContent(image_url=TaskImageContentUrl(url=str(download_urls[0])), role="first_frame"),
                     TaskImageContent(image_url=TaskImageContentUrl(url=str(download_urls[1])), role="last_frame"),
                 ],
-                generate_audio=generate_audio if model == "seedance-1-5-pro-251215" else None,
             ),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
```
```diff
@@ -939,41 +896,7 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
-            price_badge=IO.PriceBadge(
-                depends_on=IO.PriceBadgeDepends(widgets=["model", "duration", "resolution"]),
-                expr="""
-                (
-                    $priceByModel := {
-                        "seedance-1-0-pro": {
-                            "480p":[0.23,0.24],
-                            "720p":[0.51,0.56]
-                        },
-                        "seedance-1-0-lite": {
-                            "480p":[0.17,0.18],
-                            "720p":[0.37,0.41]
-                        }
-                    };
-                    $model := widgets.model;
-                    $modelKey :=
-                        $contains($model, "seedance-1-0-pro") ? "seedance-1-0-pro" :
-                        "seedance-1-0-lite";
-                    $resolution := widgets.resolution;
-                    $resKey :=
-                        $contains($resolution, "720") ? "720p" :
-                        "480p";
-                    $modelPrices := $lookup($priceByModel, $modelKey);
-                    $baseRange := $lookup($modelPrices, $resKey);
-                    $min10s := $baseRange[0];
-                    $max10s := $baseRange[1];
-                    $scale := widgets.duration / 10;
-                    $minCost := $min10s * $scale;
-                    $maxCost := $max10s * $scale;
-                    ($minCost = $maxCost)
-                        ? {"type":"usd","usd": $minCost}
-                        : {"type":"range_usd","min_usd": $minCost, "max_usd": $maxCost}
-                )
-                """,
-            ),
+            price_badge=PRICE_BADGE_VIDEO,
         )

     @classmethod
```
```diff
@@ -1044,15 +967,10 @@ def raise_if_text_params(prompt: str, text_params: list[str]) -> None:


 PRICE_BADGE_VIDEO = IO.PriceBadge(
-    depends_on=IO.PriceBadgeDepends(widgets=["model", "duration", "resolution", "generate_audio"]),
+    depends_on=IO.PriceBadgeDepends(widgets=["model", "duration", "resolution"]),
     expr="""
     (
         $priceByModel := {
-            "seedance-1-5-pro": {
-                "480p":[0.12,0.12],
-                "720p":[0.26,0.26],
-                "1080p":[0.58,0.59]
-            },
             "seedance-1-0-pro": {
                 "480p":[0.23,0.24],
                 "720p":[0.51,0.56],
@@ -1071,7 +989,6 @@ PRICE_BADGE_VIDEO = IO.PriceBadge(
         };
         $model := widgets.model;
         $modelKey :=
-            $contains($model, "seedance-1-5-pro") ? "seedance-1-5-pro" :
             $contains($model, "seedance-1-0-pro-fast") ? "seedance-1-0-pro-fast" :
             $contains($model, "seedance-1-0-pro") ? "seedance-1-0-pro" :
             "seedance-1-0-lite";
@@ -1085,12 +1002,11 @@ PRICE_BADGE_VIDEO = IO.PriceBadge(
         $min10s := $baseRange[0];
         $max10s := $baseRange[1];
         $scale := widgets.duration / 10;
-        $audioMultiplier := ($modelKey = "seedance-1-5-pro" and widgets.generate_audio) ? 2 : 1;
-        $minCost := $min10s * $scale * $audioMultiplier;
-        $maxCost := $max10s * $scale * $audioMultiplier;
+        $minCost := $min10s * $scale;
+        $maxCost := $max10s * $scale;
         ($minCost = $maxCost)
-            ? {"type":"usd","usd": $minCost, "format": { "approximate": true }}
-            : {"type":"range_usd","min_usd": $minCost, "max_usd": $maxCost, "format": { "approximate": true }}
+            ? {"type":"usd","usd": $minCost}
+            : {"type":"range_usd","min_usd": $minCost, "max_usd": $maxCost}
     )
     """,
 )
```
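The `expr` blocks above look like JSONata-style expressions evaluated against the node's widget values to produce the price badge. A rough Python re-reading of the `-` side of `PRICE_BADGE_VIDEO` for intuition (the resolution mapping and any model keys beyond the two shown are assumptions, since those parts of the expression are not fully visible in this compare):

```python
PRICE_PER_10S = {
    # Copied from the $priceByModel table in the hunks above (subset only).
    "seedance-1-5-pro": {"480p": (0.12, 0.12), "720p": (0.26, 0.26), "1080p": (0.58, 0.59)},
    "seedance-1-0-pro": {"480p": (0.23, 0.24), "720p": (0.51, 0.56)},
}

def price_badge(model: str, resolution: str, duration: float, generate_audio: bool) -> dict:
    model_key = "seedance-1-5-pro" if "seedance-1-5-pro" in model else "seedance-1-0-pro"
    res_key = "1080p" if "1080" in resolution else "720p" if "720" in resolution else "480p"
    low, high = PRICE_PER_10S[model_key][res_key]
    scale = duration / 10
    audio = 2 if model_key == "seedance-1-5-pro" and generate_audio else 1
    min_cost, max_cost = low * scale * audio, high * scale * audio
    if min_cost == max_cost:
        return {"type": "usd", "usd": min_cost, "format": {"approximate": True}}
    return {"type": "range_usd", "min_usd": min_cost, "max_usd": max_cost, "format": {"approximate": True}}

# 5 s of 1080p on Seedance 1.5 Pro with audio: 0.58*0.5*2 = 0.58 up to 0.59*0.5*2 = 0.59
print(price_badge("seedance-1-5-pro-251215", "1080p", 5, True))
```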
```diff
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.36.14
-comfyui-workflow-templates==0.8.11
+comfyui-workflow-templates==0.8.4
 comfyui-embedded-docs==0.4.0
 torch
 torchsde
```
```diff
@@ -686,10 +686,7 @@ class PromptServer():

         @routes.get("/object_info")
         async def get_object_info(request):
-            try:
-                seed_assets(["models"])
-            except Exception as e:
-                logging.error(f"Failed to seed assets: {e}")
+            seed_assets(["models"])
             with folder_paths.cache_helper:
                 out = {}
                 for x in nodes.NODE_CLASS_MAPPINGS:
```