Compare commits

4 Commits

Author SHA1 Message Date
6667fb1630 Rename essentials_category to main_category
The field is used for top-level tabs in the node library, not just
the Essentials tab. Rename to main_category to support Partner Nodes,
Basic, and other main categories.

Amp-Thread-ID: https://ampcode.com/threads/T-019c2b69-81c1-71c3-8096-450a39e20910
2026-02-05 15:46:04 -08:00
a1a48ffe28 Add ESSENTIALS_CATEGORY to more nodes
- SaveGLB (Basic)
- GetVideoComponents (Video Tools)
- TencentTextToModelNode, TencentImageToModelNode (3D)
- RecraftRemoveBackgroundNode (Image Tools)
- KlingLipSyncAudioToVideoNode (Video Generation)
- OpenAIChatNode (Text Generation)
- StabilityTextToAudio (Audio)

Amp-Thread-ID: https://ampcode.com/threads/T-019c2b69-81c1-71c3-8096-450a39e20910
2026-02-04 18:35:36 -08:00
4b34511db9 feat: add ESSENTIALS_CATEGORY to core nodes
Marked nodes:
- Basic: LoadImage, SaveImage, LoadVideo, SaveVideo, Load3D, CLIPTextEncode
- Image Tools: ImageScale, ImageInvert, ImageBatch, ImageCrop, ImageRotate, ImageBlur
- Image Tools/Preprocessing: Canny
- Image Generation: LoraLoader
- Audio: LoadAudio, SaveAudio

Amp-Thread-ID: https://ampcode.com/threads/T-019c2b25-cd90-7218-9071-03cb46b351b3
2026-02-04 16:54:43 -08:00
078008b734 feat: add essentials_category field to node schema
Amp-Thread-ID: https://ampcode.com/threads/T-019c2b25-cd90-7218-9071-03cb46b351b3
2026-02-04 16:37:12 -08:00
26 changed files with 56 additions and 298 deletions

View File

@@ -1,38 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import web
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from comfy_api.latest._node_replace import NodeReplace
-
-
-class NodeReplaceManager:
-    """Manages node replacement registrations."""
-
-    def __init__(self):
-        self._replacements: dict[str, list[NodeReplace]] = {}
-
-    def register(self, node_replace: NodeReplace):
-        """Register a node replacement mapping."""
-        self._replacements.setdefault(node_replace.old_node_id, []).append(node_replace)
-
-    def get_replacement(self, old_node_id: str) -> list[NodeReplace] | None:
-        """Get replacements for an old node ID."""
-        return self._replacements.get(old_node_id)
-
-    def has_replacement(self, old_node_id: str) -> bool:
-        """Check if a replacement exists for an old node ID."""
-        return old_node_id in self._replacements
-
-    def as_dict(self):
-        """Serialize all replacements to dict."""
-        return {
-            k: [v.as_dict() for v in v_list]
-            for k, v_list in self._replacements.items()
-        }
-
-    def add_routes(self, routes):
-        @routes.get("/node_replacements")
-        async def get_node_replacements(request):
-            return web.json_response(self.as_dict())
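
For reference, the removed GET /node_replacements endpoint served the manager's as_dict() output, keyed by old node ID. A sketch of the dict it would have serialized, assuming only the SDV → SVD typo-fix replacement (registered further down in this diff) had been added:

{
    "SDV_img2vid_Conditioning": [
        {
            "new_node_id": "SVD_img2vid_Conditioning",
            "old_node_id": "SDV_img2vid_Conditioning",
            "old_widget_ids": None,
            "input_mapping": None,
            "output_mapping": None,
        }
    ]
}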

View File

@@ -183,7 +183,7 @@ class AceStepAttention(nn.Module):
         else:
             attn_bias = window_bias

-        attn_output = optimized_attention(query_states, key_states, value_states, self.num_heads, attn_bias, skip_reshape=True, low_precision_attention=False)
+        attn_output = optimized_attention(query_states, key_states, value_states, self.num_heads, attn_bias, skip_reshape=True)
         attn_output = self.o_proj(attn_output)
         return attn_output
@@ -1035,7 +1035,8 @@ class AceStepConditionGenerationModel(nn.Module):
                 audio_codes = torch.nn.functional.pad(audio_codes, (0, math.ceil(src_latents.shape[1] / 5) - audio_codes.shape[1]), "constant", 35847)
                 lm_hints_5Hz = self.tokenizer.quantizer.get_output_from_indices(audio_codes, dtype=text_hidden_states.dtype)
             else:
-                lm_hints_5Hz, indices = self.tokenizer.tokenize(refer_audio_acoustic_hidden_states_packed)
+                assert False
+                # TODO ?

             lm_hints = self.detokenizer(lm_hints_5Hz)

View File

@@ -524,9 +524,6 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha
 @wrap_attn
 def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs):
-    if kwargs.get("low_precision_attention", True) is False:
-        return attention_pytorch(q, k, v, heads, mask=mask, skip_reshape=skip_reshape, skip_output_reshape=skip_output_reshape, **kwargs)
-
     exception_fallback = False
     if skip_reshape:
         b, _, _, dim_head = q.shape
View File

@@ -1548,7 +1548,6 @@ class ACEStep15(BaseModel):
     def extra_conds(self, **kwargs):
         out = super().extra_conds(**kwargs)
         device = kwargs["device"]
-        noise = kwargs["noise"]
         cross_attn = kwargs.get("cross_attn", None)
         if cross_attn is not None:
@@ -1572,19 +1571,15 @@ class ACEStep15(BaseModel):
                  1.4844e-01,  9.4727e-02,  3.8477e-01, -1.2578e+00, -3.3203e-01,
                 -8.5547e-01,  4.3359e-01,  4.2383e-01, -8.9453e-01, -5.0391e-01,
                 -5.6152e-02, -2.9219e+00, -2.4658e-02,  5.0391e-01,  9.8438e-01,
-                 7.2754e-02, -2.1582e-01,  6.3672e-01,  1.0000e+00]]], device=device).movedim(-1, 1).repeat(1, 1, noise.shape[2])
-            pass_audio_codes = True
+                 7.2754e-02, -2.1582e-01,  6.3672e-01,  1.0000e+00]]], device=device).movedim(-1, 1).repeat(1, 1, 750)
         else:
-            refer_audio = refer_audio[-1][:, :, :noise.shape[2]]
-            pass_audio_codes = False
-
-        if pass_audio_codes:
-            audio_codes = kwargs.get("audio_codes", None)
-            if audio_codes is not None:
-                out['audio_codes'] = comfy.conds.CONDRegular(torch.tensor(audio_codes, device=device))
+            refer_audio = refer_audio[:, :, :750]
+            refer_audio = refer_audio[-1]
         out['refer_audio'] = comfy.conds.CONDRegular(refer_audio)
+        audio_codes = kwargs.get("audio_codes", None)
+        if audio_codes is not None:
+            out['audio_codes'] = comfy.conds.CONDRegular(torch.tensor(audio_codes, device=device))
         return out


 class Omnigen2(BaseModel):

View File

@@ -54,8 +54,6 @@ try:
     SDPA_BACKEND_PRIORITY.insert(0, SDPBackend.CUDNN_ATTENTION)

     def scaled_dot_product_attention(q, k, v, *args, **kwargs):
-        if q.nelement() < 1024 * 128: # arbitrary number, for small inputs cudnn attention seems slower
-            return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs)
         with sdpa_kernel(SDPA_BACKEND_PRIORITY, set_priority=True):
             return torch.nn.functional.scaled_dot_product_attention(q, k, v, *args, **kwargs)
 else:

View File

@@ -101,7 +101,9 @@ def sample_manual_loop_no_classes(
     return output_audio_codes


-def generate_audio_codes(model, positive, negative, min_tokens=1, max_tokens=1024, seed=0, cfg_scale=2.0, temperature=0.85, top_p=0.9, top_k=0):
+def generate_audio_codes(model, positive, negative, min_tokens=1, max_tokens=1024, seed=0):
+    cfg_scale = 2.0
+
     positive = [[token for token, _ in inner_list] for inner_list in positive]
     negative = [[token for token, _ in inner_list] for inner_list in negative]
     positive = positive[0]
@@ -118,7 +120,7 @@ def generate_audio_codes(model, positive, negative, min_tokens=1, max_tokens=102
         positive = [model.special_tokens["pad"]] * pos_pad + positive

     paddings = [pos_pad, neg_pad]
-    return sample_manual_loop_no_classes(model, [positive, negative], paddings, cfg_scale=cfg_scale, temperature=temperature, top_p=top_p, top_k=top_k, seed=seed, min_tokens=min_tokens, max_new_tokens=max_tokens)
+    return sample_manual_loop_no_classes(model, [positive, negative], paddings, cfg_scale=cfg_scale, seed=seed, min_tokens=min_tokens, max_new_tokens=max_tokens)


 class ACE15Tokenizer(sd1_clip.SD1Tokenizer):
@@ -135,12 +137,6 @@ class ACE15Tokenizer(sd1_clip.SD1Tokenizer):
         language = kwargs.get("language", "en")
         seed = kwargs.get("seed", 0)
-        generate_audio_codes = kwargs.get("generate_audio_codes", True)
-        cfg_scale = kwargs.get("cfg_scale", 2.0)
-        temperature = kwargs.get("temperature", 0.85)
-        top_p = kwargs.get("top_p", 0.9)
-        top_k = kwargs.get("top_k", 0.0)
-
         duration = math.ceil(duration)
         meta_lm = 'bpm: {}\nduration: {}\nkeyscale: {}\ntimesignature: {}'.format(bpm, duration, keyscale, timesignature)
         lm_template = "<|im_start|>system\n# Instruction\nGenerate audio semantic tokens based on the given conditions:\n\n<|im_end|>\n<|im_start|>user\n# Caption\n{}\n{}\n<|im_end|>\n<|im_start|>assistant\n<think>\n{}\n</think>\n\n<|im_end|>\n"
@@ -151,14 +147,7 @@ class ACE15Tokenizer(sd1_clip.SD1Tokenizer):
         out["lyrics"] = self.qwen3_06b.tokenize_with_weights("# Languages\n{}\n\n# Lyric{}<|endoftext|><|endoftext|>".format(language, lyrics), return_word_ids, disable_weights=True, **kwargs)
         out["qwen3_06b"] = self.qwen3_06b.tokenize_with_weights("# Instruction\nGenerate audio semantic tokens based on the given conditions:\n\n# Caption\n{}# Metas\n{}<|endoftext|>\n<|endoftext|>".format(text, meta_cap), return_word_ids, **kwargs)
-        out["lm_metadata"] = {"min_tokens": duration * 5,
-                              "seed": seed,
-                              "generate_audio_codes": generate_audio_codes,
-                              "cfg_scale": cfg_scale,
-                              "temperature": temperature,
-                              "top_p": top_p,
-                              "top_k": top_k,
-                              }
+        out["lm_metadata"] = {"min_tokens": duration * 5, "seed": seed}
         return out
@@ -214,14 +203,10 @@ class ACE15TEModel(torch.nn.Module):
         self.qwen3_06b.set_clip_options({"layer": [0]})
         lyrics_embeds, _, extra_l = self.qwen3_06b.encode_token_weights(token_weight_pairs_lyrics)

-        out = {"conditioning_lyrics": lyrics_embeds[:, 0]}
         lm_metadata = token_weight_pairs["lm_metadata"]
-        if lm_metadata["generate_audio_codes"]:
-            audio_codes = generate_audio_codes(getattr(self, self.lm_model, self.qwen3_06b), token_weight_pairs["lm_prompt"], token_weight_pairs["lm_prompt_negative"], min_tokens=lm_metadata["min_tokens"], max_tokens=lm_metadata["min_tokens"], seed=lm_metadata["seed"], cfg_scale=lm_metadata["cfg_scale"], temperature=lm_metadata["temperature"], top_p=lm_metadata["top_p"], top_k=lm_metadata["top_k"])
-            out["audio_codes"] = [audio_codes]
-
-        return base_out, None, out
+        audio_codes = generate_audio_codes(getattr(self, self.lm_model, self.qwen3_06b), token_weight_pairs["lm_prompt"], token_weight_pairs["lm_prompt_negative"], min_tokens=lm_metadata["min_tokens"], max_tokens=lm_metadata["min_tokens"], seed=lm_metadata["seed"])
+
+        return base_out, None, {"conditioning_lyrics": lyrics_embeds[:, 0], "audio_codes": [audio_codes]}

     def set_clip_options(self, options):
         self.qwen3_06b.set_clip_options(options)

View File

@@ -10,7 +10,6 @@ from ._input_impl import VideoFromFile, VideoFromComponents
 from ._util import VideoCodec, VideoContainer, VideoComponents, MESH, VOXEL, File3D
 from . import _io_public as io
 from . import _ui_public as ui
-from . import _node_replace_public as node_replace
 from comfy_execution.utils import get_executing_context
 from comfy_execution.progress import get_progress_state, PreviewImageTuple
 from PIL import Image
@@ -22,14 +21,6 @@ class ComfyAPI_latest(ComfyAPIBase):
     VERSION = "latest"
     STABLE = False

-    class NodeReplacement(ProxiedSingleton):
-        async def register(self, node_replace: 'node_replace.NodeReplace') -> None:
-            """Register a node replacement mapping."""
-            from server import PromptServer
-            PromptServer.instance.node_replace_manager.register(node_replace)
-
-    node_replacement: NodeReplacement
-
     class Execution(ProxiedSingleton):
         async def set_progress(
             self,
@@ -140,5 +131,4 @@ __all__ = [
     "IO",
     "ui",
     "UI",
-    "node_replace",
 ]

View File

@@ -1309,6 +1309,7 @@ class NodeInfoV1:
     api_node: bool=None
     price_badge: dict | None = None
     search_aliases: list[str]=None
+    main_category: str=None


 @dataclass
@@ -1430,6 +1431,8 @@ class Schema:
     """Flags a node as expandable, allowing NodeOutput to include 'expand' property."""
     accept_all_inputs: bool=False
     """When True, all inputs from the prompt will be passed to the node as kwargs, even if not defined in the schema."""
+    main_category: str | None = None
+    """Optional main category for top-level tabs in the node library (e.g., 'Basic', 'Image Tools', 'Partner Nodes')."""

     def validate(self):
         '''Validate the schema:
@@ -1536,6 +1539,7 @@ class Schema:
             python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
             price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
             search_aliases=self.search_aliases if self.search_aliases else None,
+            main_category=self.main_category,
         )
         return info
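
With the field in place, a V3-style node opts into a top-level tab by passing main_category next to its regular category. A minimal sketch (the node itself is hypothetical; the pattern mirrors the registrations later in this diff):

class ExampleInvert(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="ExampleInvert",      # hypothetical node, for illustration only
            category="image/transform",   # fine-grained path inside the node library
            main_category="Image Tools",  # top-level tab: Basic, Image Tools, ...
            inputs=[IO.Image.Input("image")],
            outputs=[IO.Image.Output()],
        )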

View File

@@ -1,94 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-
-class NodeReplace:
-    """
-    Defines a possible node replacement, mapping inputs and outputs of the old node to the new node.
-    Also supports assigning specific values to the input widgets of the new node.
-    """
-
-    def __init__(self,
-                 new_node_id: str,
-                 old_node_id: str,
-                 old_widget_ids: list[str] | None=None,
-                 input_mapping: list[InputMap] | None=None,
-                 output_mapping: list[OutputMap] | None=None,
-                 ):
-        self.new_node_id = new_node_id
-        self.old_node_id = old_node_id
-        self.old_widget_ids = old_widget_ids
-        self.input_mapping = input_mapping
-        self.output_mapping = output_mapping
-
-    def as_dict(self):
-        """Create serializable representation of the node replacement."""
-        return {
-            "new_node_id": self.new_node_id,
-            "old_node_id": self.old_node_id,
-            "old_widget_ids": self.old_widget_ids,
-            "input_mapping": [m.as_dict() for m in self.input_mapping] if self.input_mapping else None,
-            "output_mapping": [m.as_dict() for m in self.output_mapping] if self.output_mapping else None,
-        }
-
-
-class InputMap:
-    """
-    Map inputs of node replacement.
-    Use InputMap.OldId or InputMap.SetValue for mapping purposes.
-    """
-
-    class _Assign:
-        def __init__(self, assign_type: str):
-            self.assign_type = assign_type
-
-        def as_dict(self):
-            return {
-                "assign_type": self.assign_type,
-            }
-
-    class OldId(_Assign):
-        """Connect the input of the old node with given id to new node when replacing."""
-        def __init__(self, old_id: str):
-            super().__init__("old_id")
-            self.old_id = old_id
-
-        def as_dict(self):
-            return super().as_dict() | {
-                "old_id": self.old_id,
-            }
-
-    class SetValue(_Assign):
-        """Use the given value for the input of the new node when replacing; assumes input is a widget."""
-        def __init__(self, value: Any):
-            super().__init__("set_value")
-            self.value = value
-
-        def as_dict(self):
-            return super().as_dict() | {
-                "value": self.value,
-            }
-
-    def __init__(self, new_id: str, assign: OldId | SetValue):
-        self.new_id = new_id
-        self.assign = assign
-
-    def as_dict(self):
-        return {
-            "new_id": self.new_id,
-            "assign": self.assign.as_dict(),
-        }
-
-
-class OutputMap:
-    """Map outputs of node replacement via indexes, as that's how outputs are stored."""
-
-    def __init__(self, new_idx: int, old_idx: int):
-        self.new_idx = new_idx
-        self.old_idx = old_idx
-
-    def as_dict(self):
-        return {
-            "new_idx": self.new_idx,
-            "old_idx": self.old_idx,
-        }
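
The usage these classes enabled appears further down in this diff, in the removed built-in registrations; the smallest case was a straight rename with no input or output remapping:

from comfy_api.latest import node_replace

node_replace.NodeReplace(
    new_node_id="Load3D",
    old_node_id="Load3DAnimation",
)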

View File

@@ -1 +0,0 @@
-from ._node_replace import * # noqa: F403

View File

@@ -6,7 +6,7 @@ from comfy_api.latest import (
 )
 from typing import Type, TYPE_CHECKING
 from comfy_api.internal.async_to_sync import create_sync_class
-from comfy_api.latest import io, ui, IO, UI, ComfyExtension, node_replace #noqa: F401
+from comfy_api.latest import io, ui, IO, UI, ComfyExtension #noqa: F401


 class ComfyAPIAdapter_v0_0_2(ComfyAPI_latest):
@@ -46,5 +46,4 @@ __all__ = [
     "IO",
     "ui",
     "UI",
-    "node_replace",
 ]

View File

@@ -37,6 +37,7 @@ class TencentTextToModelNode(IO.ComfyNode):
             node_id="TencentTextToModelNode",
             display_name="Hunyuan3D: Text to Model (Pro)",
             category="api node/3d/Tencent",
+            main_category="3D",
             inputs=[
                 IO.Combo.Input(
                     "model",
@@ -147,6 +148,7 @@ class TencentImageToModelNode(IO.ComfyNode):
             node_id="TencentImageToModelNode",
             display_name="Hunyuan3D: Image(s) to Model (Pro)",
             category="api node/3d/Tencent",
+            main_category="3D",
             inputs=[
                 IO.Combo.Input(
                     "model",

View File

@@ -1936,6 +1936,7 @@ class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
             node_id="KlingLipSyncAudioToVideoNode",
             display_name="Kling Lip Sync Video with Audio",
             category="api node/video/Kling",
+            main_category="Video Generation",
             description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
             inputs=[
                 IO.Video.Input("video"),

View File

@@ -576,6 +576,7 @@ class OpenAIChatNode(IO.ComfyNode):
             node_id="OpenAIChatNode",
             display_name="OpenAI ChatGPT",
             category="api node/text/OpenAI",
+            main_category="Text Generation",
             description="Generate text responses from an OpenAI model.",
             inputs=[
                 IO.String.Input(

View File

@@ -963,6 +963,7 @@ class RecraftRemoveBackgroundNode(IO.ComfyNode):
             node_id="RecraftRemoveBackgroundNode",
             display_name="Recraft Remove Background",
             category="api node/image/Recraft",
+            main_category="Image Tools",
             description="Remove background from image, and return processed image and mask.",
             inputs=[
                 IO.Image.Input("image"),

View File

@@ -624,6 +624,7 @@ class StabilityTextToAudio(IO.ComfyNode):
             node_id="StabilityTextToAudio",
             display_name="Stability AI Text To Audio",
             category="api node/audio/Stability AI",
+            main_category="Audio",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
                 IO.Combo.Input(

View File

@@ -44,18 +44,13 @@ class TextEncodeAceStepAudio15(io.ComfyNode):
                 io.Combo.Input("timesignature", options=['2', '3', '4', '6']),
                 io.Combo.Input("language", options=["en", "ja", "zh", "es", "de", "fr", "pt", "ru", "it", "nl", "pl", "tr", "vi", "cs", "fa", "id", "ko", "uk", "hu", "ar", "sv", "ro", "el"]),
                 io.Combo.Input("keyscale", options=[f"{root} {quality}" for quality in ["major", "minor"] for root in ["C", "C#", "Db", "D", "D#", "Eb", "E", "F", "F#", "Gb", "G", "G#", "Ab", "A", "A#", "Bb", "B"]]),
-                io.Boolean.Input("generate_audio_codes", default=True, tooltip="Enable the LLM that generates audio codes. This can be slow but will increase the quality of the generated audio. Turn this off if you are giving the model an audio reference.", advanced=True),
-                io.Float.Input("cfg_scale", default=2.0, min=0.0, max=100.0, step=0.1, advanced=True),
-                io.Float.Input("temperature", default=0.85, min=0.0, max=2.0, step=0.01, advanced=True),
-                io.Float.Input("top_p", default=0.9, min=0.0, max=2000.0, step=0.01, advanced=True),
-                io.Int.Input("top_k", default=0, min=0, max=100, advanced=True),
             ],
             outputs=[io.Conditioning.Output()],
         )

     @classmethod
-    def execute(cls, clip, tags, lyrics, seed, bpm, duration, timesignature, language, keyscale, generate_audio_codes, cfg_scale, temperature, top_p, top_k) -> io.NodeOutput:
-        tokens = clip.tokenize(tags, lyrics=lyrics, bpm=bpm, duration=duration, timesignature=int(timesignature), language=language, keyscale=keyscale, seed=seed, generate_audio_codes=generate_audio_codes, cfg_scale=cfg_scale, temperature=temperature, top_p=top_p, top_k=top_k)
+    def execute(cls, clip, tags, lyrics, seed, bpm, duration, timesignature, language, keyscale) -> io.NodeOutput:
+        tokens = clip.tokenize(tags, lyrics=lyrics, bpm=bpm, duration=duration, timesignature=int(timesignature), language=language, keyscale=keyscale, seed=seed)
         conditioning = clip.encode_from_tokens_scheduled(tokens)
         return io.NodeOutput(conditioning)
@@ -105,15 +100,14 @@ class EmptyAceStep15LatentAudio(io.ComfyNode):
         latent = torch.zeros([batch_size, 64, length], device=comfy.model_management.intermediate_device())
         return io.NodeOutput({"samples": latent, "type": "audio"})

-class ReferenceAudio(io.ComfyNode):
+class ReferenceTimbreAudio(io.ComfyNode):
     @classmethod
     def define_schema(cls):
         return io.Schema(
             node_id="ReferenceTimbreAudio",
             display_name="Reference Audio",
             category="advanced/conditioning/audio",
             is_experimental=True,
-            description="This node sets the reference audio for ace step 1.5",
+            description="This node sets the reference audio for timbre (for ace step 1.5)",
             inputs=[
                 io.Conditioning.Input("conditioning"),
                 io.Latent.Input("latent", optional=True),
@@ -137,7 +131,7 @@ class AceExtension(ComfyExtension):
             EmptyAceStepLatentAudio,
             TextEncodeAceStepAudio15,
             EmptyAceStep15LatentAudio,
-            ReferenceAudio,
+            ReferenceTimbreAudio,
         ]

 async def comfy_entrypoint() -> AceExtension:

View File

@@ -129,6 +129,7 @@ class SaveAudio(IO.ComfyNode):
             search_aliases=["export flac"],
             display_name="Save Audio (FLAC)",
             category="audio",
+            main_category="Audio",
             inputs=[
                 IO.Audio.Input("audio"),
                 IO.String.Input("filename_prefix", default="audio/ComfyUI"),
@@ -270,6 +271,7 @@ class LoadAudio(IO.ComfyNode):
             search_aliases=["import audio", "open audio", "audio file"],
             display_name="Load Audio",
             category="audio",
+            main_category="Audio",
             inputs=[
                 IO.Combo.Input("audio", upload=IO.UploadType.audio, options=sorted(files)),
             ],

View File

@@ -12,6 +12,7 @@ class Canny(io.ComfyNode):
             node_id="Canny",
             search_aliases=["edge detection", "outline", "contour detection", "line art"],
             category="image/preprocessors",
+            main_category="Image Tools/Preprocessing",
             inputs=[
                 io.Image.Input("image"),
                 io.Float.Input("low_threshold", default=0.4, min=0.01, max=0.99, step=0.01),

View File

@@ -621,6 +621,7 @@ class SaveGLB(IO.ComfyNode):
             display_name="Save 3D Model",
             search_aliases=["export 3d model", "save mesh"],
             category="3d",
+            main_category="Basic",
             is_output_node=True,
             inputs=[
                 IO.MultiType.Input(

View File

@@ -25,6 +25,7 @@ class ImageCrop(IO.ComfyNode):
             search_aliases=["trim"],
             display_name="Image Crop",
             category="image/transform",
+            main_category="Image Tools",
             inputs=[
                 IO.Image.Input("image"),
                 IO.Int.Input("width", default=512, min=1, max=nodes.MAX_RESOLUTION, step=1),
@@ -537,6 +538,7 @@ class ImageRotate(IO.ComfyNode):
             node_id="ImageRotate",
             search_aliases=["turn", "flip orientation"],
             category="image/transform",
+            main_category="Image Tools",
             inputs=[
                 IO.Image.Input("image"),
                 IO.Combo.Input("rotation", options=["none", "90 degrees", "180 degrees", "270 degrees"]),

View File

@@ -31,6 +31,7 @@ class Load3D(IO.ComfyNode):
             node_id="Load3D",
             display_name="Load 3D & Animation",
             category="3d",
+            main_category="Basic",
             is_experimental=True,
             inputs=[
                 IO.Combo.Input("model_file", options=sorted(files), upload=IO.UploadType.model),

View File

@@ -77,6 +77,7 @@ class Blur(io.ComfyNode):
         return io.Schema(
             node_id="ImageBlur",
             category="image/postprocessing",
+            main_category="Image Tools",
             inputs=[
                 io.Image.Input("image"),
                 io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
@@ -655,103 +656,6 @@ class BatchImagesMasksLatentsNode(io.ComfyNode):
         batched = batch_masks(values)
         return io.NodeOutput(batched)

-from comfy_api.latest import node_replace
-from server import PromptServer
-
-def _register(nr: node_replace.NodeReplace):
-    """Helper to register replacements via PromptServer."""
-    PromptServer.instance.node_replace_manager.register(nr)
-
-async def register_replacements():
-    """Register all built-in node replacements."""
-    register_replacements_longeredge()
-    register_replacements_batchimages()
-    register_replacements_upscaleimage()
-    register_replacements_controlnet()
-    register_replacements_load3d()
-    register_replacements_preview3d()
-    register_replacements_svdimg2vid()
-    register_replacements_conditioningavg()
-
-def register_replacements_longeredge():
-    # No dynamic inputs here
-    _register(node_replace.NodeReplace(
-        new_node_id="ImageScaleToMaxDimension",
-        old_node_id="ResizeImagesByLongerEdge",
-        old_widget_ids=["longer_edge"],
-        input_mapping=[
-            node_replace.InputMap(new_id="image", assign=node_replace.InputMap.OldId("images")),
-            node_replace.InputMap(new_id="largest_size", assign=node_replace.InputMap.OldId("longer_edge")),
-            node_replace.InputMap(new_id="upscale_method", assign=node_replace.InputMap.SetValue("lanczos")),
-        ],
-        # just to test the frontend output_mapping code, does nothing really here
-        output_mapping=[node_replace.OutputMap(new_idx=0, old_idx=0)],
-    ))
-
-def register_replacements_batchimages():
-    # BatchImages node uses Autogrow
-    _register(node_replace.NodeReplace(
-        new_node_id="BatchImagesNode",
-        old_node_id="ImageBatch",
-        input_mapping=[
-            node_replace.InputMap(new_id="images.image0", assign=node_replace.InputMap.OldId("image1")),
-            node_replace.InputMap(new_id="images.image1", assign=node_replace.InputMap.OldId("image2")),
-        ],
-    ))
-
-def register_replacements_upscaleimage():
-    # ResizeImageMaskNode uses DynamicCombo
-    _register(node_replace.NodeReplace(
-        new_node_id="ResizeImageMaskNode",
-        old_node_id="ImageScaleBy",
-        old_widget_ids=["upscale_method", "scale_by"],
-        input_mapping=[
-            node_replace.InputMap(new_id="input", assign=node_replace.InputMap.OldId("image")),
-            node_replace.InputMap(new_id="resize_type", assign=node_replace.InputMap.SetValue("scale by multiplier")),
-            node_replace.InputMap(new_id="resize_type.multiplier", assign=node_replace.InputMap.OldId("scale_by")),
-            node_replace.InputMap(new_id="scale_method", assign=node_replace.InputMap.OldId("upscale_method")),
-        ],
-    ))
-
-def register_replacements_controlnet():
-    # T2IAdapterLoader → ControlNetLoader
-    _register(node_replace.NodeReplace(
-        new_node_id="ControlNetLoader",
-        old_node_id="T2IAdapterLoader",
-        input_mapping=[
-            node_replace.InputMap(new_id="control_net_name", assign=node_replace.InputMap.OldId("t2i_adapter_name")),
-        ],
-    ))
-
-def register_replacements_load3d():
-    # Load3DAnimation merged into Load3D
-    _register(node_replace.NodeReplace(
-        new_node_id="Load3D",
-        old_node_id="Load3DAnimation",
-    ))
-
-def register_replacements_preview3d():
-    # Preview3DAnimation merged into Preview3D
-    _register(node_replace.NodeReplace(
-        new_node_id="Preview3D",
-        old_node_id="Preview3DAnimation",
-    ))
-
-def register_replacements_svdimg2vid():
-    # Typo fix: SDV → SVD
-    _register(node_replace.NodeReplace(
-        new_node_id="SVD_img2vid_Conditioning",
-        old_node_id="SDV_img2vid_Conditioning",
-    ))
-
-def register_replacements_conditioningavg():
-    # Typo fix: trailing space in node name
-    _register(node_replace.NodeReplace(
-        new_node_id="ConditioningAverage",
-        old_node_id="ConditioningAverage ",
-    ))
-
-
 class PostProcessingExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[io.ComfyNode]]:
@@ -769,5 +673,4 @@ class PostProcessingExtension(ComfyExtension):
         ]

 async def comfy_entrypoint() -> PostProcessingExtension:
-    await register_replacements()
     return PostProcessingExtension()

View File

@@ -73,6 +73,7 @@ class SaveVideo(io.ComfyNode):
             search_aliases=["export video"],
             display_name="Save Video",
             category="image/video",
+            main_category="Basic",
             description="Saves the input images to your ComfyUI output directory.",
             inputs=[
                 io.Video.Input("video", tooltip="The video to save."),
@@ -146,6 +147,7 @@ class GetVideoComponents(io.ComfyNode):
             search_aliases=["extract frames", "split video", "video to images", "demux"],
             display_name="Get Video Components",
             category="image/video",
+            main_category="Video Tools",
             description="Extracts all components from a video: frames, audio, and framerate.",
             inputs=[
                 io.Video.Input("video", tooltip="The video to extract components from."),
@@ -174,6 +176,7 @@ class LoadVideo(io.ComfyNode):
             search_aliases=["import video", "open video", "video file"],
             display_name="Load Video",
             category="image/video",
+            main_category="Basic",
             inputs=[
                 io.Combo.Input("file", options=sorted(files), upload=io.UploadType.video),
             ],

View File

@@ -69,6 +69,7 @@ class CLIPTextEncode(ComfyNodeABC):
     FUNCTION = "encode"

     CATEGORY = "conditioning"
+    MAIN_CATEGORY = "Basic"
     DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
     SEARCH_ALIASES = ["text", "prompt", "text prompt", "positive prompt", "negative prompt", "encode text", "text encoder", "encode prompt"]
@@ -667,6 +668,8 @@ class CLIPSetLastLayer:
         return (clip,)

 class LoraLoader:
+    MAIN_CATEGORY = "Image Generation"
+
     def __init__(self):
         self.loaded_lora = None
@@ -1648,6 +1651,7 @@ class SaveImage:
     OUTPUT_NODE = True

     CATEGORY = "image"
+    MAIN_CATEGORY = "Basic"
     DESCRIPTION = "Saves the input images to your ComfyUI output directory."
     SEARCH_ALIASES = ["save", "save image", "export image", "output image", "write image", "download"]
@@ -1706,6 +1710,7 @@ class LoadImage:
         }

     CATEGORY = "image"
+    MAIN_CATEGORY = "Basic"
     SEARCH_ALIASES = ["load image", "open image", "import image", "image input", "upload image", "read image", "image loader"]

     RETURN_TYPES = ("IMAGE", "MASK")
@@ -1863,6 +1868,7 @@ class ImageScale:
     FUNCTION = "upscale"

     CATEGORY = "image/upscaling"
+    MAIN_CATEGORY = "Image Tools"
     SEARCH_ALIASES = ["resize", "resize image", "scale image", "image resize", "zoom", "zoom in", "change size"]

     def upscale(self, image, upscale_method, width, height, crop):
@@ -1902,6 +1908,7 @@ class ImageScaleBy:
 class ImageInvert:
     SEARCH_ALIASES = ["reverse colors"]
+    MAIN_CATEGORY = "Image Tools"

     @classmethod
     def INPUT_TYPES(s):
@@ -1918,6 +1925,7 @@ class ImageInvert:
 class ImageBatch:
     SEARCH_ALIASES = ["combine images", "merge images", "stack images"]
+    MAIN_CATEGORY = "Image Tools"

     @classmethod
     def INPUT_TYPES(s):
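
For these legacy-style classes the opt-in is a plain class attribute next to CATEGORY; the server's /object_info handler (next file) picks it up via hasattr. A minimal sketch with a hypothetical node:

class ExampleNode:
    CATEGORY = "image"
    MAIN_CATEGORY = "Image Tools"  # surfaces the node under a top-level tab

    @classmethod
    def INPUT_TYPES(s):
        # hypothetical empty signature, for illustration only
        return {"required": {}}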

View File

@@ -40,7 +40,6 @@ from app.user_manager import UserManager
 from app.model_manager import ModelFileManager
 from app.custom_node_manager import CustomNodeManager
 from app.subgraph_manager import SubgraphManager
-from app.node_replace_manager import NodeReplaceManager
 from typing import Optional, Union
 from api_server.routes.internal.internal_routes import InternalRoutes
 from protocol import BinaryEventTypes
@@ -205,7 +204,6 @@ class PromptServer():
         self.model_file_manager = ModelFileManager()
         self.custom_node_manager = CustomNodeManager()
         self.subgraph_manager = SubgraphManager()
-        self.node_replace_manager = NodeReplaceManager()
         self.internal_routes = InternalRoutes(self)
         self.supports = ["custom_nodes_from_web"]
         self.prompt_queue = execution.PromptQueue(self)
@@ -689,6 +687,10 @@ class PromptServer():
                 info['api_node'] = obj_class.API_NODE

             info['search_aliases'] = getattr(obj_class, 'SEARCH_ALIASES', [])
+
+            if hasattr(obj_class, 'MAIN_CATEGORY'):
+                info['main_category'] = obj_class.MAIN_CATEGORY
+
             return info

         @routes.get("/object_info")
@@ -997,7 +999,6 @@ class PromptServer():
         self.model_file_manager.add_routes(self.routes)
         self.custom_node_manager.add_routes(self.routes, self.app, nodes.LOADED_MODULE_DIRS.items())
         self.subgraph_manager.add_routes(self.routes, nodes.LOADED_MODULE_DIRS.items())
-        self.node_replace_manager.add_routes(self.routes)
         self.app.add_subapp('/internal', self.internal_routes.get_app())

         # Prefix every route with /api for easier matching for delegation.
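
End to end, the flag surfaces per node in the /object_info response. A quick client-side check, assuming a ComfyUI instance on the default local port (illustrative only):

import json
import urllib.request

# Fetch node metadata from a locally running ComfyUI server.
with urllib.request.urlopen("http://127.0.0.1:8188/object_info") as resp:
    object_info = json.load(resp)

# List every node that declares a main_category for the library's top-level tabs.
for name, info in object_info.items():
    if info.get("main_category"):
        print(name, "->", info["main_category"])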