Compare commits

..

4 Commits

Author SHA1 Message Date
a0a65f51bc fix: address coderabbit findings - replace exec() with AST parse, gate summary on job.status, persist-credentials false
Amp-Thread-ID: https://ampcode.com/threads/T-019e042d-d972-7559-b462-6e838c2da164
Co-authored-by: Amp <amp@ampcode.com>
2026-05-08 15:18:09 -07:00
3566e6b6a6 fix: remove auto-delete of source branch (footgun for non-ephemeral sources like master)
Amp-Thread-ID: https://ampcode.com/threads/T-019e042d-d972-7559-b462-6e838c2da164
Co-authored-by: Amp <amp@ampcode.com>
2026-05-08 15:15:53 -07:00
a8e11936f3 fix: address code review issues in cut-release.yml
- security: pass all inputs via env vars (no command injection)
- security: validate ref names against safe charset
- bug: use robust dry_run check (handle bool and 'true'/'false' string)
- robustness: combine branch+tag push into single atomic operation
- robustness: add set -euo pipefail to Configure git identity step

Amp-Thread-ID: https://ampcode.com/threads/T-019e042d-d972-7559-b462-6e838c2da164
Co-authored-by: Amp <amp@ampcode.com>
2026-05-08 14:30:36 -07:00
08a9245e7f feat: add cut-release.yml workflow for backport release promotion
Amp-Thread-ID: https://ampcode.com/threads/T-019e042d-d972-7559-b462-6e838c2da164
Co-authored-by: Amp <amp@ampcode.com>
2026-05-08 14:17:40 -07:00
56 changed files with 497 additions and 241 deletions

268
.github/workflows/cut-release.yml vendored Normal file
View File

@ -0,0 +1,268 @@
name: Cut Release

# Promote a prepared "candidate" branch (cherry-picks + version bump
# already committed) into a real release: create the canonical
# `release/v<version>` branch and the `v<version>` annotated tag at the
# candidate's HEAD, atomically.
#
# After this runs, kick off `release-stable-all.yml` manually with the new
# tag to build portable artifacts — same as previous backports.
#
# The candidate branch is left in place; clean it up manually once you've
# verified the release.

on:
  workflow_dispatch:
    inputs:
      source_branch:
        description: 'Candidate branch with cherry-picks + version bump (e.g. kosinkadink/release-v0.20.3-prep)'
        required: true
        type: string
      version:
        description: 'Target version (e.g. 0.20.3 — no leading v)'
        required: true
        type: string
      release_branch:
        description: 'Override target release branch name (default: release/v<version>)'
        required: false
        type: string
        default: ''
      tag_name:
        description: 'Override tag name (default: v<version>)'
        required: false
        type: string
        default: ''
      dry_run:
        description: 'Validate only — do not push branch or tag'
        required: false
        type: boolean
        default: false

jobs:
  cut-release:
    runs-on: ubuntu-latest
    permissions:
      # GITHUB_TOKEN write access is unused for the actual push (we use
      # RELEASE_BOT_TOKEN to bypass branch-protection rules), but kept
      # write so subsequent automation hooks have it if needed.
      contents: write
    env:
      # All workflow inputs are pulled in via env vars instead of being
      # interpolated directly into bash, to avoid command injection.
      INPUT_VERSION: ${{ inputs.version }}
      INPUT_SOURCE_BRANCH: ${{ inputs.source_branch }}
      INPUT_RELEASE_BRANCH: ${{ inputs.release_branch }}
      INPUT_TAG_NAME: ${{ inputs.tag_name }}
      INPUT_DRY_RUN: ${{ inputs.dry_run }}
      REPO_FULL: ${{ github.repository }}
    steps:
      - name: Compute names
        id: names
        run: |
          set -euo pipefail
          VERSION="${INPUT_VERSION#v}"
          if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            echo "::error::Invalid version '$VERSION' — must match X.Y.Z"
            exit 1
          fi
          REL_BRANCH="$INPUT_RELEASE_BRANCH"
          [ -z "$REL_BRANCH" ] && REL_BRANCH="release/v${VERSION}"
          TAG_NAME="$INPUT_TAG_NAME"
          [ -z "$TAG_NAME" ] && TAG_NAME="v${VERSION}"
          # Validate the overrides too — they're effectively user-controlled
          # ref names and end up in shell + git invocations.
          REF_RE='^[A-Za-z0-9._/-]+$'
          if ! [[ "$REL_BRANCH" =~ $REF_RE ]]; then
            echo "::error::Invalid release_branch '$REL_BRANCH' — only [A-Za-z0-9._/-] allowed."
            exit 1
          fi
          if ! [[ "$TAG_NAME" =~ $REF_RE ]]; then
            echo "::error::Invalid tag_name '$TAG_NAME' — only [A-Za-z0-9._/-] allowed."
            exit 1
          fi
          if ! [[ "$INPUT_SOURCE_BRANCH" =~ $REF_RE ]]; then
            echo "::error::Invalid source_branch '$INPUT_SOURCE_BRANCH' — only [A-Za-z0-9._/-] allowed."
            exit 1
          fi
          {
            echo "version=$VERSION"
            echo "release_branch=$REL_BRANCH"
            echo "tag_name=$TAG_NAME"
          } >> "$GITHUB_OUTPUT"
          echo "Computed: version=$VERSION, release_branch=$REL_BRANCH, tag_name=$TAG_NAME"

      - name: Pre-flight remote checks
        env:
          REL_BRANCH: ${{ steps.names.outputs.release_branch }}
          TAG_NAME: ${{ steps.names.outputs.tag_name }}
        run: |
          set -euo pipefail
          REPO_URL="https://github.com/${REPO_FULL}.git"
          # Source branch must exist.
          if ! git ls-remote --heads --exit-code "$REPO_URL" "$INPUT_SOURCE_BRANCH" >/dev/null; then
            echo "::error::Source branch '$INPUT_SOURCE_BRANCH' does not exist on origin."
            exit 1
          fi
          # Release branch must NOT exist.
          if git ls-remote --heads --exit-code "$REPO_URL" "$REL_BRANCH" >/dev/null 2>&1; then
            echo "::error::Release branch '$REL_BRANCH' already exists on origin. Refusing to overwrite."
            exit 1
          fi
          # Tag must NOT exist.
          if git ls-remote --tags --exit-code "$REPO_URL" "$TAG_NAME" >/dev/null 2>&1; then
            echo "::error::Tag '$TAG_NAME' already exists on origin. Refusing to overwrite."
            exit 1
          fi

      - name: Checkout source branch
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.source_branch }}
          fetch-depth: 0
          # Defense in depth: don't leave a usable git credential on disk
          # for any code that runs against the checked-out branch.
          persist-credentials: false

      - name: Verify version files at source HEAD
        env:
          EXPECTED: ${{ steps.names.outputs.version }}
        run: |
          set -euo pipefail
          # pyproject.toml is parsed with tomllib (safe — pure data, no code).
          PYPROJECT_VERSION=$(python3 -c "import tomllib; print(tomllib.load(open('pyproject.toml','rb'))['project']['version'])")
          if [ "$PYPROJECT_VERSION" != "$EXPECTED" ]; then
            echo "::error::pyproject.toml version is '$PYPROJECT_VERSION' but expected '$EXPECTED'."
            exit 1
          fi
          # comfyui_version.py contains Python — never `exec()` it. A
          # malicious candidate branch could replace it with arbitrary code
          # that would then run in CI with RELEASE_BOT_TOKEN in scope.
          # Statically parse the AST to extract __version__ instead.
          MODULE_VERSION=$(python3 - <<'PY'
          import ast, pathlib, sys
          tree = ast.parse(pathlib.Path("comfyui_version.py").read_text(encoding="utf-8"))
          for node in tree.body:
              if isinstance(node, ast.Assign):
                  for target in node.targets:
                      if isinstance(target, ast.Name) and target.id == "__version__":
                          if isinstance(node.value, ast.Constant) and isinstance(node.value.value, str):
                              print(node.value.value)
                              sys.exit(0)
          sys.exit("Could not statically read __version__ from comfyui_version.py")
          PY
          )
          if [ "$MODULE_VERSION" != "$EXPECTED" ]; then
            echo "::error::comfyui_version.py __version__ is '$MODULE_VERSION' but expected '$EXPECTED'."
            exit 1
          fi
          echo "Version files OK: $EXPECTED"

      - name: Identify previous tag and log commit range
        id: range
        env:
          REL_BRANCH: ${{ steps.names.outputs.release_branch }}
          TAG_NAME: ${{ steps.names.outputs.tag_name }}
        run: |
          set -euo pipefail
          # Find latest tag reachable from this commit (the basis backport
          # was branched from). Falls back to "" gracefully.
          PREV_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
          SOURCE_SHA=$(git rev-parse HEAD)
          {
            echo "prev_tag=$PREV_TAG"
            echo "source_sha=$SOURCE_SHA"
          } >> "$GITHUB_OUTPUT"
          {
            echo "## Cut Release plan"
            echo ""
            echo "| Field | Value |"
            echo "| --- | --- |"
            echo "| Source branch | \`$INPUT_SOURCE_BRANCH\` @ \`${SOURCE_SHA:0:12}\` |"
            echo "| Target release branch | \`$REL_BRANCH\` |"
            echo "| Tag | \`$TAG_NAME\` |"
            echo "| Previous reachable tag | \`${PREV_TAG:-none}\` |"
            echo "| Dry run | \`$INPUT_DRY_RUN\` |"
            echo ""
            echo "### Commits to be tagged"
            echo ""
            if [ -n "$PREV_TAG" ]; then
              echo "Range: \`${PREV_TAG}..${SOURCE_SHA}\`"
              echo ""
              echo '```'
              git log --oneline --no-decorate "${PREV_TAG}..${SOURCE_SHA}"
              echo '```'
            else
              echo "(no prior tag found — listing last 30 commits on source)"
              echo ""
              echo '```'
              git log --oneline --no-decorate -30 "$SOURCE_SHA"
              echo '```'
            fi
          } >> "$GITHUB_STEP_SUMMARY"

      - name: Configure git identity
        if: inputs.dry_run != true && inputs.dry_run != 'true'
        run: |
          set -euo pipefail
          git config --global user.name "github-actions[bot]"
          git config --global user.email "github-actions[bot]@users.noreply.github.com"

      - name: Push release branch and tag (atomic)
        if: inputs.dry_run != true && inputs.dry_run != 'true'
        env:
          GH_TOKEN: ${{ secrets.RELEASE_BOT_TOKEN }}
          REL_BRANCH: ${{ steps.names.outputs.release_branch }}
          TAG_NAME: ${{ steps.names.outputs.tag_name }}
          VERSION: ${{ steps.names.outputs.version }}
          SOURCE_SHA: ${{ steps.range.outputs.source_sha }}
        run: |
          set -euo pipefail
          if [ -z "${GH_TOKEN:-}" ]; then
            echo "::error::secrets.RELEASE_BOT_TOKEN is not set."
            exit 1
          fi
          # Create the annotated tag locally first so we can push branch +
          # tag in a single atomic operation: either both refs are created
          # on origin, or neither — no half-promoted state.
          git tag -a "$TAG_NAME" "$SOURCE_SHA" -m "ComfyUI v${VERSION}"
          AUTH_URL="https://x-access-token:${GH_TOKEN}@github.com/${REPO_FULL}.git"
          git push --atomic "$AUTH_URL" \
            "${SOURCE_SHA}:refs/heads/${REL_BRANCH}" \
            "refs/tags/${TAG_NAME}"

      - name: Final summary
        if: always()
        env:
          REL_BRANCH: ${{ steps.names.outputs.release_branch }}
          TAG_NAME: ${{ steps.names.outputs.tag_name }}
          JOB_STATUS: ${{ job.status }}
        run: |
          set -euo pipefail
          {
            echo ""
            echo "### Result"
            if [ "$JOB_STATUS" != "success" ]; then
              echo "❌ Workflow did not complete successfully (job status: \`$JOB_STATUS\`). See the run logs for details. No branch or tag should be assumed to have been created."
            elif [ "$INPUT_DRY_RUN" = "true" ]; then
              echo "🔍 **Dry run** — no branch or tag was created."
            else
              echo "✅ Created \`$REL_BRANCH\` and tagged \`$TAG_NAME\` from \`$INPUT_SOURCE_BRANCH\`."
              echo ""
              echo "- Branch: <https://github.com/${REPO_FULL}/tree/${REL_BRANCH}>"
              echo "- Tag: <https://github.com/${REPO_FULL}/releases/tag/${TAG_NAME}>"
              echo ""
              echo "Next: run [release-stable-all.yml](https://github.com/${REPO_FULL}/actions/workflows/release-stable-all.yml) with \`git_tag=${TAG_NAME}\`."
              echo ""
              echo "The candidate branch \`$INPUT_SOURCE_BRANCH\` was left in place; delete it manually once you've verified the release."
            fi
          } >> "$GITHUB_STEP_SUMMARY"

View File

@ -431,10 +431,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Adjusts image brightness and contrast using a real-time GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
},
"extra": {}
}
}

View File

@ -162,7 +162,7 @@
},
"revision": 0,
"config": {},
"name": "Canny to Image (Z-Image-Turbo)",
"name": "local-Canny to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -1553,8 +1553,7 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Canny to image",
"description": "Generates an image from a Canny edge map using Z-Image-Turbo, with text conditioning."
"category": "Image generation and editing/Canny to image"
}
]
},
@ -1575,4 +1574,4 @@
}
},
"version": 0.4
}
}

View File

@ -192,7 +192,7 @@
},
"revision": 0,
"config": {},
"name": "Canny to Video (LTX 2.0)",
"name": "local-Canny to Video (LTX 2.0)",
"inputNode": {
"id": -10,
"bounding": [
@ -3600,8 +3600,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Canny to video",
"description": "Generates video from Canny edge maps using LTX-2, with optional synchronized audio."
"category": "Video generation and editing/Canny to video"
}
]
},
@ -3617,4 +3616,4 @@
}
},
"version": 0.4
}
}

View File

@ -377,9 +377,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Adds lens-style chromatic aberration (color fringing) using a real-time GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
}
}
}

View File

@ -596,8 +596,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Adjusts saturation, temperature, tint, and vibrance using a real-time GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
}

View File

@ -1129,8 +1129,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Balances colors across shadows, midtones, and highlights using a real-time GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
}

View File

@ -608,8 +608,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Fine-tunes tone and color with per-channel curve adjustments using a real-time GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
}

View File

@ -1609,8 +1609,7 @@
}
],
"extra": {},
"category": "Image Tools/Crop",
"description": "Splits an image into a 2×2 grid of four equal tiles."
"category": "Image Tools/Crop"
}
]
},

View File

@ -2946,8 +2946,7 @@
}
],
"extra": {},
"category": "Image Tools/Crop",
"description": "Splits an image into a 3×3 grid of nine equal tiles."
"category": "Image Tools/Crop"
}
]
},

View File

@ -1579,8 +1579,7 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Depth to image",
"description": "Generates an image from a depth map using Z-Image-Turbo with text conditioning."
"category": "Image generation and editing/Depth to image"
},
{
"id": "458bdf3c-4b58-421c-af50-c9c663a4d74c",
@ -2462,8 +2461,7 @@
]
},
"workflowRendererVersion": "LG"
},
"description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model."
}
}
]
},

View File

@ -4233,8 +4233,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Depth to video",
"description": "Generates video from depth maps using LTX-2, with optional synchronized audio."
"category": "Video generation and editing/Depth to video"
},
{
"id": "38b60539-50a7-42f9-a5fe-bdeca26272e2",
@ -5193,8 +5192,7 @@
],
"extra": {
"workflowRendererVersion": "LG"
},
"description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model."
}
}
]
},

View File

@ -450,10 +450,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Blur",
"description": "Applies bilateral (edge-preserving) blur to soften images while retaining detail."
"category": "Image Tools/Blur"
}
]
},
"extra": {}
}
}

View File

@ -580,9 +580,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Adds procedural film grain texture for a cinematic look via GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
}
}
}

View File

@ -3350,8 +3350,7 @@
}
],
"extra": {},
"category": "Video generation and editing/First-Last-Frame to Video",
"description": "Generates a video interpolating between first and last keyframes using LTX-2.3."
"category": "Video generation and editing/First-Last-Frame to Video"
}
]
},

View File

@ -575,9 +575,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Adds a glow/bloom effect around bright image areas via GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
}
}
}

View File

@ -752,9 +752,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Adjusts hue, saturation, and lightness of an image using a real-time GPU fragment shader."
"category": "Image Tools/Color adjust"
}
]
}
}
}

View File

@ -374,8 +374,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Blur",
"description": "Applies Gaussian, Box, or Radial blur to soften images and create stylized depth or motion effects."
"category": "Image Tools/Blur"
}
]
}

View File

@ -310,8 +310,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Text generation/Image Captioning",
"description": "Generates descriptive captions for images using Google's Gemini multimodal LLM."
"category": "Text generation/Image Captioning"
}
]
}

View File

@ -315,9 +315,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Manipulates individual RGBA channels for masking, compositing, and channel effects."
"category": "Image Tools/Color adjust"
}
]
}
}
}

View File

@ -2138,8 +2138,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Edit image",
"description": "Edits images via text instructions using FireRed Image Edit 1.1, a diffusion-based instruction-following editing model."
"category": "Image generation and editing/Edit image"
}
]
},

View File

@ -1472,8 +1472,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Edit image",
"description": "Edits an input image via text instructions using FLUX.2 [klein] 4B."
"category": "Image generation and editing/Edit image"
},
{
"id": "6007e698-2ebd-4917-84d8-299b35d7b7ab",
@ -1822,8 +1821,7 @@
],
"extra": {
"workflowRendererVersion": "LG"
},
"description": "Applies reference image conditioning for style/identity transfer (Flux.2 Klein 4B)."
}
}
]
},
@ -1839,4 +1837,4 @@
}
},
"version": 0.4
}
}

View File

@ -1417,8 +1417,7 @@
}
],
"extra": {},
"category": "Image generation and editing/Edit image",
"description": "Edits images via text instructions using LongCat Image Edit, an instruction-following image editing diffusion model."
"category": "Image generation and editing/Edit image"
}
]
},

View File

@ -132,7 +132,7 @@
},
"revision": 0,
"config": {},
"name": "Image Edit (Qwen 2511)",
"name": "local-Image Edit (Qwen 2511)",
"inputNode": {
"id": -10,
"bounding": [
@ -1468,8 +1468,7 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Edit image",
"description": "Edits images via text instructions using Qwen-Image-Edit-2511 with improved character consistency and integrated LoRA."
"category": "Image generation and editing/Edit image"
}
]
},
@ -1490,4 +1489,4 @@
}
},
"version": 0.4
}
}

View File

@ -1188,8 +1188,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Inpaint image",
"description": "Inpaints masked image regions using Flux.1 fill [dev], Black Forest Labs' inpainting/outpainting model."
"category": "Image generation and editing/Inpaint image"
}
]
},
@ -1203,4 +1202,4 @@
},
"ue_links": []
}
}
}

View File

@ -1548,8 +1548,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Inpaint image",
"description": "Inpaints masked regions using Qwen-Image, extending its multilingual text rendering to inpainting tasks."
"category": "Image generation and editing/Inpaint image"
},
{
"id": "56a1f603-fbd2-40ed-94ef-c9ecbd96aca8",
@ -1908,8 +1907,7 @@
],
"extra": {
"workflowRendererVersion": "LG"
},
"description": "Expands and softens mask edges to reduce visible seams after image processing."
}
}
]
},

View File

@ -742,10 +742,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust",
"description": "Adjusts black point, white point, and gamma for tonal range control via GPU shader."
"category": "Image Tools/Color adjust"
}
]
},
"extra": {}
}
}

View File

@ -1919,8 +1919,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Outpaint image",
"description": "Outpaints beyond image boundaries using Qwen-Image's outpainting capabilities."
"category": "Image generation and editing/Outpaint image"
},
{
"id": "f93c215e-c393-460e-9534-ed2c3d8a652e",
@ -2279,8 +2278,7 @@
],
"extra": {
"workflowRendererVersion": "LG"
},
"description": "Expands and softens mask edges to reduce visible seams after image processing."
}
},
{
"id": "2a4b2cc0-db37-4302-a067-da392f38f06b",
@ -2735,8 +2733,7 @@
],
"extra": {
"workflowRendererVersion": "LG"
},
"description": "Scales both image and mask together while preserving alignment for editing workflows."
}
}
]
},

View File

@ -141,7 +141,7 @@
},
"revision": 0,
"config": {},
"name": "Image Upscale (Z-image-Turbo)",
"name": "local-Image Upscale(Z-image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -1302,8 +1302,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Enhance",
"description": "Upscales images to higher resolution using Z-Image-Turbo."
"category": "Image generation and editing/Enhance"
}
]
},

View File

@ -99,7 +99,7 @@
},
"revision": 0,
"config": {},
"name": "Image to Depth Map (Lotus)",
"name": "local-Image to Depth Map (Lotus)",
"inputNode": {
"id": -10,
"bounding": [
@ -948,8 +948,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Depth to image",
"description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model."
"category": "Image generation and editing/Depth to image"
}
]
},
@ -965,4 +964,4 @@
"workflowRendererVersion": "LG"
},
"version": 0.4
}
}

View File

@ -1586,8 +1586,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Image to layers",
"description": "Decomposes an image into variable-resolution RGBA layers for independent editing using Qwen-Image-Layered."
"category": "Image generation and editing/Image to layers"
}
]
},

View File

@ -72,7 +72,7 @@
},
"revision": 0,
"config": {},
"name": "Image to 3D Model (Hunyuan3d 2.1)",
"name": "local-Image to Model (Hunyuan3d 2.1)",
"inputNode": {
"id": -10,
"bounding": [
@ -765,8 +765,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "3D/Image to 3D Model",
"description": "Generates 3D mesh models from a single input image using Hunyuan3D 2.0/2.1."
"category": "3D/Image to 3D Model"
}
]
},

View File

@ -4223,8 +4223,7 @@
"extra": {
"workflowRendererVersion": "Vue-corrected"
},
"category": "Video generation and editing/Image to video",
"description": "Generates video from a single input image using LTX-2.3."
"category": "Video generation and editing/Image to video"
}
]
},

View File

@ -206,7 +206,7 @@
},
"revision": 0,
"config": {},
"name": "Image to Video (Wan 2.2)",
"name": "local-Image to Video (Wan 2.2)",
"inputNode": {
"id": -10,
"bounding": [
@ -2027,8 +2027,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Image to video",
"description": "Generates video from an image and text prompt using Wan 2.2, supporting T2V and I2V."
"category": "Video generation and editing/Image to video"
}
]
},

View File

@ -134,7 +134,7 @@
},
"revision": 0,
"config": {},
"name": "Pose to Image (Z-Image-Turbo)",
"name": "local-Pose to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -1298,8 +1298,7 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Pose to image",
"description": "Generates an image from pose keypoints using Z-Image-Turbo with text conditioning."
"category": "Image generation and editing/Pose to image"
}
]
},
@ -1320,4 +1319,4 @@
}
},
"version": 0.4
}
}

View File

@ -3870,8 +3870,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Pose to video",
"description": "Generates video from pose reference frames using LTX-2, with optional synchronized audio."
"category": "Video generation and editing/Pose to video"
}
]
},

View File

@ -270,10 +270,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Text generation/Prompt enhance",
"description": "Expands short text prompts into detailed descriptions using a text generation model for better generation quality."
"category": "Text generation/Prompt enhance"
}
]
},
"extra": {}
}
}

View File

@ -302,9 +302,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Sharpen",
"description": "Sharpens image details using a GPU fragment shader for enhanced clarity."
"category": "Image Tools/Sharpen"
}
]
}
}
}

View File

@ -222,7 +222,7 @@
},
"revision": 0,
"config": {},
"name": "Text to Audio (ACE-Step 1.5)",
"name": "local-Text to Audio (ACE-Step 1.5)",
"inputNode": {
"id": -10,
"bounding": [
@ -1502,8 +1502,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Audio/Music generation",
"description": "Generates audio/music from text prompts using ACE-Step 1.5, a diffusion-based audio generation model."
"category": "Audio/Music generation"
}
]
},
@ -1519,4 +1518,4 @@
}
},
"version": 0.4
}
}

View File

@ -1029,8 +1029,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Flux.1 [dev], Black Forest Labs' 12B diffusion model."
"category": "Image generation and editing/Text to image"
}
]
},
@ -1044,4 +1043,4 @@
},
"ue_links": []
}
}
}

View File

@ -1023,8 +1023,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Flux.1 Krea Dev, a Black Forest Labs × Krea collaboration variant."
"category": "Image generation and editing/Text to image"
}
]
},
@ -1038,4 +1037,4 @@
},
"ue_links": []
}
}
}

View File

@ -1104,8 +1104,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using NetaYume Lumina, fine-tuned from Neta Lumina for anime-style and illustration generation."
"category": "Image generation and editing/Text to image"
},
{
"id": "a07fdf06-1bda-4dac-bdbd-63ee8ebca1c9",
@ -1459,12 +1458,11 @@
],
"extra": {
"workflowRendererVersion": "LG"
},
"description": "Encodes a negative text prompt via CLIP for classifier-free guidance in anime-style generation (NetaYume Lumina)."
}
}
]
},
"extra": {
"ue_links": []
}
}
}

View File

@ -1941,8 +1941,7 @@
"extra": {
"workflowRendererVersion": "Vue-corrected"
},
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Qwen-Image-2512, with enhanced human realism and finer natural detail over the base version."
"category": "Image generation and editing/Text to image"
}
]
},

View File

@ -1873,8 +1873,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Qwen-Image, Alibaba's 20B MMDiT model with excellent multilingual text rendering."
"category": "Image generation and editing/Text to image"
}
]
},

View File

@ -149,7 +149,7 @@
},
"revision": 0,
"config": {},
"name": "Text to Image (Z-Image-Turbo)",
"name": "local-Text to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -1054,8 +1054,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Z-Image-Turbo, Alibaba's distilled 6B DiT model."
"category": "Image generation and editing/Text to image"
}
]
},
@ -1076,4 +1075,4 @@
}
},
"version": 0.4
}
}

View File

@ -4286,8 +4286,7 @@
"extra": {
"workflowRendererVersion": "Vue-corrected"
},
"category": "Video generation and editing/Text to video",
"description": "Generates video from text prompts using LTX-2.3, Lightricks' video diffusion model."
"category": "Video generation and editing/Text to video"
}
]
},

View File

@ -1572,8 +1572,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Text to video",
"description": "Generates video from text prompts using Wan2.2, Alibaba's diffusion video model."
"category": "Video generation and editing/Text to video"
}
]
},
@ -1587,4 +1586,4 @@
"VHS_KeepIntermediate": true
},
"version": 0.4
}
}

View File

@ -434,9 +434,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Sharpen",
"description": "Enhances edge contrast via unsharp masking for a sharper image appearance."
"category": "Image Tools/Sharpen"
}
]
}
}
}

View File

@ -307,8 +307,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Text generation/Video Captioning",
"description": "Generates descriptive captions for video input using Google's Gemini multimodal LLM."
"category": "Text generation/Video Captioning"
}
]
}

View File

@ -165,7 +165,7 @@
},
"revision": 0,
"config": {},
"name": "Video Inpaint (Wan 2.1 VACE)",
"name": "local-Video Inpaint(Wan2.1 VACE)",
"inputNode": {
"id": -10,
"bounding": [
@ -2368,8 +2368,7 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Inpaint video",
"description": "Inpaints masked regions in video frames using Wan 2.1 VACE."
"category": "Video generation and editing/Inpaint video"
}
]
},

View File

@ -584,9 +584,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video Tools/Stitch videos",
"description": "Stitches multiple video clips into a single sequential video file."
"category": "Video Tools/Stitch videos"
}
]
}
}
}

View File

@ -412,10 +412,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Enhance video",
"description": "Upscales video to 4× resolution using a GAN-based upscaling model."
"category": "Video generation and editing/Enhance video"
}
]
},
"extra": {}
}
}

View File

@ -1390,7 +1390,7 @@ def convert_old_quants(state_dict, model_prefix="", metadata={}):
k_out = "{}.weight_scale".format(layer)
if layer is not None:
layer_conf = {"format": "float8_e4m3fn"}
layer_conf = {"format": "float8_e4m3fn"} # TODO: check if anyone did some non e4m3fn scaled checkpoints
if full_precision_matrix_mult:
layer_conf["full_precision_matrix_mult"] = full_precision_matrix_mult
layers[layer] = layer_conf

View File

@ -1,11 +1,10 @@
from __future__ import annotations
from enum import Enum
from typing import Optional, Any
from typing import Optional, List, Dict, Any, Union
from pydantic import BaseModel, Field, RootModel
class TripoModelVersion(str, Enum):
v3_1_20260211 = 'v3.1-20260211'
v3_0_20250812 = 'v3.0-20250812'
v2_5_20250123 = 'v2.5-20250123'
v2_0_20240919 = 'v2.0-20240919'
@ -143,7 +142,7 @@ class TripoFileEmptyReference(BaseModel):
pass
class TripoFileReference(RootModel):
root: TripoFileTokenReference | TripoUrlReference | TripoObjectReference | TripoFileEmptyReference
root: Union[TripoFileTokenReference, TripoUrlReference, TripoObjectReference, TripoFileEmptyReference]
class TripoGetStsTokenRequest(BaseModel):
format: str = Field(..., description='The format of the image')
@ -184,7 +183,7 @@ class TripoImageToModelRequest(BaseModel):
class TripoMultiviewToModelRequest(BaseModel):
type: TripoTaskType = TripoTaskType.MULTIVIEW_TO_MODEL
files: list[TripoFileReference] = Field(..., description='The file references to convert to a model')
files: List[TripoFileReference] = Field(..., description='The file references to convert to a model')
model_version: Optional[TripoModelVersion] = Field(None, description='The model version to use for generation')
orthographic_projection: Optional[bool] = Field(False, description='Whether to use orthographic projection')
face_limit: Optional[int] = Field(None, description='The number of faces to limit the generation to')
@ -252,13 +251,27 @@ class TripoConvertModelRequest(BaseModel):
with_animation: Optional[bool] = Field(None, description='Whether to include animations')
pack_uv: Optional[bool] = Field(None, description='Whether to pack the UVs')
bake: Optional[bool] = Field(None, description='Whether to bake the model')
part_names: Optional[list[str]] = Field(None, description='The names of the parts to include')
part_names: Optional[List[str]] = Field(None, description='The names of the parts to include')
fbx_preset: Optional[TripoFbxPreset] = Field(None, description='The preset for the FBX export')
export_vertex_colors: Optional[bool] = Field(None, description='Whether to export the vertex colors')
export_orientation: Optional[TripoOrientation] = Field(None, description='The orientation for the export')
animate_in_place: Optional[bool] = Field(None, description='Whether to animate in place')
class TripoTaskRequest(RootModel):
root: Union[
TripoTextToModelRequest,
TripoImageToModelRequest,
TripoMultiviewToModelRequest,
TripoTextureModelRequest,
TripoRefineModelRequest,
TripoAnimatePrerigcheckRequest,
TripoAnimateRigRequest,
TripoAnimateRetargetRequest,
TripoStylizeModelRequest,
TripoConvertModelRequest
]
class TripoTaskOutput(BaseModel):
model: Optional[str] = Field(None, description='URL to the model')
base_model: Optional[str] = Field(None, description='URL to the base model')
@ -270,13 +283,12 @@ class TripoTask(BaseModel):
task_id: str = Field(..., description='The task ID')
type: Optional[str] = Field(None, description='The type of task')
status: Optional[TripoTaskStatus] = Field(None, description='The status of the task')
input: Optional[dict[str, Any]] = Field(None, description='The input parameters for the task')
input: Optional[Dict[str, Any]] = Field(None, description='The input parameters for the task')
output: Optional[TripoTaskOutput] = Field(None, description='The output of the task')
progress: Optional[int] = Field(None, description='The progress of the task', ge=0, le=100)
create_time: Optional[int] = Field(None, description='The creation time of the task')
running_left_time: Optional[int] = Field(None, description='The estimated time left for the task')
queue_position: Optional[int] = Field(None, description='The position in the queue')
consumed_credit: int | None = Field(None)
class TripoTaskResponse(BaseModel):
code: int = Field(0, description='The response code')
@ -284,7 +296,7 @@ class TripoTaskResponse(BaseModel):
class TripoGeneralResponse(BaseModel):
code: int = Field(0, description='The response code')
data: dict[str, str] = Field(..., description='The task ID data')
data: Dict[str, str] = Field(..., description='The task ID data')
class TripoBalanceData(BaseModel):
balance: float = Field(..., description='The account balance')

View File

@ -60,7 +60,6 @@ async def poll_until_finished(
],
status_extractor=lambda x: x.data.status,
progress_extractor=lambda x: x.data.progress,
price_extractor=lambda x: x.data.consumed_credit * 0.01 if x.data.consumed_credit else None,
estimated_duration=average_duration,
)
if response_poll.data.status == TripoTaskStatus.SUCCESS:
@ -114,6 +113,7 @@ class TripoTextToModelNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(
widgets=[
"model_version",
"style",
"texture",
"pbr",
"quad",
@ -124,17 +124,20 @@ class TripoTextToModelNode(IO.ComfyNode):
expr="""
(
$isV14 := $contains(widgets.model_version,"v1.4");
$isV3OrLater := $contains(widgets.model_version,"v3.");
$style := widgets.style;
$hasStyle := ($style != "" and $style != "none");
$withTexture := widgets.texture or widgets.pbr;
$isHdTexture := (widgets.texture_quality = "detailed");
$isDetailedGeometry := (widgets.geometry_quality = "detailed");
$credits := $isV14 ? 20 : (
($withTexture ? 20 : 10)
$baseCredits :=
$isV14 ? 20 : ($withTexture ? 20 : 10);
$credits :=
$baseCredits
+ ($hasStyle ? 5 : 0)
+ (widgets.quad ? 5 : 0)
+ ($isHdTexture ? 10 : 0)
+ (($isDetailedGeometry and $isV3OrLater) ? 20 : 0)
);
{"type":"usd","usd": $round($credits * 0.01, 2), "format": {"approximate": true}}
+ ($isDetailedGeometry ? 20 : 0);
{"type":"usd","usd": $round($credits * 0.01, 2)}
)
""",
),
@ -236,6 +239,7 @@ class TripoImageToModelNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(
widgets=[
"model_version",
"style",
"texture",
"pbr",
"quad",
@ -246,17 +250,20 @@ class TripoImageToModelNode(IO.ComfyNode):
expr="""
(
$isV14 := $contains(widgets.model_version,"v1.4");
$isV3OrLater := $contains(widgets.model_version,"v3.");
$style := widgets.style;
$hasStyle := ($style != "" and $style != "none");
$withTexture := widgets.texture or widgets.pbr;
$isHdTexture := (widgets.texture_quality = "detailed");
$isDetailedGeometry := (widgets.geometry_quality = "detailed");
$credits := $isV14 ? 30 : (
($withTexture ? 30 : 20)
$baseCredits :=
$isV14 ? 30 : ($withTexture ? 30 : 20);
$credits :=
$baseCredits
+ ($hasStyle ? 5 : 0)
+ (widgets.quad ? 5 : 0)
+ ($isHdTexture ? 10 : 0)
+ (($isDetailedGeometry and $isV3OrLater) ? 20 : 0)
);
{"type":"usd","usd": $round($credits * 0.01, 2), "format": {"approximate": true}}
+ ($isDetailedGeometry ? 20 : 0);
{"type":"usd","usd": $round($credits * 0.01, 2)}
)
""",
),
@ -351,7 +358,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
"texture_alignment", default="original_image", options=["original_image", "geometry"], optional=True, advanced=True
),
IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True, advanced=True),
IO.Boolean.Input("quad", default=False, optional=True, advanced=True, tooltip="This parameter is deprecated and does nothing."),
IO.Boolean.Input("quad", default=False, optional=True, advanced=True),
IO.Combo.Input("geometry_quality", default="standard", options=["standard", "detailed"], optional=True, advanced=True),
],
outputs=[
@ -372,6 +379,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
"model_version",
"texture",
"pbr",
"quad",
"texture_quality",
"geometry_quality",
],
@ -379,16 +387,17 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
expr="""
(
$isV14 := $contains(widgets.model_version,"v1.4");
$isV3OrLater := $contains(widgets.model_version,"v3.");
$withTexture := widgets.texture or widgets.pbr;
$isHdTexture := (widgets.texture_quality = "detailed");
$isDetailedGeometry := (widgets.geometry_quality = "detailed");
$credits := $isV14 ? 30 : (
($withTexture ? 30 : 20)
$baseCredits :=
$isV14 ? 30 : ($withTexture ? 30 : 20);
$credits :=
$baseCredits
+ (widgets.quad ? 5 : 0)
+ ($isHdTexture ? 10 : 0)
+ (($isDetailedGeometry and $isV3OrLater) ? 20 : 0)
);
{"type":"usd","usd": $round($credits * 0.01, 2), "format": {"approximate": true}}
+ ($isDetailedGeometry ? 20 : 0);
{"type":"usd","usd": $round($credits * 0.01, 2)}
)
""",
),
@ -448,7 +457,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
geometry_quality=geometry_quality,
texture_alignment=texture_alignment,
face_limit=face_limit if face_limit != -1 else None,
quad=None,
quad=quad,
),
)
return await poll_until_finished(cls, response, average_duration=80)
@ -489,7 +498,7 @@ class TripoTextureNode(IO.ComfyNode):
expr="""
(
$tq := widgets.texture_quality;
{"type":"usd","usd": ($contains($tq,"detailed") ? 0.2 : 0.1), "format": {"approximate": true}}
{"type":"usd","usd": ($contains($tq,"detailed") ? 0.2 : 0.1)}
)
""",
),
@ -546,7 +555,7 @@ class TripoRefineNode(IO.ComfyNode):
is_api_node=True,
is_output_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.3, "format": {"approximate": true}}""",
expr="""{"type":"usd","usd":0.3}""",
),
)
@ -583,7 +592,7 @@ class TripoRigNode(IO.ComfyNode):
is_api_node=True,
is_output_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.25, "format": {"approximate": true}}""",
expr="""{"type":"usd","usd":0.25}""",
),
)
@ -643,7 +652,7 @@ class TripoRetargetNode(IO.ComfyNode):
is_api_node=True,
is_output_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.1, "format": {"approximate": true}}""",
expr="""{"type":"usd","usd":0.1}""",
),
)
@ -752,10 +761,19 @@ class TripoConversionNode(IO.ComfyNode):
"face_limit",
"texture_size",
"texture_format",
"force_symmetry",
"flatten_bottom",
"flatten_bottom_threshold",
"pivot_to_center_bottom",
"scale_factor",
"with_animation",
"pack_uv",
"bake",
"part_names",
"fbx_preset",
"export_vertex_colors",
"export_orientation",
"animate_in_place",
],
),
expr="""
@ -765,16 +783,28 @@ class TripoConversionNode(IO.ComfyNode):
$flatThresh := (widgets.flatten_bottom_threshold != null) ? widgets.flatten_bottom_threshold : 0;
$scale := (widgets.scale_factor != null) ? widgets.scale_factor : 1;
$texFmt := (widgets.texture_format != "" ? widgets.texture_format : "jpeg");
$part := widgets.part_names;
$fbx := (widgets.fbx_preset != "" ? widgets.fbx_preset : "blender");
$orient := (widgets.export_orientation != "" ? widgets.export_orientation : "default");
$advanced :=
widgets.quad or
widgets.force_symmetry or
widgets.flatten_bottom or
widgets.pivot_to_center_bottom or
widgets.with_animation or
widgets.pack_uv or
widgets.bake or
widgets.export_vertex_colors or
widgets.animate_in_place or
($face != -1) or
($texSize != 4096) or
($flatThresh != 0) or
($scale != 1) or
($texFmt != "jpeg");
{"type":"usd","usd": ($advanced ? 0.1 : 0.05), "format": {"approximate": true}}
($texFmt != "jpeg") or
($part != "") or
($fbx != "blender") or
($orient != "default");
{"type":"usd","usd": ($advanced ? 0.1 : 0.05)}
)
""",
),

View File

@ -74,8 +74,6 @@ tags:
description: Cloud workflow management and versioning (cloud-only)
- name: task
description: Background task management (cloud-only)
- name: runtime-only
description: Operations served exclusively by the cloud runtime with no local equivalent
paths:
# ---------------------------------------------------------------------------
@ -2575,38 +2573,35 @@ paths:
# ---------------------------------------------------------------------------
/api/experiment/nodes:
get:
operationId: getNodeInfoSchema
tags: [runtime-only]
summary: Get pre-rendered node info schema
description: "[cloud-only] Returns the static ComfyUI object_info schema, identical for every caller, rendered once at startup with empty model/user-file context. Served by a raw HTTP handler that writes pre-rendered bytes with ETag + Cache-Control validators for RFC 7232 conditional GETs."
operationId: listCloudNodes
tags: [node]
summary: List installed custom nodes
description: "[cloud-only] Returns the list of custom node packages installed in the cloud runtime."
x-runtime: [cloud]
parameters:
- name: If-None-Match
in: header
required: false
- name: limit
in: query
schema:
type: string
description: Entity tag previously returned by this endpoint. When present and matching, the server returns 304 Not Modified.
type: integer
description: Maximum number of results
- name: offset
in: query
schema:
type: integer
description: Pagination offset
responses:
"200":
description: Node info schema
headers:
ETag:
schema:
type: string
description: Entity tag for conditional request validation
Cache-Control:
schema:
type: string
description: Cache directives for the response
description: Custom node list
content:
application/json:
schema:
type: object
additionalProperties:
$ref: "#/components/schemas/NodeInfo"
"304":
description: Not Modified — returned when the client sends a matching If-None-Match header
$ref: "#/components/schemas/CloudNodeList"
"401":
description: Unauthorized
content:
application/json:
schema:
$ref: "#/components/schemas/CloudError"
post:
operationId: installCloudNode
tags: [node]
@ -2656,10 +2651,10 @@ paths:
/api/experiment/nodes/{id}:
get:
operationId: getNodeByID
tags: [runtime-only]
summary: Get a single node definition by ID
description: "[cloud-only] Returns one node's definition from the pre-indexed object_info schema. Served by a raw HTTP handler that writes pre-rendered bytes with ETag + Cache-Control validators for RFC 7232 conditional GETs."
operationId: getCloudNode
tags: [node]
summary: Get details of an installed custom node
description: "[cloud-only] Returns details about a specific installed custom node package."
x-runtime: [cloud]
parameters:
- name: id
@ -2667,33 +2662,26 @@ paths:
required: true
schema:
type: string
description: Node class identifier
- name: If-None-Match
in: header
required: false
schema:
type: string
description: Entity tag previously returned by this endpoint. When present and matching, the server returns 304 Not Modified.
description: Custom node package ID
responses:
"200":
description: Single node definition
headers:
ETag:
schema:
type: string
description: Entity tag for conditional request validation
Cache-Control:
schema:
type: string
description: Cache directives for the response
description: Node detail
content:
application/json:
schema:
$ref: "#/components/schemas/NodeInfo"
"304":
description: Not Modified — returned when the client sends a matching If-None-Match header
$ref: "#/components/schemas/CloudNode"
"401":
description: Unauthorized
content:
application/json:
schema:
$ref: "#/components/schemas/CloudError"
"404":
description: Node not found
description: Not found
content:
application/json:
schema:
$ref: "#/components/schemas/CloudError"
delete:
operationId: uninstallCloudNode
tags: [node]
@ -7112,6 +7100,22 @@ components:
enabled:
type: boolean
CloudNodeList:
type: object
x-runtime: [cloud]
description: "[cloud-only] Paginated list of installed custom node packages."
required:
- nodes
properties:
nodes:
type: array
items:
$ref: "#/components/schemas/CloudNode"
total:
type: integer
has_more:
type: boolean
HubLabel:
type: object
x-runtime: [cloud]