Compare commits

..

8 Commits

Author SHA1 Message Date
73eb9647a2 fta 2024-11-05 21:13:43 +08:00
a033a53a32 fta 2024-11-05 21:10:30 +08:00
cae7f7523b Merge branch 'main' into feat/support-extractor-tools 2024-11-05 16:31:19 +08:00
766d88b29e fta 2024-11-05 16:30:49 +08:00
2434b97f84 Merge branch 'main' into feat/support-extractor-tools 2024-11-05 14:44:42 +08:00
bc7cc06572 text extractor tool 2024-11-04 14:52:02 +08:00
67b1190535 Merge branch 'main' into feat/support-extractor-tools 2024-11-04 14:24:16 +08:00
79d284d686 update knowledge api url 2024-10-31 13:43:06 +08:00
315 changed files with 1727 additions and 7363 deletions

View File

@ -1,36 +0,0 @@
name: Setup Poetry and Python
inputs:
python-version:
description: Python version to use and the Poetry installed with
required: true
default: '3.10'
poetry-version:
description: Poetry version to set up
required: true
default: '1.8.4'
poetry-lockfile:
description: Path to the Poetry lockfile to restore cache from
required: true
default: ''
runs:
using: composite
steps:
- name: Set up Python ${{ inputs.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ inputs.python-version }}
cache: pip
- name: Install Poetry
shell: bash
run: pip install poetry==${{ inputs.poetry-version }}
- name: Restore Poetry cache
if: ${{ inputs.poetry-lockfile != '' }}
uses: actions/setup-python@v5
with:
python-version: ${{ inputs.python-version }}
cache: poetry
cache-dependency-path: ${{ inputs.poetry-lockfile }}

View File

@ -7,7 +7,6 @@ on:
paths:
- api/**
- docker/**
- .github/workflows/api-tests.yml
concurrency:
group: api-tests-${{ github.head_ref || github.run_id }}
@ -28,11 +27,16 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Poetry and Python ${{ matrix.python-version }}
uses: ./.github/actions/setup-poetry
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
poetry-lockfile: api/poetry.lock
cache-dependency-path: |
api/pyproject.toml
api/poetry.lock
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Check Poetry lockfile
run: |
@ -63,7 +67,7 @@ jobs:
run: sh .github/workflows/expose_service_ports.sh
- name: Set up Sandbox
uses: hoverkraft-tech/compose-action@v2.0.2
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
@ -73,3 +77,22 @@ jobs:
- name: Run Workflow
run: poetry run -C api bash dev/pytest/pytest_workflow.sh
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.yaml
services: |
weaviate
qdrant
couchbase-server
etcd
minio
milvus-standalone
pgvecto-rs
pgvector
chroma
elasticsearch
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh
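For reference, the vector-store job above can be approximated locally. A minimal sketch, assuming Docker is available and the repository root is the working directory; the service names come from the compose file the job references, and starting a subset is often enough for a quick check:

```bash
poetry install -C api --with dev
cp docker/.env.example docker/.env
cp docker/middleware.env.example docker/middleware.env
sh .github/workflows/expose_service_ports.sh
# Start a subset of the vector stores the job lists
docker compose -f docker/docker-compose.yaml up -d weaviate qdrant pgvector chroma elasticsearch
# Same script the workflow invokes
poetry run -C api bash dev/pytest/pytest_vdb.sh
```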

View File

@ -6,7 +6,6 @@ on:
- main
paths:
- api/migrations/**
- .github/workflows/db-migration-test.yml
concurrency:
group: db-migration-test-${{ github.ref }}
@ -15,15 +14,25 @@ concurrency:
jobs:
db-migration-test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.10"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Poetry and Python
uses: ./.github/actions/setup-poetry
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
poetry-lockfile: api/poetry.lock
python-version: ${{ matrix.python-version }}
cache-dependency-path: |
api/pyproject.toml
api/poetry.lock
- name: Install Poetry
uses: abatilo/actions-poetry@v3
- name: Install dependencies
run: poetry install -C api
@ -34,7 +43,7 @@ jobs:
cp middleware.env.example middleware.env
- name: Set up Middlewares
uses: hoverkraft-tech/compose-action@v2.0.2
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
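A rough local equivalent of this migration job, as a sketch under stated assumptions: the job's remaining steps are truncated in this view, and the final `flask db upgrade` command is an assumption based on Flask-Migrate conventions rather than something shown in the hunk:

```bash
poetry install -C api
cp docker/middleware.env.example docker/middleware.env
docker compose -f docker/docker-compose.middleware.yaml up -d
# Assumed final step (Flask-Migrate); not shown in the truncated hunk above
poetry run -C api flask db upgrade
```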

View File

@ -22,28 +22,34 @@ jobs:
id: changed-files
uses: tj-actions/changed-files@v45
with:
files: |
api/**
.github/workflows/style.yml
files: api/**
- name: Setup Poetry and Python
- name: Set up Python
uses: actions/setup-python@v5
if: steps.changed-files.outputs.any_changed == 'true'
uses: ./.github/actions/setup-poetry
with:
python-version: '3.10'
- name: Install dependencies
- name: Install Poetry
if: steps.changed-files.outputs.any_changed == 'true'
uses: abatilo/actions-poetry@v3
- name: Python dependencies
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry install -C api --only lint
- name: Ruff check
if: steps.changed-files.outputs.any_changed == 'true'
run: |
poetry run -C api ruff check ./api
poetry run -C api ruff format --check ./api
run: poetry run -C api ruff check ./api
- name: Dotenv check
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example
- name: Ruff formatter check
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry run -C api ruff format --check ./api
- name: Lint hints
if: failure()
run: echo "Please run 'dev/reformat' to fix the fixable linting errors."
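The style checks above map directly onto local commands; a sketch using the same invocations as the workflow:

```bash
poetry install -C api --only lint
poetry run -C api ruff check ./api
poetry run -C api ruff format --check ./api
poetry run -C api dotenv-linter ./api/.env.example ./web/.env.example
# Auto-fix most findings, as the lint hint suggests
dev/reformat
```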

View File

@ -1,71 +0,0 @@
name: Run VDB Tests
on:
pull_request:
branches:
- main
paths:
- api/core/rag/datasource/**
- docker/**
- .github/workflows/vdb-tests.yml
concurrency:
group: vdb-tests-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
test:
name: VDB Tests
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.10"
- "3.11"
- "3.12"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Poetry and Python ${{ matrix.python-version }}
uses: ./.github/actions/setup-poetry
with:
python-version: ${{ matrix.python-version }}
poetry-lockfile: api/poetry.lock
- name: Check Poetry lockfile
run: |
poetry check -C api --lock
poetry show -C api
- name: Install dependencies
run: poetry install -C api --with dev
- name: Set up dotenvs
run: |
cp docker/.env.example docker/.env
cp docker/middleware.env.example docker/middleware.env
- name: Expose Service Ports
run: sh .github/workflows/expose_service_ports.sh
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
uses: hoverkraft-tech/compose-action@v2.0.2
with:
compose-file: |
docker/docker-compose.yaml
services: |
weaviate
qdrant
couchbase-server
etcd
minio
milvus-standalone
pgvecto-rs
pgvector
chroma
elasticsearch
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh

.gitignore
View File

@ -175,7 +175,6 @@ docker/volumes/pgvector/data/*
docker/volumes/pgvecto_rs/data/*
docker/volumes/couchbase/*
docker/volumes/oceanbase/*
!docker/volumes/oceanbase/init.d
docker/nginx/conf.d/default.conf
docker/nginx/ssl/*

View File

@ -46,18 +46,45 @@
</p>
Dify is an open-source LLM app development platform. Its intuitive interface combines agentic AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production.
## Table of Contents
0. [Quick-Start🚀](https://github.com/langgenius/dify?tab=readme-ov-file#quick-start)
1. [Intro📖](https://github.com/langgenius/dify?tab=readme-ov-file#intro)
2. [How to use🔧](https://github.com/langgenius/dify?tab=readme-ov-file#using-dify)
3. [Stay Ahead🏃](https://github.com/langgenius/dify?tab=readme-ov-file#staying-ahead)
4. [Next Steps🏹](https://github.com/langgenius/dify?tab=readme-ov-file#next-steps)
5. [Contributing💪](https://github.com/langgenius/dify?tab=readme-ov-file#contributing)
6. [Community and Contact🏠](https://github.com/langgenius/dify?tab=readme-ov-file#community--contact)
7. [Star-History📈](https://github.com/langgenius/dify?tab=readme-ov-file#star-history)
8. [Security🔒](https://github.com/langgenius/dify?tab=readme-ov-file#security-disclosure)
9. [License🤝](https://github.com/langgenius/dify?tab=readme-ov-file#license)
> Make sure you read through this README before you start utilizing Dify😊
## Quick start
The quickest way to deploy Dify locally is to run our [docker-compose.yml](https://github.com/langgenius/dify/blob/main/docker/docker-compose.yaml). Follow the instructions to start in 5 minutes.
> Before installing Dify, make sure your machine meets the following minimum system requirements:
>
>- CPU >= 2 Core
>- RAM >= 4 GiB
>- Docker and Docker Compose Installed
</br>
The easiest way to start the Dify server is through [docker compose](docker/docker-compose.yaml). Before running Dify with the following commands, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
Run the following command in your terminal to clone the whole repo.
```bash
git clone https://github.com/langgenius/dify.git
```
After cloning, run the following commands one by one.
```bash
cd dify
cd docker
@ -65,14 +92,13 @@ cp .env.example .env
docker compose up -d
```
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process.
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization process. You will be asked to set up an admin account.
For more info on quick setup, check [here](https://docs.dify.ai/getting-started/install-self-hosted/docker-compose)
#### Seeking help
Please refer to our [FAQ](https://docs.dify.ai/getting-started/install-self-hosted/faqs) if you encounter problems setting up Dify. Reach out to [the community and us](#community--contact) if you are still having issues.
## Intro
Dify is an open-source LLM app development platform. Its intuitive interface combines AI workflow, RAG pipeline, agent capabilities, model management, observability features and more, letting you quickly go from prototype to production. Here's a list of the core features:
</br> </br>
> If you'd like to contribute to Dify or do additional development, refer to our [guide to deploying from source code](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
## Key features
**1. Workflow**:
Build and test powerful AI workflows on a visual canvas, leveraging all the following features and beyond.
@ -123,8 +149,20 @@ Star Dify on GitHub and be instantly notified of new releases.
![star-us](https://github.com/langgenius/dify/assets/13230914/b823edc1-6388-4e25-ad45-2f6b187adbb4)
## Next steps
## Advanced Setup
Go to [quick-start](https://github.com/langgenius/dify?tab=readme-ov-file#quick-start) to set up your Dify installation, or set it up from source code.
#### If you...
If you forget your admin account password, you can refer to this [guide](https://docs.dify.ai/getting-started/install-self-hosted/faqs#id-4.-how-to-reset-the-password-of-the-admin-account) to reset it.
> Use `docker compose up` without `-d` to print logs in your terminal. This might be useful if you run into unknown problems when using Dify.
If you encounter a system error and would like help via GitHub issues, make sure you always paste the error logs in your request to accelerate the conversation. Go to [Community & contact](https://github.com/langgenius/dify?tab=readme-ov-file#community--contact) for more information.
> Please read the [Dify Documentation](https://docs.dify.ai/) for detailed usage guidance. Most potential problems are explained in the docs.
> If you'd like to contribute to Dify or do additional development, refer to our [guide to deploying from source code](https://docs.dify.ai/getting-started/install-self-hosted/local-source-code)
If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
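A minimal sketch of that customization flow, assuming a Docker Compose deployment driven from the `docker` directory; `VECTOR_STORE` is just one example variable from `.env.example`:

```bash
cd docker
cp .env.example .env   # skip if the file already exists
# Example: switch the vector store (supported values are listed in .env.example)
sed -i 's/^VECTOR_STORE=.*/VECTOR_STORE=qdrant/' .env
# Re-create the containers so the new values take effect
docker compose up -d
```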
@ -152,18 +190,19 @@ At the same time, please consider supporting Dify by sharing it on social media
> We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c).
**Contributors**
<a href="https://github.com/langgenius/dify/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langgenius/dify" />
</a>
## Community & contact
* [GitHub Discussions](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
* [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
* [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
* [X(Twitter)](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
**Contributors**
<a href="https://github.com/langgenius/dify/graphs/contributors">
<img src="https://contrib.rocks/image?repo=langgenius/dify" />
</a>
* If possible, attach logs when reporting an error, to maximize solution efficiency.
## Star history
@ -177,4 +216,3 @@ To protect your privacy, please avoid posting security issues on GitHub. Instead
## License
This repository is available under the [Dify Open Source License](LICENSE), which is essentially Apache 2.0 with a few additional restrictions.

View File

@ -121,7 +121,7 @@ WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
# Vector database configuration, support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm, oceanbase
# Vector database configuration, support: weaviate, qdrant, milvus, myscale, relyt, pgvecto_rs, pgvector, chroma, opensearch, tidb_vector, couchbase, vikingdb, upstash, lindorm
VECTOR_STORE=weaviate
# Weaviate configuration
@ -273,7 +273,7 @@ LINDORM_PASSWORD=admin
OCEANBASE_VECTOR_HOST=127.0.0.1
OCEANBASE_VECTOR_PORT=2881
OCEANBASE_VECTOR_USER=root@test
OCEANBASE_VECTOR_PASSWORD=difyai123456
OCEANBASE_VECTOR_PASSWORD=
OCEANBASE_VECTOR_DATABASE=test
OCEANBASE_MEMORY_LIMIT=6G
@ -285,9 +285,8 @@ UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
# Model configuration
# Model Configuration
MULTIMODAL_SEND_IMAGE_FORMAT=base64
MULTIMODAL_SEND_VIDEO_FORMAT=base64
PROMPT_GENERATION_MAX_TOKENS=512
CODE_GENERATION_MAX_TOKENS=1024
@ -321,14 +320,9 @@ ETL_TYPE=dify
UNSTRUCTURED_API_URL=
UNSTRUCTURED_API_KEY=
#ssrf
SSRF_PROXY_HTTP_URL=
SSRF_PROXY_HTTPS_URL=
SSRF_DEFAULT_MAX_RETRIES=3
SSRF_DEFAULT_TIME_OUT=5
SSRF_DEFAULT_CONNECT_TIME_OUT=5
SSRF_DEFAULT_READ_TIME_OUT=5
SSRF_DEFAULT_WRITE_TIME_OUT=5
BATCH_UPLOAD_LIMIT=10
KEYWORD_DATA_SOURCE_TYPE=database
@ -367,10 +361,6 @@ LOG_FILE=
LOG_FILE_MAX_SIZE=20
# Log file max backup count
LOG_FILE_BACKUP_COUNT=5
# Log dateformat
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
# Log Timezone
LOG_TZ=UTC
# Indexing configuration
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=1000
@ -400,5 +390,3 @@ POSITION_PROVIDER_EXCLUDES=
# Reset password token expiry minutes
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
CREATE_TIDB_SERVICE_JOB_ENABLED=false

View File

@ -4,7 +4,7 @@ FROM python:3.10-slim-bookworm AS base
WORKDIR /app/api
# Install Poetry
ENV POETRY_VERSION=1.8.4
ENV POETRY_VERSION=1.8.3
# if you located in China, you can use aliyun mirror to speed up
# RUN pip install --no-cache-dir poetry==${POETRY_VERSION} -i https://mirrors.aliyun.com/pypi/simple/
@ -55,7 +55,7 @@ RUN apt-get update \
&& echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
&& apt-get update \
# For Security
&& apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-7 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
&& apt-get install -y --no-install-recommends expat=2.6.3-2 libldap-2.5-0=2.5.18+dfsg-3+b1 perl=5.40.0-6 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
# install a chinese font to support the use of tools like matplotlib
&& apt-get install -y fonts-noto-cjk \
&& apt-get autoremove -y \

View File

@ -76,13 +76,13 @@
1. Install dependencies for both the backend and the test environment
```bash
poetry install -C api --with dev
poetry install --with dev
```
2. Run the tests locally with mocked system environment variables from the `tool.pytest_env` section of `pyproject.toml`
```bash
cd ../
poetry run -C api bash dev/pytest/pytest_all_tests.sh
```

View File

@ -1,9 +1,8 @@
import os
import sys
from configs import dify_config
if not dify_config.DEBUG:
if os.environ.get("DEBUG", "false").lower() != "true":
from gevent import monkey
monkey.patch_all()
@ -30,9 +29,6 @@ from models import account, dataset, model, source, task, tool, tools, web # no
# DO NOT REMOVE ABOVE
if sys.version_info[:2] == (3, 10):
print("Warning: Python 3.10 will not be supported in the next version.")
warnings.simplefilter("ignore", ResourceWarning)

View File

@ -1,8 +1,6 @@
import os
from configs import dify_config
if not dify_config.DEBUG:
if os.environ.get("DEBUG", "false").lower() != "true":
from gevent import monkey
monkey.patch_all()

View File

@ -109,7 +109,7 @@ class CodeExecutionSandboxConfig(BaseSettings):
)
CODE_MAX_PRECISION: PositiveInt = Field(
description="Maximum number of decimal places for floating-point numbers in code execution",
description="mMaximum number of decimal places for floating-point numbers in code execution",
default=20,
)
@ -276,16 +276,6 @@ class HttpConfig(BaseSettings):
default=1 * 1024 * 1024,
)
SSRF_DEFAULT_MAX_RETRIES: PositiveInt = Field(
description="Maximum number of retries for network requests (SSRF)",
default=3,
)
SSRF_PROXY_ALL_URL: Optional[str] = Field(
description="Proxy URL for HTTP or HTTPS requests to prevent Server-Side Request Forgery (SSRF)",
default=None,
)
SSRF_PROXY_HTTP_URL: Optional[str] = Field(
description="Proxy URL for HTTP requests to prevent Server-Side Request Forgery (SSRF)",
default=None,
@ -296,26 +286,6 @@ class HttpConfig(BaseSettings):
default=None,
)
SSRF_DEFAULT_TIME_OUT: PositiveFloat = Field(
description="The default timeout period used for network requests (SSRF)",
default=5,
)
SSRF_DEFAULT_CONNECT_TIME_OUT: PositiveFloat = Field(
description="The default connect timeout period used for network requests (SSRF)",
default=5,
)
SSRF_DEFAULT_READ_TIME_OUT: PositiveFloat = Field(
description="The default read timeout period used for network requests (SSRF)",
default=5,
)
SSRF_DEFAULT_WRITE_TIME_OUT: PositiveFloat = Field(
description="The default write timeout period used for network requests (SSRF)",
default=5,
)
RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field(
description="Enable or disable the X-Forwarded-For Proxy Fix middleware from Werkzeug"
" to respect X-* headers to redirect clients",
@ -376,7 +346,7 @@ class LoggingConfig(BaseSettings):
LOG_TZ: Optional[str] = Field(
description="Timezone for log timestamps (e.g., 'America/New_York')",
default="UTC",
default=None,
)
@ -611,11 +581,6 @@ class DataSetConfig(BaseSettings):
default=500,
)
CREATE_TIDB_SERVICE_JOB_ENABLED: bool = Field(
description="Enable or disable create tidb service job",
default=False,
)
class WorkspaceConfig(BaseSettings):
"""
@ -639,17 +604,12 @@ class IndexingConfig(BaseSettings):
)
class VisionFormatConfig(BaseSettings):
class ImageFormatConfig(BaseSettings):
MULTIMODAL_SEND_IMAGE_FORMAT: Literal["base64", "url"] = Field(
description="Format for sending images in multimodal contexts ('base64' or 'url'), default is base64",
default="base64",
)
MULTIMODAL_SEND_VIDEO_FORMAT: Literal["base64", "url"] = Field(
description="Format for sending videos in multimodal contexts ('base64' or 'url'), default is base64",
default="base64",
)
class CeleryBeatConfig(BaseSettings):
CELERY_BEAT_SCHEDULER_TIME: int = Field(
@ -752,7 +712,7 @@ class FeatureConfig(
FileAccessConfig,
FileUploadConfig,
HttpConfig,
VisionFormatConfig,
ImageFormatConfig,
InnerAPIConfig,
IndexingConfig,
LoggingConfig,

View File

@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="0.11.1",
default="0.10.2",
)
COMMIT_SHA: str = Field(

View File

@ -55,6 +55,7 @@ from .datasets import (
datasets_document,
datasets_segments,
external,
fta_test,
hit_testing,
website,
)

View File

@ -317,11 +317,8 @@ class DatasetInitApi(Resource):
raise ValueError("embedding model and embedding model provider are required for high quality indexing.")
try:
model_manager = ModelManager()
model_manager.get_model_instance(
tenant_id=current_user.current_tenant_id,
provider=args["embedding_model_provider"],
model_type=ModelType.TEXT_EMBEDDING,
model=args["embedding_model"],
model_manager.get_default_model_instance(
tenant_id=current_user.current_tenant_id, model_type=ModelType.TEXT_EMBEDDING
)
except InvokeAuthorizationError:
raise ProviderNotInitializeError(
@ -948,7 +945,7 @@ class DocumentRetryApi(DocumentResource):
raise DocumentAlreadyFinishedError()
retry_documents.append(document)
except Exception as e:
logging.exception(f"Document {document_id} retry failed: {str(e)}")
logging.error(f"Document {document_id} retry failed: {str(e)}")
continue
# retry document
DocumentService.retry_document(dataset_id, retry_documents)

View File

@ -0,0 +1,145 @@
import json
import requests
from flask import Response
from flask_restful import Resource, reqparse
from sqlalchemy import text
from controllers.console import api
from extensions.ext_database import db
from extensions.ext_storage import storage
from models.fta import ComponentFailure, ComponentFailureStats
class FATTestApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("log_process_data", nullable=False, required=True, type=str, location="args")
args = parser.parse_args()
print(args["log_process_data"])
# Extract the JSON string from the text field
json_str = args["log_process_data"].strip("```json\\n").strip("```").strip().replace("\\n", "")
log_data = json.loads(json_str)
db.session.query(ComponentFailure).delete()
for data in log_data:
if not isinstance(data, dict):
raise TypeError("Data must be a dictionary.")
required_keys = {"Date", "Component", "FailureMode", "Cause", "RepairAction", "Technician"}
if not required_keys.issubset(data.keys()):
raise ValueError(f"Data dictionary must contain the following keys: {required_keys}")
try:
# Clear existing stats
component_failure = ComponentFailure(
Date=data["Date"],
Component=data["Component"],
FailureMode=data["FailureMode"],
Cause=data["Cause"],
RepairAction=data["RepairAction"],
Technician=data["Technician"],
)
db.session.add(component_failure)
db.session.commit()
except Exception as e:
print(e)
# Clear existing stats
db.session.query(ComponentFailureStats).delete()
# Insert calculated statistics
try:
db.session.execute(
text("""
INSERT INTO component_failure_stats ("Component", "FailureMode", "Cause", "PossibleAction", "Probability", "MTBF")
SELECT
cf."Component",
cf."FailureMode",
cf."Cause",
cf."RepairAction" as "PossibleAction",
COUNT(*) * 1.0 / (SELECT COUNT(*) FROM component_failure WHERE "Component" = cf."Component") AS "Probability",
COALESCE(AVG(EXTRACT(EPOCH FROM (next_failure_date::timestamp - cf."Date"::timestamp)) / 86400.0), 0) AS "MTBF"
FROM (
SELECT
"Component",
"FailureMode",
"Cause",
"RepairAction",
"Date",
LEAD("Date") OVER (PARTITION BY "Component", "FailureMode", "Cause" ORDER BY "Date") AS next_failure_date
FROM
component_failure
) cf
GROUP BY
cf."Component", cf."FailureMode", cf."Cause", cf."RepairAction";
""")
)
db.session.commit()
except Exception as e:
db.session.rollback()
print(f"Error during stats calculation: {e}")
# output format
# [
# (17, 'Hydraulic system', 'Leak', 'Hose rupture', 'Replaced hydraulic hose', 0.3333333333333333, None),
# (18, 'Hydraulic system', 'Leak', 'Seal Wear', 'Replaced the faulty seal', 0.3333333333333333, None),
# (19, 'Hydraulic system', 'Pressure drop', 'Fluid leak', 'Replaced hydraulic fluid and seals', 0.3333333333333333, None)
# ]
component_failure_stats = db.session.query(ComponentFailureStats).all()
# Convert stats to list of tuples format
stats_list = []
for stat in component_failure_stats:
stats_list.append(
(
stat.StatID,
stat.Component,
stat.FailureMode,
stat.Cause,
stat.PossibleAction,
stat.Probability,
stat.MTBF,
)
)
return {"data": stats_list}, 200
# generate-fault-tree
class GenerateFaultTreeApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("llm_text", nullable=False, required=True, type=str, location="args")
args = parser.parse_args()
entities = args["llm_text"].replace("```", "").replace("\\n", "\n")
print(entities)
request_data = {"fault_tree_text": entities}
url = "https://fta.cognitech-dev.live/generate-fault-tree"
headers = {"accept": "application/json", "Content-Type": "application/json"}
response = requests.post(url, json=request_data, headers=headers)
print(response.json())
return {"data": response.json()}, 200
class ExtractSVGApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("svg_text", nullable=False, required=True, type=str, location="args")
args = parser.parse_args()
# svg_text = ''.join(args["svg_text"].splitlines())
svg_text = args["svg_text"].replace("\n", "")
svg_text = svg_text.replace('\\"', '"')
print(svg_text)
svg_text_json = json.loads(svg_text)
svg_content = svg_text_json.get("data").get("svg_content")[0]
svg_content = svg_content.replace("\n", "").replace('\\"', '"')
file_key = "fta_svg/" + "fat.svg"
if storage.exists(file_key):
storage.delete(file_key)
storage.save(file_key, svg_content.encode("utf-8"))
generator = storage.load(file_key, stream=True)
return Response(generator, mimetype="image/svg+xml")
api.add_resource(FATTestApi, "/fta/db-handler")
api.add_resource(GenerateFaultTreeApi, "/fta/generate-fault-tree")
api.add_resource(ExtractSVGApi, "/fta/extract-svg")
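A hypothetical smoke test for the routes registered above. The host, port, and `/console/api` prefix are assumptions (the resources are added to the console API), and the payload is illustrative; note that the parsers read parameters from the query string (`location="args"`):

```bash
# POST with parameters in the query string, matching location="args"
curl -G -X POST "http://localhost:5001/console/api/fta/generate-fault-tree" \
  --data-urlencode "llm_text=Top event: Hydraulic system failure" \
  -H "accept: application/json"
```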

View File

@ -62,27 +62,3 @@ class EmailSendIpLimitError(BaseHTTPException):
error_code = "email_send_ip_limit"
description = "Too many emails have been sent from this IP address recently. Please try again later."
code = 429
class FileTooLargeError(BaseHTTPException):
error_code = "file_too_large"
description = "File size exceeded. {message}"
code = 413
class UnsupportedFileTypeError(BaseHTTPException):
error_code = "unsupported_file_type"
description = "File type not allowed."
code = 415
class TooManyFilesError(BaseHTTPException):
error_code = "too_many_files"
description = "Only one file is allowed."
code = 400
class NoFileUploadedError(BaseHTTPException):
error_code = "no_file_uploaded"
description = "Please upload your file."
code = 400

View File

@ -15,7 +15,7 @@ from fields.file_fields import file_fields, upload_config_fields
from libs.login import login_required
from services.file_service import FileService
from .error import (
from .errors import (
FileTooLargeError,
NoFileUploadedError,
TooManyFilesError,

View File

@ -0,0 +1,25 @@
from libs.exception import BaseHTTPException
class FileTooLargeError(BaseHTTPException):
error_code = "file_too_large"
description = "File size exceeded. {message}"
code = 413
class UnsupportedFileTypeError(BaseHTTPException):
error_code = "unsupported_file_type"
description = "File type not allowed."
code = 415
class TooManyFilesError(BaseHTTPException):
error_code = "too_many_files"
description = "Only one file is allowed."
code = 400
class NoFileUploadedError(BaseHTTPException):
error_code = "no_file_uploaded"
description = "Please upload your file."
code = 400

View File

@ -1,11 +1,9 @@
import urllib.parse
from typing import cast
import httpx
from flask_login import current_user
from flask_restful import Resource, marshal_with, reqparse
import services
from controllers.common import helpers
from core.file import helpers as file_helpers
from core.helper import ssrf_proxy
@ -13,25 +11,19 @@ from fields.file_fields import file_fields_with_signed_url, remote_file_info_fie
from models.account import Account
from services.file_service import FileService
from .error import (
FileTooLargeError,
UnsupportedFileTypeError,
)
class RemoteFileInfoApi(Resource):
@marshal_with(remote_file_info_fields)
def get(self, url):
decoded_url = urllib.parse.unquote(url)
resp = ssrf_proxy.head(decoded_url)
if resp.status_code != httpx.codes.OK:
# fall back to GET method
resp = ssrf_proxy.get(decoded_url, timeout=3)
resp.raise_for_status()
return {
"file_type": resp.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(resp.headers.get("Content-Length", 0)),
}
try:
response = ssrf_proxy.head(decoded_url)
return {
"file_type": response.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(response.headers.get("Content-Length", 0)),
}
except Exception as e:
return {"error": str(e)}, 400
class RemoteFileUploadApi(Resource):
@ -43,17 +35,17 @@ class RemoteFileUploadApi(Resource):
url = args["url"]
resp = ssrf_proxy.head(url=url)
if resp.status_code != httpx.codes.OK:
resp = ssrf_proxy.get(url=url, timeout=3)
resp.raise_for_status()
response = ssrf_proxy.head(url)
response.raise_for_status()
file_info = helpers.guess_file_info_from_response(resp)
file_info = helpers.guess_file_info_from_response(response)
if not FileService.is_file_size_within_limit(extension=file_info.extension, file_size=file_info.size):
raise FileTooLargeError
return {"error": "File size exceeded"}, 400
content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content
response = ssrf_proxy.get(url)
response.raise_for_status()
content = response.content
try:
user = cast(Account, current_user)
@ -64,10 +56,8 @@ class RemoteFileUploadApi(Resource):
user=user,
source_url=url,
)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
except Exception as e:
return {"error": str(e)}, 400
return {
"id": upload_file.id,

View File

@ -7,11 +7,7 @@ from controllers.service_api import api
from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.entities.app_invoke_entities import InvokeFrom
from fields.conversation_fields import (
conversation_delete_fields,
conversation_infinite_scroll_pagination_fields,
simple_conversation_fields,
)
from fields.conversation_fields import conversation_infinite_scroll_pagination_fields, simple_conversation_fields
from libs.helper import uuid_value
from models.model import App, AppMode, EndUser
from services.conversation_service import ConversationService
@ -53,7 +49,7 @@ class ConversationApi(Resource):
class ConversationDetailApi(Resource):
@validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON))
@marshal_with(conversation_delete_fields)
@marshal_with(simple_conversation_fields)
def delete(self, app_model: App, end_user: EndUser, c_id):
app_mode = AppMode.value_of(app_model.mode)
if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:

View File

@ -41,6 +41,7 @@ class FileApi(Resource):
content=file.read(),
mimetype=file.mimetype,
user=end_user,
source="datasets",
)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)

View File

@ -10,7 +10,6 @@ from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.entities.app_invoke_entities import InvokeFrom
from fields.conversation_fields import message_file_fields
from fields.raws import FilesContainedField
from libs.helper import TimestampField, uuid_value
from models.model import App, AppMode, EndUser
from services.errors.message import SuggestedQuestionsAfterAnswerDisabledError
@ -56,7 +55,7 @@ class MessageListApi(Resource):
"id": fields.String,
"conversation_id": fields.String,
"parent_message_id": fields.String,
"inputs": FilesContainedField,
"inputs": fields.Raw,
"query": fields.String,
"answer": fields.String(attribute="re_sign_file_url_answer"),
"message_files": fields.List(fields.Nested(message_file_fields)),

View File

@ -1,9 +1,7 @@
import urllib.parse
import httpx
from flask_restful import marshal_with, reqparse
import services
from controllers.common import helpers
from controllers.web.wraps import WebApiResource
from core.file import helpers as file_helpers
@ -11,22 +9,19 @@ from core.helper import ssrf_proxy
from fields.file_fields import file_fields_with_signed_url, remote_file_info_fields
from services.file_service import FileService
from .error import FileTooLargeError, UnsupportedFileTypeError
class RemoteFileInfoApi(WebApiResource):
@marshal_with(remote_file_info_fields)
def get(self, app_model, end_user, url):
def get(self, url):
decoded_url = urllib.parse.unquote(url)
resp = ssrf_proxy.head(decoded_url)
if resp.status_code != httpx.codes.OK:
# fall back to GET method
resp = ssrf_proxy.get(decoded_url, timeout=3)
resp.raise_for_status()
return {
"file_type": resp.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(resp.headers.get("Content-Length", -1)),
}
try:
response = ssrf_proxy.head(decoded_url)
return {
"file_type": response.headers.get("Content-Type", "application/octet-stream"),
"file_length": int(response.headers.get("Content-Length", -1)),
}
except Exception as e:
return {"error": str(e)}, 400
class RemoteFileUploadApi(WebApiResource):
@ -38,30 +33,28 @@ class RemoteFileUploadApi(WebApiResource):
url = args["url"]
resp = ssrf_proxy.head(url=url)
if resp.status_code != httpx.codes.OK:
resp = ssrf_proxy.get(url=url, timeout=3)
resp.raise_for_status()
response = ssrf_proxy.head(url)
response.raise_for_status()
file_info = helpers.guess_file_info_from_response(resp)
file_info = helpers.guess_file_info_from_response(response)
if not FileService.is_file_size_within_limit(extension=file_info.extension, file_size=file_info.size):
raise FileTooLargeError
return {"error": "File size exceeded"}, 400
content = resp.content if resp.request.method == "GET" else ssrf_proxy.get(url).content
response = ssrf_proxy.get(url)
response.raise_for_status()
content = response.content
try:
upload_file = FileService.upload_file(
filename=file_info.filename,
content=content,
mimetype=file_info.mimetype,
user=end_user,
user=end_user, # Use end_user instead of current_user
source_url=url,
)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError
except Exception as e:
return {"error": str(e)}, 400
return {
"id": upload_file.id,

View File

@ -30,7 +30,6 @@ from core.model_runtime.entities import (
ToolPromptMessage,
UserPromptMessage,
)
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.model_runtime.entities.model_entities import ModelFeature
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
@ -66,7 +65,7 @@ class BaseAgentRunner(AppRunner):
prompt_messages: Optional[list[PromptMessage]] = None,
variables_pool: Optional[ToolRuntimeVariablePool] = None,
db_variables: Optional[ToolConversationVariables] = None,
model_instance: ModelInstance | None = None,
model_instance: ModelInstance = None,
) -> None:
self.tenant_id = tenant_id
self.application_generate_entity = application_generate_entity
@ -509,27 +508,24 @@ class BaseAgentRunner(AppRunner):
def organize_agent_user_prompt(self, message: Message) -> UserPromptMessage:
files = db.session.query(MessageFile).filter(MessageFile.message_id == message.id).all()
if not files:
return UserPromptMessage(content=message.query)
file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
if not file_extra_config:
return UserPromptMessage(content=message.query)
if files:
file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
image_detail_config = file_extra_config.image_config.detail if file_extra_config.image_config else None
image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
file_objs = file_factory.build_from_message_files(
message_files=files, tenant_id=self.tenant_id, config=file_extra_config
)
if not file_objs:
return UserPromptMessage(content=message.query)
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
for file in file_objs:
prompt_message_contents.append(
file_manager.to_prompt_message_content(
file,
image_detail_config=image_detail_config,
if file_extra_config:
file_objs = file_factory.build_from_message_files(
message_files=files, tenant_id=self.tenant_id, config=file_extra_config
)
)
return UserPromptMessage(content=prompt_message_contents)
else:
file_objs = []
if not file_objs:
return UserPromptMessage(content=message.query)
else:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
for file_obj in file_objs:
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
return UserPromptMessage(content=prompt_message_contents)
else:
return UserPromptMessage(content=message.query)

View File

@ -10,7 +10,6 @@ from core.model_runtime.entities import (
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.model_runtime.utils.encoders import jsonable_encoder
@ -37,24 +36,8 @@ class CotChatAgentRunner(CotAgentRunner):
if self.files:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=query))
# get image detail config
image_detail_config = (
self.application_generate_entity.file_upload_config.image_config.detail
if (
self.application_generate_entity.file_upload_config
and self.application_generate_entity.file_upload_config.image_config
)
else None
)
image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
for file in self.files:
prompt_message_contents.append(
file_manager.to_prompt_message_content(
file,
image_detail_config=image_detail_config,
)
)
for file_obj in self.files:
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:

View File

@ -22,7 +22,6 @@ from core.model_runtime.entities import (
ToolPromptMessage,
UserPromptMessage,
)
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
from core.tools.entities.tool_entities import ToolInvokeMeta
from core.tools.tool_engine import ToolEngine
@ -398,24 +397,8 @@ class FunctionCallAgentRunner(BaseAgentRunner):
if self.files:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=query))
# get image detail config
image_detail_config = (
self.application_generate_entity.file_upload_config.image_config.detail
if (
self.application_generate_entity.file_upload_config
and self.application_generate_entity.file_upload_config.image_config
)
else None
)
image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW
for file in self.files:
prompt_message_contents.append(
file_manager.to_prompt_message_content(
file,
image_detail_config=image_detail_config,
)
)
for file_obj in self.files:
prompt_message_contents.append(file_manager.to_prompt_message_content(file_obj))
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:

View File

@ -4,7 +4,7 @@ from typing import Any, Optional
from pydantic import BaseModel, Field, field_validator
from core.file import FileTransferMethod, FileType, FileUploadConfig
from core.file import FileExtraConfig, FileTransferMethod, FileType
from core.model_runtime.entities.message_entities import PromptMessageRole
from models.model import AppMode
@ -211,7 +211,7 @@ class TracingConfigEntity(BaseModel):
class AppAdditionalFeatures(BaseModel):
file_upload: Optional[FileUploadConfig] = None
file_upload: Optional[FileExtraConfig] = None
opening_statement: Optional[str] = None
suggested_questions: list[str] = []
suggested_questions_after_answer: bool = False

View File

@ -1,7 +1,7 @@
from collections.abc import Mapping
from typing import Any
from core.file import FileUploadConfig
from core.file import FileExtraConfig
class FileUploadConfigManager:
@ -29,18 +29,19 @@ class FileUploadConfigManager:
if is_vision:
data["image_config"]["detail"] = file_upload_dict.get("image", {}).get("detail", "low")
return FileUploadConfig.model_validate(data)
return FileExtraConfig.model_validate(data)
@classmethod
def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]:
def validate_and_set_defaults(cls, config: dict, is_vision: bool = True) -> tuple[dict, list[str]]:
"""
Validate and set defaults for file upload feature
:param config: app model config args
:param is_vision: if True, the feature is vision feature
"""
if not config.get("file_upload"):
config["file_upload"] = {}
else:
FileUploadConfig.model_validate(config["file_upload"])
FileExtraConfig.model_validate(config["file_upload"])
return config, ["file_upload"]

View File

@ -52,7 +52,9 @@ class AdvancedChatAppConfigManager(BaseAppConfigManager):
related_config_keys = []
# file upload validation
config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(config=config)
config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(
config=config, is_vision=False
)
related_config_keys.extend(current_related_config_keys)
# opening_statement

View File

@ -1,5 +1,6 @@
import contextvars
import logging
import os
import threading
import uuid
from collections.abc import Generator
@ -9,7 +10,6 @@ from flask import Flask, current_app
from pydantic import ValidationError
import contexts
from configs import dify_config
from constants import UUID_NIL
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager
@ -26,6 +26,7 @@ from core.ops.ops_trace_manager import TraceQueueManager
from extensions.ext_database import db
from factories import file_factory
from models.account import Account
from models.enums import CreatedByRole
from models.model import App, Conversation, EndUser, Message
from models.workflow import Workflow
@ -97,10 +98,13 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
# parse files
files = args["files"] if args.get("files") else []
file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
if file_extra_config:
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
else:
@ -123,11 +127,10 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
application_generate_entity = AdvancedChatAppGenerateEntity(
task_id=str(uuid.uuid4()),
app_config=app_config,
file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,
@ -314,7 +317,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
logger.exception("Validation Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except (ValueError, InvokeError) as e:
if dify_config.DEBUG:
if os.environ.get("DEBUG", "false").lower() == "true":
logger.exception("Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except Exception as e:

View File

@ -242,7 +242,7 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
start_listener_time = time.time()
yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id)
except Exception as e:
logger.exception(e)
logger.error(e)
break
if tts_publisher:
yield MessageAudioEndStreamResponse(audio="", task_id=task_id)

View File

@ -1,4 +1,5 @@
import logging
import os
import threading
import uuid
from collections.abc import Generator
@ -7,7 +8,6 @@ from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
from configs import dify_config
from constants import UUID_NIL
from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
@ -23,6 +23,7 @@ from core.ops.ops_trace_manager import TraceQueueManager
from extensions.ext_database import db
from factories import file_factory
from models import Account, App, EndUser
from models.enums import CreatedByRole
logger = logging.getLogger(__name__)
@ -102,6 +103,8 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
# always enable retriever resource in debugger mode
override_model_config_dict["retriever_resource"] = {"enabled": True}
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
# parse files
files = args.get("files") or []
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
@ -109,6 +112,8 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
else:
@ -130,11 +135,10 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
task_id=str(uuid.uuid4()),
app_config=app_config,
model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,
@ -226,7 +230,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
logger.exception("Validation Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except (ValueError, InvokeError) as e:
if dify_config.DEBUG:
if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true":
logger.exception("Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except Exception as e:

View File

@ -2,11 +2,12 @@ from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, Optional
from core.app.app_config.entities import VariableEntityType
from core.file import File, FileUploadConfig
from core.file import File, FileExtraConfig
from factories import file_factory
if TYPE_CHECKING:
from core.app.app_config.entities import AppConfig, VariableEntity
from models.enums import CreatedByRole
class BaseAppGenerator:
@ -15,6 +16,8 @@ class BaseAppGenerator:
*,
user_inputs: Optional[Mapping[str, Any]],
app_config: "AppConfig",
user_id: str,
role: "CreatedByRole",
) -> Mapping[str, Any]:
user_inputs = user_inputs or {}
# Filter input variables from form configuration, handle required fields, default values, and option values
@ -31,7 +34,9 @@ class BaseAppGenerator:
k: file_factory.build_from_mapping(
mapping=v,
tenant_id=app_config.tenant_id,
config=FileUploadConfig(
user_id=user_id,
role=role,
config=FileExtraConfig(
allowed_file_types=entity_dictionary[k].allowed_file_types,
allowed_extensions=entity_dictionary[k].allowed_file_extensions,
allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods,
@ -45,7 +50,9 @@ class BaseAppGenerator:
k: file_factory.build_from_mappings(
mappings=v,
tenant_id=app_config.tenant_id,
config=FileUploadConfig(
user_id=user_id,
role=role,
config=FileExtraConfig(
allowed_file_types=entity_dictionary[k].allowed_file_types,
allowed_extensions=entity_dictionary[k].allowed_file_extensions,
allowed_upload_methods=entity_dictionary[k].allowed_file_upload_methods,

View File

@ -1,4 +1,5 @@
import logging
import os
import threading
import uuid
from collections.abc import Generator
@ -7,7 +8,6 @@ from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
from configs import dify_config
from constants import UUID_NIL
from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
@ -23,6 +23,7 @@ from core.ops.ops_trace_manager import TraceQueueManager
from extensions.ext_database import db
from factories import file_factory
from models.account import Account
from models.enums import CreatedByRole
from models.model import App, EndUser
logger = logging.getLogger(__name__)
@ -100,6 +101,8 @@ class ChatAppGenerator(MessageBasedAppGenerator):
# always enable retriever resource in debugger mode
override_model_config_dict["retriever_resource"] = {"enabled": True}
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
# parse files
files = args["files"] if args.get("files") else []
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
@ -107,6 +110,8 @@ class ChatAppGenerator(MessageBasedAppGenerator):
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
else:
@ -128,11 +133,10 @@ class ChatAppGenerator(MessageBasedAppGenerator):
task_id=str(uuid.uuid4()),
app_config=app_config,
model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None,
inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
else self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
query=query,
files=file_objs,
parent_message_id=args.get("parent_message_id") if invoke_from != InvokeFrom.SERVICE_API else UUID_NIL,
@ -223,7 +227,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
logger.exception("Validation Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except (ValueError, InvokeError) as e:
if dify_config.DEBUG:
if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true":
logger.exception("Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except Exception as e:

View File

@ -1,4 +1,5 @@
import logging
import os
import threading
import uuid
from collections.abc import Generator
@ -7,7 +8,6 @@ from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
from configs import dify_config
from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedError, PublishFrom
@ -22,6 +22,7 @@ from core.ops.ops_trace_manager import TraceQueueManager
from extensions.ext_database import db
from factories import file_factory
from models import Account, App, EndUser, Message
from models.enums import CreatedByRole
from services.errors.app import MoreLikeThisDisabledError
from services.errors.message import MessageNotExistsError
@ -87,6 +88,8 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
tenant_id=app_model.tenant_id, config=args.get("model_config")
)
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
# parse files
files = args["files"] if args.get("files") else []
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict or app_model_config.to_dict())
@ -94,6 +97,8 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
file_objs = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
else:
@ -105,6 +110,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
)
# get tracing instance
user_id = user.id if isinstance(user, Account) else user.session_id
trace_manager = TraceQueueManager(app_model.id)
# init application generate entity
@ -112,8 +118,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
task_id=str(uuid.uuid4()),
app_config=app_config,
model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config,
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
query=query,
files=file_objs,
user_id=user.id,
@ -198,7 +203,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
logger.exception("Validation Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except (ValueError, InvokeError) as e:
if dify_config.DEBUG:
if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == "true":
logger.exception("Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except Exception as e:
@ -254,11 +259,14 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
override_model_config_dict["model"] = model_dict
# parse files
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
file_extra_config = FileUploadConfigManager.convert(override_model_config_dict)
if file_extra_config:
file_objs = file_factory.build_from_mappings(
mappings=message.message_files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
else:

View File

@ -46,7 +46,9 @@ class WorkflowAppConfigManager(BaseAppConfigManager):
related_config_keys = []
# file upload validation
config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(config=config)
config, current_related_config_keys = FileUploadConfigManager.validate_and_set_defaults(
config=config, is_vision=False
)
related_config_keys.extend(current_related_config_keys)
# text_to_speech

View File

@ -1,5 +1,6 @@
import contextvars
import logging
import os
import threading
import uuid
from collections.abc import Generator, Mapping, Sequence
@ -9,7 +10,6 @@ from flask import Flask, current_app
from pydantic import ValidationError
import contexts
from configs import dify_config
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.apps.base_app_generator import BaseAppGenerator
from core.app.apps.base_app_queue_manager import AppQueueManager, GenerateTaskStoppedError, PublishFrom
@ -25,6 +25,7 @@ from core.ops.ops_trace_manager import TraceQueueManager
from extensions.ext_database import db
from factories import file_factory
from models import Account, App, EndUser, Workflow
from models.enums import CreatedByRole
logger = logging.getLogger(__name__)
@ -69,11 +70,15 @@ class WorkflowAppGenerator(BaseAppGenerator):
):
files: Sequence[Mapping[str, Any]] = args.get("files") or []
role = CreatedByRole.ACCOUNT if isinstance(user, Account) else CreatedByRole.END_USER
# parse files
file_extra_config = FileUploadConfigManager.convert(workflow.features_dict, is_vision=False)
system_files = file_factory.build_from_mappings(
mappings=files,
tenant_id=app_model.tenant_id,
user_id=user.id,
role=role,
config=file_extra_config,
)
@ -95,8 +100,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
application_generate_entity = WorkflowAppGenerateEntity(
task_id=str(uuid.uuid4()),
app_config=app_config,
file_upload_config=file_extra_config,
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config),
inputs=self._prepare_user_inputs(user_inputs=inputs, app_config=app_config, user_id=user.id, role=role),
files=system_files,
user_id=user.id,
stream=stream,
@ -257,7 +261,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
logger.exception("Validation Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except (ValueError, InvokeError) as e:
if dify_config.DEBUG:
if os.environ.get("DEBUG") and os.environ.get("DEBUG", "false").lower() == "true":
logger.exception("Error when generating")
queue_manager.publish_error(e, PublishFrom.APPLICATION_MANAGER)
except Exception as e:

View File

@ -216,7 +216,7 @@ class WorkflowAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCycleMa
else:
yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id)
except Exception as e:
logger.exception(e)
logger.error(e)
break
if tts_publisher:
yield MessageAudioEndStreamResponse(audio="", task_id=task_id)

View File

@ -361,7 +361,6 @@ class WorkflowBasedAppRunner(AppRunner):
node_run_index=workflow_entry.graph_engine.graph_runtime_state.node_run_steps,
output=event.pre_iteration_output,
parallel_mode_run_id=event.parallel_mode_run_id,
duration=event.duration,
)
)
elif isinstance(event, (IterationRunSucceededEvent | IterationRunFailedEvent)):

View File

@ -7,7 +7,7 @@ from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validat
from constants import UUID_NIL
from core.app.app_config.entities import AppConfig, EasyUIBasedAppConfig, WorkflowUIBasedAppConfig
from core.entities.provider_configuration import ProviderModelBundle
from core.file import File, FileUploadConfig
from core.file.models import File
from core.model_runtime.entities.model_entities import AIModelEntity
from core.ops.ops_trace_manager import TraceQueueManager
@ -80,7 +80,6 @@ class AppGenerateEntity(BaseModel):
# app config
app_config: AppConfig
file_upload_config: Optional[FileUploadConfig] = None
inputs: Mapping[str, Any]
files: Sequence[File]

View File

@ -111,7 +111,6 @@ class QueueIterationNextEvent(AppQueueEvent):
"""iteratoin run in parallel mode run id"""
node_run_index: int
output: Optional[Any] = None # output for the current iteration
duration: Optional[float] = None
@field_validator("output", mode="before")
@classmethod
@ -308,8 +307,6 @@ class QueueNodeSucceededEvent(AppQueueEvent):
execution_metadata: Optional[dict[NodeRunMetadataKey, Any]] = None
error: Optional[str] = None
"""single iteration duration map"""
iteration_duration_map: Optional[dict[str, float]] = None
class QueueNodeInIterationFailedEvent(AppQueueEvent):

View File

@ -434,7 +434,6 @@ class IterationNodeNextStreamResponse(StreamResponse):
parallel_id: Optional[str] = None
parallel_start_node_id: Optional[str] = None
parallel_mode_run_id: Optional[str] = None
duration: Optional[float] = None
event: StreamEvent = StreamEvent.ITERATION_NEXT
workflow_run_id: str

View File

@ -624,7 +624,6 @@ class WorkflowCycleManage:
parallel_id=event.parallel_id,
parallel_start_node_id=event.parallel_start_node_id,
parallel_mode_run_id=event.parallel_mode_run_id,
duration=event.duration,
),
)

View File

@ -2,13 +2,13 @@ from .constants import FILE_MODEL_IDENTITY
from .enums import ArrayFileAttribute, FileAttribute, FileBelongsTo, FileTransferMethod, FileType
from .models import (
File,
FileUploadConfig,
FileExtraConfig,
ImageConfig,
)
__all__ = [
"FileType",
"FileUploadConfig",
"FileExtraConfig",
"FileTransferMethod",
"FileBelongsTo",
"File",

View File

@ -1,9 +1,11 @@
import base64
import tempfile
from pathlib import Path
from configs import dify_config
from core.file import file_repository
from core.helper import ssrf_proxy
from core.model_runtime.entities import AudioPromptMessageContent, ImagePromptMessageContent, VideoPromptMessageContent
from core.model_runtime.entities import AudioPromptMessageContent, ImagePromptMessageContent
from extensions.ext_database import db
from extensions.ext_storage import storage
@ -33,28 +35,25 @@ def get_attr(*, file: File, attr: FileAttribute):
raise ValueError(f"Invalid file attribute: {attr}")
def to_prompt_message_content(
f: File,
/,
*,
image_detail_config: ImagePromptMessageContent.DETAIL = ImagePromptMessageContent.DETAIL.LOW,
):
def to_prompt_message_content(f: File, /):
"""
Convert a File object to an ImagePromptMessageContent or AudioPromptMessageContent object.
Convert a File object to an ImagePromptMessageContent object.
This function takes a File object and converts it to an appropriate PromptMessageContent
object, which can be used as a prompt for image or audio-based AI models.
This function takes a File object and converts it to an ImagePromptMessageContent
object, which can be used as a prompt for image-based AI models.
Args:
f (File): The File object to convert.
detail (Optional[ImagePromptMessageContent.DETAIL]): The detail level for image prompts.
If not provided, defaults to ImagePromptMessageContent.DETAIL.LOW.
file (File): The File object to convert. Must be of type FileType.IMAGE.
Returns:
Union[ImagePromptMessageContent, AudioPromptMessageContent]: An object containing the file data and detail level
ImagePromptMessageContent: An object containing the image data and detail level.
Raises:
ValueError: If the file type is not supported or if required data is missing.
ValueError: If the file is not an image or if the file data is missing.
Note:
The detail level of the image prompt is determined by the file's extra_config.
If not specified, it defaults to ImagePromptMessageContent.DETAIL.LOW.
"""
match f.type:
case FileType.IMAGE:
@ -63,20 +62,19 @@ def to_prompt_message_content(
else:
data = _to_base64_data_string(f)
return ImagePromptMessageContent(data=data, detail=image_detail_config)
if f._extra_config and f._extra_config.image_config and f._extra_config.image_config.detail:
detail = f._extra_config.image_config.detail
else:
detail = ImagePromptMessageContent.DETAIL.LOW
return ImagePromptMessageContent(data=data, detail=detail)
case FileType.AUDIO:
encoded_string = _file_to_encoded_string(f)
if f.extension is None:
raise ValueError("Missing file extension")
return AudioPromptMessageContent(data=encoded_string, format=f.extension.lstrip("."))
case FileType.VIDEO:
if dify_config.MULTIMODAL_SEND_VIDEO_FORMAT == "url":
data = _to_url(f)
else:
data = _to_base64_data_string(f)
return VideoPromptMessageContent(data=data, format=f.extension.lstrip("."))
case _:
raise ValueError("file type f.type is not supported")
raise ValueError(f"file type {f.type} is not supported")
def download(f: File, /):
@ -113,10 +111,42 @@ def _download_file_content(path: str, /):
return data
def download_to_target_path(f: File, temp_dir: str, /):
if f.transfer_method == FileTransferMethod.TOOL_FILE:
tool_file = file_repository.get_tool_file(session=db.session(), file=f)
suffix = Path(tool_file.file_key).suffix
target_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
_download_file_to_target_path(tool_file.file_key, target_path)
return target_path
elif f.transfer_method == FileTransferMethod.LOCAL_FILE:
upload_file = file_repository.get_upload_file(session=db.session(), file=f)
suffix = Path(upload_file.key).suffix
target_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}"
_download_file_to_target_path(upload_file.key, target_path)
return target_path
else:
raise ValueError(f"Unsupported transfer method: {f.transfer_method}")
def _download_file_to_target_path(path: str, target_path: str, /):
"""
Download a file from storage to a local target path.
This function loads the object at `path` from storage and writes it to `target_path`.
Args:
path (str): The path to the file in storage.
target_path (str): The local path to write the downloaded file to.
"""
storage.download(path, target_path)
def _get_encoded_string(f: File, /):
match f.transfer_method:
case FileTransferMethod.REMOTE_URL:
response = ssrf_proxy.get(f.remote_url, follow_redirects=True)
response = ssrf_proxy.get(f.remote_url)
response.raise_for_status()
content = response.content
encoded_string = base64.b64encode(content).decode("utf-8")
@ -144,8 +174,6 @@ def _file_to_encoded_string(f: File, /):
match f.type:
case FileType.IMAGE:
return _to_base64_data_string(f)
case FileType.VIDEO:
return _to_base64_data_string(f)
case FileType.AUDIO:
return _get_encoded_string(f)
case _:
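
A usage sketch for the new download_to_target_path helper, assuming `f` is a File built by the usual factories with a LOCAL_FILE or TOOL_FILE transfer method:

import tempfile

# f: a core.file File; download_to_target_path comes from the module shown above.
with tempfile.TemporaryDirectory() as temp_dir:
    local_path = download_to_target_path(f, temp_dir)
    # The target keeps the stored object's suffix, so downstream extractors
    # can still sniff the format by extension.

Worth noting: the helper picks the random filename via the private tempfile._get_candidate_names(), which avoids holding an open handle but is not a public API.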

View File

@ -21,7 +21,7 @@ class ImageConfig(BaseModel):
detail: ImagePromptMessageContent.DETAIL | None = None
class FileUploadConfig(BaseModel):
class FileExtraConfig(BaseModel):
"""
File Upload Entity.
"""
@ -46,6 +46,7 @@ class File(BaseModel):
extension: Optional[str] = Field(default=None, description="File extension, should contains dot")
mime_type: Optional[str] = None
size: int = -1
_extra_config: FileExtraConfig | None = None
def to_dict(self) -> Mapping[str, str | int | None]:
data = self.model_dump(mode="json")
@ -106,4 +107,34 @@ class File(BaseModel):
case FileTransferMethod.TOOL_FILE:
if not self.related_id:
raise ValueError("Missing file related_id")
# Validate the extra config.
if not self._extra_config:
return self
if self._extra_config.allowed_file_types:
if self.type not in self._extra_config.allowed_file_types and self.type != FileType.CUSTOM:
raise ValueError(f"Invalid file type: {self.type}")
if self._extra_config.allowed_extensions and self.extension not in self._extra_config.allowed_extensions:
raise ValueError(f"Invalid file extension: {self.extension}")
if (
self._extra_config.allowed_upload_methods
and self.transfer_method not in self._extra_config.allowed_upload_methods
):
raise ValueError(f"Invalid transfer method: {self.transfer_method}")
match self.type:
case FileType.IMAGE:
# NOTE: This part of validation is deprecated, but still used in app features "Image Upload".
if not self._extra_config.image_config:
return self
# TODO: skip check if transfer_methods is empty, because many test cases are not setting this field
if (
self._extra_config.image_config.transfer_methods
and self.transfer_method not in self._extra_config.image_config.transfer_methods
):
raise ValueError(f"Invalid transfer method: {self.transfer_method}")
return self
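
Stripped of pydantic, the validation added to File reduces to three independent allow-list checks, each applied only when its list is configured. A standalone sketch, not the real model (CUSTOM handling simplified to a string):

def check_file(file_type, extension, transfer_method,
               allowed_types=None, allowed_exts=None, allowed_methods=None):
    # Each check is skipped when its allow-list is unset or empty.
    if allowed_types and file_type not in allowed_types and file_type != "custom":
        raise ValueError(f"Invalid file type: {file_type}")
    if allowed_exts and extension not in allowed_exts:
        raise ValueError(f"Invalid file extension: {extension}")
    if allowed_methods and transfer_method not in allowed_methods:
        raise ValueError(f"Invalid transfer method: {transfer_method}")

check_file("image", ".png", "local_file",
           allowed_types=["image"], allowed_exts=[".png", ".jpg"])  # passes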

View File

@ -1,3 +0,0 @@
from .code_executor import CodeExecutor, CodeLanguage
__all__ = ["CodeExecutor", "CodeLanguage"]

View File

@ -1,8 +1,7 @@
import logging
from collections.abc import Mapping
from enum import Enum
from threading import Lock
from typing import Any, Optional
from typing import Optional
from httpx import Timeout, post
from pydantic import BaseModel
@ -118,7 +117,7 @@ class CodeExecutor:
return response.data.stdout or ""
@classmethod
def execute_workflow_code_template(cls, language: CodeLanguage, code: str, inputs: Mapping[str, Any]) -> dict:
def execute_workflow_code_template(cls, language: CodeLanguage, code: str, inputs: dict) -> dict:
"""
Execute code
:param language: code language

View File

@ -2,8 +2,6 @@ import json
import re
from abc import ABC, abstractmethod
from base64 import b64encode
from collections.abc import Mapping
from typing import Any
class TemplateTransformer(ABC):
@ -12,7 +10,7 @@ class TemplateTransformer(ABC):
_result_tag: str = "<<RESULT>>"
@classmethod
def transform_caller(cls, code: str, inputs: Mapping[str, Any]) -> tuple[str, str]:
def transform_caller(cls, code: str, inputs: dict) -> tuple[str, str]:
"""
Transform code to python runner
:param code: code
@ -50,13 +48,13 @@ class TemplateTransformer(ABC):
pass
@classmethod
def serialize_inputs(cls, inputs: Mapping[str, Any]) -> str:
def serialize_inputs(cls, inputs: dict) -> str:
inputs_json_str = json.dumps(inputs, ensure_ascii=False).encode()
input_base64_encoded = b64encode(inputs_json_str).decode("utf-8")
return input_base64_encoded
@classmethod
def assemble_runner_script(cls, code: str, inputs: Mapping[str, Any]) -> str:
def assemble_runner_script(cls, code: str, inputs: dict) -> str:
# assemble runner script
script = cls.get_runner_script()
script = script.replace(cls._code_placeholder, code)
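
The base64 hop in serialize_inputs exists so arbitrary JSON survives being spliced into a code template as one opaque token; the generated runner script then decodes the mirror image. A round-trip sketch (the decode half is an assumption about what the runner does):

import json
from base64 import b64decode, b64encode

inputs = {"name": "dify", "count": 3}
encoded = b64encode(json.dumps(inputs, ensure_ascii=False).encode()).decode("utf-8")
decoded = json.loads(b64decode(encoded))
assert decoded == inputs  # the round trip is lossless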

View File

@ -3,20 +3,22 @@ Proxy requests to avoid SSRF
"""
import logging
import os
import time
import httpx
from configs import dify_config
SSRF_DEFAULT_MAX_RETRIES = dify_config.SSRF_DEFAULT_MAX_RETRIES
SSRF_PROXY_ALL_URL = os.getenv("SSRF_PROXY_ALL_URL", "")
SSRF_PROXY_HTTP_URL = os.getenv("SSRF_PROXY_HTTP_URL", "")
SSRF_PROXY_HTTPS_URL = os.getenv("SSRF_PROXY_HTTPS_URL", "")
SSRF_DEFAULT_MAX_RETRIES = int(os.getenv("SSRF_DEFAULT_MAX_RETRIES", "3"))
proxy_mounts = (
{
"http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL),
"https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL),
"http://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTP_URL),
"https://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTPS_URL),
}
if dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL
if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL
else None
)
@ -30,19 +32,11 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
if "follow_redirects" not in kwargs:
kwargs["follow_redirects"] = allow_redirects
if "timeout" not in kwargs:
kwargs["timeout"] = httpx.Timeout(
timeout=dify_config.SSRF_DEFAULT_TIME_OUT,
connect=dify_config.SSRF_DEFAULT_CONNECT_TIME_OUT,
read=dify_config.SSRF_DEFAULT_READ_TIME_OUT,
write=dify_config.SSRF_DEFAULT_WRITE_TIME_OUT,
)
retries = 0
while retries <= max_retries:
try:
if dify_config.SSRF_PROXY_ALL_URL:
with httpx.Client(proxy=dify_config.SSRF_PROXY_ALL_URL) as client:
if SSRF_PROXY_ALL_URL:
with httpx.Client(proxy=SSRF_PROXY_ALL_URL) as client:
response = client.request(method=method, url=url, **kwargs)
elif proxy_mounts:
with httpx.Client(mounts=proxy_mounts) as client:
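
Both sides of this hunk share the same retry-around-a-client shape; what changes is only where the proxy URLs and timeouts come from (dify_config versus raw environment variables). A condensed sketch of that shape, with the retryable status set and the backoff as assumptions:

import logging
import time

import httpx

RETRYABLE_STATUS = {429, 500, 502, 503, 504}  # assumed; the real module keeps its own list

def make_request_sketch(method: str, url: str, max_retries: int = 3, **kwargs) -> httpx.Response:
    retries = 0
    while retries <= max_retries:
        try:
            # Proxy selection (single all-scheme proxy, per-scheme mounts, or direct) elided.
            with httpx.Client() as client:
                response = client.request(method=method, url=url, **kwargs)
            if response.status_code not in RETRYABLE_STATUS:
                return response
        except httpx.RequestError as exc:
            logging.warning("Request to %s failed, retrying: %s", url, exc)
        retries += 1
        time.sleep(1)
    raise Exception(f"Reached maximum retries ({max_retries}) for URL {url}")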

View File

@ -81,18 +81,15 @@ class TokenBufferMemory:
db.session.query(WorkflowRun).filter(WorkflowRun.id == message.workflow_run_id).first()
)
if workflow_run and workflow_run.workflow:
if workflow_run:
file_extra_config = FileUploadConfigManager.convert(
workflow_run.workflow.features_dict, is_vision=False
)
detail = ImagePromptMessageContent.DETAIL.LOW
if file_extra_config and app_record:
file_objs = file_factory.build_from_message_files(
message_files=files, tenant_id=app_record.tenant_id, config=file_extra_config
)
if file_extra_config.image_config and file_extra_config.image_config.detail:
detail = file_extra_config.image_config.detail
else:
file_objs = []
@ -101,16 +98,12 @@ class TokenBufferMemory:
else:
prompt_message_contents: list[PromptMessageContent] = []
prompt_message_contents.append(TextPromptMessageContent(data=message.query))
for file in file_objs:
if file.type in {FileType.IMAGE, FileType.AUDIO}:
prompt_message = file_manager.to_prompt_message_content(
file,
image_detail_config=detail,
)
for file_obj in file_objs:
if file_obj.type in {FileType.IMAGE, FileType.AUDIO}:
prompt_message = file_manager.to_prompt_message_content(file_obj)
prompt_message_contents.append(prompt_message)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:
prompt_messages.append(UserPromptMessage(content=message.query))

View File

@ -1,8 +1,8 @@
import logging
import os
from collections.abc import Callable, Generator, Iterable, Sequence
from typing import IO, Any, Optional, Union, cast
from configs import dify_config
from core.entities.embedding_type import EmbeddingInputType
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import ModelLoadBalancingConfiguration
@ -473,7 +473,7 @@ class LBModelManager:
continue
if dify_config.DEBUG:
if bool(os.environ.get("DEBUG", "False").lower() == "true"):
logger.info(
f"Model LB\nid: {config.id}\nname:{config.name}\n"
f"tenant_id: {self._tenant_id}\nprovider: {self._provider}\n"

View File

@ -12,13 +12,11 @@ from .message_entities import (
TextPromptMessageContent,
ToolPromptMessage,
UserPromptMessage,
VideoPromptMessageContent,
)
from .model_entities import ModelPropertyKey
__all__ = [
"ImagePromptMessageContent",
"VideoPromptMessageContent",
"PromptMessage",
"PromptMessageRole",
"LLMUsage",

View File

@ -56,7 +56,6 @@ class PromptMessageContentType(Enum):
TEXT = "text"
IMAGE = "image"
AUDIO = "audio"
VIDEO = "video"
class PromptMessageContent(BaseModel):
@ -76,12 +75,6 @@ class TextPromptMessageContent(PromptMessageContent):
type: PromptMessageContentType = PromptMessageContentType.TEXT
class VideoPromptMessageContent(PromptMessageContent):
type: PromptMessageContentType = PromptMessageContentType.VIDEO
data: str = Field(..., description="Base64 encoded video data")
format: str = Field(..., description="Video format")
class AudioPromptMessageContent(PromptMessageContent):
type: PromptMessageContentType = PromptMessageContentType.AUDIO
data: str = Field(..., description="Base64 encoded audio data")

View File

@ -4,6 +4,7 @@ label:
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:

View File

@ -47,9 +47,9 @@ class AzureRerankModel(RerankModel):
result = response.read()
return json.loads(result)
except urllib.error.HTTPError as error:
logger.exception(f"The request failed with status code: {error.code}")
logger.exception(error.info())
logger.exception(error.read().decode("utf8", "ignore"))
logger.error(f"The request failed with status code: {error.code}")
logger.error(error.info())
logger.error(error.read().decode("utf8", "ignore"))
raise
def _invoke(

View File

@ -4,6 +4,7 @@ label:
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:

View File

@ -16,9 +16,9 @@ parameter_rules:
use_template: max_tokens
required: true
type: int
default: 8192
default: 4096
min: 1
max: 8192
max: 4096
help:
zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.

View File

@ -4,6 +4,7 @@ label:
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:

View File

@ -55,7 +55,6 @@ class JinaRerankModel(RerankModel):
base_url + "/rerank",
json={"model": model, "query": query, "documents": docs, "top_n": top_n},
headers={"Authorization": f"Bearer {credentials.get('api_key')}"},
timeout=20,
)
response.raise_for_status()
results = response.json()

View File

@ -617,10 +617,6 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
# o1 compatibility
block_as_stream = False
if model.startswith("o1"):
if "max_tokens" in model_parameters:
model_parameters["max_completion_tokens"] = model_parameters["max_tokens"]
del model_parameters["max_tokens"]
if stream:
block_as_stream = True
stream = False

View File

@ -1,38 +0,0 @@
model: anthropic/claude-3-5-haiku
label:
en_US: claude-3-5-haiku
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_tokens
use_template: max_tokens
required: true
default: 8192
min: 1
max: 8192
- name: response_format
use_template: response_format
pricing:
input: "1"
output: "5"
unit: "0.000001"
currency: USD

View File

@ -4,7 +4,10 @@ import re
from collections.abc import Generator, Iterator
from typing import Any, Optional, Union, cast
# from openai.types.chat import ChatCompletion, ChatCompletionChunk
import boto3
from sagemaker import Predictor, serializers
from sagemaker.session import Session
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
@ -209,9 +212,6 @@ class SageMakerLargeLanguageModel(LargeLanguageModel):
:param user: unique user id
:return: full response or stream response chunk generator result
"""
from sagemaker import Predictor, serializers
from sagemaker.session import Session
if not self.sagemaker_session:
access_key = credentials.get("aws_access_key_id")
secret_key = credentials.get("aws_secret_access_key")

View File

@ -29,7 +29,6 @@ from core.model_runtime.entities.message_entities import (
TextPromptMessageContent,
ToolPromptMessage,
UserPromptMessage,
VideoPromptMessageContent,
)
from core.model_runtime.entities.model_entities import (
AIModelEntity,
@ -432,14 +431,6 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
sub_message_dict = {"image": image_url}
sub_messages.append(sub_message_dict)
elif message_content.type == PromptMessageContentType.VIDEO:
message_content = cast(VideoPromptMessageContent, message_content)
video_url = message_content.data
if message_content.data.startswith("data:"):
raise InvokeError("not support base64, please set MULTIMODAL_SEND_VIDEO_FORMAT to url")
sub_message_dict = {"video": video_url}
sub_messages.append(sub_message_dict)
# resort sub_messages to ensure text is always at last
sub_messages = sorted(sub_messages, key=lambda x: "text" in x)

View File

@ -13,9 +13,9 @@ parameter_rules:
use_template: max_tokens
required: true
type: int
default: 8192
default: 4096
min: 1
max: 8192
max: 4096
help:
zh_Hans: 停止前生成的最大令牌数。请注意Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.

View File

@ -1,6 +1,6 @@
provider: vessl_ai
label:
en_US: VESSL AI
en_US: vessl_ai
icon_small:
en_US: icon_s_en.svg
icon_large:
@ -20,28 +20,28 @@ model_credential_schema:
label:
en_US: Model Name
placeholder:
en_US: Enter model name
en_US: Enter your model name
credential_form_schemas:
- variable: endpoint_url
label:
en_US: Endpoint Url
en_US: endpoint url
type: text-input
required: true
placeholder:
en_US: Enter VESSL AI service endpoint url
en_US: Enter the url of your endpoint url
- variable: api_key
required: true
label:
en_US: API Key
type: secret-input
placeholder:
en_US: Enter VESSL AI secret key
en_US: Enter your VESSL AI secret key
- variable: mode
show_on:
- variable: __model_type
value: llm
label:
en_US: Completion Mode
en_US: Completion mode
type: select
required: false
default: chat

View File

@ -313,35 +313,21 @@ class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):
return params
def _construct_glm_4v_messages(self, prompt_message: Union[str, list[PromptMessageContent]]) -> list[dict]:
if isinstance(prompt_message, list):
sub_messages = []
for item in prompt_message:
if item.type == PromptMessageContentType.IMAGE:
sub_messages.append(
{
"type": "image_url",
"image_url": {"url": self._remove_base64_header(item.data)},
}
)
elif item.type == PromptMessageContentType.VIDEO:
sub_messages.append(
{
"type": "video_url",
"video_url": {"url": self._remove_base64_header(item.data)},
}
)
else:
sub_messages.append({"type": "text", "text": item.data})
return sub_messages
else:
if isinstance(prompt_message, str):
return [{"type": "text", "text": prompt_message}]
def _remove_base64_header(self, file_content: str) -> str:
if file_content.startswith("data:"):
data_split = file_content.split(";base64,")
return data_split[1]
return [
{"type": "image_url", "image_url": {"url": self._remove_image_header(item.data)}}
if item.type == PromptMessageContentType.IMAGE
else {"type": "text", "text": item.data}
for item in prompt_message
]
return file_content
def _remove_image_header(self, image: str) -> str:
if image.startswith("data:image"):
return image.split(",")[1]
return image
def _handle_generate_response(
self,
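
Both versions of _construct_glm_4v_messages strip a data-URL header from base64 images before handing them to ZhipuAI; in isolation that helper behaves like this standalone re-implementation:

def remove_image_header(image: str) -> str:
    # Drop a "data:image/...;base64," prefix when present; pass plain data through.
    if image.startswith("data:image"):
        return image.split(",", 1)[1]
    return image

assert remove_image_header("data:image/png;base64,AAAA") == "AAAA"
assert remove_image_header("AAAA") == "AAAA"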

View File

@ -126,6 +126,6 @@ class OutputModeration(BaseModel):
result: ModerationOutputsResult = moderation_factory.moderation_for_outputs(moderation_buffer)
return result
except Exception as e:
logger.exception("Moderation Output error: %s", e)
logger.error("Moderation Output error: %s", e)
return None

View File

@ -54,7 +54,3 @@ class LangSmithConfig(BaseTracingConfig):
raise ValueError("endpoint must start with https://")
return v
OPS_FILE_PATH = "ops_trace/"
OPS_TRACE_FAILED_KEY = "FAILED_OPS_TRACE"

View File

@ -23,11 +23,6 @@ class BaseTraceInfo(BaseModel):
return v
return ""
class Config:
json_encoders = {
datetime: lambda v: v.isoformat(),
}
class WorkflowTraceInfo(BaseTraceInfo):
workflow_data: Any
@ -105,12 +100,6 @@ class GenerateNameTraceInfo(BaseTraceInfo):
tenant_id: str
class TaskData(BaseModel):
app_id: str
trace_info_type: str
trace_info: Any
trace_info_info_map = {
"WorkflowTraceInfo": WorkflowTraceInfo,
"MessageTraceInfo": MessageTraceInfo,

View File

@ -6,13 +6,12 @@ import threading
import time
from datetime import timedelta
from typing import Any, Optional, Union
from uuid import UUID, uuid4
from uuid import UUID
from flask import current_app
from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token
from core.ops.entities.config_entity import (
OPS_FILE_PATH,
LangfuseConfig,
LangSmithConfig,
TracingProviderEnum,
@ -23,7 +22,6 @@ from core.ops.entities.trace_entity import (
MessageTraceInfo,
ModerationTraceInfo,
SuggestedQuestionTraceInfo,
TaskData,
ToolTraceInfo,
TraceTaskName,
WorkflowTraceInfo,
@ -32,7 +30,6 @@ from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace
from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace
from core.ops.utils import get_message_data
from extensions.ext_database import db
from extensions.ext_storage import storage
from models.model import App, AppModelConfig, Conversation, Message, MessageAgentThought, MessageFile, TraceAppConfig
from models.workflow import WorkflowAppLog, WorkflowRun
from tasks.ops_trace_task import process_trace_tasks
@ -711,7 +708,7 @@ class TraceQueueManager:
trace_task.app_id = self.app_id
trace_manager_queue.put(trace_task)
except Exception as e:
logging.exception(f"Error adding trace task: {e}")
logging.error(f"Error adding trace task: {e}")
finally:
self.start_timer()
@ -730,7 +727,7 @@ class TraceQueueManager:
if tasks:
self.send_to_celery(tasks)
except Exception as e:
logging.exception(f"Error processing trace tasks: {e}")
logging.error(f"Error processing trace tasks: {e}")
def start_timer(self):
global trace_manager_timer
@ -743,17 +740,10 @@ class TraceQueueManager:
def send_to_celery(self, tasks: list[TraceTask]):
with self.flask_app.app_context():
for task in tasks:
file_id = uuid4().hex
trace_info = task.execute()
task_data = TaskData(
app_id=task.app_id,
trace_info_type=type(trace_info).__name__,
trace_info=trace_info.model_dump() if trace_info else None,
)
file_path = f"{OPS_FILE_PATH}{task.app_id}/{file_id}.json"
storage.save(file_path, task_data.model_dump_json().encode("utf-8"))
file_info = {
"file_id": file_id,
task_data = {
"app_id": task.app_id,
"trace_info_type": type(trace_info).__name__,
"trace_info": trace_info.model_dump() if trace_info else {},
}
process_trace_tasks.delay(file_info)
process_trace_tasks.delay(task_data)

View File

@ -15,7 +15,6 @@ from core.model_runtime.entities import (
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
from core.prompt.prompt_transform import PromptTransform
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
@ -27,13 +26,8 @@ class AdvancedPromptTransform(PromptTransform):
Advanced Prompt Transform for Workflow LLM Node.
"""
def __init__(
self,
with_variable_tmpl: bool = False,
image_detail_config: ImagePromptMessageContent.DETAIL = ImagePromptMessageContent.DETAIL.LOW,
) -> None:
def __init__(self, with_variable_tmpl: bool = False) -> None:
self.with_variable_tmpl = with_variable_tmpl
self.image_detail_config = image_detail_config
def get_prompt(
self,

View File

@ -242,7 +242,7 @@ class CouchbaseVector(BaseVector):
try:
self._cluster.query(query, named_parameters={"doc_ids": ids}).execute()
except Exception as e:
logger.exception(e)
logger.error(e)
def delete_by_document_id(self, document_id: str):
query = f"""

View File

@ -79,7 +79,7 @@ class LindormVectorStore(BaseVector):
existing_docs = self._client.mget(index=self._collection_name, body={"ids": batch_ids}, _source=False)
return {doc["_id"] for doc in existing_docs["docs"] if doc["found"]}
except Exception as e:
logger.exception(f"Error fetching batch {batch_ids}: {e}")
logger.error(f"Error fetching batch {batch_ids}: {e}")
return set()
@retry(stop=stop_after_attempt(3), wait=wait_fixed(60))
@ -96,7 +96,7 @@ class LindormVectorStore(BaseVector):
)
return {doc["_id"] for doc in existing_docs["docs"] if doc["found"]}
except Exception as e:
logger.exception(f"Error fetching batch {batch_ids}: {e}")
logger.error(f"Error fetching batch {batch_ids}: {e}")
return set()
if ids is None:
@ -177,7 +177,7 @@ class LindormVectorStore(BaseVector):
else:
logger.warning(f"Index '{self._collection_name}' does not exist. No deletion performed.")
except Exception as e:
logger.exception(f"Error occurred while deleting the index: {e}")
logger.error(f"Error occurred while deleting the index: {e}")
raise e
def text_exists(self, id: str) -> bool:
@ -201,7 +201,7 @@ class LindormVectorStore(BaseVector):
try:
response = self._client.search(index=self._collection_name, body=query)
except Exception as e:
logger.exception(f"Error executing search: {e}")
logger.error(f"Error executing search: {e}")
raise
docs_and_scores = []

View File

@ -86,7 +86,7 @@ class MilvusVector(BaseVector):
ids = self._client.insert(collection_name=self._collection_name, data=batch_insert_list)
pks.extend(ids)
except MilvusException as e:
logger.exception("Failed to insert batch starting at entity: %s/%s", i, total_count)
logger.error("Failed to insert batch starting at entity: %s/%s", i, total_count)
raise e
return pks

View File

@ -142,7 +142,7 @@ class MyScaleVector(BaseVector):
for r in self._client.query(sql).named_results()
]
except Exception as e:
logging.exception(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
logging.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def delete(self) -> None:

View File

@ -129,7 +129,7 @@ class OpenSearchVector(BaseVector):
if status == 404:
logger.warning(f"Document not found for deletion: {doc_id}")
else:
logger.exception(f"Error deleting document: {error}")
logger.error(f"Error deleting document: {error}")
def delete(self) -> None:
self._client.indices.delete(index=self._collection_name.lower())
@ -158,7 +158,7 @@ class OpenSearchVector(BaseVector):
try:
response = self._client.search(index=self._collection_name.lower(), body=query)
except Exception as e:
logger.exception(f"Error executing search: {e}")
logger.error(f"Error executing search: {e}")
raise
docs = []

View File

@ -89,7 +89,7 @@ class CacheEmbedding(Embeddings):
db.session.rollback()
except Exception as ex:
db.session.rollback()
logger.exception("Failed to embed documents: %s", ex)
logger.error("Failed to embed documents: %s", ex)
raise ex
return text_embeddings

View File

@ -28,6 +28,7 @@ logger = logging.getLogger(__name__)
class WordExtractor(BaseExtractor):
"""Load docx files.
Args:
file_path: Path to the file to load.
"""
@ -229,7 +230,7 @@ class WordExtractor(BaseExtractor):
for i in url_pattern.findall(x.text):
hyperlinks_url = str(i)
except Exception as e:
logger.exception(e)
logger.error(e)
def parse_paragraph(paragraph):
paragraph_content = []

View File

@ -48,13 +48,6 @@
- feishu_task
- feishu_calendar
- feishu_spreadsheet
- lark_base
- lark_document
- lark_message_and_group
- lark_wiki
- lark_task
- lark_calendar
- lark_spreadsheet
- slack
- twilio
- wecom

View File

@ -1,24 +0,0 @@
from typing import Any, Union
from zhipuai import ZhipuAI
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
class CogVideoTool(BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
client = ZhipuAI(
base_url=self.runtime.credentials["zhipuai_base_url"],
api_key=self.runtime.credentials["zhipuai_api_key"],
)
if not tool_parameters.get("prompt") and not tool_parameters.get("image_url"):
return self.create_text_message("require at least one of prompt and image_url")
response = client.videos.generations(
model="cogvideox", prompt=tool_parameters.get("prompt"), image_url=tool_parameters.get("image_url")
)
return self.create_json_message(response.dict())

View File

@ -1,32 +0,0 @@
identity:
name: cogvideo
author: hjlarry
label:
en_US: CogVideo
zh_Hans: CogVideo 视频生成
description:
human:
en_US: Use the CogVideox model provided by ZhipuAI to generate videos based on user prompts and images.
zh_Hans: 使用智谱cogvideox模型根据用户输入的提示词和图片生成视频。
llm: A tool for generating videos. The input is user's prompt or image url or both of them, the output is a task id. You can use another tool with this task id to check the status and get the video.
parameters:
- name: prompt
type: string
label:
en_US: prompt
zh_Hans: 提示词
human_description:
en_US: The prompt text used to generate video.
zh_Hans: 用于生成视频的提示词。
llm_description: The prompt text used to generate video. Optional.
form: llm
- name: image_url
type: string
label:
en_US: image url
zh_Hans: 图片链接
human_description:
en_US: The image url used to generate video.
zh_Hans: 输入一个图片链接,生成的视频将基于该图片和提示词。
llm_description: The image url used to generate video. Optional.
form: llm

View File

@ -1,30 +0,0 @@
from typing import Any, Union
import httpx
from zhipuai import ZhipuAI
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
class CogVideoJobTool(BuiltinTool):
def _invoke(
self, user_id: str, tool_parameters: dict[str, Any]
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
client = ZhipuAI(
api_key=self.runtime.credentials["zhipuai_api_key"],
base_url=self.runtime.credentials["zhipuai_base_url"],
)
response = client.videos.retrieve_videos_result(id=tool_parameters.get("id"))
result = [self.create_json_message(response.dict())]
if response.task_status == "SUCCESS":
for item in response.video_result:
video_cover_image = self.create_image_message(item.cover_image_url)
result.append(video_cover_image)
video = self.create_blob_message(
blob=httpx.get(item.url).content, meta={"mime_type": "video/mp4"}, save_as=self.VariableKey.VIDEO
)
result.append(video)
return result

View File

@ -1,21 +0,0 @@
identity:
name: cogvideo_job
author: hjlarry
label:
en_US: CogVideo Result
zh_Hans: CogVideo 结果获取
description:
human:
en_US: Get the result of CogVideo tool generation.
zh_Hans: 根据 CogVideo 工具返回的 id 获取视频生成结果。
llm: Get the result of CogVideo tool generation. The input is the id which is returned by the CogVideo tool. The output is the url of video and video cover image.
parameters:
- name: id
type: string
label:
en_US: id
human_description:
en_US: The id returned by the CogVideo.
zh_Hans: CogVideo 工具返回的 id。
llm_description: The id returned by the cogvideo.
form: llm

View File

@ -48,6 +48,7 @@ class ComfyUiClient:
prompt = origin_prompt.copy()
id_to_class_type = {id: details["class_type"] for id, details in prompt.items()}
k_sampler = [key for key, value in id_to_class_type.items() if value == "KSampler"][0]
prompt.get(k_sampler)["inputs"]["seed"] = random.randint(10**14, 10**15 - 1)
positive_input_id = prompt.get(k_sampler)["inputs"]["positive"][0]
prompt.get(positive_input_id)["inputs"]["text"] = positive_prompt
@ -71,18 +72,6 @@ class ComfyUiClient:
prompt.get(load_image)["inputs"]["image"] = image_name
return prompt
def set_prompt_seed_by_id(self, origin_prompt: dict, seed_id: str) -> dict:
prompt = origin_prompt.copy()
if seed_id not in prompt:
raise Exception("Not a valid seed node")
if "seed" in prompt[seed_id]["inputs"]:
prompt[seed_id]["inputs"]["seed"] = random.randint(10**14, 10**15 - 1)
elif "noise_seed" in prompt[seed_id]["inputs"]:
prompt[seed_id]["inputs"]["noise_seed"] = random.randint(10**14, 10**15 - 1)
else:
raise Exception("Not a valid seed node")
return prompt
def track_progress(self, prompt: dict, ws: WebSocket, prompt_id: str):
node_ids = list(prompt.keys())
finished_nodes = []

View File

@ -8,20 +8,6 @@ from core.tools.provider.builtin.comfyui.tools.comfyui_client import ComfyUiClie
from core.tools.tool.builtin_tool import BuiltinTool
def sanitize_json_string(s):
escape_dict = {
"\n": "\\n",
"\r": "\\r",
"\t": "\\t",
"\b": "\\b",
"\f": "\\f",
}
for char, escaped in escape_dict.items():
s = s.replace(char, escaped)
return s
class ComfyUIWorkflowTool(BuiltinTool):
def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage | list[ToolInvokeMessage]:
comfyui = ComfyUiClient(self.runtime.credentials["base_url"])
@ -40,17 +26,13 @@ class ComfyUIWorkflowTool(BuiltinTool):
set_prompt_with_ksampler = True
if "{{positive_prompt}}" in workflow:
set_prompt_with_ksampler = False
workflow = workflow.replace("{{positive_prompt}}", positive_prompt.replace('"', "'"))
workflow = workflow.replace("{{negative_prompt}}", negative_prompt.replace('"', "'"))
workflow = workflow.replace("{{positive_prompt}}", positive_prompt)
workflow = workflow.replace("{{negative_prompt}}", negative_prompt)
try:
prompt = json.loads(workflow)
except json.JSONDecodeError:
cleaned_string = sanitize_json_string(workflow)
try:
prompt = json.loads(cleaned_string)
except:
return self.create_text_message("the Workflow JSON is not correct")
except:
return self.create_text_message("the Workflow JSON is not correct")
if set_prompt_with_ksampler:
try:
@ -70,9 +52,6 @@ class ComfyUIWorkflowTool(BuiltinTool):
else:
prompt = comfyui.set_prompt_images_by_default(prompt, image_names)
if seed_id := tool_parameters.get("seed_id"):
prompt = comfyui.set_prompt_seed_by_id(prompt, seed_id)
images = comfyui.generate_image_by_prompt(prompt)
result = []
for img in images:
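
The sanitize-then-retry variant in this hunk exists for workflow JSON whose substituted prompts contain raw control characters; condensed, the pattern is:

import json

def parse_workflow(workflow: str) -> dict:
    try:
        return json.loads(workflow)
    except json.JSONDecodeError:
        # Escape raw control characters introduced by prompt substitution, then retry once.
        for char, escaped in {"\n": "\\n", "\r": "\\r", "\t": "\\t", "\b": "\\b", "\f": "\\f"}.items():
            workflow = workflow.replace(char, escaped)
        return json.loads(workflow)

raw = '{"text": "line one\nline two"}'  # the real newline breaks strict JSON parsing
assert parse_workflow(raw) == {"text": "line one\nline two"}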

View File

@ -52,12 +52,3 @@ parameters:
en_US: When the workflow has multiple image nodes, enter the ID list of these nodes, and the images will be passed to ComfyUI in the order of the list.
zh_Hans: 当工作流有多个图片节点时输入这些节点的ID列表图片将按列表顺序传给ComfyUI
form: form
- name: seed_id
type: string
label:
en_US: Seed Node Id
zh_Hans: 种子节点ID
human_description:
en_US: If you need to generate different images each time, you need to enter the ID of the seed node.
zh_Hans: 如果需要每次生成时使用不同的种子需要输入包含种子的节点的ID
form: form

View File

@ -34,7 +34,7 @@ parameters:
Page size, default value: 20, maximum value: 100.
zh_Hans: 分页大小默认值20最大值100。
llm_description: 分页大小默认值20最大值100。
form: form
form: llm
- name: page_token
type: string

View File

@ -147,7 +147,7 @@ parameters:
Page size, default value: 20, maximum value: 500.
zh_Hans: 分页大小默认值20最大值500。
llm_description: 分页大小默认值20最大值500。
form: form
form: llm
- name: page_token
type: string

View File

@ -47,7 +47,7 @@ parameters:
en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 50, and the value range is [50,1000].
zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 50取值范围为 [50,1000]。
llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 50取值范围为 [50,1000]。
form: form
form: llm
- name: page_token
type: string

View File

@ -85,7 +85,7 @@ parameters:
en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [10,100].
zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20取值范围为 [10,100]。
llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20取值范围为 [10,100]。
form: form
form: llm
- name: page_token
type: string

View File

@ -59,7 +59,7 @@ parameters:
en_US: Paging size, the default and maximum value is 500.
zh_Hans: 分页大小, 默认值和最大值为 500。
llm_description: 分页大小, 表示一次请求最多返回多少条数据,默认值和最大值为 500。
form: form
form: llm
- name: page_token
type: string

View File

@ -81,7 +81,7 @@ parameters:
en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50].
zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20取值范围为 [1,50]。
llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20取值范围为 [1,50]。
form: form
form: llm
- name: page_token
type: string

View File

@ -57,7 +57,7 @@ parameters:
en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50].
zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20取值范围为 [1,50]。
llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20取值范围为 [1,50]。
form: form
form: llm
- name: page_token
type: string

View File

@ -56,7 +56,7 @@ parameters:
en_US: Number of columns to add, range (0-5000].
zh_Hans: 要增加的列数,范围(0-5000]。
llm_description: 要增加的列数,范围(0-5000]。
form: form
form: llm
- name: values
type: string

View File

@ -56,7 +56,7 @@ parameters:
en_US: Number of rows to add, range (0-5000].
zh_Hans: 要增加行数,范围(0-5000]。
llm_description: 要增加行数,范围(0-5000]。
form: form
form: llm
- name: values
type: string

View File

@ -82,7 +82,7 @@ parameters:
en_US: Starting column number, starting from 1.
zh_Hans: 起始列号,从 1 开始。
llm_description: 起始列号,从 1 开始。
form: form
form: llm
- name: num_cols
type: number
@ -94,4 +94,4 @@ parameters:
en_US: Number of columns to read.
zh_Hans: 读取列数
llm_description: 读取列数
form: form
form: llm

View File

@ -82,7 +82,7 @@ parameters:
en_US: Starting row number, starting from 1.
zh_Hans: 起始行号,从 1 开始。
llm_description: 起始行号,从 1 开始。
form: form
form: llm
- name: num_rows
type: number
@ -94,4 +94,4 @@ parameters:
en_US: Number of rows to read.
zh_Hans: 读取行数
llm_description: 读取行数
form: form
form: llm

Some files were not shown because too many files have changed in this diff.