Compare commits

..

1 Commits

Author SHA1 Message Date
Joe
4fc91d526e fix: dataset editor 2025-03-07 15:59:36 +08:00
3603 changed files with 295062 additions and 76962 deletions

View File

@ -1,12 +1,11 @@
#!/bin/bash #!/bin/bash
npm add -g pnpm@9.12.2 cd web && npm install
cd web && pnpm install
pipx install poetry pipx install poetry
echo 'alias start-api="cd /workspaces/dify/api && poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc echo 'alias start-api="cd /workspaces/dify/api && poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc
echo 'alias start-worker="cd /workspaces/dify/api && poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc echo 'alias start-worker="cd /workspaces/dify/api && poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc
echo 'alias start-web="cd /workspaces/dify/web && pnpm dev"' >> ~/.bashrc echo 'alias start-web="cd /workspaces/dify/web && npm run dev"' >> ~/.bashrc
echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc echo 'alias start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify up -d"' >> ~/.bashrc
echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify down"' >> ~/.bashrc echo 'alias stop-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify down"' >> ~/.bashrc

View File

@ -26,9 +26,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Setup Poetry and Python ${{ matrix.python-version }} - name: Setup Poetry and Python ${{ matrix.python-version }}
uses: ./.github/actions/setup-poetry uses: ./.github/actions/setup-poetry
@ -50,9 +47,15 @@ jobs:
- name: Run Unit tests - name: Run Unit tests
run: poetry run -P api bash dev/pytest/pytest_unit_tests.sh run: poetry run -P api bash dev/pytest/pytest_unit_tests.sh
- name: Run ModelRuntime
run: poetry run -P api bash dev/pytest/pytest_model_runtime.sh
- name: Run dify config tests - name: Run dify config tests
run: poetry run -P api python dev/pytest/pytest_config_tests.py run: poetry run -P api python dev/pytest/pytest_config_tests.py
- name: Run Tool
run: poetry run -P api bash dev/pytest/pytest_tools.sh
- name: Run mypy - name: Run mypy
run: | run: |
poetry run -C api python -m mypy --install-types --non-interactive . poetry run -C api python -m mypy --install-types --non-interactive .

View File

@ -5,7 +5,7 @@ on:
branches: branches:
- "main" - "main"
- "deploy/dev" - "deploy/dev"
- "dev/plugin-deploy" - "fix/dataset-admin"
release: release:
types: [published] types: [published]
@ -80,12 +80,10 @@ jobs:
cache-to: type=gha,mode=max,scope=${{ matrix.service_name }} cache-to: type=gha,mode=max,scope=${{ matrix.service_name }}
- name: Export digest - name: Export digest
env:
DIGEST: ${{ steps.build.outputs.digest }}
run: | run: |
mkdir -p /tmp/digests mkdir -p /tmp/digests
sanitized_digest=${DIGEST#sha256:} digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${sanitized_digest}" touch "/tmp/digests/${digest#sha256:}"
- name: Upload digest - name: Upload digest
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
@ -135,15 +133,10 @@ jobs:
- name: Create manifest list and push - name: Create manifest list and push
working-directory: /tmp/digests working-directory: /tmp/digests
env:
IMAGE_NAME: ${{ env[matrix.image_name_env] }}
run: | run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf "$IMAGE_NAME@sha256:%s " *) $(printf '${{ env[matrix.image_name_env] }}@sha256:%s ' *)
- name: Inspect image - name: Inspect image
env:
IMAGE_NAME: ${{ env[matrix.image_name_env] }}
IMAGE_VERSION: ${{ steps.meta.outputs.version }}
run: | run: |
docker buildx imagetools inspect "$IMAGE_NAME:$IMAGE_VERSION" docker buildx imagetools inspect ${{ env[matrix.image_name_env] }}:${{ steps.meta.outputs.version }}

View File

@ -4,7 +4,6 @@ on:
pull_request: pull_request:
branches: branches:
- main - main
- plugins/beta
paths: paths:
- api/migrations/** - api/migrations/**
- .github/workflows/db-migration-test.yml - .github/workflows/db-migration-test.yml
@ -20,9 +19,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Setup Poetry and Python - name: Setup Poetry and Python
uses: ./.github/actions/setup-poetry uses: ./.github/actions/setup-poetry

View File

@ -9,6 +9,6 @@ yq eval '.services["pgvecto-rs"].ports += ["5431:5432"]' -i docker/docker-compos
yq eval '.services["elasticsearch"].ports += ["9200:9200"]' -i docker/docker-compose.yaml yq eval '.services["elasticsearch"].ports += ["9200:9200"]' -i docker/docker-compose.yaml
yq eval '.services.couchbase-server.ports += ["8091-8096:8091-8096"]' -i docker/docker-compose.yaml yq eval '.services.couchbase-server.ports += ["8091-8096:8091-8096"]' -i docker/docker-compose.yaml
yq eval '.services.couchbase-server.ports += ["11210:11210"]' -i docker/docker-compose.yaml yq eval '.services.couchbase-server.ports += ["11210:11210"]' -i docker/docker-compose.yaml
yq eval '.services.tidb.ports += ["4000:4000"]' -i docker/tidb/docker-compose.yaml yq eval '.services.tidb.ports += ["4000:4000"]' -i docker/docker-compose.yaml
echo "Ports exposed for sandbox, weaviate, tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase" echo "Ports exposed for sandbox, weaviate, tidb, qdrant, chroma, milvus, pgvector, pgvecto-rs, elasticsearch, couchbase"

View File

@ -17,9 +17,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files - name: Check changed files
id: changed-files id: changed-files
@ -62,9 +59,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files - name: Check changed files
id: changed-files id: changed-files
@ -72,27 +66,21 @@ jobs:
with: with:
files: web/** files: web/**
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 10
run_install: false
- name: Setup NodeJS - name: Setup NodeJS
uses: actions/setup-node@v4 uses: actions/setup-node@v4
if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
with: with:
node-version: 20 node-version: 20
cache: pnpm cache: yarn
cache-dependency-path: ./web/package.json cache-dependency-path: ./web/package.json
- name: Web dependencies - name: Web dependencies
if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
run: pnpm install --frozen-lockfile run: yarn install --frozen-lockfile
- name: Web style check - name: Web style check
if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
run: pnpm run lint run: yarn run lint
docker-compose-template: docker-compose-template:
name: Docker Compose Template name: Docker Compose Template
@ -101,9 +89,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files - name: Check changed files
id: changed-files id: changed-files
@ -132,9 +117,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files - name: Check changed files
id: changed-files id: changed-files

View File

@ -26,19 +26,16 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Use Node.js ${{ matrix.node-version }} - name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4 uses: actions/setup-node@v4
with: with:
node-version: ${{ matrix.node-version }} node-version: ${{ matrix.node-version }}
cache: '' cache: ''
cache-dependency-path: 'pnpm-lock.yaml' cache-dependency-path: 'yarn.lock'
- name: Install Dependencies - name: Install Dependencies
run: pnpm install --frozen-lockfile run: yarn install
- name: Test - name: Test
run: pnpm test run: yarn test

View File

@ -16,7 +16,6 @@ jobs:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
fetch-depth: 2 # last 2 commits fetch-depth: 2 # last 2 commits
persist-credentials: false
- name: Check for file changes in i18n/en-US - name: Check for file changes in i18n/en-US
id: check_files id: check_files
@ -39,11 +38,11 @@ jobs:
- name: Install dependencies - name: Install dependencies
if: env.FILES_CHANGED == 'true' if: env.FILES_CHANGED == 'true'
run: pnpm install --frozen-lockfile run: yarn install --frozen-lockfile
- name: Run npm script - name: Run npm script
if: env.FILES_CHANGED == 'true' if: env.FILES_CHANGED == 'true'
run: pnpm run auto-gen-i18n run: npm run auto-gen-i18n
- name: Create Pull Request - name: Create Pull Request
if: env.FILES_CHANGED == 'true' if: env.FILES_CHANGED == 'true'

View File

@ -28,9 +28,6 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Setup Poetry and Python ${{ matrix.python-version }} - name: Setup Poetry and Python ${{ matrix.python-version }}
uses: ./.github/actions/setup-poetry uses: ./.github/actions/setup-poetry
@ -54,15 +51,7 @@ jobs:
- name: Expose Service Ports - name: Expose Service Ports
run: sh .github/workflows/expose_service_ports.sh run: sh .github/workflows/expose_service_ports.sh
- name: Set up Vector Store (TiDB) - name: Set up Vector Stores (TiDB, Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
uses: hoverkraft-tech/compose-action@v2.0.2
with:
compose-file: docker/tidb/docker-compose.yaml
services: |
tidb
tiflash
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase)
uses: hoverkraft-tech/compose-action@v2.0.2 uses: hoverkraft-tech/compose-action@v2.0.2
with: with:
compose-file: | compose-file: |
@ -78,9 +67,7 @@ jobs:
pgvector pgvector
chroma chroma
elasticsearch elasticsearch
tidb
- name: Check TiDB Ready
run: poetry run -P api python api/tests/integration_tests/vdb/tidb_vector/check_tiflash_ready.py
- name: Test Vector Stores - name: Test Vector Stores
run: poetry run -P api bash dev/pytest/pytest_vdb.sh run: poetry run -P api bash dev/pytest/pytest_vdb.sh

View File

@ -22,34 +22,25 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@v4 uses: actions/checkout@v4
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files - name: Check changed files
id: changed-files id: changed-files
uses: tj-actions/changed-files@v45 uses: tj-actions/changed-files@v45
with: with:
files: web/** files: web/**
# to run pnpm, should install package canvas, but it always install failed on amd64 under ubuntu-latest
# - name: Install pnpm
# uses: pnpm/action-setup@v4
# with:
# version: 10
# run_install: false
# - name: Setup Node.js - name: Setup Node.js
# uses: actions/setup-node@v4 uses: actions/setup-node@v4
# if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
# with: with:
# node-version: 20 node-version: 20
# cache: pnpm cache: yarn
# cache-dependency-path: ./web/package.json cache-dependency-path: ./web/package.json
# - name: Install dependencies - name: Install dependencies
# if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
# run: pnpm install --frozen-lockfile run: yarn install --frozen-lockfile
# - name: Run tests - name: Run tests
# if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
# run: pnpm test run: yarn test

8
.gitignore vendored
View File

@ -163,7 +163,6 @@ docker/volumes/db/data/*
docker/volumes/redis/data/* docker/volumes/redis/data/*
docker/volumes/weaviate/* docker/volumes/weaviate/*
docker/volumes/qdrant/* docker/volumes/qdrant/*
docker/tidb/volumes/*
docker/volumes/etcd/* docker/volumes/etcd/*
docker/volumes/minio/* docker/volumes/minio/*
docker/volumes/milvus/* docker/volumes/milvus/*
@ -176,7 +175,6 @@ docker/volumes/pgvector/data/*
docker/volumes/pgvecto_rs/data/* docker/volumes/pgvecto_rs/data/*
docker/volumes/couchbase/* docker/volumes/couchbase/*
docker/volumes/oceanbase/* docker/volumes/oceanbase/*
docker/volumes/plugin_daemon/*
!docker/volumes/oceanbase/init.d !docker/volumes/oceanbase/init.d
docker/nginx/conf.d/default.conf docker/nginx/conf.d/default.conf
@ -195,9 +193,3 @@ api/.vscode
.idea/ .idea/
.vscode .vscode
# pnpm
/.pnpm-store
# plugin migrate
plugins.jsonl

View File

@ -73,7 +73,7 @@ Dify requires the following dependencies to build, make sure they're installed o
* [Docker](https://www.docker.com/) * [Docker](https://www.docker.com/)
* [Docker Compose](https://docs.docker.com/compose/install/) * [Docker Compose](https://docs.docker.com/compose/install/)
* [Node.js v18.x (LTS)](http://nodejs.org) * [Node.js v18.x (LTS)](http://nodejs.org)
* [pnpm](https://pnpm.io/) * [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
* [Python](https://www.python.org/) version 3.11.x or 3.12.x * [Python](https://www.python.org/) version 3.11.x or 3.12.x
### 4. Installations ### 4. Installations

View File

@ -70,7 +70,7 @@ Dify 依赖以下工具和库:
- [Docker](https://www.docker.com/) - [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/) - [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org) - [Node.js v18.x (LTS)](http://nodejs.org)
- [pnpm](https://pnpm.io/) - [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.11.x or 3.12.x - [Python](https://www.python.org/) version 3.11.x or 3.12.x
### 4. 安装 ### 4. 安装

View File

@ -73,7 +73,7 @@ Dify を構築するには次の依存関係が必要です。それらがシス
- [Docker](https://www.docker.com/) - [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/) - [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org) - [Node.js v18.x (LTS)](http://nodejs.org)
- [pnpm](https://pnpm.io/) - [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.11.x or 3.12.x - [Python](https://www.python.org/) version 3.11.x or 3.12.x
### 4. インストール ### 4. インストール

View File

@ -72,7 +72,7 @@ Dify yêu cầu các phụ thuộc sau để build, hãy đảm bảo chúng đ
- [Docker](https://www.docker.com/) - [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/) - [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org) - [Node.js v18.x (LTS)](http://nodejs.org)
- [pnpm](https://pnpm.io/) - [npm](https://www.npmjs.com/) phiên bản 8.x.x hoặc [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) phiên bản 3.11.x hoặc 3.12.x - [Python](https://www.python.org/) phiên bản 3.11.x hoặc 3.12.x
### 4. Cài đặt ### 4. Cài đặt

23
LICENSE
View File

@ -1,12 +1,12 @@
# Open Source License # Open Source License
Dify is licensed under a modified version of the Apache License 2.0, with the following additional conditions: Dify is licensed under the Apache License 2.0, with the following additional conditions:
1. Dify may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. Should the conditions below be met, a commercial license must be obtained from the producer: 1. Dify may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. Should the conditions below be met, a commercial license must be obtained from the producer:
a. Multi-tenant service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment. a. Multi-tenant service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
- Tenant Definition: Within the context of Dify, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations. - Tenant Definition: Within the context of Dify, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations.
b. LOGO and copyright information: In the process of using Dify's frontend, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend. b. LOGO and copyright information: In the process of using Dify's frontend, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend.
- Frontend Definition: For the purposes of this license, the "frontend" of Dify includes all components located in the `web/` directory when running Dify from the raw source code, or the "web" image when running Dify with Docker. - Frontend Definition: For the purposes of this license, the "frontend" of Dify includes all components located in the `web/` directory when running Dify from the raw source code, or the "web" image when running Dify with Docker.
@ -21,4 +21,19 @@ Apart from the specific conditions mentioned above, all other rights and restric
The interactive design of this product is protected by appearance patent. The interactive design of this product is protected by appearance patent.
© 2025 LangGenius, Inc. © 2024 LangGenius, Inc.
----------
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -108,72 +108,6 @@ Please refer to our [FAQ](https://docs.dify.ai/getting-started/install-self-host
**7. Backend-as-a-Service**: **7. Backend-as-a-Service**:
All of Dify's offerings come with corresponding APIs, so you could effortlessly integrate Dify into your own business logic. All of Dify's offerings come with corresponding APIs, so you could effortlessly integrate Dify into your own business logic.
## Feature Comparison
<table style="width: 100%;">
<tr>
<th align="center">Feature</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI Assistants API</th>
</tr>
<tr>
<td align="center">Programming Approach</td>
<td align="center">API + App-oriented</td>
<td align="center">Python Code</td>
<td align="center">App-oriented</td>
<td align="center">API-oriented</td>
</tr>
<tr>
<td align="center">Supported LLMs</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">OpenAI-only</td>
</tr>
<tr>
<td align="center">RAG Engine</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Agent</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Workflow</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Observability</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Enterprise Feature (SSO/Access control)</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Local Deployment</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
</table>
## Using Dify ## Using Dify

View File

@ -55,7 +55,7 @@
Dify est une plateforme de développement d'applications LLM open source. Son interface intuitive combine un flux de travail d'IA, un pipeline RAG, des capacités d'agent, une gestion de modèles, des fonctionnalités d'observabilité, et plus encore, vous permettant de passer rapidement du prototype à la production. Voici une liste des fonctionnalités principales: Dify est une plateforme de développement d'applications LLM open source. Son interface intuitive combine un flux de travail d'IA, un pipeline RAG, des capacités d'agent, une gestion de modèles, des fonctionnalités d'observabilité, et plus encore, vous permettant de passer rapidement du prototype à la production. Voici une liste des fonctionnalités principales:
</br> </br> </br> </br>
**1. Flux de travail** : **1. Flux de travail**:
Construisez et testez des flux de travail d'IA puissants sur un canevas visuel, en utilisant toutes les fonctionnalités suivantes et plus encore. Construisez et testez des flux de travail d'IA puissants sur un canevas visuel, en utilisant toutes les fonctionnalités suivantes et plus encore.
@ -63,25 +63,27 @@ Dify est une plateforme de développement d'applications LLM open source. Son in
**2. Prise en charge complète des modèles** : **2. Prise en charge complète des modèles**:
Intégration transparente avec des centaines de LLM propriétaires / open source provenant de dizaines de fournisseurs d'inférence et de solutions auto-hébergées, couvrant GPT, Mistral, Llama3, et tous les modèles compatibles avec l'API OpenAI. Une liste complète des fournisseurs de modèles pris en charge se trouve [ici](https://docs.dify.ai/getting-started/readme/model-providers). Intégration transparente avec des centaines de LLM propriétaires / open source provenant de dizaines de fournisseurs d'inférence et de solutions auto-hébergées, couvrant GPT, Mistral, Llama3, et tous les modèles compatibles avec l'API OpenAI. Une liste complète des fournisseurs de modèles pris en charge se trouve [ici](https://docs.dify.ai/getting-started/readme/model-providers).
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3) ![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
**3. IDE de prompt** : **3. IDE de prompt**:
Interface intuitive pour créer des prompts, comparer les performances des modèles et ajouter des fonctionnalités supplémentaires telles que la synthèse vocale à une application basée sur des chats. Interface intuitive pour créer des prompts, comparer les performances des modèles et ajouter des fonctionnalités supplémentaires telles que la synthèse vocale à une application basée sur des chats.
**4. Pipeline RAG** : **4. Pipeline RAG**:
Des capacités RAG étendues qui couvrent tout, de l'ingestion de documents à la récupération, avec un support prêt à l'emploi pour l'extraction de texte à partir de PDF, PPT et autres formats de document courants. Des capacités RAG étendues qui couvrent tout, de l'ingestion de documents à la récupération, avec un support prêt à l'emploi pour l'extraction de texte à partir de PDF, PPT et autres formats de document courants.
**5. Capacités d'agent** : **5. Capac
ités d'agent**:
Vous pouvez définir des agents basés sur l'appel de fonction LLM ou ReAct, et ajouter des outils pré-construits ou personnalisés pour l'agent. Dify fournit plus de 50 outils intégrés pour les agents d'IA, tels que la recherche Google, DALL·E, Stable Diffusion et WolframAlpha. Vous pouvez définir des agents basés sur l'appel de fonction LLM ou ReAct, et ajouter des outils pré-construits ou personnalisés pour l'agent. Dify fournit plus de 50 outils intégrés pour les agents d'IA, tels que la recherche Google, DALL·E, Stable Diffusion et WolframAlpha.
**6. LLMOps** : **6. LLMOps**:
Surveillez et analysez les journaux d'application et les performances au fil du temps. Vous pouvez continuellement améliorer les prompts, les ensembles de données et les modèles en fonction des données de production et des annotations. Surveillez et analysez les journaux d'application et les performances au fil du temps. Vous pouvez continuellement améliorer les prompts, les ensembles de données et les modèles en fonction des données de production et des annotations.
**7. Backend-as-a-Service** : **7. Backend-as-a-Service**:
Toutes les offres de Dify sont accompagnées d'API correspondantes, vous permettant d'intégrer facilement Dify dans votre propre logique métier. Toutes les offres de Dify sont accompagnées d'API correspondantes, vous permettant d'intégrer facilement Dify dans votre propre logique métier.

View File

@ -164,7 +164,7 @@ DifyはオープンソースのLLMアプリケーション開発プラットフ
- **企業/組織向けのDify</br>** - **企業/組織向けのDify</br>**
企業中心の機能を提供しています。[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)して企業のニーズについて相談してください。 </br> 企業中心の機能を提供しています。[メールを送信](mailto:business@dify.ai?subject=[GitHub]Business%20License%20Inquiry)して企業のニーズについて相談してください。 </br>
> AWSを使用しているスタートアップ企業や中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t23mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで自分のAWS VPCにデプロイできます。さらに、手頃な価格のAMIオファリングして、ロゴやブランディングをカスタマイズしてアプリケーションを作成するオプションがあります。 > AWSを使用しているスタートアップ企業や中小企業の場合は、[AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-t22mebxzwjhu6)のDify Premiumをチェックして、ワンクリックで自分のAWS VPCにデプロイできます。さらに、手頃な価格のAMIオファリングして、ロゴやブランディングをカスタマイズしてアプリケーションを作成するオプションがあります。
## 最新の情報を入手 ## 最新の情報を入手

View File

@ -87,7 +87,9 @@ Dify is an open-source LLM app development platform. Its intuitive interface com
## Feature Comparison ## Feature Comparison
<table style="width: 100%;"> <table style="width: 100%;">
<tr> <tr
>
<th align="center">Feature</th> <th align="center">Feature</th>
<th align="center">Dify.AI</th> <th align="center">Dify.AI</th>
<th align="center">LangChain</th> <th align="center">LangChain</th>

View File

@ -106,73 +106,6 @@ Prosimo, glejte naša pogosta vprašanja [FAQ](https://docs.dify.ai/getting-star
**7. Backend-as-a-Service**: **7. Backend-as-a-Service**:
AVse ponudbe Difyja so opremljene z ustreznimi API-ji, tako da lahko Dify brez težav integrirate v svojo poslovno logiko. AVse ponudbe Difyja so opremljene z ustreznimi API-ji, tako da lahko Dify brez težav integrirate v svojo poslovno logiko.
## Primerjava Funkcij
<table style="width: 100%;">
<tr>
<th align="center">Funkcija</th>
<th align="center">Dify.AI</th>
<th align="center">LangChain</th>
<th align="center">Flowise</th>
<th align="center">OpenAI Assistants API</th>
</tr>
<tr>
<td align="center">Programski pristop</td>
<td align="center">API + usmerjeno v aplikacije</td>
<td align="center">Python koda</td>
<td align="center">Usmerjeno v aplikacije</td>
<td align="center">Usmerjeno v API</td>
</tr>
<tr>
<td align="center">Podprti LLM-ji</td>
<td align="center">Bogata izbira</td>
<td align="center">Bogata izbira</td>
<td align="center">Bogata izbira</td>
<td align="center">Samo OpenAI</td>
</tr>
<tr>
<td align="center">RAG pogon</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Agent</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Potek dela</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Spremljanje</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Funkcija za podjetja (SSO/nadzor dostopa)</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Lokalna namestitev</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
</table>
## Uporaba Dify ## Uporaba Dify
@ -254,4 +187,4 @@ Zaradi zaščite vaše zasebnosti se izogibajte objavljanju varnostnih vprašanj
## Licenca ## Licenca
To skladišče je na voljo pod [odprtokodno licenco Dify](LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami. To skladišče je na voljo pod [odprtokodno licenco Dify](LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.

View File

@ -1,10 +1,7 @@
.env .env
*.env.* *.env.*
storage/generate_files/*
storage/privkeys/* storage/privkeys/*
storage/tools/*
storage/upload_files/*
# Logs # Logs
logs logs
@ -12,8 +9,6 @@ logs
# jetbrains # jetbrains
.idea .idea
.mypy_cache
.ruff_cache
# venv # venv
.venv .venv

View File

@ -409,6 +409,7 @@ MAX_VARIABLE_SIZE=204800
APP_MAX_EXECUTION_TIME=1200 APP_MAX_EXECUTION_TIME=1200
APP_MAX_ACTIVE_REQUESTS=0 APP_MAX_ACTIVE_REQUESTS=0
# Celery beat configuration # Celery beat configuration
CELERY_BEAT_SCHEDULER_TIME=1 CELERY_BEAT_SCHEDULER_TIME=1
@ -421,22 +422,6 @@ POSITION_PROVIDER_PINS=
POSITION_PROVIDER_INCLUDES= POSITION_PROVIDER_INCLUDES=
POSITION_PROVIDER_EXCLUDES= POSITION_PROVIDER_EXCLUDES=
# Plugin configuration
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
PLUGIN_DAEMON_URL=http://127.0.0.1:5002
PLUGIN_REMOTE_INSTALL_PORT=5003
PLUGIN_REMOTE_INSTALL_HOST=localhost
PLUGIN_MAX_PACKAGE_SIZE=15728640
INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
# Marketplace configuration
MARKETPLACE_ENABLED=true
MARKETPLACE_API_URL=https://marketplace.dify.ai
# Endpoint configuration
ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id}
# Reset password token expiry minutes # Reset password token expiry minutes
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5

View File

@ -58,8 +58,6 @@ RUN \
expat libldap-2.5-0 perl libsqlite3-0 zlib1g \ expat libldap-2.5-0 perl libsqlite3-0 zlib1g \
# install a chinese font to support the use of tools like matplotlib # install a chinese font to support the use of tools like matplotlib
fonts-noto-cjk \ fonts-noto-cjk \
# install a package to improve the accuracy of guessing mime type and file extension
media-types \
# install libmagic to support the use of python-magic guess MIMETYPE # install libmagic to support the use of python-magic guess MIMETYPE
libmagic1 \ libmagic1 \
&& apt-get autoremove -y \ && apt-get autoremove -y \
@ -73,10 +71,6 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
# Download nltk data # Download nltk data
RUN python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')" RUN python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')"
ENV TIKTOKEN_CACHE_DIR=/app/api/.tiktoken_cache
RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')"
# Copy source code # Copy source code
COPY . /app/api/ COPY . /app/api/

View File

@ -37,13 +37,7 @@
4. Create environment. 4. Create environment.
Dify API service uses [Poetry](https://python-poetry.org/docs/) to manage dependencies. First, you need to add the poetry shell plugin, if you don't have it already, in order to run in a virtual environment. [Note: Poetry shell is no longer a native command so you need to install the poetry plugin beforehand] Dify API service uses [Poetry](https://python-poetry.org/docs/) to manage dependencies. You can execute `poetry shell` to activate the environment.
```bash
poetry self add poetry-plugin-shell
```
Then, You can execute `poetry shell` to activate the environment.
5. Install dependencies 5. Install dependencies

View File

@ -2,7 +2,6 @@ import logging
import time import time
from configs import dify_config from configs import dify_config
from contexts.wrapper import RecyclableContextVar
from dify_app import DifyApp from dify_app import DifyApp
@ -17,12 +16,6 @@ def create_flask_app_with_configs() -> DifyApp:
dify_app = DifyApp(__name__) dify_app = DifyApp(__name__)
dify_app.config.from_mapping(dify_config.model_dump()) dify_app.config.from_mapping(dify_config.model_dump())
# add before request hook
@dify_app.before_request
def before_request():
# add an unique identifier to each request
RecyclableContextVar.increment_thread_recycles()
return dify_app return dify_app

View File

@ -25,8 +25,6 @@ from models.dataset import Document as DatasetDocument
from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation
from models.provider import Provider, ProviderModel from models.provider import Provider, ProviderModel
from services.account_service import RegisterService, TenantService from services.account_service import RegisterService, TenantService
from services.plugin.data_migration import PluginDataMigration
from services.plugin.plugin_migration import PluginMigration
@click.command("reset-password", help="Reset the account password.") @click.command("reset-password", help="Reset the account password.")
@ -526,7 +524,7 @@ def add_qdrant_doc_id_index(field: str):
) )
) )
except Exception: except Exception as e:
click.echo(click.style("Failed to create Qdrant client.", fg="red")) click.echo(click.style("Failed to create Qdrant client.", fg="red"))
click.echo(click.style(f"Index creation complete. Created {create_count} collection indexes.", fg="green")) click.echo(click.style(f"Index creation complete. Created {create_count} collection indexes.", fg="green"))
@ -595,7 +593,7 @@ def upgrade_db():
click.echo(click.style("Database migration successful!", fg="green")) click.echo(click.style("Database migration successful!", fg="green"))
except Exception: except Exception as e:
logging.exception("Failed to execute database migration") logging.exception("Failed to execute database migration")
finally: finally:
lock.release() lock.release()
@ -641,7 +639,7 @@ where sites.id is null limit 1000"""
account = accounts[0] account = accounts[0]
print("Fixing missing site for app {}".format(app.id)) print("Fixing missing site for app {}".format(app.id))
app_was_created.send(app, account=account) app_was_created.send(app, account=account)
except Exception: except Exception as e:
failed_app_ids.append(app_id) failed_app_ids.append(app_id)
click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red")) click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red"))
logging.exception(f"Failed to fix app related site missing issue, app_id: {app_id}") logging.exception(f"Failed to fix app related site missing issue, app_id: {app_id}")
@ -651,69 +649,3 @@ where sites.id is null limit 1000"""
break break
click.echo(click.style("Fix for missing app-related sites completed successfully!", fg="green")) click.echo(click.style("Fix for missing app-related sites completed successfully!", fg="green"))
@click.command("migrate-data-for-plugin", help="Migrate data for plugin.")
def migrate_data_for_plugin():
"""
Migrate data for plugin.
"""
click.echo(click.style("Starting migrate data for plugin.", fg="white"))
PluginDataMigration.migrate()
click.echo(click.style("Migrate data for plugin completed.", fg="green"))
@click.command("extract-plugins", help="Extract plugins.")
@click.option("--output_file", prompt=True, help="The file to store the extracted plugins.", default="plugins.jsonl")
@click.option("--workers", prompt=True, help="The number of workers to extract plugins.", default=10)
def extract_plugins(output_file: str, workers: int):
"""
Extract plugins.
"""
click.echo(click.style("Starting extract plugins.", fg="white"))
PluginMigration.extract_plugins(output_file, workers)
click.echo(click.style("Extract plugins completed.", fg="green"))
@click.command("extract-unique-identifiers", help="Extract unique identifiers.")
@click.option(
"--output_file",
prompt=True,
help="The file to store the extracted unique identifiers.",
default="unique_identifiers.json",
)
@click.option(
"--input_file", prompt=True, help="The file to store the extracted unique identifiers.", default="plugins.jsonl"
)
def extract_unique_plugins(output_file: str, input_file: str):
"""
Extract unique plugins.
"""
click.echo(click.style("Starting extract unique plugins.", fg="white"))
PluginMigration.extract_unique_plugins_to_file(input_file, output_file)
click.echo(click.style("Extract unique plugins completed.", fg="green"))
@click.command("install-plugins", help="Install plugins.")
@click.option(
"--input_file", prompt=True, help="The file to store the extracted unique identifiers.", default="plugins.jsonl"
)
@click.option(
"--output_file", prompt=True, help="The file to store the installed plugins.", default="installed_plugins.jsonl"
)
@click.option("--workers", prompt=True, help="The number of workers to install plugins.", default=100)
def install_plugins(input_file: str, output_file: str, workers: int):
"""
Install plugins.
"""
click.echo(click.style("Starting install plugins.", fg="white"))
PluginMigration.install_plugins(input_file, output_file, workers)
click.echo(click.style("Install plugins completed.", fg="green"))

View File

@ -134,60 +134,6 @@ class CodeExecutionSandboxConfig(BaseSettings):
) )
class PluginConfig(BaseSettings):
"""
Plugin configs
"""
PLUGIN_DAEMON_URL: HttpUrl = Field(
description="Plugin API URL",
default="http://localhost:5002",
)
PLUGIN_DAEMON_KEY: str = Field(
description="Plugin API key",
default="plugin-api-key",
)
INNER_API_KEY_FOR_PLUGIN: str = Field(description="Inner api key for plugin", default="inner-api-key")
PLUGIN_REMOTE_INSTALL_HOST: str = Field(
description="Plugin Remote Install Host",
default="localhost",
)
PLUGIN_REMOTE_INSTALL_PORT: PositiveInt = Field(
description="Plugin Remote Install Port",
default=5003,
)
PLUGIN_MAX_PACKAGE_SIZE: PositiveInt = Field(
description="Maximum allowed size for plugin packages in bytes",
default=15728640,
)
PLUGIN_MAX_BUNDLE_SIZE: PositiveInt = Field(
description="Maximum allowed size for plugin bundles in bytes",
default=15728640 * 12,
)
class MarketplaceConfig(BaseSettings):
"""
Configuration for marketplace
"""
MARKETPLACE_ENABLED: bool = Field(
description="Enable or disable marketplace",
default=True,
)
MARKETPLACE_API_URL: HttpUrl = Field(
description="Marketplace API URL",
default="https://marketplace.dify.ai",
)
class EndpointConfig(BaseSettings): class EndpointConfig(BaseSettings):
""" """
Configuration for various application endpoints and URLs Configuration for various application endpoints and URLs
@ -214,10 +160,6 @@ class EndpointConfig(BaseSettings):
default="", default="",
) )
ENDPOINT_URL_TEMPLATE: str = Field(
description="Template url for endpoint plugin", default="http://localhost:5002/e/{hook_id}"
)
class FileAccessConfig(BaseSettings): class FileAccessConfig(BaseSettings):
""" """
@ -373,8 +315,8 @@ class HttpConfig(BaseSettings):
) )
RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field( RESPECT_XFORWARD_HEADERS_ENABLED: bool = Field(
description="Enable handling of X-Forwarded-For, X-Forwarded-Proto, and X-Forwarded-Port headers" description="Enable or disable the X-Forwarded-For Proxy Fix middleware from Werkzeug"
" when the app is behind a single trusted reverse proxy.", " to respect X-* headers to redirect clients",
default=False, default=False,
) )
@ -556,11 +498,6 @@ class AuthConfig(BaseSettings):
default=86400, default=86400,
) )
FORGOT_PASSWORD_LOCKOUT_DURATION: PositiveInt = Field(
description="Time (in seconds) a user must wait before retrying password reset after exceeding the rate limit.",
default=86400,
)
class ModerationConfig(BaseSettings): class ModerationConfig(BaseSettings):
""" """
@ -851,8 +788,6 @@ class FeatureConfig(
AuthConfig, # Changed from OAuthConfig to AuthConfig AuthConfig, # Changed from OAuthConfig to AuthConfig
BillingConfig, BillingConfig,
CodeExecutionSandboxConfig, CodeExecutionSandboxConfig,
PluginConfig,
MarketplaceConfig,
DataSetConfig, DataSetConfig,
EndpointConfig, EndpointConfig,
FileAccessConfig, FileAccessConfig,

View File

@ -1,4 +1,3 @@
import os
from typing import Any, Literal, Optional from typing import Any, Literal, Optional
from urllib.parse import quote_plus from urllib.parse import quote_plus
@ -167,11 +166,6 @@ class DatabaseConfig(BaseSettings):
default=False, default=False,
) )
RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field(
description="Number of processes for the retrieval service, default to CPU cores.",
default=os.cpu_count(),
)
@computed_field @computed_field
def SQLALCHEMY_ENGINE_OPTIONS(self) -> dict[str, Any]: def SQLALCHEMY_ENGINE_OPTIONS(self) -> dict[str, Any]:
return { return {

View File

@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field( CURRENT_VERSION: str = Field(
description="Dify version", description="Dify version",
default="1.0.0", default="0.15.3",
) )
COMMIT_SHA: str = Field( COMMIT_SHA: str = Field(

View File

@ -15,7 +15,7 @@ AUDIO_EXTENSIONS.extend([ext.upper() for ext in AUDIO_EXTENSIONS])
if dify_config.ETL_TYPE == "Unstructured": if dify_config.ETL_TYPE == "Unstructured":
DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls"] DOCUMENT_EXTENSIONS = ["txt", "markdown", "md", "mdx", "pdf", "html", "htm", "xlsx", "xls"]
DOCUMENT_EXTENSIONS.extend(("doc", "docx", "csv", "eml", "msg", "pptx", "xml", "epub")) DOCUMENT_EXTENSIONS.extend(("docx", "csv", "eml", "msg", "pptx", "xml", "epub"))
if dify_config.UNSTRUCTURED_API_URL: if dify_config.UNSTRUCTURED_API_URL:
DOCUMENT_EXTENSIONS.append("ppt") DOCUMENT_EXTENSIONS.append("ppt")
DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS]) DOCUMENT_EXTENSIONS.extend([ext.upper() for ext in DOCUMENT_EXTENSIONS])

View File

@ -1,30 +1,9 @@
from contextvars import ContextVar from contextvars import ContextVar
from threading import Lock
from typing import TYPE_CHECKING from typing import TYPE_CHECKING
from contexts.wrapper import RecyclableContextVar
if TYPE_CHECKING: if TYPE_CHECKING:
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
from core.tools.plugin_tool.provider import PluginToolProviderController
from core.workflow.entities.variable_pool import VariablePool from core.workflow.entities.variable_pool import VariablePool
tenant_id: ContextVar[str] = ContextVar("tenant_id") tenant_id: ContextVar[str] = ContextVar("tenant_id")
workflow_variable_pool: ContextVar["VariablePool"] = ContextVar("workflow_variable_pool") workflow_variable_pool: ContextVar["VariablePool"] = ContextVar("workflow_variable_pool")
"""
To avoid race-conditions caused by gunicorn thread recycling, using RecyclableContextVar to replace with
"""
plugin_tool_providers: RecyclableContextVar[dict[str, "PluginToolProviderController"]] = RecyclableContextVar(
ContextVar("plugin_tool_providers")
)
plugin_tool_providers_lock: RecyclableContextVar[Lock] = RecyclableContextVar(ContextVar("plugin_tool_providers_lock"))
plugin_model_providers: RecyclableContextVar[list["PluginModelProviderEntity"] | None] = RecyclableContextVar(
ContextVar("plugin_model_providers")
)
plugin_model_providers_lock: RecyclableContextVar[Lock] = RecyclableContextVar(
ContextVar("plugin_model_providers_lock")
)

View File

@ -1,65 +0,0 @@
from contextvars import ContextVar
from typing import Generic, TypeVar
T = TypeVar("T")
class HiddenValue:
pass
_default = HiddenValue()
class RecyclableContextVar(Generic[T]):
"""
RecyclableContextVar is a wrapper around ContextVar
It's safe to use in gunicorn with thread recycling, but features like `reset` are not available for now
NOTE: you need to call `increment_thread_recycles` before requests
"""
_thread_recycles: ContextVar[int] = ContextVar("thread_recycles")
@classmethod
def increment_thread_recycles(cls):
try:
recycles = cls._thread_recycles.get()
cls._thread_recycles.set(recycles + 1)
except LookupError:
cls._thread_recycles.set(0)
def __init__(self, context_var: ContextVar[T]):
self._context_var = context_var
self._updates = ContextVar[int](context_var.name + "_updates", default=0)
def get(self, default: T | HiddenValue = _default) -> T:
thread_recycles = self._thread_recycles.get(0)
self_updates = self._updates.get()
if thread_recycles > self_updates:
self._updates.set(thread_recycles)
# check if thread is recycled and should be updated
if thread_recycles < self_updates:
return self._context_var.get()
else:
# thread_recycles >= self_updates, means current context is invalid
if isinstance(default, HiddenValue) or default is _default:
raise LookupError
else:
return default
def set(self, value: T):
# it leads to a situation that self.updates is less than cls.thread_recycles if `set` was never called before
# increase it manually
thread_recycles = self._thread_recycles.get(0)
self_updates = self._updates.get()
if thread_recycles > self_updates:
self._updates.set(thread_recycles)
if self._updates.get() == self._thread_recycles.get(0):
# after increment,
self._updates.set(self._updates.get() + 1)
# set the context
self._context_var.set(value)

View File

@ -2,7 +2,7 @@ from flask import Blueprint
from libs.external_api import ExternalApi from libs.external_api import ExternalApi
from .app.app_import import AppImportApi, AppImportCheckDependenciesApi, AppImportConfirmApi from .app.app_import import AppImportApi, AppImportConfirmApi
from .explore.audio import ChatAudioApi, ChatTextApi from .explore.audio import ChatAudioApi, ChatTextApi
from .explore.completion import ChatApi, ChatStopApi, CompletionApi, CompletionStopApi from .explore.completion import ChatApi, ChatStopApi, CompletionApi, CompletionStopApi
from .explore.conversation import ( from .explore.conversation import (
@ -40,7 +40,6 @@ api.add_resource(RemoteFileUploadApi, "/remote-files/upload")
# Import App # Import App
api.add_resource(AppImportApi, "/apps/imports") api.add_resource(AppImportApi, "/apps/imports")
api.add_resource(AppImportConfirmApi, "/apps/imports/<string:import_id>/confirm") api.add_resource(AppImportConfirmApi, "/apps/imports/<string:import_id>/confirm")
api.add_resource(AppImportCheckDependenciesApi, "/apps/imports/<string:app_id>/check-dependencies")
# Import other controllers # Import other controllers
from . import admin, apikey, extension, feature, ping, setup, version from . import admin, apikey, extension, feature, ping, setup, version
@ -167,15 +166,4 @@ api.add_resource(
from .tag import tags from .tag import tags
# Import workspace controllers # Import workspace controllers
from .workspace import ( from .workspace import account, load_balancing_config, members, model_providers, models, tool_providers, workspace
account,
agent_providers,
endpoint,
load_balancing_config,
members,
model_providers,
models,
plugin,
tool_providers,
workspace,
)

View File

@ -2,8 +2,6 @@ from functools import wraps
from flask import request from flask import request
from flask_restful import Resource, reqparse # type: ignore from flask_restful import Resource, reqparse # type: ignore
from sqlalchemy import select
from sqlalchemy.orm import Session
from werkzeug.exceptions import NotFound, Unauthorized from werkzeug.exceptions import NotFound, Unauthorized
from configs import dify_config from configs import dify_config
@ -56,8 +54,7 @@ class InsertExploreAppListApi(Resource):
parser.add_argument("position", type=int, required=True, nullable=False, location="json") parser.add_argument("position", type=int, required=True, nullable=False, location="json")
args = parser.parse_args() args = parser.parse_args()
with Session(db.engine) as session: app = App.query.filter(App.id == args["app_id"]).first()
app = session.execute(select(App).filter(App.id == args["app_id"])).scalar_one_or_none()
if not app: if not app:
raise NotFound(f"App '{args['app_id']}' is not found") raise NotFound(f"App '{args['app_id']}' is not found")
@ -73,10 +70,7 @@ class InsertExploreAppListApi(Resource):
privacy_policy = site.privacy_policy or args["privacy_policy"] or "" privacy_policy = site.privacy_policy or args["privacy_policy"] or ""
custom_disclaimer = site.custom_disclaimer or args["custom_disclaimer"] or "" custom_disclaimer = site.custom_disclaimer or args["custom_disclaimer"] or ""
with Session(db.engine) as session: recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args["app_id"]).first()
recommended_app = session.execute(
select(RecommendedApp).filter(RecommendedApp.app_id == args["app_id"])
).scalar_one_or_none()
if not recommended_app: if not recommended_app:
recommended_app = RecommendedApp( recommended_app = RecommendedApp(
@ -116,27 +110,17 @@ class InsertExploreAppApi(Resource):
@only_edition_cloud @only_edition_cloud
@admin_required @admin_required
def delete(self, app_id): def delete(self, app_id):
with Session(db.engine) as session: recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == str(app_id)).first()
recommended_app = session.execute(
select(RecommendedApp).filter(RecommendedApp.app_id == str(app_id))
).scalar_one_or_none()
if not recommended_app: if not recommended_app:
return {"result": "success"}, 204 return {"result": "success"}, 204
with Session(db.engine) as session: app = App.query.filter(App.id == recommended_app.app_id).first()
app = session.execute(select(App).filter(App.id == recommended_app.app_id)).scalar_one_or_none()
if app: if app:
app.is_public = False app.is_public = False
with Session(db.engine) as session: installed_apps = InstalledApp.query.filter(
installed_apps = session.execute( InstalledApp.app_id == recommended_app.app_id, InstalledApp.tenant_id != InstalledApp.app_owner_tenant_id
select(InstalledApp).filter( ).all()
InstalledApp.app_id == recommended_app.app_id,
InstalledApp.tenant_id != InstalledApp.app_owner_tenant_id,
)
).all()
for installed_app in installed_apps: for installed_app in installed_apps:
db.session.delete(installed_app) db.session.delete(installed_app)

View File

@ -3,8 +3,6 @@ from typing import Any
import flask_restful # type: ignore import flask_restful # type: ignore
from flask_login import current_user # type: ignore from flask_login import current_user # type: ignore
from flask_restful import Resource, fields, marshal_with from flask_restful import Resource, fields, marshal_with
from sqlalchemy import select
from sqlalchemy.orm import Session
from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Forbidden
from extensions.ext_database import db from extensions.ext_database import db
@ -28,16 +26,7 @@ api_key_list = {"data": fields.List(fields.Nested(api_key_fields), attribute="it
def _get_resource(resource_id, tenant_id, resource_model): def _get_resource(resource_id, tenant_id, resource_model):
if resource_model == App: resource = resource_model.query.filter_by(id=resource_id, tenant_id=tenant_id).first()
with Session(db.engine) as session:
resource = session.execute(
select(resource_model).filter_by(id=resource_id, tenant_id=tenant_id)
).scalar_one_or_none()
else:
with Session(db.engine) as session:
resource = session.execute(
select(resource_model).filter_by(id=resource_id, tenant_id=tenant_id)
).scalar_one_or_none()
if resource is None: if resource is None:
flask_restful.abort(404, message=f"{resource_model.__name__} not found.") flask_restful.abort(404, message=f"{resource_model.__name__} not found.")

View File

@ -5,16 +5,14 @@ from flask_restful import Resource, marshal_with, reqparse # type: ignore
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
from werkzeug.exceptions import Forbidden from werkzeug.exceptions import Forbidden
from controllers.console.app.wraps import get_app_model
from controllers.console.wraps import ( from controllers.console.wraps import (
account_initialization_required, account_initialization_required,
setup_required, setup_required,
) )
from extensions.ext_database import db from extensions.ext_database import db
from fields.app_fields import app_import_check_dependencies_fields, app_import_fields from fields.app_fields import app_import_fields
from libs.login import login_required from libs.login import login_required
from models import Account from models import Account
from models.model import App
from services.app_dsl_service import AppDslService, ImportStatus from services.app_dsl_service import AppDslService, ImportStatus
@ -90,20 +88,3 @@ class AppImportConfirmApi(Resource):
if result.status == ImportStatus.FAILED.value: if result.status == ImportStatus.FAILED.value:
return result.model_dump(mode="json"), 400 return result.model_dump(mode="json"), 400
return result.model_dump(mode="json"), 200 return result.model_dump(mode="json"), 200
class AppImportCheckDependenciesApi(Resource):
@setup_required
@login_required
@get_app_model
@account_initialization_required
@marshal_with(app_import_check_dependencies_fields)
def get(self, app_model: App):
if not current_user.is_editor:
raise Forbidden()
with Session(db.engine) as session:
import_service = AppDslService(session)
result = import_service.check_dependencies(app_model=app_model)
return result.model_dump(mode="json"), 200

View File

@ -2,7 +2,6 @@ from datetime import UTC, datetime
from flask_login import current_user # type: ignore from flask_login import current_user # type: ignore
from flask_restful import Resource, marshal_with, reqparse # type: ignore from flask_restful import Resource, marshal_with, reqparse # type: ignore
from sqlalchemy.orm import Session
from werkzeug.exceptions import Forbidden, NotFound from werkzeug.exceptions import Forbidden, NotFound
from constants.languages import supported_language from constants.languages import supported_language
@ -51,37 +50,33 @@ class AppSite(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
with Session(db.engine) as session: site = Site.query.filter(Site.app_id == app_model.id).one_or_404()
site = session.query(Site).filter(Site.app_id == app_model.id).first()
if not site: for attr_name in [
raise NotFound "title",
"icon_type",
"icon",
"icon_background",
"description",
"default_language",
"chat_color_theme",
"chat_color_theme_inverted",
"customize_domain",
"copyright",
"privacy_policy",
"custom_disclaimer",
"customize_token_strategy",
"prompt_public",
"show_workflow_steps",
"use_icon_as_answer_icon",
]:
value = args.get(attr_name)
if value is not None:
setattr(site, attr_name, value)
for attr_name in [ site.updated_by = current_user.id
"title", site.updated_at = datetime.now(UTC).replace(tzinfo=None)
"icon_type", db.session.commit()
"icon",
"icon_background",
"description",
"default_language",
"chat_color_theme",
"chat_color_theme_inverted",
"customize_domain",
"copyright",
"privacy_policy",
"custom_disclaimer",
"customize_token_strategy",
"prompt_public",
"show_workflow_steps",
"use_icon_as_answer_icon",
]:
value = args.get(attr_name)
if value is not None:
setattr(site, attr_name, value)
site.updated_by = current_user.id
site.updated_at = datetime.now(UTC).replace(tzinfo=None)
session.commit()
return site return site

View File

@ -20,7 +20,6 @@ from libs import helper
from libs.helper import TimestampField, uuid_value from libs.helper import TimestampField, uuid_value
from libs.login import current_user, login_required from libs.login import current_user, login_required
from models import App from models import App
from models.account import Account
from models.model import AppMode from models.model import AppMode
from services.app_generate_service import AppGenerateService from services.app_generate_service import AppGenerateService
from services.errors.app import WorkflowHashNotEqualError from services.errors.app import WorkflowHashNotEqualError
@ -97,9 +96,6 @@ class DraftWorkflowApi(Resource):
else: else:
abort(415) abort(415)
if not isinstance(current_user, Account):
raise Forbidden()
workflow_service = WorkflowService() workflow_service = WorkflowService()
try: try:
@ -143,9 +139,6 @@ class AdvancedChatDraftWorkflowRunApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("inputs", type=dict, location="json") parser.add_argument("inputs", type=dict, location="json")
parser.add_argument("query", type=str, required=True, location="json", default="") parser.add_argument("query", type=str, required=True, location="json", default="")
@ -167,7 +160,7 @@ class AdvancedChatDraftWorkflowRunApi(Resource):
raise ConversationCompletedError() raise ConversationCompletedError()
except ValueError as e: except ValueError as e:
raise e raise e
except Exception: except Exception as e:
logging.exception("internal server error.") logging.exception("internal server error.")
raise InternalServerError() raise InternalServerError()
@ -185,9 +178,6 @@ class AdvancedChatDraftRunIterationNodeApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("inputs", type=dict, location="json") parser.add_argument("inputs", type=dict, location="json")
args = parser.parse_args() args = parser.parse_args()
@ -204,7 +194,7 @@ class AdvancedChatDraftRunIterationNodeApi(Resource):
raise ConversationCompletedError() raise ConversationCompletedError()
except ValueError as e: except ValueError as e:
raise e raise e
except Exception: except Exception as e:
logging.exception("internal server error.") logging.exception("internal server error.")
raise InternalServerError() raise InternalServerError()
@ -222,9 +212,6 @@ class WorkflowDraftRunIterationNodeApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("inputs", type=dict, location="json") parser.add_argument("inputs", type=dict, location="json")
args = parser.parse_args() args = parser.parse_args()
@ -241,7 +228,7 @@ class WorkflowDraftRunIterationNodeApi(Resource):
raise ConversationCompletedError() raise ConversationCompletedError()
except ValueError as e: except ValueError as e:
raise e raise e
except Exception: except Exception as e:
logging.exception("internal server error.") logging.exception("internal server error.")
raise InternalServerError() raise InternalServerError()
@ -259,9 +246,6 @@ class DraftWorkflowRunApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("inputs", type=dict, required=True, nullable=False, location="json") parser.add_argument("inputs", type=dict, required=True, nullable=False, location="json")
parser.add_argument("files", type=list, required=False, location="json") parser.add_argument("files", type=list, required=False, location="json")
@ -310,20 +294,13 @@ class DraftWorkflowNodeRunApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("inputs", type=dict, required=True, nullable=False, location="json") parser.add_argument("inputs", type=dict, required=True, nullable=False, location="json")
args = parser.parse_args() args = parser.parse_args()
inputs = args.get("inputs")
if inputs == None:
raise ValueError("missing inputs")
workflow_service = WorkflowService() workflow_service = WorkflowService()
workflow_node_execution = workflow_service.run_draft_workflow_node( workflow_node_execution = workflow_service.run_draft_workflow_node(
app_model=app_model, node_id=node_id, user_inputs=inputs, account=current_user app_model=app_model, node_id=node_id, user_inputs=args.get("inputs"), account=current_user
) )
return workflow_node_execution return workflow_node_execution
@ -362,9 +339,6 @@ class PublishedWorkflowApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
workflow_service = WorkflowService() workflow_service = WorkflowService()
workflow = workflow_service.publish_workflow(app_model=app_model, account=current_user) workflow = workflow_service.publish_workflow(app_model=app_model, account=current_user)
@ -402,17 +376,12 @@ class DefaultBlockConfigApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("q", type=str, location="args") parser.add_argument("q", type=str, location="args")
args = parser.parse_args() args = parser.parse_args()
q = args.get("q")
filters = None filters = None
if q: if args.get("q"):
try: try:
filters = json.loads(args.get("q", "")) filters = json.loads(args.get("q", ""))
except json.JSONDecodeError: except json.JSONDecodeError:
@ -438,9 +407,6 @@ class ConvertToWorkflowApi(Resource):
if not current_user.is_editor: if not current_user.is_editor:
raise Forbidden() raise Forbidden()
if not isinstance(current_user, Account):
raise Forbidden()
if request.data: if request.data:
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=False, nullable=True, location="json") parser.add_argument("name", type=str, required=False, nullable=True, location="json")

View File

@ -59,9 +59,3 @@ class EmailCodeAccountDeletionRateLimitExceededError(BaseHTTPException):
error_code = "email_code_account_deletion_rate_limit_exceeded" error_code = "email_code_account_deletion_rate_limit_exceeded"
description = "Too many account deletion emails have been sent. Please try again in 5 minutes." description = "Too many account deletion emails have been sent. Please try again in 5 minutes."
code = 429 code = 429
class EmailPasswordResetLimitError(BaseHTTPException):
error_code = "email_password_reset_limit"
description = "Too many failed password reset attempts. Please try again in 24 hours."
code = 429

View File

@ -3,18 +3,10 @@ import secrets
from flask import request from flask import request
from flask_restful import Resource, reqparse # type: ignore from flask_restful import Resource, reqparse # type: ignore
from sqlalchemy import select
from sqlalchemy.orm import Session
from constants.languages import languages from constants.languages import languages
from controllers.console import api from controllers.console import api
from controllers.console.auth.error import ( from controllers.console.auth.error import EmailCodeError, InvalidEmailError, InvalidTokenError, PasswordMismatchError
EmailCodeError,
EmailPasswordResetLimitError,
InvalidEmailError,
InvalidTokenError,
PasswordMismatchError,
)
from controllers.console.error import AccountInFreezeError, AccountNotFound, EmailSendIpLimitError from controllers.console.error import AccountInFreezeError, AccountNotFound, EmailSendIpLimitError
from controllers.console.wraps import setup_required from controllers.console.wraps import setup_required
from events.tenant_event import tenant_was_created from events.tenant_event import tenant_was_created
@ -45,8 +37,7 @@ class ForgotPasswordSendEmailApi(Resource):
else: else:
language = "en-US" language = "en-US"
with Session(db.engine) as session: account = Account.query.filter_by(email=args["email"]).first()
account = session.execute(select(Account).filter_by(email=args["email"])).scalar_one_or_none()
token = None token = None
if account is None: if account is None:
if FeatureService.get_system_features().is_allow_register: if FeatureService.get_system_features().is_allow_register:
@ -71,10 +62,6 @@ class ForgotPasswordCheckApi(Resource):
user_email = args["email"] user_email = args["email"]
is_forgot_password_error_rate_limit = AccountService.is_forgot_password_error_rate_limit(args["email"])
if is_forgot_password_error_rate_limit:
raise EmailPasswordResetLimitError()
token_data = AccountService.get_reset_password_data(args["token"]) token_data = AccountService.get_reset_password_data(args["token"])
if token_data is None: if token_data is None:
raise InvalidTokenError() raise InvalidTokenError()
@ -83,10 +70,8 @@ class ForgotPasswordCheckApi(Resource):
raise InvalidEmailError() raise InvalidEmailError()
if args["code"] != token_data.get("code"): if args["code"] != token_data.get("code"):
AccountService.add_forgot_password_error_rate_limit(args["email"])
raise EmailCodeError() raise EmailCodeError()
AccountService.reset_forgot_password_error_rate_limit(args["email"])
return {"is_valid": True, "email": token_data.get("email")} return {"is_valid": True, "email": token_data.get("email")}
@ -119,8 +104,7 @@ class ForgotPasswordResetApi(Resource):
password_hashed = hash_password(new_password, salt) password_hashed = hash_password(new_password, salt)
base64_password_hashed = base64.b64encode(password_hashed).decode() base64_password_hashed = base64.b64encode(password_hashed).decode()
with Session(db.engine) as session: account = Account.query.filter_by(email=reset_data.get("email")).first()
account = session.execute(select(Account).filter_by(email=reset_data.get("email"))).scalar_one_or_none()
if account: if account:
account.password = base64_password_hashed account.password = base64_password_hashed
account.password_salt = base64_salt account.password_salt = base64_salt
@ -141,7 +125,7 @@ class ForgotPasswordResetApi(Resource):
) )
except WorkSpaceNotAllowedCreateError: except WorkSpaceNotAllowedCreateError:
pass pass
except AccountRegisterError: except AccountRegisterError as are:
raise AccountInFreezeError() raise AccountInFreezeError()
return {"result": "success"} return {"result": "success"}

View File

@ -5,8 +5,6 @@ from typing import Optional
import requests import requests
from flask import current_app, redirect, request from flask import current_app, redirect, request
from flask_restful import Resource # type: ignore from flask_restful import Resource # type: ignore
from sqlalchemy import select
from sqlalchemy.orm import Session
from werkzeug.exceptions import Unauthorized from werkzeug.exceptions import Unauthorized
from configs import dify_config from configs import dify_config
@ -137,8 +135,7 @@ def _get_account_by_openid_or_email(provider: str, user_info: OAuthUserInfo) ->
account: Optional[Account] = Account.get_by_openid(provider, user_info.id) account: Optional[Account] = Account.get_by_openid(provider, user_info.id)
if not account: if not account:
with Session(db.engine) as session: account = Account.query.filter_by(email=user_info.email).first()
account = session.execute(select(Account).filter_by(email=user_info.email)).scalar_one_or_none()
return account return account

View File

@ -4,8 +4,6 @@ import json
from flask import request from flask import request
from flask_login import current_user # type: ignore from flask_login import current_user # type: ignore
from flask_restful import Resource, marshal_with, reqparse # type: ignore from flask_restful import Resource, marshal_with, reqparse # type: ignore
from sqlalchemy import select
from sqlalchemy.orm import Session
from werkzeug.exceptions import NotFound from werkzeug.exceptions import NotFound
from controllers.console import api from controllers.console import api
@ -78,10 +76,7 @@ class DataSourceApi(Resource):
def patch(self, binding_id, action): def patch(self, binding_id, action):
binding_id = str(binding_id) binding_id = str(binding_id)
action = str(action) action = str(action)
with Session(db.engine) as session: data_source_binding = DataSourceOauthBinding.query.filter_by(id=binding_id).first()
data_source_binding = session.execute(
select(DataSourceOauthBinding).filter_by(id=binding_id)
).scalar_one_or_none()
if data_source_binding is None: if data_source_binding is None:
raise NotFound("Data source binding not found.") raise NotFound("Data source binding not found.")
# enable binding # enable binding
@ -113,53 +108,47 @@ class DataSourceNotionListApi(Resource):
def get(self): def get(self):
dataset_id = request.args.get("dataset_id", default=None, type=str) dataset_id = request.args.get("dataset_id", default=None, type=str)
exist_page_ids = [] exist_page_ids = []
with Session(db.engine) as session: # import notion in the exist dataset
# import notion in the exist dataset if dataset_id:
if dataset_id: dataset = DatasetService.get_dataset(dataset_id)
dataset = DatasetService.get_dataset(dataset_id) if not dataset:
if not dataset: raise NotFound("Dataset not found.")
raise NotFound("Dataset not found.") if dataset.data_source_type != "notion_import":
if dataset.data_source_type != "notion_import": raise ValueError("Dataset is not notion type.")
raise ValueError("Dataset is not notion type.") documents = Document.query.filter_by(
dataset_id=dataset_id,
documents = session.execute( tenant_id=current_user.current_tenant_id,
select(Document).filter_by( data_source_type="notion_import",
dataset_id=dataset_id, enabled=True,
tenant_id=current_user.current_tenant_id,
data_source_type="notion_import",
enabled=True,
)
).all()
if documents:
for document in documents:
data_source_info = json.loads(document.data_source_info)
exist_page_ids.append(data_source_info["notion_page_id"])
# get all authorized pages
data_source_bindings = session.scalars(
select(DataSourceOauthBinding).filter_by(
tenant_id=current_user.current_tenant_id, provider="notion", disabled=False
)
).all() ).all()
if not data_source_bindings: if documents:
return {"notion_info": []}, 200 for document in documents:
pre_import_info_list = [] data_source_info = json.loads(document.data_source_info)
for data_source_binding in data_source_bindings: exist_page_ids.append(data_source_info["notion_page_id"])
source_info = data_source_binding.source_info # get all authorized pages
pages = source_info["pages"] data_source_bindings = DataSourceOauthBinding.query.filter_by(
# Filter out already bound pages tenant_id=current_user.current_tenant_id, provider="notion", disabled=False
for page in pages: ).all()
if page["page_id"] in exist_page_ids: if not data_source_bindings:
page["is_bound"] = True return {"notion_info": []}, 200
else: pre_import_info_list = []
page["is_bound"] = False for data_source_binding in data_source_bindings:
pre_import_info = { source_info = data_source_binding.source_info
"workspace_name": source_info["workspace_name"], pages = source_info["pages"]
"workspace_icon": source_info["workspace_icon"], # Filter out already bound pages
"workspace_id": source_info["workspace_id"], for page in pages:
"pages": pages, if page["page_id"] in exist_page_ids:
} page["is_bound"] = True
pre_import_info_list.append(pre_import_info) else:
return {"notion_info": pre_import_info_list}, 200 page["is_bound"] = False
pre_import_info = {
"workspace_name": source_info["workspace_name"],
"workspace_icon": source_info["workspace_icon"],
"workspace_id": source_info["workspace_id"],
"pages": pages,
}
pre_import_info_list.append(pre_import_info)
return {"notion_info": pre_import_info_list}, 200
class DataSourceNotionApi(Resource): class DataSourceNotionApi(Resource):
@ -169,17 +158,14 @@ class DataSourceNotionApi(Resource):
def get(self, workspace_id, page_id, page_type): def get(self, workspace_id, page_id, page_type):
workspace_id = str(workspace_id) workspace_id = str(workspace_id)
page_id = str(page_id) page_id = str(page_id)
with Session(db.engine) as session: data_source_binding = DataSourceOauthBinding.query.filter(
data_source_binding = session.execute( db.and_(
select(DataSourceOauthBinding).filter( DataSourceOauthBinding.tenant_id == current_user.current_tenant_id,
db.and_( DataSourceOauthBinding.provider == "notion",
DataSourceOauthBinding.tenant_id == current_user.current_tenant_id, DataSourceOauthBinding.disabled == False,
DataSourceOauthBinding.provider == "notion", DataSourceOauthBinding.source_info["workspace_id"] == f'"{workspace_id}"',
DataSourceOauthBinding.disabled == False, )
DataSourceOauthBinding.source_info["workspace_id"] == f'"{workspace_id}"', ).first()
)
)
).scalar_one_or_none()
if not data_source_binding: if not data_source_binding:
raise NotFound("Data source binding not found.") raise NotFound("Data source binding not found.")

View File

@ -14,7 +14,6 @@ from controllers.console.wraps import account_initialization_required, enterpris
from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.indexing_runner import IndexingRunner from core.indexing_runner import IndexingRunner
from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.model_entities import ModelType
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager from core.provider_manager import ProviderManager
from core.rag.datasource.vdb.vector_type import VectorType from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.extractor.entity.extract_setting import ExtractSetting
@ -73,9 +72,7 @@ class DatasetListApi(Resource):
data = marshal(datasets, dataset_detail_fields) data = marshal(datasets, dataset_detail_fields)
for item in data: for item in data:
# convert embedding_model_provider to plugin standard format
if item["indexing_technique"] == "high_quality": if item["indexing_technique"] == "high_quality":
item["embedding_model_provider"] = str(ModelProviderID(item["embedding_model_provider"]))
item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}" item_model = f"{item['embedding_model']}:{item['embedding_model_provider']}"
if item_model in model_names: if item_model in model_names:
item["embedding_available"] = True item["embedding_available"] = True

View File

@ -7,6 +7,7 @@ from flask import request
from flask_login import current_user # type: ignore from flask_login import current_user # type: ignore
from flask_restful import Resource, fields, marshal, marshal_with, reqparse # type: ignore from flask_restful import Resource, fields, marshal, marshal_with, reqparse # type: ignore
from sqlalchemy import asc, desc from sqlalchemy import asc, desc
from transformers.hf_argparser import string_to_bool # type: ignore
from werkzeug.exceptions import Forbidden, NotFound from werkzeug.exceptions import Forbidden, NotFound
import services import services
@ -39,7 +40,6 @@ from core.indexing_runner import IndexingRunner
from core.model_manager import ModelManager from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.plugin.manager.exc import PluginDaemonClientSideError
from core.rag.extractor.entity.extract_setting import ExtractSetting from core.rag.extractor.entity.extract_setting import ExtractSetting
from extensions.ext_database import db from extensions.ext_database import db
from extensions.ext_redis import redis_client from extensions.ext_redis import redis_client
@ -150,20 +150,8 @@ class DatasetDocumentListApi(Resource):
sort = request.args.get("sort", default="-created_at", type=str) sort = request.args.get("sort", default="-created_at", type=str)
# "yes", "true", "t", "y", "1" convert to True, while others convert to False. # "yes", "true", "t", "y", "1" convert to True, while others convert to False.
try: try:
fetch_val = request.args.get("fetch", default="false") fetch = string_to_bool(request.args.get("fetch", default="false"))
if isinstance(fetch_val, bool): except (ArgumentTypeError, ValueError, Exception) as e:
fetch = fetch_val
else:
if fetch_val.lower() in ("yes", "true", "t", "y", "1"):
fetch = True
elif fetch_val.lower() in ("no", "false", "f", "n", "0"):
fetch = False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {fetch_val} but expected one of yes/no, true/false, t/f, y/n, 1/0 "
f"(case insensitive)."
)
except (ArgumentTypeError, ValueError, Exception):
fetch = False fetch = False
dataset = DatasetService.get_dataset(dataset_id) dataset = DatasetService.get_dataset(dataset_id)
if not dataset: if not dataset:
@ -322,7 +310,7 @@ class DatasetInitApi(Resource):
@cloud_edition_billing_resource_check("vector_space") @cloud_edition_billing_resource_check("vector_space")
def post(self): def post(self):
# The role of the current user in the ta table must be admin, owner, or editor # The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
@ -441,8 +429,6 @@ class DocumentIndexingEstimateApi(DocumentResource):
) )
except ProviderTokenNotInitError as ex: except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description) raise ProviderNotInitializeError(ex.description)
except PluginDaemonClientSideError as ex:
raise ProviderNotInitializeError(ex.description)
except Exception as e: except Exception as e:
raise IndexingEstimateError(str(e)) raise IndexingEstimateError(str(e))
@ -543,8 +529,6 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
) )
except ProviderTokenNotInitError as ex: except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description) raise ProviderNotInitializeError(ex.description)
except PluginDaemonClientSideError as ex:
raise ProviderNotInitializeError(ex.description)
except Exception as e: except Exception as e:
raise IndexingEstimateError(str(e)) raise IndexingEstimateError(str(e))
@ -617,7 +601,7 @@ class DocumentDetailApi(DocumentResource):
raise InvalidMetadataError(f"Invalid metadata value: {metadata}") raise InvalidMetadataError(f"Invalid metadata value: {metadata}")
if metadata == "only": if metadata == "only":
response = {"id": document.id, "doc_type": document.doc_type, "doc_metadata": document.doc_metadata_details} response = {"id": document.id, "doc_type": document.doc_type, "doc_metadata": document.doc_metadata}
elif metadata == "without": elif metadata == "without":
dataset_process_rules = DatasetService.get_process_rules(dataset_id) dataset_process_rules = DatasetService.get_process_rules(dataset_id)
document_process_rules = document.dataset_process_rule.to_dict() document_process_rules = document.dataset_process_rule.to_dict()
@ -678,7 +662,7 @@ class DocumentDetailApi(DocumentResource):
"disabled_by": document.disabled_by, "disabled_by": document.disabled_by,
"archived": document.archived, "archived": document.archived,
"doc_type": document.doc_type, "doc_type": document.doc_type,
"doc_metadata": document.doc_metadata_details, "doc_metadata": document.doc_metadata,
"segment_count": document.segment_count, "segment_count": document.segment_count,
"average_segment_length": document.average_segment_length, "average_segment_length": document.average_segment_length,
"hit_count": document.hit_count, "hit_count": document.hit_count,
@ -700,7 +684,7 @@ class DocumentProcessingApi(DocumentResource):
document = self.get_document(dataset_id, document_id) document = self.get_document(dataset_id, document_id)
# The role of the current user in the ta table must be admin, owner, or editor # The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
if action == "pause": if action == "pause":
@ -764,7 +748,7 @@ class DocumentMetadataApi(DocumentResource):
doc_metadata = req_data.get("doc_metadata") doc_metadata = req_data.get("doc_metadata")
# The role of the current user in the ta table must be admin, owner, or editor # The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
if doc_type is None or doc_metadata is None: if doc_type is None or doc_metadata is None:

View File

@ -122,7 +122,7 @@ class DatasetDocumentSegmentListApi(Resource):
segment_ids = request.args.getlist("segment_id") segment_ids = request.args.getlist("segment_id")
# The role of the current user in the ta table must be admin or owner # The role of the current user in the ta table must be admin or owner
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
try: try:
DatasetService.check_dataset_permission(dataset, current_user) DatasetService.check_dataset_permission(dataset, current_user)
@ -149,7 +149,7 @@ class DatasetDocumentSegmentApi(Resource):
# check user's model setting # check user's model setting
DatasetService.check_dataset_model_setting(dataset) DatasetService.check_dataset_model_setting(dataset)
# The role of the current user in the ta table must be admin, owner, or editor # The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
try: try:
@ -202,7 +202,7 @@ class DatasetDocumentSegmentAddApi(Resource):
document = DocumentService.get_document(dataset_id, document_id) document = DocumentService.get_document(dataset_id, document_id)
if not document: if not document:
raise NotFound("Document not found.") raise NotFound("Document not found.")
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
# check embedding model setting # check embedding model setting
if dataset.indexing_technique == "high_quality": if dataset.indexing_technique == "high_quality":
@ -277,7 +277,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
if not segment: if not segment:
raise NotFound("Segment not found.") raise NotFound("Segment not found.")
# The role of the current user in the ta table must be admin, owner, or editor # The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
try: try:
DatasetService.check_dataset_permission(dataset, current_user) DatasetService.check_dataset_permission(dataset, current_user)
@ -320,7 +320,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
if not segment: if not segment:
raise NotFound("Segment not found.") raise NotFound("Segment not found.")
# The role of the current user in the ta table must be admin or owner # The role of the current user in the ta table must be admin or owner
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
try: try:
DatasetService.check_dataset_permission(dataset, current_user) DatasetService.check_dataset_permission(dataset, current_user)
@ -420,7 +420,7 @@ class ChildChunkAddApi(Resource):
).first() ).first()
if not segment: if not segment:
raise NotFound("Segment not found.") raise NotFound("Segment not found.")
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
# check embedding model setting # check embedding model setting
if dataset.indexing_technique == "high_quality": if dataset.indexing_technique == "high_quality":
@ -520,7 +520,7 @@ class ChildChunkAddApi(Resource):
if not segment: if not segment:
raise NotFound("Segment not found.") raise NotFound("Segment not found.")
# The role of the current user in the ta table must be admin, owner, or editor # The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
try: try:
DatasetService.check_dataset_permission(dataset, current_user) DatasetService.check_dataset_permission(dataset, current_user)
@ -570,7 +570,7 @@ class ChildChunkUpdateApi(Resource):
if not child_chunk: if not child_chunk:
raise NotFound("Child chunk not found.") raise NotFound("Child chunk not found.")
# The role of the current user in the ta table must be admin or owner # The role of the current user in the ta table must be admin or owner
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
try: try:
DatasetService.check_dataset_permission(dataset, current_user) DatasetService.check_dataset_permission(dataset, current_user)
@ -614,7 +614,7 @@ class ChildChunkUpdateApi(Resource):
if not child_chunk: if not child_chunk:
raise NotFound("Child chunk not found.") raise NotFound("Child chunk not found.")
# The role of the current user in the ta table must be admin or owner # The role of the current user in the ta table must be admin or owner
if not current_user.is_editor: if not current_user.is_dataset_editor:
raise Forbidden() raise Forbidden()
try: try:
DatasetService.check_dataset_permission(dataset, current_user) DatasetService.check_dataset_permission(dataset, current_user)

View File

@ -1,143 +0,0 @@
from flask_login import current_user # type: ignore # type: ignore
from flask_restful import Resource, marshal_with, reqparse # type: ignore
from werkzeug.exceptions import NotFound
from controllers.console import api
from controllers.console.wraps import account_initialization_required, enterprise_license_required, setup_required
from fields.dataset_fields import dataset_metadata_fields
from libs.login import login_required
from services.dataset_service import DatasetService
from services.entities.knowledge_entities.knowledge_entities import (
MetadataArgs,
MetadataOperationData,
)
from services.metadata_service import MetadataService
def _validate_name(name):
if not name or len(name) < 1 or len(name) > 40:
raise ValueError("Name must be between 1 to 40 characters.")
return name
def _validate_description_length(description):
if len(description) > 400:
raise ValueError("Description cannot exceed 400 characters.")
return description
class DatasetListApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
@marshal_with(dataset_metadata_fields)
def post(self, dataset_id):
parser = reqparse.RequestParser()
parser.add_argument("type", type=str, required=True, nullable=True, location="json")
parser.add_argument("name", type=str, required=True, nullable=True, location="json")
args = parser.parse_args()
metadata_args = MetadataArgs(**args)
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
metadata = MetadataService.create_metadata(dataset_id_str, metadata_args)
return metadata, 201
class DatasetMetadataApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def patch(self, dataset_id, metadata_id):
parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True, nullable=True, location="json")
args = parser.parse_args()
dataset_id_str = str(dataset_id)
metadata_id_str = str(metadata_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
metadata = MetadataService.update_metadata_name(dataset_id_str, metadata_id_str, args.get("name"))
return metadata, 200
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def delete(self, dataset_id, metadata_id):
dataset_id_str = str(dataset_id)
metadata_id_str = str(metadata_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
MetadataService.delete_metadata(dataset_id_str, metadata_id_str)
return 200
class DatasetMetadataBuiltInFieldApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def get(self):
built_in_fields = MetadataService.get_built_in_fields()
return built_in_fields, 200
class DatasetMetadataBuiltInFieldActionApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def post(self, dataset_id, action):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
if action == "enable":
MetadataService.enable_built_in_field(dataset)
elif action == "disable":
MetadataService.disable_built_in_field(dataset)
return 200
class DocumentMetadataApi(Resource):
@setup_required
@login_required
@account_initialization_required
@enterprise_license_required
def post(self, dataset_id):
dataset_id_str = str(dataset_id)
dataset = DatasetService.get_dataset(dataset_id_str)
if dataset is None:
raise NotFound("Dataset not found.")
DatasetService.check_dataset_permission(dataset, current_user)
parser = reqparse.RequestParser()
parser.add_argument("operation_data", type=list, required=True, nullable=True, location="json")
args = parser.parse_args()
metadata_args = MetadataOperationData(**args)
MetadataService.update_documents_metadata(dataset, metadata_args)
return 200
api.add_resource(DatasetListApi, "/datasets/<uuid:dataset_id>/metadata")
api.add_resource(DatasetMetadataApi, "/datasets/<uuid:dataset_id>/metadata/<uuid:metadata_id>")
api.add_resource(DatasetMetadataBuiltInFieldApi, "/datasets/metadata/built-in")
api.add_resource(DatasetMetadataBuiltInFieldActionApi, "/datasets/metadata/built-in/<string:action>")
api.add_resource(DocumentMetadataApi, "/datasets/<uuid:dataset_id>/documents/metadata")

View File

@ -2,11 +2,8 @@ import os
from flask import session from flask import session
from flask_restful import Resource, reqparse # type: ignore from flask_restful import Resource, reqparse # type: ignore
from sqlalchemy import select
from sqlalchemy.orm import Session
from configs import dify_config from configs import dify_config
from extensions.ext_database import db
from libs.helper import StrLen from libs.helper import StrLen
from models.model import DifySetup from models.model import DifySetup
from services.account_service import TenantService from services.account_service import TenantService
@ -45,11 +42,7 @@ class InitValidateAPI(Resource):
def get_init_validate_status(): def get_init_validate_status():
if dify_config.EDITION == "SELF_HOSTED": if dify_config.EDITION == "SELF_HOSTED":
if os.environ.get("INIT_PASSWORD"): if os.environ.get("INIT_PASSWORD"):
if session.get("is_init_validated"): return session.get("is_init_validated") or DifySetup.query.first()
return True
with Session(db.engine) as db_session:
return db_session.execute(select(DifySetup)).scalar_one_or_none()
return True return True

View File

@ -4,7 +4,7 @@ from flask_restful import Resource, reqparse # type: ignore
from configs import dify_config from configs import dify_config
from libs.helper import StrLen, email, extract_remote_ip from libs.helper import StrLen, email, extract_remote_ip
from libs.password import valid_password from libs.password import valid_password
from models.model import DifySetup, db from models.model import DifySetup
from services.account_service import RegisterService, TenantService from services.account_service import RegisterService, TenantService
from . import api from . import api
@ -52,9 +52,8 @@ class SetupApi(Resource):
def get_setup_status(): def get_setup_status():
if dify_config.EDITION == "SELF_HOSTED": if dify_config.EDITION == "SELF_HOSTED":
return db.session.query(DifySetup).first() return DifySetup.query.first()
else: return True
return True
api.add_resource(SetupApi, "/setup") api.add_resource(SetupApi, "/setup")

View File

@ -1,56 +0,0 @@
from functools import wraps
from flask_login import current_user # type: ignore
from sqlalchemy.orm import Session
from werkzeug.exceptions import Forbidden
from extensions.ext_database import db
from models.account import TenantPluginPermission
def plugin_permission_required(
install_required: bool = False,
debug_required: bool = False,
):
def interceptor(view):
@wraps(view)
def decorated(*args, **kwargs):
user = current_user
tenant_id = user.current_tenant_id
with Session(db.engine) as session:
permission = (
session.query(TenantPluginPermission)
.filter(
TenantPluginPermission.tenant_id == tenant_id,
)
.first()
)
if not permission:
# no permission set, allow access for everyone
return view(*args, **kwargs)
if install_required:
if permission.install_permission == TenantPluginPermission.InstallPermission.NOBODY:
raise Forbidden()
if permission.install_permission == TenantPluginPermission.InstallPermission.ADMINS:
if not user.is_admin_or_owner:
raise Forbidden()
if permission.install_permission == TenantPluginPermission.InstallPermission.EVERYONE:
pass
if debug_required:
if permission.debug_permission == TenantPluginPermission.DebugPermission.NOBODY:
raise Forbidden()
if permission.debug_permission == TenantPluginPermission.DebugPermission.ADMINS:
if not user.is_admin_or_owner:
raise Forbidden()
if permission.debug_permission == TenantPluginPermission.DebugPermission.EVERYONE:
pass
return view(*args, **kwargs)
return decorated
return interceptor

View File

@ -1,36 +0,0 @@
from flask_login import current_user # type: ignore
from flask_restful import Resource # type: ignore
from controllers.console import api
from controllers.console.wraps import account_initialization_required, setup_required
from core.model_runtime.utils.encoders import jsonable_encoder
from libs.login import login_required
from services.agent_service import AgentService
class AgentProviderListApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self):
user = current_user
user_id = user.id
tenant_id = user.current_tenant_id
return jsonable_encoder(AgentService.list_agent_providers(user_id, tenant_id))
class AgentProviderApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider_name: str):
user = current_user
user_id = user.id
tenant_id = user.current_tenant_id
return jsonable_encoder(AgentService.get_agent_provider(user_id, tenant_id, provider_name))
api.add_resource(AgentProviderListApi, "/workspaces/current/agent-providers")
api.add_resource(AgentProviderApi, "/workspaces/current/agent-provider/<path:provider_name>")

View File

@ -1,205 +0,0 @@
from flask_login import current_user # type: ignore
from flask_restful import Resource, reqparse # type: ignore
from werkzeug.exceptions import Forbidden
from controllers.console import api
from controllers.console.wraps import account_initialization_required, setup_required
from core.model_runtime.utils.encoders import jsonable_encoder
from libs.login import login_required
from services.plugin.endpoint_service import EndpointService
class EndpointCreateApi(Resource):
    """Creates a plugin endpoint in the current workspace (admin/owner only)."""

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        account = current_user
        if not account.is_admin_or_owner:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument("plugin_unique_identifier", type=str, required=True)
        parser.add_argument("settings", type=dict, required=True)
        parser.add_argument("name", type=str, required=True)
        args = parser.parse_args()

        created = EndpointService.create_endpoint(
            tenant_id=account.current_tenant_id,
            user_id=account.id,
            plugin_unique_identifier=args["plugin_unique_identifier"],
            name=args["name"],
            settings=args["settings"],
        )
        return {"success": created}
class EndpointListApi(Resource):
    """Returns a paginated list of all endpoints in the current workspace."""

    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        account = current_user
        parser = reqparse.RequestParser()
        parser.add_argument("page", type=int, required=True, location="args")
        parser.add_argument("page_size", type=int, required=True, location="args")
        args = parser.parse_args()

        endpoints = EndpointService.list_endpoints(
            tenant_id=account.current_tenant_id,
            user_id=account.id,
            page=args["page"],
            page_size=args["page_size"],
        )
        return jsonable_encoder({"endpoints": endpoints})
class EndpointListForSinglePluginApi(Resource):
    """Returns a paginated list of endpoints belonging to one plugin."""

    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        account = current_user
        parser = reqparse.RequestParser()
        parser.add_argument("page", type=int, required=True, location="args")
        parser.add_argument("page_size", type=int, required=True, location="args")
        parser.add_argument("plugin_id", type=str, required=True, location="args")
        args = parser.parse_args()

        endpoints = EndpointService.list_endpoints_for_single_plugin(
            tenant_id=account.current_tenant_id,
            user_id=account.id,
            plugin_id=args["plugin_id"],
            page=args["page"],
            page_size=args["page_size"],
        )
        return jsonable_encoder({"endpoints": endpoints})
class EndpointDeleteApi(Resource):
    """Deletes an endpoint by id (admin/owner only).

    The authorization check runs before payload parsing so that unauthorized
    callers receive 403 regardless of payload validity, matching the order
    used by EndpointCreateApi.
    """

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        user = current_user
        # Fail fast: authorize before spending work on parsing.
        if not user.is_admin_or_owner:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument("endpoint_id", type=str, required=True)
        args = parser.parse_args()

        return {
            "success": EndpointService.delete_endpoint(
                tenant_id=user.current_tenant_id, user_id=user.id, endpoint_id=args["endpoint_id"]
            )
        }
class EndpointUpdateApi(Resource):
    """Updates an endpoint's name and settings (admin/owner only).

    The authorization check runs before payload parsing so that unauthorized
    callers receive 403 regardless of payload validity, matching the order
    used by EndpointCreateApi.
    """

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        user = current_user
        # Fail fast: authorize before spending work on parsing.
        if not user.is_admin_or_owner:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument("endpoint_id", type=str, required=True)
        parser.add_argument("settings", type=dict, required=True)
        parser.add_argument("name", type=str, required=True)
        args = parser.parse_args()

        return {
            "success": EndpointService.update_endpoint(
                tenant_id=user.current_tenant_id,
                user_id=user.id,
                endpoint_id=args["endpoint_id"],
                name=args["name"],
                settings=args["settings"],
            )
        }
class EndpointEnableApi(Resource):
    """Enables a disabled endpoint (admin/owner only).

    Authorization is checked before payload parsing so that unauthorized
    callers receive 403 regardless of payload validity, matching
    EndpointCreateApi.
    """

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        user = current_user
        # Fail fast: authorize before spending work on parsing.
        if not user.is_admin_or_owner:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument("endpoint_id", type=str, required=True)
        args = parser.parse_args()

        return {
            "success": EndpointService.enable_endpoint(
                tenant_id=user.current_tenant_id, user_id=user.id, endpoint_id=args["endpoint_id"]
            )
        }
class EndpointDisableApi(Resource):
    """Disables an active endpoint (admin/owner only).

    Authorization is checked before payload parsing so that unauthorized
    callers receive 403 regardless of payload validity, matching
    EndpointCreateApi.
    """

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        user = current_user
        # Fail fast: authorize before spending work on parsing.
        if not user.is_admin_or_owner:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument("endpoint_id", type=str, required=True)
        args = parser.parse_args()

        return {
            "success": EndpointService.disable_endpoint(
                tenant_id=user.current_tenant_id, user_id=user.id, endpoint_id=args["endpoint_id"]
            )
        }
# Route registrations for plugin endpoint management (all POST-based RPC-style
# routes except the two list endpoints, which are GET).
api.add_resource(EndpointCreateApi, "/workspaces/current/endpoints/create")
api.add_resource(EndpointListApi, "/workspaces/current/endpoints/list")
api.add_resource(EndpointListForSinglePluginApi, "/workspaces/current/endpoints/list/plugin")
api.add_resource(EndpointDeleteApi, "/workspaces/current/endpoints/delete")
api.add_resource(EndpointUpdateApi, "/workspaces/current/endpoints/update")
api.add_resource(EndpointEnableApi, "/workspaces/current/endpoints/enable")
api.add_resource(EndpointDisableApi, "/workspaces/current/endpoints/disable")

View File

@ -112,10 +112,10 @@ class LoadBalancingConfigCredentialsValidateApi(Resource):
# Load Balancing Config # Load Balancing Config
api.add_resource( api.add_resource(
LoadBalancingCredentialsValidateApi, LoadBalancingCredentialsValidateApi,
"/workspaces/current/model-providers/<path:provider>/models/load-balancing-configs/credentials-validate", "/workspaces/current/model-providers/<string:provider>/models/load-balancing-configs/credentials-validate",
) )
api.add_resource( api.add_resource(
LoadBalancingConfigCredentialsValidateApi, LoadBalancingConfigCredentialsValidateApi,
"/workspaces/current/model-providers/<path:provider>/models/load-balancing-configs/<string:config_id>/credentials-validate", "/workspaces/current/model-providers/<string:provider>/models/load-balancing-configs/<string:config_id>/credentials-validate",
) )

View File

@ -79,7 +79,7 @@ class ModelProviderValidateApi(Resource):
response = {"result": "success" if result else "error"} response = {"result": "success" if result else "error"}
if not result: if not result:
response["error"] = error or "Unknown error" response["error"] = error
return response return response
@ -125,10 +125,9 @@ class ModelProviderIconApi(Resource):
Get model provider icon Get model provider icon
""" """
def get(self, tenant_id: str, provider: str, icon_type: str, lang: str): def get(self, provider: str, icon_type: str, lang: str):
model_provider_service = ModelProviderService() model_provider_service = ModelProviderService()
icon, mimetype = model_provider_service.get_model_provider_icon( icon, mimetype = model_provider_service.get_model_provider_icon(
tenant_id=tenant_id,
provider=provider, provider=provider,
icon_type=icon_type, icon_type=icon_type,
lang=lang, lang=lang,
@ -184,17 +183,53 @@ class ModelProviderPaymentCheckoutUrlApi(Resource):
return data return data
class ModelProviderFreeQuotaSubmitApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, provider: str):
model_provider_service = ModelProviderService()
result = model_provider_service.free_quota_submit(tenant_id=current_user.current_tenant_id, provider=provider)
return result
class ModelProviderFreeQuotaQualificationVerifyApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider: str):
parser = reqparse.RequestParser()
parser.add_argument("token", type=str, required=False, nullable=True, location="args")
args = parser.parse_args()
model_provider_service = ModelProviderService()
result = model_provider_service.free_quota_qualification_verify(
tenant_id=current_user.current_tenant_id, provider=provider, token=args["token"]
)
return result
api.add_resource(ModelProviderListApi, "/workspaces/current/model-providers") api.add_resource(ModelProviderListApi, "/workspaces/current/model-providers")
api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-providers/<path:provider>/credentials") api.add_resource(ModelProviderCredentialApi, "/workspaces/current/model-providers/<string:provider>/credentials")
api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers/<path:provider>/credentials/validate") api.add_resource(ModelProviderValidateApi, "/workspaces/current/model-providers/<string:provider>/credentials/validate")
api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/<path:provider>") api.add_resource(ModelProviderApi, "/workspaces/current/model-providers/<string:provider>")
api.add_resource(
ModelProviderIconApi, "/workspaces/current/model-providers/<string:provider>/<string:icon_type>/<string:lang>"
)
api.add_resource( api.add_resource(
PreferredProviderTypeUpdateApi, "/workspaces/current/model-providers/<path:provider>/preferred-provider-type" PreferredProviderTypeUpdateApi, "/workspaces/current/model-providers/<string:provider>/preferred-provider-type"
) )
api.add_resource(ModelProviderPaymentCheckoutUrlApi, "/workspaces/current/model-providers/<path:provider>/checkout-url")
api.add_resource( api.add_resource(
ModelProviderIconApi, ModelProviderPaymentCheckoutUrlApi, "/workspaces/current/model-providers/<string:provider>/checkout-url"
"/workspaces/<string:tenant_id>/model-providers/<path:provider>/<string:icon_type>/<string:lang>", )
api.add_resource(
ModelProviderFreeQuotaSubmitApi, "/workspaces/current/model-providers/<string:provider>/free-quota-submit"
)
api.add_resource(
ModelProviderFreeQuotaQualificationVerifyApi,
"/workspaces/current/model-providers/<string:provider>/free-quota-qualification-verify",
) )

View File

@ -325,7 +325,7 @@ class ModelProviderModelValidateApi(Resource):
response = {"result": "success" if result else "error"} response = {"result": "success" if result else "error"}
if not result: if not result:
response["error"] = error or "" response["error"] = error
return response return response
@ -362,26 +362,26 @@ class ModelProviderAvailableModelApi(Resource):
return jsonable_encoder({"data": models}) return jsonable_encoder({"data": models})
api.add_resource(ModelProviderModelApi, "/workspaces/current/model-providers/<path:provider>/models") api.add_resource(ModelProviderModelApi, "/workspaces/current/model-providers/<string:provider>/models")
api.add_resource( api.add_resource(
ModelProviderModelEnableApi, ModelProviderModelEnableApi,
"/workspaces/current/model-providers/<path:provider>/models/enable", "/workspaces/current/model-providers/<string:provider>/models/enable",
endpoint="model-provider-model-enable", endpoint="model-provider-model-enable",
) )
api.add_resource( api.add_resource(
ModelProviderModelDisableApi, ModelProviderModelDisableApi,
"/workspaces/current/model-providers/<path:provider>/models/disable", "/workspaces/current/model-providers/<string:provider>/models/disable",
endpoint="model-provider-model-disable", endpoint="model-provider-model-disable",
) )
api.add_resource( api.add_resource(
ModelProviderModelCredentialApi, "/workspaces/current/model-providers/<path:provider>/models/credentials" ModelProviderModelCredentialApi, "/workspaces/current/model-providers/<string:provider>/models/credentials"
) )
api.add_resource( api.add_resource(
ModelProviderModelValidateApi, "/workspaces/current/model-providers/<path:provider>/models/credentials/validate" ModelProviderModelValidateApi, "/workspaces/current/model-providers/<string:provider>/models/credentials/validate"
) )
api.add_resource( api.add_resource(
ModelProviderModelParameterRuleApi, "/workspaces/current/model-providers/<path:provider>/models/parameter-rules" ModelProviderModelParameterRuleApi, "/workspaces/current/model-providers/<string:provider>/models/parameter-rules"
) )
api.add_resource(ModelProviderAvailableModelApi, "/workspaces/current/models/model-types/<string:model_type>") api.add_resource(ModelProviderAvailableModelApi, "/workspaces/current/models/model-types/<string:model_type>")
api.add_resource(DefaultModelApi, "/workspaces/current/default-model") api.add_resource(DefaultModelApi, "/workspaces/current/default-model")

View File

@ -1,475 +0,0 @@
import io
from flask import request, send_file
from flask_login import current_user # type: ignore
from flask_restful import Resource, reqparse # type: ignore
from werkzeug.exceptions import Forbidden
from configs import dify_config
from controllers.console import api
from controllers.console.workspace import plugin_permission_required
from controllers.console.wraps import account_initialization_required, setup_required
from core.model_runtime.utils.encoders import jsonable_encoder
from core.plugin.manager.exc import PluginDaemonClientSideError
from libs.login import login_required
from models.account import TenantPluginPermission
from services.plugin.plugin_permission_service import PluginPermissionService
from services.plugin.plugin_service import PluginService
class PluginDebuggingKeyApi(Resource):
    """Returns the remote-debugging key plus the plugin daemon host/port."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def get(self):
        workspace_id = current_user.current_tenant_id
        try:
            debugging_key = PluginService.get_debugging_key(workspace_id)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return {
            "key": debugging_key,
            "host": dify_config.PLUGIN_REMOTE_INSTALL_HOST,
            "port": dify_config.PLUGIN_REMOTE_INSTALL_PORT,
        }
class PluginListApi(Resource):
    """Lists every plugin installed in the current workspace."""

    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        workspace_id = current_user.current_tenant_id
        try:
            installed = PluginService.list(workspace_id)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder({"plugins": installed})
class PluginListInstallationsFromIdsApi(Resource):
    """Looks up installation records for a given list of plugin ids."""

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        parser.add_argument("plugin_ids", type=list, required=True, location="json")
        args = parser.parse_args()

        try:
            installations = PluginService.list_installations_from_ids(workspace_id, args["plugin_ids"])
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder({"plugins": installations})
class PluginIconApi(Resource):
    """Serves a plugin asset (icon) for a tenant, with client-side caching."""

    @setup_required
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument("tenant_id", type=str, required=True, location="args")
        parser.add_argument("filename", type=str, required=True, location="args")
        args = parser.parse_args()

        try:
            icon_bytes, mimetype = PluginService.get_asset(args["tenant_id"], args["filename"])
        except PluginDaemonClientSideError as e:
            raise ValueError(e)

        return send_file(
            io.BytesIO(icon_bytes),
            mimetype=mimetype,
            max_age=dify_config.TOOL_ICON_CACHE_MAX_AGE,
        )
class PluginUploadFromPkgApi(Resource):
    """Uploads a plugin package file for the current workspace.

    Raises:
        ValueError: if the package exceeds PLUGIN_MAX_PACKAGE_SIZE or the
            plugin daemon rejects it.
    """

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(install_required=True)
    def post(self):
        tenant_id = current_user.current_tenant_id
        file = request.files["pkg"]

        # `content_length` is client-supplied and Werkzeug reports 0 when the
        # client omits it, which would bypass the limit entirely. Reject early
        # on the declared size when available, then enforce on the bytes
        # actually read.
        if file.content_length and file.content_length > dify_config.PLUGIN_MAX_PACKAGE_SIZE:
            raise ValueError("File size exceeds the maximum allowed size")

        content = file.read()
        if len(content) > dify_config.PLUGIN_MAX_PACKAGE_SIZE:
            raise ValueError("File size exceeds the maximum allowed size")

        try:
            response = PluginService.upload_pkg(tenant_id, content)
        except PluginDaemonClientSideError as e:
            raise ValueError(e) from e

        return jsonable_encoder(response)
class PluginUploadFromGithubApi(Resource):
    """Uploads a plugin package fetched from a GitHub release asset."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(install_required=True)
    def post(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        for field in ("repo", "version", "package"):
            parser.add_argument(field, type=str, required=True, location="json")
        args = parser.parse_args()

        try:
            response = PluginService.upload_pkg_from_github(
                workspace_id, args["repo"], args["version"], args["package"]
            )
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder(response)
class PluginUploadFromBundleApi(Resource):
    """Uploads a plugin bundle file for the current workspace.

    Raises:
        ValueError: if the bundle exceeds PLUGIN_MAX_BUNDLE_SIZE or the
            plugin daemon rejects it.
    """

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(install_required=True)
    def post(self):
        tenant_id = current_user.current_tenant_id
        file = request.files["bundle"]

        # `content_length` is client-supplied and Werkzeug reports 0 when the
        # client omits it, which would bypass the limit entirely. Reject early
        # on the declared size when available, then enforce on the bytes
        # actually read.
        if file.content_length and file.content_length > dify_config.PLUGIN_MAX_BUNDLE_SIZE:
            raise ValueError("File size exceeds the maximum allowed size")

        content = file.read()
        if len(content) > dify_config.PLUGIN_MAX_BUNDLE_SIZE:
            raise ValueError("File size exceeds the maximum allowed size")

        try:
            response = PluginService.upload_bundle(tenant_id, content)
        except PluginDaemonClientSideError as e:
            raise ValueError(e) from e

        return jsonable_encoder(response)
class PluginInstallFromPkgApi(Resource):
    """Installs previously uploaded local packages by their unique identifiers."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(install_required=True)
    def post(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        parser.add_argument("plugin_unique_identifiers", type=list, required=True, location="json")
        args = parser.parse_args()

        identifiers = args["plugin_unique_identifiers"]
        # every identifier in the list must be a plain string
        if not all(isinstance(identifier, str) for identifier in identifiers):
            raise ValueError("Invalid plugin unique identifier")

        try:
            response = PluginService.install_from_local_pkg(workspace_id, identifiers)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder(response)
class PluginInstallFromGithubApi(Resource):
    """Installs a plugin from a GitHub release asset."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(install_required=True)
    def post(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        for field in ("repo", "version", "package", "plugin_unique_identifier"):
            parser.add_argument(field, type=str, required=True, location="json")
        args = parser.parse_args()

        try:
            response = PluginService.install_from_github(
                workspace_id,
                args["plugin_unique_identifier"],
                args["repo"],
                args["version"],
                args["package"],
            )
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder(response)
class PluginInstallFromMarketplaceApi(Resource):
    """Installs marketplace packages by their unique identifiers."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(install_required=True)
    def post(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        parser.add_argument("plugin_unique_identifiers", type=list, required=True, location="json")
        args = parser.parse_args()

        identifiers = args["plugin_unique_identifiers"]
        # every identifier in the list must be a plain string
        if not all(isinstance(identifier, str) for identifier in identifiers):
            raise ValueError("Invalid plugin unique identifier")

        try:
            response = PluginService.install_from_marketplace_pkg(workspace_id, identifiers)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder(response)
class PluginFetchManifestApi(Resource):
    """Fetches and returns a plugin's manifest by unique identifier."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def get(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        parser.add_argument("plugin_unique_identifier", type=str, required=True, location="args")
        args = parser.parse_args()

        try:
            manifest = PluginService.fetch_plugin_manifest(workspace_id, args["plugin_unique_identifier"])
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder({"manifest": manifest.model_dump()})
class PluginFetchInstallTasksApi(Resource):
    """Returns a paginated list of plugin installation tasks."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def get(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        parser.add_argument("page", type=int, required=True, location="args")
        parser.add_argument("page_size", type=int, required=True, location="args")
        args = parser.parse_args()

        try:
            tasks = PluginService.fetch_install_tasks(workspace_id, args["page"], args["page_size"])
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder({"tasks": tasks})
class PluginFetchInstallTaskApi(Resource):
    """Returns a single installation task by its id."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def get(self, task_id: str):
        workspace_id = current_user.current_tenant_id
        try:
            task = PluginService.fetch_install_task(workspace_id, task_id)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder({"task": task})
class PluginDeleteInstallTaskApi(Resource):
    """Deletes an installation task by its id."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def post(self, task_id: str):
        workspace_id = current_user.current_tenant_id
        try:
            deleted = PluginService.delete_install_task(workspace_id, task_id)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return {"success": deleted}
class PluginDeleteAllInstallTaskItemsApi(Resource):
    """Clears every item from the workspace's installation task list."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def post(self):
        workspace_id = current_user.current_tenant_id
        try:
            cleared = PluginService.delete_all_install_task_items(workspace_id)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return {"success": cleared}
class PluginDeleteInstallTaskItemApi(Resource):
    """Removes a single item (by plugin identifier) from an installation task."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def post(self, task_id: str, identifier: str):
        workspace_id = current_user.current_tenant_id
        try:
            removed = PluginService.delete_install_task_item(workspace_id, task_id, identifier)
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return {"success": removed}
class PluginUpgradeFromMarketplaceApi(Resource):
    """Upgrades an installed plugin to a newer marketplace version."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def post(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        parser.add_argument("original_plugin_unique_identifier", type=str, required=True, location="json")
        parser.add_argument("new_plugin_unique_identifier", type=str, required=True, location="json")
        args = parser.parse_args()

        try:
            result = PluginService.upgrade_plugin_with_marketplace(
                workspace_id,
                args["original_plugin_unique_identifier"],
                args["new_plugin_unique_identifier"],
            )
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder(result)
class PluginUpgradeFromGithubApi(Resource):
    """Upgrades an installed plugin to a newer GitHub release version."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def post(self):
        workspace_id = current_user.current_tenant_id
        parser = reqparse.RequestParser()
        for field in (
            "original_plugin_unique_identifier",
            "new_plugin_unique_identifier",
            "repo",
            "version",
            "package",
        ):
            parser.add_argument(field, type=str, required=True, location="json")
        args = parser.parse_args()

        try:
            result = PluginService.upgrade_plugin_with_github(
                workspace_id,
                args["original_plugin_unique_identifier"],
                args["new_plugin_unique_identifier"],
                args["repo"],
                args["version"],
                args["package"],
            )
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return jsonable_encoder(result)
class PluginUninstallApi(Resource):
    """Uninstalls a plugin by its installation id."""

    @setup_required
    @login_required
    @account_initialization_required
    @plugin_permission_required(debug_required=True)
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("plugin_installation_id", type=str, required=True, location="json")
        args = parser.parse_args()

        workspace_id = current_user.current_tenant_id
        try:
            removed = PluginService.uninstall(workspace_id, args["plugin_installation_id"])
        except PluginDaemonClientSideError as e:
            raise ValueError(e)
        return {"success": removed}
class PluginChangePermissionApi(Resource):
    """Changes workspace-level plugin install/debug permissions (admin/owner only)."""

    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        account = current_user
        if not account.is_admin_or_owner:
            raise Forbidden()

        parser = reqparse.RequestParser()
        parser.add_argument("install_permission", type=str, required=True, location="json")
        parser.add_argument("debug_permission", type=str, required=True, location="json")
        args = parser.parse_args()

        changed = PluginPermissionService.change_permission(
            account.current_tenant_id,
            TenantPluginPermission.InstallPermission(args["install_permission"]),
            TenantPluginPermission.DebugPermission(args["debug_permission"]),
        )
        return {"success": changed}
class PluginFetchPermissionApi(Resource):
    """Returns the workspace's plugin permission settings.

    Falls back to EVERYONE for both install and debug when no permission
    record exists for the tenant.
    """

    @setup_required
    @login_required
    @account_initialization_required
    def get(self):
        permission = PluginPermissionService.get_permission(current_user.current_tenant_id)
        if permission:
            payload = {
                "install_permission": permission.install_permission,
                "debug_permission": permission.debug_permission,
            }
        else:
            payload = {
                "install_permission": TenantPluginPermission.InstallPermission.EVERYONE,
                "debug_permission": TenantPluginPermission.DebugPermission.EVERYONE,
            }
        return jsonable_encoder(payload)
# Route registrations for plugin management. All mutating operations are
# POST-based RPC-style routes; <path:identifier> allows slashes inside
# plugin unique identifiers.
api.add_resource(PluginDebuggingKeyApi, "/workspaces/current/plugin/debugging-key")
api.add_resource(PluginListApi, "/workspaces/current/plugin/list")
api.add_resource(PluginListInstallationsFromIdsApi, "/workspaces/current/plugin/list/installations/ids")
api.add_resource(PluginIconApi, "/workspaces/current/plugin/icon")
api.add_resource(PluginUploadFromPkgApi, "/workspaces/current/plugin/upload/pkg")
api.add_resource(PluginUploadFromGithubApi, "/workspaces/current/plugin/upload/github")
api.add_resource(PluginUploadFromBundleApi, "/workspaces/current/plugin/upload/bundle")
api.add_resource(PluginInstallFromPkgApi, "/workspaces/current/plugin/install/pkg")
api.add_resource(PluginInstallFromGithubApi, "/workspaces/current/plugin/install/github")
api.add_resource(PluginUpgradeFromMarketplaceApi, "/workspaces/current/plugin/upgrade/marketplace")
api.add_resource(PluginUpgradeFromGithubApi, "/workspaces/current/plugin/upgrade/github")
api.add_resource(PluginInstallFromMarketplaceApi, "/workspaces/current/plugin/install/marketplace")
api.add_resource(PluginFetchManifestApi, "/workspaces/current/plugin/fetch-manifest")
api.add_resource(PluginFetchInstallTasksApi, "/workspaces/current/plugin/tasks")
api.add_resource(PluginFetchInstallTaskApi, "/workspaces/current/plugin/tasks/<task_id>")
api.add_resource(PluginDeleteInstallTaskApi, "/workspaces/current/plugin/tasks/<task_id>/delete")
api.add_resource(PluginDeleteAllInstallTaskItemsApi, "/workspaces/current/plugin/tasks/delete_all")
api.add_resource(PluginDeleteInstallTaskItemApi, "/workspaces/current/plugin/tasks/<task_id>/delete/<path:identifier>")
api.add_resource(PluginUninstallApi, "/workspaces/current/plugin/uninstall")
api.add_resource(PluginChangePermissionApi, "/workspaces/current/plugin/permission/change")
api.add_resource(PluginFetchPermissionApi, "/workspaces/current/plugin/permission/fetch")

View File

@ -25,10 +25,8 @@ class ToolProviderListApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
req = reqparse.RequestParser() req = reqparse.RequestParser()
req.add_argument( req.add_argument(
@ -49,43 +47,28 @@ class ToolBuiltinProviderListToolsApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self, provider): def get(self, provider):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
tenant_id = user.current_tenant_id
return jsonable_encoder( return jsonable_encoder(
BuiltinToolManageService.list_builtin_tool_provider_tools( BuiltinToolManageService.list_builtin_tool_provider_tools(
user_id,
tenant_id, tenant_id,
provider, provider,
) )
) )
class ToolBuiltinProviderInfoApi(Resource):
@setup_required
@login_required
@account_initialization_required
def get(self, provider):
user = current_user
user_id = user.id
tenant_id = user.current_tenant_id
return jsonable_encoder(BuiltinToolManageService.get_builtin_tool_provider_info(user_id, tenant_id, provider))
class ToolBuiltinProviderDeleteApi(Resource): class ToolBuiltinProviderDeleteApi(Resource):
@setup_required @setup_required
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self, provider): def post(self, provider):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
return BuiltinToolManageService.delete_builtin_tool_provider( return BuiltinToolManageService.delete_builtin_tool_provider(
user_id, user_id,
@ -99,13 +82,11 @@ class ToolBuiltinProviderUpdateApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self, provider): def post(self, provider):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("credentials", type=dict, required=True, nullable=False, location="json") parser.add_argument("credentials", type=dict, required=True, nullable=False, location="json")
@ -150,13 +131,11 @@ class ToolApiProviderAddApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self): def post(self):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("credentials", type=dict, required=True, nullable=False, location="json") parser.add_argument("credentials", type=dict, required=True, nullable=False, location="json")
@ -189,11 +168,6 @@ class ToolApiProviderGetRemoteSchemaApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user
user_id = user.id
tenant_id = user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("url", type=str, required=True, nullable=False, location="args") parser.add_argument("url", type=str, required=True, nullable=False, location="args")
@ -201,8 +175,8 @@ class ToolApiProviderGetRemoteSchemaApi(Resource):
args = parser.parse_args() args = parser.parse_args()
return ApiToolManageService.get_api_tool_provider_remote_schema( return ApiToolManageService.get_api_tool_provider_remote_schema(
user_id, current_user.id,
tenant_id, current_user.current_tenant_id,
args["url"], args["url"],
) )
@ -212,10 +186,8 @@ class ToolApiProviderListToolsApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
@ -237,13 +209,11 @@ class ToolApiProviderUpdateApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self): def post(self):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("credentials", type=dict, required=True, nullable=False, location="json") parser.add_argument("credentials", type=dict, required=True, nullable=False, location="json")
@ -278,13 +248,11 @@ class ToolApiProviderDeleteApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self): def post(self):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
@ -304,10 +272,8 @@ class ToolApiProviderGetApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
@ -327,11 +293,7 @@ class ToolBuiltinProviderCredentialsSchemaApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self, provider): def get(self, provider):
user = current_user return BuiltinToolManageService.list_builtin_provider_credentials_schema(provider)
tenant_id = user.current_tenant_id
return BuiltinToolManageService.list_builtin_provider_credentials_schema(provider, tenant_id)
class ToolApiProviderSchemaApi(Resource): class ToolApiProviderSchemaApi(Resource):
@ -382,13 +344,11 @@ class ToolWorkflowProviderCreateApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self): def post(self):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
reqparser = reqparse.RequestParser() reqparser = reqparse.RequestParser()
reqparser.add_argument("workflow_app_id", type=uuid_value, required=True, nullable=False, location="json") reqparser.add_argument("workflow_app_id", type=uuid_value, required=True, nullable=False, location="json")
@ -421,13 +381,11 @@ class ToolWorkflowProviderUpdateApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self): def post(self):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
reqparser = reqparse.RequestParser() reqparser = reqparse.RequestParser()
reqparser.add_argument("workflow_tool_id", type=uuid_value, required=True, nullable=False, location="json") reqparser.add_argument("workflow_tool_id", type=uuid_value, required=True, nullable=False, location="json")
@ -463,13 +421,11 @@ class ToolWorkflowProviderDeleteApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def post(self): def post(self):
user = current_user if not current_user.is_admin_or_owner:
if not user.is_admin_or_owner:
raise Forbidden() raise Forbidden()
user_id = user.id user_id = current_user.id
tenant_id = user.current_tenant_id tenant_id = current_user.current_tenant_id
reqparser = reqparse.RequestParser() reqparser = reqparse.RequestParser()
reqparser.add_argument("workflow_tool_id", type=uuid_value, required=True, nullable=False, location="json") reqparser.add_argument("workflow_tool_id", type=uuid_value, required=True, nullable=False, location="json")
@ -488,10 +444,8 @@ class ToolWorkflowProviderGetApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("workflow_tool_id", type=uuid_value, required=False, nullable=True, location="args") parser.add_argument("workflow_tool_id", type=uuid_value, required=False, nullable=True, location="args")
@ -522,10 +476,8 @@ class ToolWorkflowProviderListToolApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("workflow_tool_id", type=uuid_value, required=True, nullable=False, location="args") parser.add_argument("workflow_tool_id", type=uuid_value, required=True, nullable=False, location="args")
@ -546,10 +498,8 @@ class ToolBuiltinListApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
return jsonable_encoder( return jsonable_encoder(
[ [
@ -567,10 +517,8 @@ class ToolApiListApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
return jsonable_encoder( return jsonable_encoder(
[ [
@ -588,10 +536,8 @@ class ToolWorkflowListApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
def get(self): def get(self):
user = current_user user_id = current_user.id
tenant_id = current_user.current_tenant_id
user_id = user.id
tenant_id = user.current_tenant_id
return jsonable_encoder( return jsonable_encoder(
[ [
@ -617,18 +563,16 @@ class ToolLabelsApi(Resource):
api.add_resource(ToolProviderListApi, "/workspaces/current/tool-providers") api.add_resource(ToolProviderListApi, "/workspaces/current/tool-providers")
# builtin tool provider # builtin tool provider
api.add_resource(ToolBuiltinProviderListToolsApi, "/workspaces/current/tool-provider/builtin/<path:provider>/tools") api.add_resource(ToolBuiltinProviderListToolsApi, "/workspaces/current/tool-provider/builtin/<provider>/tools")
api.add_resource(ToolBuiltinProviderInfoApi, "/workspaces/current/tool-provider/builtin/<path:provider>/info") api.add_resource(ToolBuiltinProviderDeleteApi, "/workspaces/current/tool-provider/builtin/<provider>/delete")
api.add_resource(ToolBuiltinProviderDeleteApi, "/workspaces/current/tool-provider/builtin/<path:provider>/delete") api.add_resource(ToolBuiltinProviderUpdateApi, "/workspaces/current/tool-provider/builtin/<provider>/update")
api.add_resource(ToolBuiltinProviderUpdateApi, "/workspaces/current/tool-provider/builtin/<path:provider>/update")
api.add_resource( api.add_resource(
ToolBuiltinProviderGetCredentialsApi, "/workspaces/current/tool-provider/builtin/<path:provider>/credentials" ToolBuiltinProviderGetCredentialsApi, "/workspaces/current/tool-provider/builtin/<provider>/credentials"
) )
api.add_resource( api.add_resource(
ToolBuiltinProviderCredentialsSchemaApi, ToolBuiltinProviderCredentialsSchemaApi, "/workspaces/current/tool-provider/builtin/<provider>/credentials_schema"
"/workspaces/current/tool-provider/builtin/<path:provider>/credentials_schema",
) )
api.add_resource(ToolBuiltinProviderIconApi, "/workspaces/current/tool-provider/builtin/<path:provider>/icon") api.add_resource(ToolBuiltinProviderIconApi, "/workspaces/current/tool-provider/builtin/<provider>/icon")
# api tool provider # api tool provider
api.add_resource(ToolApiProviderAddApi, "/workspaces/current/tool-provider/api/add") api.add_resource(ToolApiProviderAddApi, "/workspaces/current/tool-provider/api/add")

View File

@ -7,7 +7,6 @@ from flask_login import current_user # type: ignore
from configs import dify_config from configs import dify_config
from controllers.console.workspace.error import AccountNotInitializedError from controllers.console.workspace.error import AccountNotInitializedError
from extensions.ext_database import db
from models.model import DifySetup from models.model import DifySetup
from services.feature_service import FeatureService, LicenseStatus from services.feature_service import FeatureService, LicenseStatus
from services.operation_service import OperationService from services.operation_service import OperationService
@ -135,13 +134,9 @@ def setup_required(view):
@wraps(view) @wraps(view)
def decorated(*args, **kwargs): def decorated(*args, **kwargs):
# check setup # check setup
if ( if dify_config.EDITION == "SELF_HOSTED" and os.environ.get("INIT_PASSWORD") and not DifySetup.query.first():
dify_config.EDITION == "SELF_HOSTED"
and os.environ.get("INIT_PASSWORD")
and not db.session.query(DifySetup).first()
):
raise NotInitValidateError() raise NotInitValidateError()
elif dify_config.EDITION == "SELF_HOSTED" and not db.session.query(DifySetup).first(): elif dify_config.EDITION == "SELF_HOSTED" and not DifySetup.query.first():
raise NotSetupError() raise NotSetupError()
return view(*args, **kwargs) return view(*args, **kwargs)

View File

@ -6,4 +6,4 @@ bp = Blueprint("files", __name__)
api = ExternalApi(bp) api = ExternalApi(bp)
from . import image_preview, tool_files, upload from . import image_preview, tool_files

View File

@ -1,5 +1,3 @@
from urllib.parse import quote
from flask import Response, request from flask import Response, request
from flask_restful import Resource, reqparse # type: ignore from flask_restful import Resource, reqparse # type: ignore
from werkzeug.exceptions import NotFound from werkzeug.exceptions import NotFound
@ -73,8 +71,7 @@ class FilePreviewApi(Resource):
if upload_file.size > 0: if upload_file.size > 0:
response.headers["Content-Length"] = str(upload_file.size) response.headers["Content-Length"] = str(upload_file.size)
if args["as_attachment"]: if args["as_attachment"]:
encoded_filename = quote(upload_file.name) response.headers["Content-Disposition"] = f"attachment; filename={upload_file.name}"
response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}"
return response return response

View File

@ -1,69 +0,0 @@
from flask import request
from flask_restful import Resource, marshal_with # type: ignore
from werkzeug.exceptions import Forbidden
import services
from controllers.console.wraps import setup_required
from controllers.files import api
from controllers.files.error import UnsupportedFileTypeError
from controllers.inner_api.plugin.wraps import get_user
from controllers.service_api.app.error import FileTooLargeError
from core.file.helpers import verify_plugin_file_signature
from fields.file_fields import file_fields
from services.file_service import FileService
class PluginUploadFileApi(Resource):
@setup_required
@marshal_with(file_fields)
def post(self):
# get file from request
file = request.files["file"]
timestamp = request.args.get("timestamp")
nonce = request.args.get("nonce")
sign = request.args.get("sign")
tenant_id = request.args.get("tenant_id")
if not tenant_id:
raise Forbidden("Invalid request.")
user_id = request.args.get("user_id")
user = get_user(tenant_id, user_id)
filename = file.filename
mimetype = file.mimetype
if not filename or not mimetype:
raise Forbidden("Invalid request.")
if not timestamp or not nonce or not sign:
raise Forbidden("Invalid request.")
if not verify_plugin_file_signature(
filename=filename,
mimetype=mimetype,
tenant_id=tenant_id,
user_id=user_id,
timestamp=timestamp,
nonce=nonce,
sign=sign,
):
raise Forbidden("Invalid request.")
try:
upload_file = FileService.upload_file(
filename=filename,
content=file.read(),
mimetype=mimetype,
user=user,
source=None,
)
except services.errors.file.FileTooLargeError as file_too_large_error:
raise FileTooLargeError(file_too_large_error.description)
except services.errors.file.UnsupportedFileTypeError:
raise UnsupportedFileTypeError()
return upload_file, 201
api.add_resource(PluginUploadFileApi, "/files/upload/for-plugin")

View File

@ -5,5 +5,4 @@ from libs.external_api import ExternalApi
bp = Blueprint("inner_api", __name__, url_prefix="/inner/api") bp = Blueprint("inner_api", __name__, url_prefix="/inner/api")
api = ExternalApi(bp) api = ExternalApi(bp)
from .plugin import plugin
from .workspace import workspace from .workspace import workspace

View File

@ -1,293 +0,0 @@
from flask_restful import Resource # type: ignore
from controllers.console.wraps import setup_required
from controllers.inner_api import api
from controllers.inner_api.plugin.wraps import get_user_tenant, plugin_data
from controllers.inner_api.wraps import plugin_inner_api_only
from core.file.helpers import get_signed_file_url_for_plugin
from core.model_runtime.utils.encoders import jsonable_encoder
from core.plugin.backwards_invocation.app import PluginAppBackwardsInvocation
from core.plugin.backwards_invocation.base import BaseBackwardsInvocationResponse
from core.plugin.backwards_invocation.encrypt import PluginEncrypter
from core.plugin.backwards_invocation.model import PluginModelBackwardsInvocation
from core.plugin.backwards_invocation.node import PluginNodeBackwardsInvocation
from core.plugin.backwards_invocation.tool import PluginToolBackwardsInvocation
from core.plugin.entities.request import (
RequestInvokeApp,
RequestInvokeEncrypt,
RequestInvokeLLM,
RequestInvokeModeration,
RequestInvokeParameterExtractorNode,
RequestInvokeQuestionClassifierNode,
RequestInvokeRerank,
RequestInvokeSpeech2Text,
RequestInvokeSummary,
RequestInvokeTextEmbedding,
RequestInvokeTool,
RequestInvokeTTS,
RequestRequestUploadFile,
)
from core.tools.entities.tool_entities import ToolProviderType
from libs.helper import compact_generate_response
from models.account import Account, Tenant
from models.model import EndUser
class PluginInvokeLLMApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeLLM)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeLLM):
def generator():
response = PluginModelBackwardsInvocation.invoke_llm(user_model.id, tenant_model, payload)
return PluginModelBackwardsInvocation.convert_to_event_stream(response)
return compact_generate_response(generator())
class PluginInvokeTextEmbeddingApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeTextEmbedding)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeTextEmbedding):
try:
return jsonable_encoder(
BaseBackwardsInvocationResponse(
data=PluginModelBackwardsInvocation.invoke_text_embedding(
user_id=user_model.id,
tenant=tenant_model,
payload=payload,
)
)
)
except Exception as e:
return jsonable_encoder(BaseBackwardsInvocationResponse(error=str(e)))
class PluginInvokeRerankApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeRerank)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeRerank):
try:
return jsonable_encoder(
BaseBackwardsInvocationResponse(
data=PluginModelBackwardsInvocation.invoke_rerank(
user_id=user_model.id,
tenant=tenant_model,
payload=payload,
)
)
)
except Exception as e:
return jsonable_encoder(BaseBackwardsInvocationResponse(error=str(e)))
class PluginInvokeTTSApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeTTS)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeTTS):
def generator():
response = PluginModelBackwardsInvocation.invoke_tts(
user_id=user_model.id,
tenant=tenant_model,
payload=payload,
)
return PluginModelBackwardsInvocation.convert_to_event_stream(response)
return compact_generate_response(generator())
class PluginInvokeSpeech2TextApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeSpeech2Text)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeSpeech2Text):
try:
return jsonable_encoder(
BaseBackwardsInvocationResponse(
data=PluginModelBackwardsInvocation.invoke_speech2text(
user_id=user_model.id,
tenant=tenant_model,
payload=payload,
)
)
)
except Exception as e:
return jsonable_encoder(BaseBackwardsInvocationResponse(error=str(e)))
class PluginInvokeModerationApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeModeration)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeModeration):
try:
return jsonable_encoder(
BaseBackwardsInvocationResponse(
data=PluginModelBackwardsInvocation.invoke_moderation(
user_id=user_model.id,
tenant=tenant_model,
payload=payload,
)
)
)
except Exception as e:
return jsonable_encoder(BaseBackwardsInvocationResponse(error=str(e)))
class PluginInvokeToolApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeTool)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeTool):
def generator():
return PluginToolBackwardsInvocation.convert_to_event_stream(
PluginToolBackwardsInvocation.invoke_tool(
tenant_id=tenant_model.id,
user_id=user_model.id,
tool_type=ToolProviderType.value_of(payload.tool_type),
provider=payload.provider,
tool_name=payload.tool,
tool_parameters=payload.tool_parameters,
),
)
return compact_generate_response(generator())
class PluginInvokeParameterExtractorNodeApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeParameterExtractorNode)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeParameterExtractorNode):
try:
return jsonable_encoder(
BaseBackwardsInvocationResponse(
data=PluginNodeBackwardsInvocation.invoke_parameter_extractor(
tenant_id=tenant_model.id,
user_id=user_model.id,
parameters=payload.parameters,
model_config=payload.model,
instruction=payload.instruction,
query=payload.query,
)
)
)
except Exception as e:
return jsonable_encoder(BaseBackwardsInvocationResponse(error=str(e)))
class PluginInvokeQuestionClassifierNodeApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeQuestionClassifierNode)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeQuestionClassifierNode):
try:
return jsonable_encoder(
BaseBackwardsInvocationResponse(
data=PluginNodeBackwardsInvocation.invoke_question_classifier(
tenant_id=tenant_model.id,
user_id=user_model.id,
query=payload.query,
model_config=payload.model,
classes=payload.classes,
instruction=payload.instruction,
)
)
)
except Exception as e:
return jsonable_encoder(BaseBackwardsInvocationResponse(error=str(e)))
class PluginInvokeAppApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeApp)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeApp):
response = PluginAppBackwardsInvocation.invoke_app(
app_id=payload.app_id,
user_id=user_model.id,
tenant_id=tenant_model.id,
conversation_id=payload.conversation_id,
query=payload.query,
stream=payload.response_mode == "streaming",
inputs=payload.inputs,
files=payload.files,
)
return compact_generate_response(PluginAppBackwardsInvocation.convert_to_event_stream(response))
class PluginInvokeEncryptApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeEncrypt)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeEncrypt):
"""
encrypt or decrypt data
"""
try:
return BaseBackwardsInvocationResponse(
data=PluginEncrypter.invoke_encrypt(tenant_model, payload)
).model_dump()
except Exception as e:
return BaseBackwardsInvocationResponse(error=str(e)).model_dump()
class PluginInvokeSummaryApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestInvokeSummary)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestInvokeSummary):
try:
return BaseBackwardsInvocationResponse(
data={
"summary": PluginModelBackwardsInvocation.invoke_summary(
user_id=user_model.id,
tenant=tenant_model,
payload=payload,
)
}
).model_dump()
except Exception as e:
return BaseBackwardsInvocationResponse(error=str(e)).model_dump()
class PluginUploadFileRequestApi(Resource):
@setup_required
@plugin_inner_api_only
@get_user_tenant
@plugin_data(payload_type=RequestRequestUploadFile)
def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestRequestUploadFile):
# generate signed url
url = get_signed_file_url_for_plugin(payload.filename, payload.mimetype, tenant_model.id, user_model.id)
return BaseBackwardsInvocationResponse(data={"url": url}).model_dump()
api.add_resource(PluginInvokeLLMApi, "/invoke/llm")
api.add_resource(PluginInvokeTextEmbeddingApi, "/invoke/text-embedding")
api.add_resource(PluginInvokeRerankApi, "/invoke/rerank")
api.add_resource(PluginInvokeTTSApi, "/invoke/tts")
api.add_resource(PluginInvokeSpeech2TextApi, "/invoke/speech2text")
api.add_resource(PluginInvokeModerationApi, "/invoke/moderation")
api.add_resource(PluginInvokeToolApi, "/invoke/tool")
api.add_resource(PluginInvokeParameterExtractorNodeApi, "/invoke/parameter-extractor")
api.add_resource(PluginInvokeQuestionClassifierNodeApi, "/invoke/question-classifier")
api.add_resource(PluginInvokeAppApi, "/invoke/app")
api.add_resource(PluginInvokeEncryptApi, "/invoke/encrypt")
api.add_resource(PluginInvokeSummaryApi, "/invoke/summary")
api.add_resource(PluginUploadFileRequestApi, "/upload/file/request")

View File

@ -1,116 +0,0 @@
from collections.abc import Callable
from functools import wraps
from typing import Optional
from flask import request
from flask_restful import reqparse # type: ignore
from pydantic import BaseModel
from sqlalchemy.orm import Session
from extensions.ext_database import db
from models.account import Account, Tenant
from models.model import EndUser
from services.account_service import AccountService
def get_user(tenant_id: str, user_id: str | None) -> Account | EndUser:
try:
with Session(db.engine) as session:
if not user_id:
user_id = "DEFAULT-USER"
if user_id == "DEFAULT-USER":
user_model = session.query(EndUser).filter(EndUser.session_id == "DEFAULT-USER").first()
if not user_model:
user_model = EndUser(
tenant_id=tenant_id,
type="service_api",
is_anonymous=True if user_id == "DEFAULT-USER" else False,
session_id=user_id,
)
session.add(user_model)
session.commit()
else:
user_model = AccountService.load_user(user_id)
if not user_model:
user_model = session.query(EndUser).filter(EndUser.id == user_id).first()
if not user_model:
raise ValueError("user not found")
except Exception:
raise ValueError("user not found")
return user_model
def get_user_tenant(view: Optional[Callable] = None):
def decorator(view_func):
@wraps(view_func)
def decorated_view(*args, **kwargs):
# fetch json body
parser = reqparse.RequestParser()
parser.add_argument("tenant_id", type=str, required=True, location="json")
parser.add_argument("user_id", type=str, required=True, location="json")
kwargs = parser.parse_args()
user_id = kwargs.get("user_id")
tenant_id = kwargs.get("tenant_id")
if not tenant_id:
raise ValueError("tenant_id is required")
if not user_id:
user_id = "DEFAULT-USER"
del kwargs["tenant_id"]
del kwargs["user_id"]
try:
tenant_model = (
db.session.query(Tenant)
.filter(
Tenant.id == tenant_id,
)
.first()
)
except Exception:
raise ValueError("tenant not found")
if not tenant_model:
raise ValueError("tenant not found")
kwargs["tenant_model"] = tenant_model
kwargs["user_model"] = get_user(tenant_id, user_id)
return view_func(*args, **kwargs)
return decorated_view
if view is None:
return decorator
else:
return decorator(view)
def plugin_data(view: Optional[Callable] = None, *, payload_type: type[BaseModel]):
def decorator(view_func):
def decorated_view(*args, **kwargs):
try:
data = request.get_json()
except Exception:
raise ValueError("invalid json")
try:
payload = payload_type(**data)
except Exception as e:
raise ValueError(f"invalid payload: {str(e)}")
kwargs["payload"] = payload
return view_func(*args, **kwargs)
return decorated_view
if view is None:
return decorator
else:
return decorator(view)

View File

@ -4,7 +4,7 @@ from flask_restful import Resource, reqparse # type: ignore
from controllers.console.wraps import setup_required from controllers.console.wraps import setup_required
from controllers.inner_api import api from controllers.inner_api import api
from controllers.inner_api.wraps import enterprise_inner_api_only from controllers.inner_api.wraps import inner_api_only
from events.tenant_event import tenant_was_created from events.tenant_event import tenant_was_created
from models.account import Account from models.account import Account
from services.account_service import TenantService from services.account_service import TenantService
@ -12,7 +12,7 @@ from services.account_service import TenantService
class EnterpriseWorkspace(Resource): class EnterpriseWorkspace(Resource):
@setup_required @setup_required
@enterprise_inner_api_only @inner_api_only
def post(self): def post(self):
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True, location="json") parser.add_argument("name", type=str, required=True, location="json")
@ -33,7 +33,7 @@ class EnterpriseWorkspace(Resource):
class EnterpriseWorkspaceNoOwnerEmail(Resource): class EnterpriseWorkspaceNoOwnerEmail(Resource):
@setup_required @setup_required
@enterprise_inner_api_only @inner_api_only
def post(self): def post(self):
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument("name", type=str, required=True, location="json") parser.add_argument("name", type=str, required=True, location="json")
@ -50,8 +50,8 @@ class EnterpriseWorkspaceNoOwnerEmail(Resource):
"plan": tenant.plan, "plan": tenant.plan,
"status": tenant.status, "status": tenant.status,
"custom_config": json.loads(tenant.custom_config) if tenant.custom_config else {}, "custom_config": json.loads(tenant.custom_config) if tenant.custom_config else {},
"created_at": tenant.created_at.isoformat() + "Z" if tenant.created_at else None, "created_at": tenant.created_at.isoformat() if tenant.created_at else None,
"updated_at": tenant.updated_at.isoformat() + "Z" if tenant.updated_at else None, "updated_at": tenant.updated_at.isoformat() if tenant.updated_at else None,
} }
return { return {

View File

@ -10,7 +10,7 @@ from extensions.ext_database import db
from models.model import EndUser from models.model import EndUser
def enterprise_inner_api_only(view): def inner_api_only(view):
@wraps(view) @wraps(view)
def decorated(*args, **kwargs): def decorated(*args, **kwargs):
if not dify_config.INNER_API: if not dify_config.INNER_API:
@ -18,7 +18,7 @@ def enterprise_inner_api_only(view):
# get header 'X-Inner-Api-Key' # get header 'X-Inner-Api-Key'
inner_api_key = request.headers.get("X-Inner-Api-Key") inner_api_key = request.headers.get("X-Inner-Api-Key")
if not inner_api_key or inner_api_key != dify_config.INNER_API_KEY_FOR_PLUGIN: if not inner_api_key or inner_api_key != dify_config.INNER_API_KEY:
abort(401) abort(401)
return view(*args, **kwargs) return view(*args, **kwargs)
@ -26,7 +26,7 @@ def enterprise_inner_api_only(view):
return decorated return decorated
def enterprise_inner_api_user_auth(view): def inner_api_user_auth(view):
@wraps(view) @wraps(view)
def decorated(*args, **kwargs): def decorated(*args, **kwargs):
if not dify_config.INNER_API: if not dify_config.INNER_API:
@ -60,19 +60,3 @@ def enterprise_inner_api_user_auth(view):
return view(*args, **kwargs) return view(*args, **kwargs)
return decorated return decorated
def plugin_inner_api_only(view):
@wraps(view)
def decorated(*args, **kwargs):
if not dify_config.PLUGIN_DAEMON_KEY:
abort(404)
# get header 'X-Inner-Api-Key'
inner_api_key = request.headers.get("X-Inner-Api-Key")
if not inner_api_key or inner_api_key != dify_config.INNER_API_KEY_FOR_PLUGIN:
abort(404)
return view(*args, **kwargs)
return decorated

View File

@ -10,7 +10,6 @@ from controllers.service_api.app.error import NotChatAppError
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.app_invoke_entities import InvokeFrom
from fields.conversation_fields import message_file_fields from fields.conversation_fields import message_file_fields
from fields.message_fields import feedback_fields, retriever_resource_fields
from fields.raws import FilesContainedField from fields.raws import FilesContainedField
from libs.helper import TimestampField, uuid_value from libs.helper import TimestampField, uuid_value
from models.model import App, AppMode, EndUser from models.model import App, AppMode, EndUser
@ -19,6 +18,26 @@ from services.message_service import MessageService
class MessageListApi(Resource): class MessageListApi(Resource):
feedback_fields = {"rating": fields.String}
retriever_resource_fields = {
"id": fields.String,
"message_id": fields.String,
"position": fields.Integer,
"dataset_id": fields.String,
"dataset_name": fields.String,
"document_id": fields.String,
"document_name": fields.String,
"data_source_type": fields.String,
"segment_id": fields.String,
"score": fields.Float,
"hit_count": fields.Integer,
"word_count": fields.Integer,
"segment_position": fields.Integer,
"index_node_hash": fields.String,
"content": fields.String,
"created_at": TimestampField,
}
agent_thought_fields = { agent_thought_fields = {
"id": fields.String, "id": fields.String,
"chain_id": fields.String, "chain_id": fields.String,
@ -70,7 +89,7 @@ class MessageListApi(Resource):
try: try:
return MessageService.pagination_by_first_id( return MessageService.pagination_by_first_id(
app_model, end_user, args["conversation_id"], args["first_id"], args["limit"], "desc" app_model, end_user, args["conversation_id"], args["first_id"], args["limit"]
) )
except services.errors.conversation.ConversationNotExistsError: except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.") raise NotFound("Conversation Not Exists.")

View File

@ -336,10 +336,6 @@ class DocumentUpdateByFileApi(DatasetApiResource):
if not dataset: if not dataset:
raise ValueError("Dataset is not exist.") raise ValueError("Dataset is not exist.")
# indexing_technique is already set in dataset since this is an update
args["indexing_technique"] = dataset.indexing_technique
if "file" in request.files: if "file" in request.files:
# save file info # save file info
file = request.files["file"] file = request.files["file"]

View File

@ -154,7 +154,7 @@ def validate_dataset_token(view=None):
) # TODO: only owner information is required, so only one is returned. ) # TODO: only owner information is required, so only one is returned.
if tenant_account_join: if tenant_account_join:
tenant, ta = tenant_account_join tenant, ta = tenant_account_join
account = db.session.query(Account).filter(Account.id == ta.account_id).first() account = Account.query.filter_by(id=ta.account_id).first()
# Login admin # Login admin
if account: if account:
account.current_tenant = tenant account.current_tenant = tenant

View File

@ -21,7 +21,7 @@ from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError from core.model_runtime.errors.invoke import InvokeError
from fields.conversation_fields import message_file_fields from fields.conversation_fields import message_file_fields
from fields.message_fields import agent_thought_fields, feedback_fields, retriever_resource_fields from fields.message_fields import agent_thought_fields
from fields.raws import FilesContainedField from fields.raws import FilesContainedField
from libs import helper from libs import helper
from libs.helper import TimestampField, uuid_value from libs.helper import TimestampField, uuid_value
@ -34,6 +34,27 @@ from services.message_service import MessageService
class MessageListApi(WebApiResource): class MessageListApi(WebApiResource):
feedback_fields = {"rating": fields.String}
retriever_resource_fields = {
"id": fields.String,
"message_id": fields.String,
"position": fields.Integer,
"dataset_id": fields.String,
"dataset_name": fields.String,
"document_id": fields.String,
"document_name": fields.String,
"data_source_type": fields.String,
"segment_id": fields.String,
"score": fields.Float,
"hit_count": fields.Integer,
"word_count": fields.Integer,
"segment_position": fields.Integer,
"index_node_hash": fields.String,
"content": fields.String,
"created_at": TimestampField,
}
message_fields = { message_fields = {
"id": fields.String, "id": fields.String,
"conversation_id": fields.String, "conversation_id": fields.String,

View File

@ -1,6 +1,7 @@
import json import json
import logging import logging
import uuid import uuid
from datetime import UTC, datetime
from typing import Optional, Union, cast from typing import Optional, Union, cast
from core.agent.entities import AgentEntity, AgentToolEntity from core.agent.entities import AgentEntity, AgentToolEntity
@ -31,16 +32,19 @@ from core.model_runtime.entities import (
from core.model_runtime.entities.message_entities import ImagePromptMessageContent from core.model_runtime.entities.message_entities import ImagePromptMessageContent
from core.model_runtime.entities.model_entities import ModelFeature from core.model_runtime.entities.model_entities import ModelFeature
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
from core.prompt.utils.extract_thread_messages import extract_thread_messages from core.prompt.utils.extract_thread_messages import extract_thread_messages
from core.tools.__base.tool import Tool
from core.tools.entities.tool_entities import ( from core.tools.entities.tool_entities import (
ToolParameter, ToolParameter,
ToolRuntimeVariablePool,
) )
from core.tools.tool.dataset_retriever_tool import DatasetRetrieverTool
from core.tools.tool.tool import Tool
from core.tools.tool_manager import ToolManager from core.tools.tool_manager import ToolManager
from core.tools.utils.dataset_retriever_tool import DatasetRetrieverTool
from extensions.ext_database import db from extensions.ext_database import db
from factories import file_factory from factories import file_factory
from models.model import Conversation, Message, MessageAgentThought, MessageFile from models.model import Conversation, Message, MessageAgentThought, MessageFile
from models.tools import ToolConversationVariables
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -58,9 +62,11 @@ class BaseAgentRunner(AppRunner):
queue_manager: AppQueueManager, queue_manager: AppQueueManager,
message: Message, message: Message,
user_id: str, user_id: str,
model_instance: ModelInstance,
memory: Optional[TokenBufferMemory] = None, memory: Optional[TokenBufferMemory] = None,
prompt_messages: Optional[list[PromptMessage]] = None, prompt_messages: Optional[list[PromptMessage]] = None,
variables_pool: Optional[ToolRuntimeVariablePool] = None,
db_variables: Optional[ToolConversationVariables] = None,
model_instance: ModelInstance,
) -> None: ) -> None:
self.tenant_id = tenant_id self.tenant_id = tenant_id
self.application_generate_entity = application_generate_entity self.application_generate_entity = application_generate_entity
@ -73,6 +79,8 @@ class BaseAgentRunner(AppRunner):
self.user_id = user_id self.user_id = user_id
self.memory = memory self.memory = memory
self.history_prompt_messages = self.organize_agent_history(prompt_messages=prompt_messages or []) self.history_prompt_messages = self.organize_agent_history(prompt_messages=prompt_messages or [])
self.variables_pool = variables_pool
self.db_variables_pool = db_variables
self.model_instance = model_instance self.model_instance = model_instance
# init callback # init callback
@ -133,10 +141,11 @@ class BaseAgentRunner(AppRunner):
agent_tool=tool, agent_tool=tool,
invoke_from=self.application_generate_entity.invoke_from, invoke_from=self.application_generate_entity.invoke_from,
) )
assert tool_entity.entity.description tool_entity.load_variables(self.variables_pool)
message_tool = PromptMessageTool( message_tool = PromptMessageTool(
name=tool.tool_name, name=tool.tool_name,
description=tool_entity.entity.description.llm, description=tool_entity.description.llm if tool_entity.description else "",
parameters={ parameters={
"type": "object", "type": "object",
"properties": {}, "properties": {},
@ -144,7 +153,7 @@ class BaseAgentRunner(AppRunner):
}, },
) )
parameters = tool_entity.get_merged_runtime_parameters() parameters = tool_entity.get_all_runtime_parameters()
for parameter in parameters: for parameter in parameters:
if parameter.form != ToolParameter.ToolParameterForm.LLM: if parameter.form != ToolParameter.ToolParameterForm.LLM:
continue continue
@ -177,11 +186,9 @@ class BaseAgentRunner(AppRunner):
""" """
convert dataset retriever tool to prompt message tool convert dataset retriever tool to prompt message tool
""" """
assert tool.entity.description
prompt_tool = PromptMessageTool( prompt_tool = PromptMessageTool(
name=tool.entity.identity.name, name=tool.identity.name if tool.identity else "unknown",
description=tool.entity.description.llm, description=tool.description.llm if tool.description else "",
parameters={ parameters={
"type": "object", "type": "object",
"properties": {}, "properties": {},
@ -227,7 +234,8 @@ class BaseAgentRunner(AppRunner):
# save prompt tool # save prompt tool
prompt_messages_tools.append(prompt_tool) prompt_messages_tools.append(prompt_tool)
# save tool entity # save tool entity
tool_instances[dataset_tool.entity.identity.name] = dataset_tool if dataset_tool.identity is not None:
tool_instances[dataset_tool.identity.name] = dataset_tool
return tool_instances, prompt_messages_tools return tool_instances, prompt_messages_tools
@ -312,24 +320,24 @@ class BaseAgentRunner(AppRunner):
def save_agent_thought( def save_agent_thought(
self, self,
agent_thought: MessageAgentThought, agent_thought: MessageAgentThought,
tool_name: str | None, tool_name: str,
tool_input: Union[str, dict, None], tool_input: Union[str, dict],
thought: str | None, thought: str,
observation: Union[str, dict, None], observation: Union[str, dict, None],
tool_invoke_meta: Union[str, dict, None], tool_invoke_meta: Union[str, dict, None],
answer: str | None, answer: str,
messages_ids: list[str], messages_ids: list[str],
llm_usage: LLMUsage | None = None, llm_usage: LLMUsage | None = None,
): ):
""" """
Save agent thought Save agent thought
""" """
updated_agent_thought = ( queried_thought = (
db.session.query(MessageAgentThought).filter(MessageAgentThought.id == agent_thought.id).first() db.session.query(MessageAgentThought).filter(MessageAgentThought.id == agent_thought.id).first()
) )
if not updated_agent_thought: if not queried_thought:
raise ValueError("agent thought not found") raise ValueError(f"Agent thought {agent_thought.id} not found")
agent_thought = updated_agent_thought agent_thought = queried_thought
if thought: if thought:
agent_thought.thought = thought agent_thought.thought = thought
@ -341,39 +349,39 @@ class BaseAgentRunner(AppRunner):
if isinstance(tool_input, dict): if isinstance(tool_input, dict):
try: try:
tool_input = json.dumps(tool_input, ensure_ascii=False) tool_input = json.dumps(tool_input, ensure_ascii=False)
except Exception: except Exception as e:
tool_input = json.dumps(tool_input) tool_input = json.dumps(tool_input)
updated_agent_thought.tool_input = tool_input agent_thought.tool_input = tool_input
if observation: if observation:
if isinstance(observation, dict): if isinstance(observation, dict):
try: try:
observation = json.dumps(observation, ensure_ascii=False) observation = json.dumps(observation, ensure_ascii=False)
except Exception: except Exception as e:
observation = json.dumps(observation) observation = json.dumps(observation)
updated_agent_thought.observation = observation agent_thought.observation = observation
if answer: if answer:
agent_thought.answer = answer agent_thought.answer = answer
if messages_ids is not None and len(messages_ids) > 0: if messages_ids is not None and len(messages_ids) > 0:
updated_agent_thought.message_files = json.dumps(messages_ids) agent_thought.message_files = json.dumps(messages_ids)
if llm_usage: if llm_usage:
updated_agent_thought.message_token = llm_usage.prompt_tokens agent_thought.message_token = llm_usage.prompt_tokens
updated_agent_thought.message_price_unit = llm_usage.prompt_price_unit agent_thought.message_price_unit = llm_usage.prompt_price_unit
updated_agent_thought.message_unit_price = llm_usage.prompt_unit_price agent_thought.message_unit_price = llm_usage.prompt_unit_price
updated_agent_thought.answer_token = llm_usage.completion_tokens agent_thought.answer_token = llm_usage.completion_tokens
updated_agent_thought.answer_price_unit = llm_usage.completion_price_unit agent_thought.answer_price_unit = llm_usage.completion_price_unit
updated_agent_thought.answer_unit_price = llm_usage.completion_unit_price agent_thought.answer_unit_price = llm_usage.completion_unit_price
updated_agent_thought.tokens = llm_usage.total_tokens agent_thought.tokens = llm_usage.total_tokens
updated_agent_thought.total_price = llm_usage.total_price agent_thought.total_price = llm_usage.total_price
# check if tool labels is not empty # check if tool labels is not empty
labels = updated_agent_thought.tool_labels or {} labels = agent_thought.tool_labels or {}
tools = updated_agent_thought.tool.split(";") if updated_agent_thought.tool else [] tools = agent_thought.tool.split(";") if agent_thought.tool else []
for tool in tools: for tool in tools:
if not tool: if not tool:
continue continue
@ -384,20 +392,42 @@ class BaseAgentRunner(AppRunner):
else: else:
labels[tool] = {"en_US": tool, "zh_Hans": tool} labels[tool] = {"en_US": tool, "zh_Hans": tool}
updated_agent_thought.tool_labels_str = json.dumps(labels) agent_thought.tool_labels_str = json.dumps(labels)
if tool_invoke_meta is not None: if tool_invoke_meta is not None:
if isinstance(tool_invoke_meta, dict): if isinstance(tool_invoke_meta, dict):
try: try:
tool_invoke_meta = json.dumps(tool_invoke_meta, ensure_ascii=False) tool_invoke_meta = json.dumps(tool_invoke_meta, ensure_ascii=False)
except Exception: except Exception as e:
tool_invoke_meta = json.dumps(tool_invoke_meta) tool_invoke_meta = json.dumps(tool_invoke_meta)
updated_agent_thought.tool_meta_str = tool_invoke_meta agent_thought.tool_meta_str = tool_invoke_meta
db.session.commit() db.session.commit()
db.session.close() db.session.close()
def update_db_variables(self, tool_variables: ToolRuntimeVariablePool, db_variables: ToolConversationVariables):
"""
convert tool variables to db variables
"""
queried_variables = (
db.session.query(ToolConversationVariables)
.filter(
ToolConversationVariables.conversation_id == self.message.conversation_id,
)
.first()
)
if not queried_variables:
return
db_variables = queried_variables
db_variables.updated_at = datetime.now(UTC).replace(tzinfo=None)
db_variables.variables_str = json.dumps(jsonable_encoder(tool_variables.pool))
db.session.commit()
db.session.close()
def organize_agent_history(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: def organize_agent_history(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
""" """
Organize agent history Organize agent history
@ -434,11 +464,11 @@ class BaseAgentRunner(AppRunner):
tool_call_response: list[ToolPromptMessage] = [] tool_call_response: list[ToolPromptMessage] = []
try: try:
tool_inputs = json.loads(agent_thought.tool_input) tool_inputs = json.loads(agent_thought.tool_input)
except Exception: except Exception as e:
tool_inputs = {tool: {} for tool in tools} tool_inputs = {tool: {} for tool in tools}
try: try:
tool_responses = json.loads(agent_thought.observation) tool_responses = json.loads(agent_thought.observation)
except Exception: except Exception as e:
tool_responses = dict.fromkeys(tools, agent_thought.observation) tool_responses = dict.fromkeys(tools, agent_thought.observation)
for tool in tools: for tool in tools:
@ -485,11 +515,7 @@ class BaseAgentRunner(AppRunner):
files = db.session.query(MessageFile).filter(MessageFile.message_id == message.id).all() files = db.session.query(MessageFile).filter(MessageFile.message_id == message.id).all()
if not files: if not files:
return UserPromptMessage(content=message.query) return UserPromptMessage(content=message.query)
if message.app_model_config: file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
file_extra_config = FileUploadConfigManager.convert(message.app_model_config.to_dict())
else:
file_extra_config = None
if not file_extra_config: if not file_extra_config:
return UserPromptMessage(content=message.query) return UserPromptMessage(content=message.query)

View File

@ -1,6 +1,6 @@
import json import json
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from collections.abc import Generator, Mapping, Sequence from collections.abc import Generator, Mapping
from typing import Any, Optional from typing import Any, Optional
from core.agent.base_agent_runner import BaseAgentRunner from core.agent.base_agent_runner import BaseAgentRunner
@ -18,8 +18,8 @@ from core.model_runtime.entities.message_entities import (
) )
from core.ops.ops_trace_manager import TraceQueueManager from core.ops.ops_trace_manager import TraceQueueManager
from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
from core.tools.__base.tool import Tool
from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.entities.tool_entities import ToolInvokeMeta
from core.tools.tool.tool import Tool
from core.tools.tool_engine import ToolEngine from core.tools.tool_engine import ToolEngine
from models.model import Message from models.model import Message
@ -27,11 +27,11 @@ from models.model import Message
class CotAgentRunner(BaseAgentRunner, ABC): class CotAgentRunner(BaseAgentRunner, ABC):
_is_first_iteration = True _is_first_iteration = True
_ignore_observation_providers = ["wenxin"] _ignore_observation_providers = ["wenxin"]
_historic_prompt_messages: list[PromptMessage] _historic_prompt_messages: list[PromptMessage] | None = None
_agent_scratchpad: list[AgentScratchpadUnit] _agent_scratchpad: list[AgentScratchpadUnit] | None = None
_instruction: str _instruction: str = "" # FIXME this must be str for now
_query: str _query: str | None = None
_prompt_messages_tools: Sequence[PromptMessageTool] _prompt_messages_tools: list[PromptMessageTool] = []
def run( def run(
self, self,
@ -42,7 +42,6 @@ class CotAgentRunner(BaseAgentRunner, ABC):
""" """
Run Cot agent application Run Cot agent application
""" """
app_generate_entity = self.application_generate_entity app_generate_entity = self.application_generate_entity
self._repack_app_generate_entity(app_generate_entity) self._repack_app_generate_entity(app_generate_entity)
self._init_react_state(query) self._init_react_state(query)
@ -55,19 +54,17 @@ class CotAgentRunner(BaseAgentRunner, ABC):
app_generate_entity.model_conf.stop.append("Observation") app_generate_entity.model_conf.stop.append("Observation")
app_config = self.app_config app_config = self.app_config
assert app_config.agent
# init instruction # init instruction
inputs = inputs or {} inputs = inputs or {}
instruction = app_config.prompt_template.simple_prompt_template or "" instruction = app_config.prompt_template.simple_prompt_template
self._instruction = self._fill_in_inputs_from_external_data_tools(instruction, inputs) self._instruction = self._fill_in_inputs_from_external_data_tools(instruction=instruction or "", inputs=inputs)
iteration_step = 1 iteration_step = 1
max_iteration_steps = min(app_config.agent.max_iteration if app_config.agent else 5, 5) + 1 max_iteration_steps = min(app_config.agent.max_iteration if app_config.agent else 5, 5) + 1
# convert tools into ModelRuntime Tool format # convert tools into ModelRuntime Tool format
tool_instances, prompt_messages_tools = self._init_prompt_tools() tool_instances, self._prompt_messages_tools = self._init_prompt_tools()
self._prompt_messages_tools = prompt_messages_tools
function_call_state = True function_call_state = True
llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None} llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None}
@ -119,7 +116,14 @@ class CotAgentRunner(BaseAgentRunner, ABC):
callbacks=[], callbacks=[],
) )
usage_dict: dict[str, Optional[LLMUsage]] = {} if not isinstance(chunks, Generator):
raise ValueError("Expected streaming response from LLM")
# check llm result
if not chunks:
raise ValueError("failed to invoke llm")
usage_dict: dict[str, Optional[LLMUsage]] = {"usage": None}
react_chunks = CotAgentOutputParser.handle_react_stream_output(chunks, usage_dict) react_chunks = CotAgentOutputParser.handle_react_stream_output(chunks, usage_dict)
scratchpad = AgentScratchpadUnit( scratchpad = AgentScratchpadUnit(
agent_response="", agent_response="",
@ -139,25 +143,25 @@ class CotAgentRunner(BaseAgentRunner, ABC):
if isinstance(chunk, AgentScratchpadUnit.Action): if isinstance(chunk, AgentScratchpadUnit.Action):
action = chunk action = chunk
# detect action # detect action
assert scratchpad.agent_response is not None if scratchpad.agent_response is not None:
scratchpad.agent_response += json.dumps(chunk.model_dump()) scratchpad.agent_response += json.dumps(chunk.model_dump())
scratchpad.action_str = json.dumps(chunk.model_dump()) scratchpad.action_str = json.dumps(chunk.model_dump())
scratchpad.action = action scratchpad.action = action
else: else:
assert scratchpad.agent_response is not None if scratchpad.agent_response is not None:
scratchpad.agent_response += chunk scratchpad.agent_response += chunk
assert scratchpad.thought is not None if scratchpad.thought is not None:
scratchpad.thought += chunk scratchpad.thought += chunk
yield LLMResultChunk( yield LLMResultChunk(
model=self.model_config.model, model=self.model_config.model,
prompt_messages=prompt_messages, prompt_messages=prompt_messages,
system_fingerprint="", system_fingerprint="",
delta=LLMResultChunkDelta(index=0, message=AssistantPromptMessage(content=chunk), usage=None), delta=LLMResultChunkDelta(index=0, message=AssistantPromptMessage(content=chunk), usage=None),
) )
if scratchpad.thought is not None:
assert scratchpad.thought is not None scratchpad.thought = scratchpad.thought.strip() or "I am thinking about how to help you"
scratchpad.thought = scratchpad.thought.strip() or "I am thinking about how to help you" if self._agent_scratchpad is not None:
self._agent_scratchpad.append(scratchpad) self._agent_scratchpad.append(scratchpad)
# get llm usage # get llm usage
if "usage" in usage_dict: if "usage" in usage_dict:
@ -252,6 +256,8 @@ class CotAgentRunner(BaseAgentRunner, ABC):
answer=final_answer, answer=final_answer,
messages_ids=[], messages_ids=[],
) )
if self.variables_pool is not None and self.db_variables_pool is not None:
self.update_db_variables(self.variables_pool, self.db_variables_pool)
# publish end event # publish end event
self.queue_manager.publish( self.queue_manager.publish(
QueueMessageEndEvent( QueueMessageEndEvent(
@ -269,7 +275,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
def _handle_invoke_action( def _handle_invoke_action(
self, self,
action: AgentScratchpadUnit.Action, action: AgentScratchpadUnit.Action,
tool_instances: Mapping[str, Tool], tool_instances: dict[str, Tool],
message_file_ids: list[str], message_file_ids: list[str],
trace_manager: Optional[TraceQueueManager] = None, trace_manager: Optional[TraceQueueManager] = None,
) -> tuple[str, ToolInvokeMeta]: ) -> tuple[str, ToolInvokeMeta]:
@ -309,7 +315,11 @@ class CotAgentRunner(BaseAgentRunner, ABC):
) )
# publish files # publish files
for message_file_id in message_files: for message_file_id, save_as in message_files:
if save_as is not None and self.variables_pool:
# FIXME the save_as type is confusing, it should be a string or not
self.variables_pool.set_file(tool_name=tool_call_name, value=message_file_id, name=str(save_as))
# publish message file # publish message file
self.queue_manager.publish( self.queue_manager.publish(
QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER
@ -332,7 +342,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
for key, value in inputs.items(): for key, value in inputs.items():
try: try:
instruction = instruction.replace(f"{{{{{key}}}}}", str(value)) instruction = instruction.replace(f"{{{{{key}}}}}", str(value))
except Exception: except Exception as e:
continue continue
return instruction return instruction
@ -369,7 +379,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
return message return message
def _organize_historic_prompt_messages( def _organize_historic_prompt_messages(
self, current_session_messages: list[PromptMessage] | None = None self, current_session_messages: Optional[list[PromptMessage]] = None
) -> list[PromptMessage]: ) -> list[PromptMessage]:
""" """
organize historic prompt messages organize historic prompt messages
@ -381,7 +391,8 @@ class CotAgentRunner(BaseAgentRunner, ABC):
for message in self.history_prompt_messages: for message in self.history_prompt_messages:
if isinstance(message, AssistantPromptMessage): if isinstance(message, AssistantPromptMessage):
if not current_scratchpad: if not current_scratchpad:
assert isinstance(message.content, str) if not isinstance(message.content, str | None):
raise NotImplementedError("expected str type")
current_scratchpad = AgentScratchpadUnit( current_scratchpad = AgentScratchpadUnit(
agent_response=message.content, agent_response=message.content,
thought=message.content or "I am thinking about how to help you", thought=message.content or "I am thinking about how to help you",
@ -400,8 +411,9 @@ class CotAgentRunner(BaseAgentRunner, ABC):
except: except:
pass pass
elif isinstance(message, ToolPromptMessage): elif isinstance(message, ToolPromptMessage):
if current_scratchpad: if not current_scratchpad:
assert isinstance(message.content, str) continue
if isinstance(message.content, str):
current_scratchpad.observation = message.content current_scratchpad.observation = message.content
else: else:
raise NotImplementedError("expected str type") raise NotImplementedError("expected str type")

View File

@ -19,8 +19,8 @@ class CotChatAgentRunner(CotAgentRunner):
""" """
Organize system prompt Organize system prompt
""" """
assert self.app_config.agent if not self.app_config.agent:
assert self.app_config.agent.prompt raise ValueError("Agent configuration is not set")
prompt_entity = self.app_config.agent.prompt prompt_entity = self.app_config.agent.prompt
if not prompt_entity: if not prompt_entity:
@ -83,10 +83,8 @@ class CotChatAgentRunner(CotAgentRunner):
assistant_message.content = "" # FIXME: type check tell mypy that assistant_message.content is str assistant_message.content = "" # FIXME: type check tell mypy that assistant_message.content is str
for unit in agent_scratchpad: for unit in agent_scratchpad:
if unit.is_final(): if unit.is_final():
assert isinstance(assistant_message.content, str)
assistant_message.content += f"Final Answer: {unit.agent_response}" assistant_message.content += f"Final Answer: {unit.agent_response}"
else: else:
assert isinstance(assistant_message.content, str)
assistant_message.content += f"Thought: {unit.thought}\n\n" assistant_message.content += f"Thought: {unit.thought}\n\n"
if unit.action_str: if unit.action_str:
assistant_message.content += f"Action: {unit.action_str}\n\n" assistant_message.content += f"Action: {unit.action_str}\n\n"

View File

@ -1,9 +1,7 @@
from enum import StrEnum from enum import Enum
from typing import Any, Optional, Union from typing import Any, Literal, Optional, Union
from pydantic import BaseModel, Field from pydantic import BaseModel
from core.tools.entities.tool_entities import ToolInvokeMessage, ToolProviderType
class AgentToolEntity(BaseModel): class AgentToolEntity(BaseModel):
@ -11,11 +9,10 @@ class AgentToolEntity(BaseModel):
Agent Tool Entity. Agent Tool Entity.
""" """
provider_type: ToolProviderType provider_type: Literal["builtin", "api", "workflow"]
provider_id: str provider_id: str
tool_name: str tool_name: str
tool_parameters: dict[str, Any] = Field(default_factory=dict) tool_parameters: dict[str, Any] = {}
plugin_unique_identifier: str | None = None
class AgentPromptEntity(BaseModel): class AgentPromptEntity(BaseModel):
@ -69,7 +66,7 @@ class AgentEntity(BaseModel):
Agent Entity. Agent Entity.
""" """
class Strategy(StrEnum): class Strategy(Enum):
""" """
Agent Strategy. Agent Strategy.
""" """
@ -81,13 +78,5 @@ class AgentEntity(BaseModel):
model: str model: str
strategy: Strategy strategy: Strategy
prompt: Optional[AgentPromptEntity] = None prompt: Optional[AgentPromptEntity] = None
tools: Optional[list[AgentToolEntity]] = None tools: list[AgentToolEntity] | None = None
max_iteration: int = 5 max_iteration: int = 5
class AgentInvokeMessage(ToolInvokeMessage):
"""
Agent Invoke Message.
"""
pass

View File

@ -46,20 +46,18 @@ class FunctionCallAgentRunner(BaseAgentRunner):
# convert tools into ModelRuntime Tool format # convert tools into ModelRuntime Tool format
tool_instances, prompt_messages_tools = self._init_prompt_tools() tool_instances, prompt_messages_tools = self._init_prompt_tools()
assert app_config.agent
iteration_step = 1 iteration_step = 1
max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1 max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1
# continue to run until there is not any tool call # continue to run until there is not any tool call
function_call_state = True function_call_state = True
llm_usage: dict[str, Optional[LLMUsage]] = {"usage": None} llm_usage: dict[str, LLMUsage] = {"usage": LLMUsage.empty_usage()}
final_answer = "" final_answer = ""
# get tracing instance # get tracing instance
trace_manager = app_generate_entity.trace_manager trace_manager = app_generate_entity.trace_manager
def increase_usage(final_llm_usage_dict: dict[str, Optional[LLMUsage]], usage: LLMUsage): def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
if not final_llm_usage_dict["usage"]: if not final_llm_usage_dict["usage"]:
final_llm_usage_dict["usage"] = usage final_llm_usage_dict["usage"] = usage
else: else:
@ -109,7 +107,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
current_llm_usage = None current_llm_usage = None
if isinstance(chunks, Generator): if self.stream_tool_call and isinstance(chunks, Generator):
is_first_chunk = True is_first_chunk = True
for chunk in chunks: for chunk in chunks:
if is_first_chunk: if is_first_chunk:
@ -126,7 +124,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
tool_call_inputs = json.dumps( tool_call_inputs = json.dumps(
{tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False
) )
except json.JSONDecodeError: except json.JSONDecodeError as e:
# ensure ascii to avoid encoding error # ensure ascii to avoid encoding error
tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls}) tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls})
@ -142,7 +140,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
current_llm_usage = chunk.delta.usage current_llm_usage = chunk.delta.usage
yield chunk yield chunk
else: elif not self.stream_tool_call and isinstance(chunks, LLMResult):
result = chunks result = chunks
# check if there is any tool call # check if there is any tool call
if self.check_blocking_tool_calls(result): if self.check_blocking_tool_calls(result):
@ -153,7 +151,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
tool_call_inputs = json.dumps( tool_call_inputs = json.dumps(
{tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False
) )
except json.JSONDecodeError: except json.JSONDecodeError as e:
# ensure ascii to avoid encoding error # ensure ascii to avoid encoding error
tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls}) tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls})
@ -185,6 +183,8 @@ class FunctionCallAgentRunner(BaseAgentRunner):
usage=result.usage, usage=result.usage,
), ),
) )
else:
raise RuntimeError(f"invalid chunks type: {type(chunks)}")
assistant_message = AssistantPromptMessage(content="", tool_calls=[]) assistant_message = AssistantPromptMessage(content="", tool_calls=[])
if tool_calls: if tool_calls:
@ -243,12 +243,15 @@ class FunctionCallAgentRunner(BaseAgentRunner):
invoke_from=self.application_generate_entity.invoke_from, invoke_from=self.application_generate_entity.invoke_from,
agent_tool_callback=self.agent_callback, agent_tool_callback=self.agent_callback,
trace_manager=trace_manager, trace_manager=trace_manager,
app_id=self.application_generate_entity.app_config.app_id,
message_id=self.message.id,
conversation_id=self.conversation.id,
) )
# publish files # publish files
for message_file_id in message_files: for message_file_id, save_as in message_files:
if save_as:
if self.variables_pool:
self.variables_pool.set_file(
tool_name=tool_call_name, value=message_file_id, name=save_as
)
# publish message file # publish message file
self.queue_manager.publish( self.queue_manager.publish(
QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER
@ -300,6 +303,8 @@ class FunctionCallAgentRunner(BaseAgentRunner):
iteration_step += 1 iteration_step += 1
if self.variables_pool and self.db_variables_pool:
self.update_db_variables(self.variables_pool, self.db_variables_pool)
# publish end event # publish end event
self.queue_manager.publish( self.queue_manager.publish(
QueueMessageEndEvent( QueueMessageEndEvent(
@ -330,7 +335,9 @@ class FunctionCallAgentRunner(BaseAgentRunner):
return True return True
return False return False
def extract_tool_calls(self, llm_result_chunk: LLMResultChunk) -> list[tuple[str, str, dict[str, Any]]]: def extract_tool_calls(
self, llm_result_chunk: LLMResultChunk
) -> Union[None, list[tuple[str, str, dict[str, Any]]]]:
""" """
Extract tool calls from llm result chunk Extract tool calls from llm result chunk
@ -353,7 +360,7 @@ class FunctionCallAgentRunner(BaseAgentRunner):
return tool_calls return tool_calls
def extract_blocking_tool_calls(self, llm_result: LLMResult) -> list[tuple[str, str, dict[str, Any]]]: def extract_blocking_tool_calls(self, llm_result: LLMResult) -> Union[None, list[tuple[str, str, dict[str, Any]]]]:
""" """
Extract blocking tool calls from llm result Extract blocking tool calls from llm result
@ -376,7 +383,9 @@ class FunctionCallAgentRunner(BaseAgentRunner):
return tool_calls return tool_calls
def _init_system_message(self, prompt_template: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: def _init_system_message(
self, prompt_template: str, prompt_messages: Optional[list[PromptMessage]] = None
) -> list[PromptMessage]:
""" """
Initialize system message Initialize system message
""" """

View File

@ -1,89 +0,0 @@
import enum
from typing import Any, Optional
from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
from core.entities.parameter_entities import CommonParameterType
from core.plugin.entities.parameters import (
PluginParameter,
as_normal_type,
cast_parameter_value,
init_frontend_parameter,
)
from core.tools.entities.common_entities import I18nObject
from core.tools.entities.tool_entities import (
ToolIdentity,
ToolProviderIdentity,
)
class AgentStrategyProviderIdentity(ToolProviderIdentity):
"""
Inherits from ToolProviderIdentity, without any additional fields.
"""
pass
class AgentStrategyParameter(PluginParameter):
class AgentStrategyParameterType(enum.StrEnum):
"""
Keep all the types from PluginParameterType
"""
STRING = CommonParameterType.STRING.value
NUMBER = CommonParameterType.NUMBER.value
BOOLEAN = CommonParameterType.BOOLEAN.value
SELECT = CommonParameterType.SELECT.value
SECRET_INPUT = CommonParameterType.SECRET_INPUT.value
FILE = CommonParameterType.FILE.value
FILES = CommonParameterType.FILES.value
APP_SELECTOR = CommonParameterType.APP_SELECTOR.value
MODEL_SELECTOR = CommonParameterType.MODEL_SELECTOR.value
TOOLS_SELECTOR = CommonParameterType.TOOLS_SELECTOR.value
# deprecated, should not use.
SYSTEM_FILES = CommonParameterType.SYSTEM_FILES.value
def as_normal_type(self):
return as_normal_type(self)
def cast_value(self, value: Any):
return cast_parameter_value(self, value)
type: AgentStrategyParameterType = Field(..., description="The type of the parameter")
def init_frontend_parameter(self, value: Any):
return init_frontend_parameter(self, self.type, value)
class AgentStrategyProviderEntity(BaseModel):
identity: AgentStrategyProviderIdentity
plugin_id: Optional[str] = Field(None, description="The id of the plugin")
class AgentStrategyIdentity(ToolIdentity):
"""
Inherits from ToolIdentity, without any additional fields.
"""
pass
class AgentStrategyEntity(BaseModel):
identity: AgentStrategyIdentity
parameters: list[AgentStrategyParameter] = Field(default_factory=list)
description: I18nObject = Field(..., description="The description of the agent strategy")
output_schema: Optional[dict] = None
# pydantic configs
model_config = ConfigDict(protected_namespaces=())
@field_validator("parameters", mode="before")
@classmethod
def set_parameters(cls, v, validation_info: ValidationInfo) -> list[AgentStrategyParameter]:
return v or []
class AgentProviderEntityWithPlugin(AgentStrategyProviderEntity):
strategies: list[AgentStrategyEntity] = Field(default_factory=list)

View File

@ -1,42 +0,0 @@
from abc import ABC, abstractmethod
from collections.abc import Generator, Sequence
from typing import Any, Optional
from core.agent.entities import AgentInvokeMessage
from core.agent.plugin_entities import AgentStrategyParameter
class BaseAgentStrategy(ABC):
"""
Agent Strategy
"""
def invoke(
self,
params: dict[str, Any],
user_id: str,
conversation_id: Optional[str] = None,
app_id: Optional[str] = None,
message_id: Optional[str] = None,
) -> Generator[AgentInvokeMessage, None, None]:
"""
Invoke the agent strategy.
"""
yield from self._invoke(params, user_id, conversation_id, app_id, message_id)
def get_parameters(self) -> Sequence[AgentStrategyParameter]:
"""
Get the parameters for the agent strategy.
"""
return []
@abstractmethod
def _invoke(
self,
params: dict[str, Any],
user_id: str,
conversation_id: Optional[str] = None,
app_id: Optional[str] = None,
message_id: Optional[str] = None,
) -> Generator[AgentInvokeMessage, None, None]:
pass

View File

@ -1,59 +0,0 @@
from collections.abc import Generator, Sequence
from typing import Any, Optional
from core.agent.entities import AgentInvokeMessage
from core.agent.plugin_entities import AgentStrategyEntity, AgentStrategyParameter
from core.agent.strategy.base import BaseAgentStrategy
from core.plugin.manager.agent import PluginAgentManager
from core.plugin.utils.converter import convert_parameters_to_plugin_format
class PluginAgentStrategy(BaseAgentStrategy):
"""
Agent Strategy
"""
tenant_id: str
declaration: AgentStrategyEntity
def __init__(self, tenant_id: str, declaration: AgentStrategyEntity):
self.tenant_id = tenant_id
self.declaration = declaration
def get_parameters(self) -> Sequence[AgentStrategyParameter]:
return self.declaration.parameters
def initialize_parameters(self, params: dict[str, Any]) -> dict[str, Any]:
"""
Initialize the parameters for the agent strategy.
"""
for parameter in self.declaration.parameters:
params[parameter.name] = parameter.init_frontend_parameter(params.get(parameter.name))
return params
def _invoke(
self,
params: dict[str, Any],
user_id: str,
conversation_id: Optional[str] = None,
app_id: Optional[str] = None,
message_id: Optional[str] = None,
) -> Generator[AgentInvokeMessage, None, None]:
"""
Invoke the agent strategy.
"""
manager = PluginAgentManager()
initialized_params = self.initialize_parameters(params)
params = convert_parameters_to_plugin_format(initialized_params)
yield from manager.invoke(
tenant_id=self.tenant_id,
user_id=user_id,
agent_provider=self.declaration.identity.provider,
agent_strategy=self.declaration.identity.name,
agent_params=params,
conversation_id=conversation_id,
app_id=app_id,
message_id=message_id,
)

View File

@ -4,8 +4,7 @@ from core.app.app_config.entities import EasyUIBasedAppConfig
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus from core.entities.model_entities import ModelStatus
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.entities.llm_entities import LLMMode from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.provider_manager import ProviderManager from core.provider_manager import ProviderManager
@ -64,14 +63,14 @@ class ModelConfigConverter:
stop = completion_params["stop"] stop = completion_params["stop"]
del completion_params["stop"] del completion_params["stop"]
model_schema = model_type_instance.get_model_schema(model_config.model, model_credentials)
# get model mode # get model mode
model_mode = model_config.mode model_mode = model_config.mode
if not model_mode: if not model_mode:
model_mode = LLMMode.CHAT.value mode_enum = model_type_instance.get_model_mode(model=model_config.model, credentials=model_credentials)
if model_schema and model_schema.model_properties.get(ModelPropertyKey.MODE):
model_mode = LLMMode.value_of(model_schema.model_properties[ModelPropertyKey.MODE]).value model_mode = mode_enum.value
model_schema = model_type_instance.get_model_schema(model_config.model, model_credentials)
if not model_schema: if not model_schema:
raise ValueError(f"Model {model_name} not exist.") raise ValueError(f"Model {model_name} not exist.")

View File

@ -3,8 +3,7 @@ from typing import Any
from core.app.app_config.entities import ModelConfigEntity from core.app.app_config.entities import ModelConfigEntity
from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
from core.model_runtime.model_providers.model_provider_factory import ModelProviderFactory from core.model_runtime.model_providers import model_provider_factory
from core.plugin.entities.plugin import ModelProviderID
from core.provider_manager import ProviderManager from core.provider_manager import ProviderManager
@ -54,16 +53,9 @@ class ModelConfigManager:
raise ValueError("model must be of object type") raise ValueError("model must be of object type")
# model.provider # model.provider
model_provider_factory = ModelProviderFactory(tenant_id)
provider_entities = model_provider_factory.get_providers() provider_entities = model_provider_factory.get_providers()
model_provider_names = [provider.provider for provider in provider_entities] model_provider_names = [provider.provider for provider in provider_entities]
if "provider" not in config["model"]: if "provider" not in config["model"] or config["model"]["provider"] not in model_provider_names:
raise ValueError(f"model.provider is required and must be in {str(model_provider_names)}")
if "/" not in config["model"]["provider"]:
config["model"]["provider"] = str(ModelProviderID(config["model"]["provider"]))
if config["model"]["provider"] not in model_provider_names:
raise ValueError(f"model.provider is required and must be in {str(model_provider_names)}") raise ValueError(f"model.provider is required and must be in {str(model_provider_names)}")
# model.name # model.name

View File

@ -17,8 +17,8 @@ class ModelConfigEntity(BaseModel):
provider: str provider: str
model: str model: str
mode: Optional[str] = None mode: Optional[str] = None
parameters: dict[str, Any] = Field(default_factory=dict) parameters: dict[str, Any] = {}
stop: list[str] = Field(default_factory=list) stop: list[str] = []
class AdvancedChatMessageEntity(BaseModel): class AdvancedChatMessageEntity(BaseModel):
@ -132,7 +132,7 @@ class ExternalDataVariableEntity(BaseModel):
variable: str variable: str
type: str type: str
config: dict[str, Any] = Field(default_factory=dict) config: dict[str, Any] = {}
class DatasetRetrieveConfigEntity(BaseModel): class DatasetRetrieveConfigEntity(BaseModel):
@ -188,7 +188,7 @@ class SensitiveWordAvoidanceEntity(BaseModel):
""" """
type: str type: str
config: dict[str, Any] = Field(default_factory=dict) config: dict[str, Any] = {}
class TextToSpeechEntity(BaseModel): class TextToSpeechEntity(BaseModel):

View File

@ -37,6 +37,17 @@ logger = logging.getLogger(__name__)
class AdvancedChatAppGenerator(MessageBasedAppGenerator): class AdvancedChatAppGenerator(MessageBasedAppGenerator):
_dialogue_count: int _dialogue_count: int
@overload
def generate(
self,
app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: Mapping[str, Any],
invoke_from: InvokeFrom,
streaming: Literal[True],
) -> Generator[str, None, None]: ...
@overload @overload
def generate( def generate(
self, self,
@ -54,31 +65,20 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
app_model: App, app_model: App,
workflow: Workflow, workflow: Workflow,
user: Union[Account, EndUser], user: Union[Account, EndUser],
args: Mapping, args: Mapping[str, Any],
invoke_from: InvokeFrom,
streaming: Literal[True],
) -> Generator[Mapping | str, None, None]: ...
@overload
def generate(
self,
app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: Mapping,
invoke_from: InvokeFrom,
streaming: bool,
) -> Mapping[str, Any] | Generator[str | Mapping, None, None]: ...
def generate(
self,
app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: Mapping,
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool = True, streaming: bool = True,
) -> Mapping[str, Any] | Generator[str | Mapping, None, None]: ) -> Union[Mapping[str, Any], Generator[str, None, None]]: ...
def generate(
self,
app_model: App,
workflow: Workflow,
user: Union[Account, EndUser],
args: Mapping[str, Any],
invoke_from: InvokeFrom,
streaming: bool = True,
):
""" """
Generate App response. Generate App response.
@ -140,7 +140,9 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
app_config=app_config, app_config=app_config,
file_upload_config=file_extra_config, file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None, conversation_id=conversation.id if conversation else None,
inputs=self._prepare_user_inputs( inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
), ),
query=query, query=query,
@ -154,8 +156,6 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
workflow_run_id=workflow_run_id, workflow_run_id=workflow_run_id,
) )
contexts.tenant_id.set(application_generate_entity.app_config.tenant_id) contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)
contexts.plugin_tool_providers.set({})
contexts.plugin_tool_providers_lock.set(threading.Lock())
return self._generate( return self._generate(
workflow=workflow, workflow=workflow,
@ -167,14 +167,8 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
) )
def single_iteration_generate( def single_iteration_generate(
self, self, app_model: App, workflow: Workflow, node_id: str, user: Account, args: dict, streaming: bool = True
app_model: App, ) -> Mapping[str, Any] | Generator[str, None, None]:
workflow: Workflow,
node_id: str,
user: Account | EndUser,
args: Mapping,
streaming: bool = True,
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]:
""" """
Generate App response. Generate App response.
@ -211,8 +205,6 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
), ),
) )
contexts.tenant_id.set(application_generate_entity.app_config.tenant_id) contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)
contexts.plugin_tool_providers.set({})
contexts.plugin_tool_providers_lock.set(threading.Lock())
return self._generate( return self._generate(
workflow=workflow, workflow=workflow,
@ -232,7 +224,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
application_generate_entity: AdvancedChatAppGenerateEntity, application_generate_entity: AdvancedChatAppGenerateEntity,
conversation: Optional[Conversation] = None, conversation: Optional[Conversation] = None,
stream: bool = True, stream: bool = True,
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]: ) -> Mapping[str, Any] | Generator[str, None, None]:
""" """
Generate App response. Generate App response.

View File

@ -56,7 +56,7 @@ def _process_future(
class AppGeneratorTTSPublisher: class AppGeneratorTTSPublisher:
def __init__(self, tenant_id: str, voice: str, language: Optional[str] = None): def __init__(self, tenant_id: str, voice: str):
self.logger = logging.getLogger(__name__) self.logger = logging.getLogger(__name__)
self.tenant_id = tenant_id self.tenant_id = tenant_id
self.msg_text = "" self.msg_text = ""
@ -67,7 +67,7 @@ class AppGeneratorTTSPublisher:
self.model_instance = self.model_manager.get_default_model_instance( self.model_instance = self.model_manager.get_default_model_instance(
tenant_id=self.tenant_id, model_type=ModelType.TTS tenant_id=self.tenant_id, model_type=ModelType.TTS
) )
self.voices = self.model_instance.get_tts_voices(language=language) self.voices = self.model_instance.get_tts_voices()
values = [voice.get("value") for voice in self.voices] values = [voice.get("value") for voice in self.voices]
self.voice = voice self.voice = voice
if not voice or voice not in values: if not voice or voice not in values:

View File

@ -77,7 +77,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner):
graph, variable_pool = self._get_graph_and_variable_pool_of_single_iteration( graph, variable_pool = self._get_graph_and_variable_pool_of_single_iteration(
workflow=workflow, workflow=workflow,
node_id=self.application_generate_entity.single_iteration_run.node_id, node_id=self.application_generate_entity.single_iteration_run.node_id,
user_inputs=dict(self.application_generate_entity.single_iteration_run.inputs), user_inputs=self.application_generate_entity.single_iteration_run.inputs,
) )
else: else:
inputs = self.application_generate_entity.inputs inputs = self.application_generate_entity.inputs

View File

@ -1,3 +1,4 @@
import json
from collections.abc import Generator from collections.abc import Generator
from typing import Any, cast from typing import Any, cast
@ -57,7 +58,7 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod @classmethod
def convert_stream_full_response( def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls, stream_response: Generator[AppStreamResponse, None, None]
) -> Generator[dict | str, Any, None]: ) -> Generator[str, Any, None]:
""" """
Convert stream full response. Convert stream full response.
:param stream_response: stream response :param stream_response: stream response
@ -83,12 +84,12 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
response_chunk.update(data) response_chunk.update(data)
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)
@classmethod @classmethod
def convert_stream_simple_response( def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls, stream_response: Generator[AppStreamResponse, None, None]
) -> Generator[dict | str, Any, None]: ) -> Generator[str, Any, None]:
""" """
Convert stream simple response. Convert stream simple response.
:param stream_response: stream response :param stream_response: stream response
@ -122,4 +123,4 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter):
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)

View File

@ -17,7 +17,6 @@ from core.app.entities.app_invoke_entities import (
) )
from core.app.entities.queue_entities import ( from core.app.entities.queue_entities import (
QueueAdvancedChatMessageEndEvent, QueueAdvancedChatMessageEndEvent,
QueueAgentLogEvent,
QueueAnnotationReplyEvent, QueueAnnotationReplyEvent,
QueueErrorEvent, QueueErrorEvent,
QueueIterationCompletedEvent, QueueIterationCompletedEvent,
@ -220,9 +219,7 @@ class AdvancedChatAppGenerateTaskPipeline:
and features_dict["text_to_speech"].get("enabled") and features_dict["text_to_speech"].get("enabled")
and features_dict["text_to_speech"].get("autoPlay") == "enabled" and features_dict["text_to_speech"].get("autoPlay") == "enabled"
): ):
tts_publisher = AppGeneratorTTSPublisher( tts_publisher = AppGeneratorTTSPublisher(tenant_id, features_dict["text_to_speech"].get("voice"))
tenant_id, features_dict["text_to_speech"].get("voice"), features_dict["text_to_speech"].get("language")
)
for response in self._process_stream_response(tts_publisher=tts_publisher, trace_manager=trace_manager): for response in self._process_stream_response(tts_publisher=tts_publisher, trace_manager=trace_manager):
while True: while True:
@ -250,7 +247,7 @@ class AdvancedChatAppGenerateTaskPipeline:
else: else:
start_listener_time = time.time() start_listener_time = time.time()
yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id) yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id)
except Exception: except Exception as e:
logger.exception(f"Failed to listen audio message, task_id: {task_id}") logger.exception(f"Failed to listen audio message, task_id: {task_id}")
break break
if tts_publisher: if tts_publisher:
@ -643,10 +640,6 @@ class AdvancedChatAppGenerateTaskPipeline:
session.commit() session.commit()
yield self._message_end_to_stream_response() yield self._message_end_to_stream_response()
elif isinstance(event, QueueAgentLogEvent):
yield self._workflow_cycle_manager._handle_agent_log(
task_id=self._application_generate_entity.task_id, event=event
)
else: else:
continue continue

View File

@ -1,4 +1,3 @@
import contextvars
import logging import logging
import threading import threading
import uuid import uuid
@ -30,6 +29,17 @@ logger = logging.getLogger(__name__)
class AgentChatAppGenerator(MessageBasedAppGenerator): class AgentChatAppGenerator(MessageBasedAppGenerator):
@overload
def generate(
self,
*,
app_model: App,
user: Union[Account, EndUser],
args: Mapping[str, Any],
invoke_from: InvokeFrom,
streaming: Literal[True],
) -> Generator[str, None, None]: ...
@overload @overload
def generate( def generate(
self, self,
@ -41,17 +51,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
streaming: Literal[False], streaming: Literal[False],
) -> Mapping[str, Any]: ... ) -> Mapping[str, Any]: ...
@overload
def generate(
self,
*,
app_model: App,
user: Union[Account, EndUser],
args: Mapping[str, Any],
invoke_from: InvokeFrom,
streaming: Literal[True],
) -> Generator[Mapping | str, None, None]: ...
@overload @overload
def generate( def generate(
self, self,
@ -61,7 +60,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool, streaming: bool,
) -> Union[Mapping, Generator[Mapping | str, None, None]]: ... ) -> Mapping[str, Any] | Generator[str, None, None]: ...
def generate( def generate(
self, self,
@ -71,7 +70,7 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool = True, streaming: bool = True,
) -> Union[Mapping, Generator[Mapping | str, None, None]]: ):
""" """
Generate App response. Generate App response.
@ -149,7 +148,9 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
model_conf=ModelConfigConverter.convert(app_config), model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config, file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None, conversation_id=conversation.id if conversation else None,
inputs=self._prepare_user_inputs( inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
), ),
query=query, query=query,
@ -181,7 +182,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
target=self._generate_worker, target=self._generate_worker,
kwargs={ kwargs={
"flask_app": current_app._get_current_object(), # type: ignore "flask_app": current_app._get_current_object(), # type: ignore
"context": contextvars.copy_context(),
"application_generate_entity": application_generate_entity, "application_generate_entity": application_generate_entity,
"queue_manager": queue_manager, "queue_manager": queue_manager,
"conversation_id": conversation.id, "conversation_id": conversation.id,
@ -206,7 +206,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
def _generate_worker( def _generate_worker(
self, self,
flask_app: Flask, flask_app: Flask,
context: contextvars.Context,
application_generate_entity: AgentChatAppGenerateEntity, application_generate_entity: AgentChatAppGenerateEntity,
queue_manager: AppQueueManager, queue_manager: AppQueueManager,
conversation_id: str, conversation_id: str,
@ -221,9 +220,6 @@ class AgentChatAppGenerator(MessageBasedAppGenerator):
:param message_id: message ID :param message_id: message ID
:return: :return:
""" """
for var, val in context.items():
var.set(val)
with flask_app.app_context(): with flask_app.app_context():
try: try:
# get conversation and message # get conversation and message

View File

@ -8,16 +8,18 @@ from core.agent.fc_agent_runner import FunctionCallAgentRunner
from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig
from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom
from core.app.apps.base_app_runner import AppRunner from core.app.apps.base_app_runner import AppRunner
from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity, ModelConfigWithCredentialsEntity
from core.app.entities.queue_entities import QueueAnnotationReplyEvent from core.app.entities.queue_entities import QueueAnnotationReplyEvent
from core.memory.token_buffer_memory import TokenBufferMemory from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMMode from core.model_runtime.entities.llm_entities import LLMMode, LLMUsage
from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.moderation.base import ModerationError from core.moderation.base import ModerationError
from core.tools.entities.tool_entities import ToolRuntimeVariablePool
from extensions.ext_database import db from extensions.ext_database import db
from models.model import App, Conversation, Message from models.model import App, Conversation, Message, MessageAgentThought
from models.tools import ToolConversationVariables
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -62,8 +64,8 @@ class AgentChatAppRunner(AppRunner):
app_record=app_record, app_record=app_record,
model_config=application_generate_entity.model_conf, model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template, prompt_template_entity=app_config.prompt_template,
inputs=dict(inputs), inputs=inputs,
files=list(files), files=files,
query=query, query=query,
) )
@ -84,8 +86,8 @@ class AgentChatAppRunner(AppRunner):
app_record=app_record, app_record=app_record,
model_config=application_generate_entity.model_conf, model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template, prompt_template_entity=app_config.prompt_template,
inputs=dict(inputs), inputs=inputs,
files=list(files), files=files,
query=query, query=query,
memory=memory, memory=memory,
) )
@ -97,8 +99,8 @@ class AgentChatAppRunner(AppRunner):
app_id=app_record.id, app_id=app_record.id,
tenant_id=app_config.tenant_id, tenant_id=app_config.tenant_id,
app_generate_entity=application_generate_entity, app_generate_entity=application_generate_entity,
inputs=dict(inputs), inputs=inputs,
query=query or "", query=query,
message_id=message.id, message_id=message.id,
) )
except ModerationError as e: except ModerationError as e:
@ -154,9 +156,9 @@ class AgentChatAppRunner(AppRunner):
app_record=app_record, app_record=app_record,
model_config=application_generate_entity.model_conf, model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template, prompt_template_entity=app_config.prompt_template,
inputs=dict(inputs), inputs=inputs,
files=list(files), files=files,
query=query or "", query=query,
memory=memory, memory=memory,
) )
@ -171,7 +173,16 @@ class AgentChatAppRunner(AppRunner):
return return
agent_entity = app_config.agent agent_entity = app_config.agent
assert agent_entity is not None if not agent_entity:
raise ValueError("Agent entity not found")
# load tool variables
tool_conversation_variables = self._load_tool_variables(
conversation_id=conversation.id, user_id=application_generate_entity.user_id, tenant_id=app_config.tenant_id
)
# convert db variables to tool variables
tool_variables = self._convert_db_variables_to_tool_variables(tool_conversation_variables)
# init model instance # init model instance
model_instance = ModelInstance( model_instance = ModelInstance(
@ -182,9 +193,9 @@ class AgentChatAppRunner(AppRunner):
app_record=app_record, app_record=app_record,
model_config=application_generate_entity.model_conf, model_config=application_generate_entity.model_conf,
prompt_template_entity=app_config.prompt_template, prompt_template_entity=app_config.prompt_template,
inputs=dict(inputs), inputs=inputs,
files=list(files), files=files,
query=query or "", query=query,
memory=memory, memory=memory,
) )
@ -232,6 +243,8 @@ class AgentChatAppRunner(AppRunner):
user_id=application_generate_entity.user_id, user_id=application_generate_entity.user_id,
memory=memory, memory=memory,
prompt_messages=prompt_message, prompt_messages=prompt_message,
variables_pool=tool_variables,
db_variables=tool_conversation_variables,
model_instance=model_instance, model_instance=model_instance,
) )
@ -248,3 +261,73 @@ class AgentChatAppRunner(AppRunner):
stream=application_generate_entity.stream, stream=application_generate_entity.stream,
agent=True, agent=True,
) )
def _load_tool_variables(self, conversation_id: str, user_id: str, tenant_id: str) -> ToolConversationVariables:
"""
load tool variables from database
"""
tool_variables: ToolConversationVariables | None = (
db.session.query(ToolConversationVariables)
.filter(
ToolConversationVariables.conversation_id == conversation_id,
ToolConversationVariables.tenant_id == tenant_id,
)
.first()
)
if tool_variables:
# save tool variables to session, so that we can update it later
db.session.add(tool_variables)
else:
# create new tool variables
tool_variables = ToolConversationVariables(
conversation_id=conversation_id,
user_id=user_id,
tenant_id=tenant_id,
variables_str="[]",
)
db.session.add(tool_variables)
db.session.commit()
return tool_variables
def _convert_db_variables_to_tool_variables(
self, db_variables: ToolConversationVariables
) -> ToolRuntimeVariablePool:
"""
convert db variables to tool variables
"""
return ToolRuntimeVariablePool(
**{
"conversation_id": db_variables.conversation_id,
"user_id": db_variables.user_id,
"tenant_id": db_variables.tenant_id,
"pool": db_variables.variables,
}
)
def _get_usage_of_all_agent_thoughts(
self, model_config: ModelConfigWithCredentialsEntity, message: Message
) -> LLMUsage:
"""
Get usage of all agent thoughts
:param model_config: model config
:param message: message
:return:
"""
agent_thoughts = (
db.session.query(MessageAgentThought).filter(MessageAgentThought.message_id == message.id).all()
)
all_message_tokens = 0
all_answer_tokens = 0
for agent_thought in agent_thoughts:
all_message_tokens += agent_thought.message_tokens
all_answer_tokens += agent_thought.answer_tokens
model_type_instance = model_config.provider_model_bundle.model_type_instance
model_type_instance = cast(LargeLanguageModel, model_type_instance)
return model_type_instance._calc_response_usage(
model_config.model, model_config.credentials, all_message_tokens, all_answer_tokens
)

View File

@ -1,9 +1,9 @@
import json
from collections.abc import Generator from collections.abc import Generator
from typing import cast from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
from core.app.entities.task_entities import ( from core.app.entities.task_entities import (
AppStreamResponse,
ChatbotAppBlockingResponse, ChatbotAppBlockingResponse,
ChatbotAppStreamResponse, ChatbotAppStreamResponse,
ErrorStreamResponse, ErrorStreamResponse,
@ -51,9 +51,10 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter):
return response return response
@classmethod @classmethod
def convert_stream_full_response( def convert_stream_full_response( # type: ignore[override]
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[ChatbotAppStreamResponse, None, None],
) -> Generator[str, None, None]:
""" """
Convert stream full response. Convert stream full response.
:param stream_response: stream response :param stream_response: stream response
@ -79,12 +80,13 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter):
response_chunk.update(data) response_chunk.update(data)
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)
@classmethod @classmethod
def convert_stream_simple_response( def convert_stream_simple_response( # type: ignore[override]
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[ChatbotAppStreamResponse, None, None],
) -> Generator[str, None, None]:
""" """
Convert stream simple response. Convert stream simple response.
:param stream_response: stream response :param stream_response: stream response
@ -116,4 +118,4 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter):
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)

View File

@ -14,15 +14,21 @@ class AppGenerateResponseConverter(ABC):
@classmethod @classmethod
def convert( def convert(
cls, response: Union[AppBlockingResponse, Generator[AppStreamResponse, Any, None]], invoke_from: InvokeFrom cls,
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]: response: Union[AppBlockingResponse, Generator[AppStreamResponse, Any, None]],
invoke_from: InvokeFrom,
) -> Mapping[str, Any] | Generator[str, None, None]:
if invoke_from in {InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API}: if invoke_from in {InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API}:
if isinstance(response, AppBlockingResponse): if isinstance(response, AppBlockingResponse):
return cls.convert_blocking_full_response(response) return cls.convert_blocking_full_response(response)
else: else:
def _generate_full_response() -> Generator[dict | str, Any, None]: def _generate_full_response() -> Generator[str, Any, None]:
yield from cls.convert_stream_full_response(response) for chunk in cls.convert_stream_full_response(response):
if chunk == "ping":
yield f"event: {chunk}\n\n"
else:
yield f"data: {chunk}\n\n"
return _generate_full_response() return _generate_full_response()
else: else:
@ -30,8 +36,12 @@ class AppGenerateResponseConverter(ABC):
return cls.convert_blocking_simple_response(response) return cls.convert_blocking_simple_response(response)
else: else:
def _generate_simple_response() -> Generator[dict | str, Any, None]: def _generate_simple_response() -> Generator[str, Any, None]:
yield from cls.convert_stream_simple_response(response) for chunk in cls.convert_stream_simple_response(response):
if chunk == "ping":
yield f"event: {chunk}\n\n"
else:
yield f"data: {chunk}\n\n"
return _generate_simple_response() return _generate_simple_response()
@ -49,14 +59,14 @@ class AppGenerateResponseConverter(ABC):
@abstractmethod @abstractmethod
def convert_stream_full_response( def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls, stream_response: Generator[AppStreamResponse, None, None]
) -> Generator[dict | str, None, None]: ) -> Generator[str, None, None]:
raise NotImplementedError raise NotImplementedError
@classmethod @classmethod
@abstractmethod @abstractmethod
def convert_stream_simple_response( def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls, stream_response: Generator[AppStreamResponse, None, None]
) -> Generator[dict | str, None, None]: ) -> Generator[str, None, None]:
raise NotImplementedError raise NotImplementedError
@classmethod @classmethod

View File

@ -1,6 +1,5 @@
import json from collections.abc import Mapping, Sequence
from collections.abc import Generator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Optional
from typing import TYPE_CHECKING, Any, Optional, Union
from core.app.app_config.entities import VariableEntityType from core.app.app_config.entities import VariableEntityType
from core.file import File, FileUploadConfig from core.file import File, FileUploadConfig
@ -139,21 +138,3 @@ class BaseAppGenerator:
if isinstance(value, str): if isinstance(value, str):
return value.replace("\x00", "") return value.replace("\x00", "")
return value return value
@classmethod
def convert_to_event_stream(cls, generator: Union[Mapping, Generator[Mapping | str, None, None]]):
"""
Convert messages into event stream
"""
if isinstance(generator, dict):
return generator
else:
def gen():
for message in generator:
if isinstance(message, (Mapping, dict)):
yield f"data: {json.dumps(message)}\n\n"
else:
yield f"event: {message}\n\n"
return gen()

View File

@ -2,7 +2,7 @@ import queue
import time import time
from abc import abstractmethod from abc import abstractmethod
from enum import Enum from enum import Enum
from typing import Any, Optional from typing import Any
from sqlalchemy.orm import DeclarativeMeta from sqlalchemy.orm import DeclarativeMeta
@ -115,7 +115,7 @@ class AppQueueManager:
Set task stop flag Set task stop flag
:return: :return:
""" """
result: Optional[Any] = redis_client.get(cls._generate_task_belong_cache_key(task_id)) result = redis_client.get(cls._generate_task_belong_cache_key(task_id))
if result is None: if result is None:
return return

View File

@ -38,7 +38,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: Literal[True], streaming: Literal[True],
) -> Generator[Mapping | str, None, None]: ... ) -> Generator[str, None, None]: ...
@overload @overload
def generate( def generate(
@ -58,7 +58,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool, streaming: bool,
) -> Union[Mapping[str, Any], Generator[Mapping[str, Any] | str, None, None]]: ... ) -> Union[Mapping[str, Any], Generator[str, None, None]]: ...
def generate( def generate(
self, self,
@ -67,7 +67,7 @@ class ChatAppGenerator(MessageBasedAppGenerator):
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool = True, streaming: bool = True,
) -> Union[Mapping[str, Any], Generator[Mapping[str, Any] | str, None, None]]: ):
""" """
Generate App response. Generate App response.
@ -141,7 +141,9 @@ class ChatAppGenerator(MessageBasedAppGenerator):
model_conf=ModelConfigConverter.convert(app_config), model_conf=ModelConfigConverter.convert(app_config),
file_upload_config=file_extra_config, file_upload_config=file_extra_config,
conversation_id=conversation.id if conversation else None, conversation_id=conversation.id if conversation else None,
inputs=self._prepare_user_inputs( inputs=conversation.inputs
if conversation
else self._prepare_user_inputs(
user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id user_inputs=inputs, variables=app_config.variables, tenant_id=app_model.tenant_id
), ),
query=query, query=query,

View File

@ -1,9 +1,9 @@
import json
from collections.abc import Generator from collections.abc import Generator
from typing import cast from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
from core.app.entities.task_entities import ( from core.app.entities.task_entities import (
AppStreamResponse,
ChatbotAppBlockingResponse, ChatbotAppBlockingResponse,
ChatbotAppStreamResponse, ChatbotAppStreamResponse,
ErrorStreamResponse, ErrorStreamResponse,
@ -52,8 +52,9 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod @classmethod
def convert_stream_full_response( def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[ChatbotAppStreamResponse, None, None], # type: ignore[override]
) -> Generator[str, None, None]:
""" """
Convert stream full response. Convert stream full response.
:param stream_response: stream response :param stream_response: stream response
@ -79,12 +80,13 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
response_chunk.update(data) response_chunk.update(data)
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)
@classmethod @classmethod
def convert_stream_simple_response( def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[ChatbotAppStreamResponse, None, None], # type: ignore[override]
) -> Generator[str, None, None]:
""" """
Convert stream simple response. Convert stream simple response.
:param stream_response: stream response :param stream_response: stream response
@ -116,4 +118,4 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter):
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)

View File

@ -37,7 +37,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: Literal[True], streaming: Literal[True],
) -> Generator[str | Mapping[str, Any], None, None]: ... ) -> Generator[str, None, None]: ...
@overload @overload
def generate( def generate(
@ -56,8 +56,8 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
user: Union[Account, EndUser], user: Union[Account, EndUser],
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool = False, streaming: bool,
) -> Union[Mapping[str, Any], Generator[str | Mapping[str, Any], None, None]]: ... ) -> Mapping[str, Any] | Generator[str, None, None]: ...
def generate( def generate(
self, self,
@ -66,7 +66,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool = True, streaming: bool = True,
) -> Union[Mapping[str, Any], Generator[str | Mapping[str, Any], None, None]]: ):
""" """
Generate App response. Generate App response.
@ -231,7 +231,7 @@ class CompletionAppGenerator(MessageBasedAppGenerator):
user: Union[Account, EndUser], user: Union[Account, EndUser],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
stream: bool = True, stream: bool = True,
) -> Union[Mapping, Generator[Mapping | str, None, None]]: ) -> Union[Mapping[str, Any], Generator[str, None, None]]:
""" """
Generate App response. Generate App response.

View File

@ -1,9 +1,9 @@
import json
from collections.abc import Generator from collections.abc import Generator
from typing import cast from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
from core.app.entities.task_entities import ( from core.app.entities.task_entities import (
AppStreamResponse,
CompletionAppBlockingResponse, CompletionAppBlockingResponse,
CompletionAppStreamResponse, CompletionAppStreamResponse,
ErrorStreamResponse, ErrorStreamResponse,
@ -51,8 +51,9 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod @classmethod
def convert_stream_full_response( def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[CompletionAppStreamResponse, None, None], # type: ignore[override]
) -> Generator[str, None, None]:
""" """
Convert stream full response. Convert stream full response.
:param stream_response: stream response :param stream_response: stream response
@ -77,12 +78,13 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter):
response_chunk.update(data) response_chunk.update(data)
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)
@classmethod @classmethod
def convert_stream_simple_response( def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[CompletionAppStreamResponse, None, None], # type: ignore[override]
) -> Generator[str, None, None]:
""" """
Convert stream simple response. Convert stream simple response.
:param stream_response: stream response :param stream_response: stream response
@ -113,4 +115,4 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter):
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)

View File

@ -42,6 +42,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
ChatAppGenerateEntity, ChatAppGenerateEntity,
CompletionAppGenerateEntity, CompletionAppGenerateEntity,
AgentChatAppGenerateEntity, AgentChatAppGenerateEntity,
AgentChatAppGenerateEntity,
], ],
queue_manager: AppQueueManager, queue_manager: AppQueueManager,
conversation: Conversation, conversation: Conversation,

View File

@ -36,13 +36,13 @@ class WorkflowAppGenerator(BaseAppGenerator):
*, *,
app_model: App, app_model: App,
workflow: Workflow, workflow: Workflow,
user: Union[Account, EndUser], user: Account | EndUser,
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: Literal[True], streaming: Literal[True],
call_depth: int, call_depth: int = 0,
workflow_thread_pool_id: Optional[str], workflow_thread_pool_id: Optional[str] = None,
) -> Generator[Mapping | str, None, None]: ... ) -> Generator[str, None, None]: ...
@overload @overload
def generate( def generate(
@ -50,12 +50,12 @@ class WorkflowAppGenerator(BaseAppGenerator):
*, *,
app_model: App, app_model: App,
workflow: Workflow, workflow: Workflow,
user: Union[Account, EndUser], user: Account | EndUser,
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: Literal[False], streaming: Literal[False],
call_depth: int, call_depth: int = 0,
workflow_thread_pool_id: Optional[str], workflow_thread_pool_id: Optional[str] = None,
) -> Mapping[str, Any]: ... ) -> Mapping[str, Any]: ...
@overload @overload
@ -64,26 +64,26 @@ class WorkflowAppGenerator(BaseAppGenerator):
*, *,
app_model: App, app_model: App,
workflow: Workflow, workflow: Workflow,
user: Union[Account, EndUser], user: Account | EndUser,
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool, streaming: bool = True,
call_depth: int, call_depth: int = 0,
workflow_thread_pool_id: Optional[str], workflow_thread_pool_id: Optional[str] = None,
) -> Union[Mapping[str, Any], Generator[Mapping | str, None, None]]: ... ) -> Mapping[str, Any] | Generator[str, None, None]: ...
def generate( def generate(
self, self,
*, *,
app_model: App, app_model: App,
workflow: Workflow, workflow: Workflow,
user: Union[Account, EndUser], user: Account | EndUser,
args: Mapping[str, Any], args: Mapping[str, Any],
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool = True, streaming: bool = True,
call_depth: int = 0, call_depth: int = 0,
workflow_thread_pool_id: Optional[str] = None, workflow_thread_pool_id: Optional[str] = None,
) -> Union[Mapping[str, Any], Generator[Mapping | str, None, None]]: ):
files: Sequence[Mapping[str, Any]] = args.get("files") or [] files: Sequence[Mapping[str, Any]] = args.get("files") or []
# parse files # parse files
@ -124,10 +124,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
trace_manager=trace_manager, trace_manager=trace_manager,
workflow_run_id=workflow_run_id, workflow_run_id=workflow_run_id,
) )
contexts.tenant_id.set(application_generate_entity.app_config.tenant_id) contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)
contexts.plugin_tool_providers.set({})
contexts.plugin_tool_providers_lock.set(threading.Lock())
return self._generate( return self._generate(
app_model=app_model, app_model=app_model,
@ -149,18 +146,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
invoke_from: InvokeFrom, invoke_from: InvokeFrom,
streaming: bool = True, streaming: bool = True,
workflow_thread_pool_id: Optional[str] = None, workflow_thread_pool_id: Optional[str] = None,
) -> Union[Mapping[str, Any], Generator[str | Mapping[str, Any], None, None]]: ) -> Mapping[str, Any] | Generator[str, None, None]:
"""
Generate App response.
:param app_model: App
:param workflow: Workflow
:param user: account or end user
:param application_generate_entity: application generate entity
:param invoke_from: invoke from source
:param stream: is stream
:param workflow_thread_pool_id: workflow thread pool id
"""
# init queue manager # init queue manager
queue_manager = WorkflowAppQueueManager( queue_manager = WorkflowAppQueueManager(
task_id=application_generate_entity.task_id, task_id=application_generate_entity.task_id,
@ -199,10 +185,10 @@ class WorkflowAppGenerator(BaseAppGenerator):
app_model: App, app_model: App,
workflow: Workflow, workflow: Workflow,
node_id: str, node_id: str,
user: Account | EndUser, user: Account,
args: Mapping[str, Any], args: Mapping[str, Any],
streaming: bool = True, streaming: bool = True,
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], None, None]: ) -> Mapping[str, Any] | Generator[str, None, None]:
""" """
Generate App response. Generate App response.
@ -238,8 +224,6 @@ class WorkflowAppGenerator(BaseAppGenerator):
workflow_run_id=str(uuid.uuid4()), workflow_run_id=str(uuid.uuid4()),
) )
contexts.tenant_id.set(application_generate_entity.app_config.tenant_id) contexts.tenant_id.set(application_generate_entity.app_config.tenant_id)
contexts.plugin_tool_providers.set({})
contexts.plugin_tool_providers_lock.set(threading.Lock())
return self._generate( return self._generate(
app_model=app_model, app_model=app_model,

View File

@ -1,9 +1,9 @@
import json
from collections.abc import Generator from collections.abc import Generator
from typing import cast from typing import cast
from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter
from core.app.entities.task_entities import ( from core.app.entities.task_entities import (
AppStreamResponse,
ErrorStreamResponse, ErrorStreamResponse,
NodeFinishStreamResponse, NodeFinishStreamResponse,
NodeStartStreamResponse, NodeStartStreamResponse,
@ -36,8 +36,9 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter):
@classmethod @classmethod
def convert_stream_full_response( def convert_stream_full_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[WorkflowAppStreamResponse, None, None], # type: ignore[override]
) -> Generator[str, None, None]:
""" """
Convert stream full response. Convert stream full response.
:param stream_response: stream response :param stream_response: stream response
@ -61,12 +62,13 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter):
response_chunk.update(data) response_chunk.update(data)
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)
@classmethod @classmethod
def convert_stream_simple_response( def convert_stream_simple_response(
cls, stream_response: Generator[AppStreamResponse, None, None] cls,
) -> Generator[dict | str, None, None]: stream_response: Generator[WorkflowAppStreamResponse, None, None], # type: ignore[override]
) -> Generator[str, None, None]:
""" """
Convert stream simple response. Convert stream simple response.
:param stream_response: stream response :param stream_response: stream response
@ -92,4 +94,4 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter):
response_chunk.update(sub_stream_response.to_ignore_detail_dict()) response_chunk.update(sub_stream_response.to_ignore_detail_dict())
else: else:
response_chunk.update(sub_stream_response.to_dict()) response_chunk.update(sub_stream_response.to_dict())
yield response_chunk yield json.dumps(response_chunk)

View File

@ -13,7 +13,6 @@ from core.app.entities.app_invoke_entities import (
WorkflowAppGenerateEntity, WorkflowAppGenerateEntity,
) )
from core.app.entities.queue_entities import ( from core.app.entities.queue_entities import (
QueueAgentLogEvent,
QueueErrorEvent, QueueErrorEvent,
QueueIterationCompletedEvent, QueueIterationCompletedEvent,
QueueIterationNextEvent, QueueIterationNextEvent,
@ -191,9 +190,7 @@ class WorkflowAppGenerateTaskPipeline:
and features_dict["text_to_speech"].get("enabled") and features_dict["text_to_speech"].get("enabled")
and features_dict["text_to_speech"].get("autoPlay") == "enabled" and features_dict["text_to_speech"].get("autoPlay") == "enabled"
): ):
tts_publisher = AppGeneratorTTSPublisher( tts_publisher = AppGeneratorTTSPublisher(tenant_id, features_dict["text_to_speech"].get("voice"))
tenant_id, features_dict["text_to_speech"].get("voice"), features_dict["text_to_speech"].get("language")
)
for response in self._process_stream_response(tts_publisher=tts_publisher, trace_manager=trace_manager): for response in self._process_stream_response(tts_publisher=tts_publisher, trace_manager=trace_manager):
while True: while True:
@ -530,10 +527,6 @@ class WorkflowAppGenerateTaskPipeline:
yield self._text_chunk_to_stream_response( yield self._text_chunk_to_stream_response(
delta_text, from_variable_selector=event.from_variable_selector delta_text, from_variable_selector=event.from_variable_selector
) )
elif isinstance(event, QueueAgentLogEvent):
yield self._workflow_cycle_manager._handle_agent_log(
task_id=self._application_generate_entity.task_id, event=event
)
else: else:
continue continue

Some files were not shown because too many files have changed in this diff Show More