Mirror of https://github.com/langgenius/dify.git, synced 2026-01-28 15:56:00 +08:00
Compare commits
163 Commits
| SHA1 |
|---|
| 2ba0ee989a |
| b055470147 |
| 5943385d42 |
| 0abd67288b |
| bbe58327c8 |
| 299c51ebc4 |
| 3a7f58d2a6 |
| 6123bba96d |
| d5ab3b5072 |
| df26f82536 |
| dbe0c43515 |
| f4052fdbc7 |
| b5ade19c75 |
| 040eacb8bd |
| 20899c44ff |
| 35a2beb195 |
| 2056093855 |
| 2bf48514bc |
| c109b1a920 |
| 45499328b8 |
| 4c61aa399d |
| 3e380c082a |
| 53db5bab36 |
| 6483beb096 |
| e61c84ca72 |
| d70086b841 |
| a3ee037d6d |
| 2de18a6490 |
| 4134e915ce |
| a838ba7b46 |
| 5f38214a41 |
| 19b5cb1e10 |
| 2478c88e07 |
| 59e59c19b2 |
| c67f626b66 |
| f65a3ad1cc |
| 490858a4d5 |
| 44a1aa5e44 |
| a616bf3129 |
| f2f19484b8 |
| f572b55237 |
| 554570dc22 |
| 5239b2c7ab |
| ae94b067b3 |
| 5e772bd10b |
| 91bcbd0b26 |
| 54bb309d87 |
| 75f7a96025 |
| ccd80653ff |
| 5ca88a4fd9 |
| a1c6cecf10 |
| c5ccf382df |
| 8358d0abfa |
| bad3b14438 |
| f42ef494f8 |
| bb7f454ecd |
| 7f48fadd41 |
| af2138e8b8 |
| 091beffae7 |
| 408fb502a1 |
| 7660539689 |
| 5a6061ff61 |
| 970950e3a8 |
| 431b2fd4a8 |
| 88545184be |
| 2c23caacd4 |
| 9edea9bc49 |
| d43279a1cc |
| 10848d74a0 |
| f9df23a091 |
| 17a1c05728 |
| 66782ef19c |
| fb7f509e5c |
| 1a5acf43aa |
| 4ef6392de5 |
| effdc824d9 |
| 24fa452307 |
| 9e00e3894e |
| 023783372e |
| 1d06eba61a |
| 93e99fb343 |
| b9ebce7ab7 |
| 33b3eaf324 |
| b6cca59517 |
| 93ae18ea12 |
| 99f7e4f277 |
| 659c3e7a81 |
| 7a16c88092 |
| 0bb253efe0 |
| d93365d429 |
| 8b44dba988 |
| d96bcfa4ee |
| 380b4b3ddc |
| e2bf18053c |
| 4350bb9a00 |
| fe688b505a |
| 056898bf21 |
| 0e8afa3aa2 |
| 933bd06460 |
| b939039201 |
| 6da5e54180 |
| 1c5f63de7e |
| f3219ff107 |
| 219011b62a |
| 90150a6ca9 |
| 7722a7c5cd |
| 4ba38465ac |
| 9a5ae9f51f |
| a7c40a07d8 |
| 2d0d3365ed |
| 54a6571462 |
| c43c3098a0 |
| eddd038959 |
| 7a2291f450 |
| 17a8118154 |
| 4db01403ae |
| d8425f3f4c |
| 38754734a2 |
| b42cd38cc9 |
| c6f715861a |
| b46511dd7b |
| e8e8f9e97d |
| 18d1f6a6c6 |
| 1b6e3ef964 |
| 4779fcf6f1 |
| e8239ae631 |
| 94eb2a623e |
| 96809108ca |
| 8fc2663693 |
| 37c3b8979c |
| f68b05d5ec |
| 3b3c604eb5 |
| a43ef7a926 |
| c6ba67a770 |
| ac2a1bc954 |
| a4481a3f29 |
| 15f932573a |
| f8eefa31fe |
| 0587ff0fba |
| ce492d13f1 |
| 74d954610f |
| 0abee44453 |
| 157cb2e048 |
| a4713c01d5 |
| 8847bb1e45 |
| 5fcd5c2499 |
| d680fca996 |
| 92fb4ab4c1 |
| 815f794eef |
| 3117619ef3 |
| f5b2271c8c |
| a8155cba7e |
| 0eca93ebd1 |
| d8a716d857 |
| c2e7fe107a |
| 805da40b15 |
| 4cfee55ec6 |
| bcbdbed352 |
| 2e1cd3db28 |
| 19eaf27126 |
| 4926a0fcb1 |
| 58db0fac36 |
| 367ef145d6 |
.github/ISSUE_TEMPLATE/🐛-bug-report.md (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
---
name: "\U0001F41B Bug report"
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

<!--
Please provide a clear and concise description of what the bug is. Include
screenshots if needed. Please test using the latest version of the relevant
Dify packages to make sure your issue has not already been fixed.
-->

Dify version: Cloud | Self Host

## Steps To Reproduce
<!--
Your bug will get fixed much faster if we can run your code and it doesn't
have dependencies other than Dify. Issues without reproduction steps or
code examples may be immediately closed as not actionable.
-->

1.
2.


## The current behavior


## The expected behavior
.github/ISSUE_TEMPLATE/🚀-feature-request.md (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
---
name: "\U0001F680 Feature request"
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/ISSUE_TEMPLATE/🤔-questions-and-help.md (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
---
name: "\U0001F914 Questions and Help"
about: Ask a usage or consultation question
title: ''
labels: ''
assignees: ''

---

.github/workflows/build-api-image.sh (vendored, deleted, 61 lines)
@@ -1,61 +0,0 @@
#!/usr/bin/env bash

set -eo pipefail

SHA=$(git rev-parse HEAD)
REPO_NAME=langgenius/dify
API_REPO_NAME="${REPO_NAME}-api"

if [[ "${GITHUB_EVENT_NAME}" == "pull_request" ]]; then
  REFSPEC=$(echo "${GITHUB_HEAD_REF}" | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  PR_NUM=$(echo "${GITHUB_REF}" | sed 's:refs/pull/::' | sed 's:/merge::')
  LATEST_TAG="pr-${PR_NUM}"
  CACHE_FROM_TAG="latest"
elif [[ "${GITHUB_EVENT_NAME}" == "release" ]]; then
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/tags/::' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="latest"
else
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/heads/::' | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="${REFSPEC}"
fi

if [[ "${REFSPEC}" == "main" ]]; then
  LATEST_TAG="latest"
  CACHE_FROM_TAG="latest"
fi

echo "Pulling cache image ${API_REPO_NAME}:${CACHE_FROM_TAG}"
if docker pull "${API_REPO_NAME}:${CACHE_FROM_TAG}"; then
  API_CACHE_FROM_SCRIPT="--cache-from ${API_REPO_NAME}:${CACHE_FROM_TAG}"
else
  echo "WARNING: Failed to pull ${API_REPO_NAME}:${CACHE_FROM_TAG}, disable build image cache."
  API_CACHE_FROM_SCRIPT=""
fi


cat<<EOF
Rolling with tags:
- ${API_REPO_NAME}:${SHA}
- ${API_REPO_NAME}:${REFSPEC}
- ${API_REPO_NAME}:${LATEST_TAG}
EOF

#
# Build image
#
cd api
docker build \
  ${API_CACHE_FROM_SCRIPT} \
  --build-arg COMMIT_SHA=${SHA} \
  -t "${API_REPO_NAME}:${SHA}" \
  -t "${API_REPO_NAME}:${REFSPEC}" \
  -t "${API_REPO_NAME}:${LATEST_TAG}" \
  --label "sha=${SHA}" \
  --label "built_at=$(date)" \
  --label "build_actor=${GITHUB_ACTOR}" \
  .

# push
docker push --all-tags "${API_REPO_NAME}"
.github/workflows/build-api-image.yml (vendored, 43 lines changed)
@@ -5,18 +5,19 @@ on:
    branches:
      - 'main'
      - 'deploy/dev'
  pull_request:
    types: [synchronize, opened, reopened, ready_for_review]
  release:
    types: [published]

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    if: github.event.pull_request.draft == false
    steps:
      - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )"
        uses: actions/checkout@v2
        with:
          persist-credentials: false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to Docker Hub
        uses: docker/login-action@v2
@@ -24,13 +25,29 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker image
        shell: bash
        env:
          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
          DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
        run: |
          /bin/bash .github/workflows/build-api-image.sh
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: langgenius/dify-api
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=ref,event=branch
            type=sha,enable=true,priority=100,prefix=,suffix=,format=long
            type=semver,pattern={{major}}.{{minor}}.{{patch}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}

      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: "{{defaultContext}}:api"
          platforms: linux/amd64,linux/arm64
          build-args: |
            COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      - name: Deploy to server
        if: github.ref == 'refs/heads/deploy/dev'

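Read against the old shell script it replaces, the `docker/metadata-action` step now derives all image tags declaratively. As a rough illustration (not taken from any actual run), a push to the default branch `main` would resolve the rules above to the tags sketched below:

```bash
# steps.meta.outputs.tags for a hypothetical push to main (illustrative):
#   langgenius/dify-api:latest      <- type=raw,value=latest (default branch only)
#   langgenius/dify-api:main        <- type=ref,event=branch
#   langgenius/dify-api:<full-sha>  <- type=sha,format=long (no prefix/suffix)
# A semver release tag such as 1.2.3 would instead match the three
# type=semver patterns, yielding :1.2.3, :1.2, and :1.
```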
.github/workflows/build-web-image.sh (vendored, deleted, 60 lines)
@@ -1,60 +0,0 @@
#!/usr/bin/env bash

set -eo pipefail

SHA=$(git rev-parse HEAD)
REPO_NAME=langgenius/dify
WEB_REPO_NAME="${REPO_NAME}-web"

if [[ "${GITHUB_EVENT_NAME}" == "pull_request" ]]; then
  REFSPEC=$(echo "${GITHUB_HEAD_REF}" | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  PR_NUM=$(echo "${GITHUB_REF}" | sed 's:refs/pull/::' | sed 's:/merge::')
  LATEST_TAG="pr-${PR_NUM}"
  CACHE_FROM_TAG="latest"
elif [[ "${GITHUB_EVENT_NAME}" == "release" ]]; then
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/tags/::' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="latest"
else
  REFSPEC=$(echo "${GITHUB_REF}" | sed 's:refs/heads/::' | sed 's/[^a-zA-Z0-9]/-/g' | head -c 40)
  LATEST_TAG="${REFSPEC}"
  CACHE_FROM_TAG="${REFSPEC}"
fi

if [[ "${REFSPEC}" == "main" ]]; then
  LATEST_TAG="latest"
  CACHE_FROM_TAG="latest"
fi

echo "Pulling cache image ${WEB_REPO_NAME}:${CACHE_FROM_TAG}"
if docker pull "${WEB_REPO_NAME}:${CACHE_FROM_TAG}"; then
  WEB_CACHE_FROM_SCRIPT="--cache-from ${WEB_REPO_NAME}:${CACHE_FROM_TAG}"
else
  echo "WARNING: Failed to pull ${WEB_REPO_NAME}:${CACHE_FROM_TAG}, disable build image cache."
  WEB_CACHE_FROM_SCRIPT=""
fi


cat<<EOF
Rolling with tags:
- ${WEB_REPO_NAME}:${SHA}
- ${WEB_REPO_NAME}:${REFSPEC}
- ${WEB_REPO_NAME}:${LATEST_TAG}
EOF

#
# Build image
#
cd web
docker build \
  ${WEB_CACHE_FROM_SCRIPT} \
  --build-arg COMMIT_SHA=${SHA} \
  -t "${WEB_REPO_NAME}:${SHA}" \
  -t "${WEB_REPO_NAME}:${REFSPEC}" \
  -t "${WEB_REPO_NAME}:${LATEST_TAG}" \
  --label "sha=${SHA}" \
  --label "built_at=$(date)" \
  --label "build_actor=${GITHUB_ACTOR}" \
  .

docker push --all-tags "${WEB_REPO_NAME}"
.github/workflows/build-web-image.yml (vendored, 43 lines changed)
@@ -5,18 +5,19 @@ on:
    branches:
      - 'main'
      - 'deploy/dev'
  pull_request:
    types: [synchronize, opened, reopened, ready_for_review]
  release:
    types: [published]

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    if: github.event.pull_request.draft == false
    steps:
      - name: "Checkout ${{ github.ref }} ( ${{ github.sha }} )"
        uses: actions/checkout@v2
        with:
          persist-credentials: false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to Docker Hub
        uses: docker/login-action@v2
@@ -24,13 +25,29 @@ jobs:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker image
        shell: bash
        env:
          DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
          DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
        run: |
          /bin/bash .github/workflows/build-web-image.sh
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: langgenius/dify-web
          tags: |
            type=raw,value=latest,enable={{is_default_branch}}
            type=ref,event=branch
            type=sha,enable=true,priority=100,prefix=,suffix=,format=long
            type=semver,pattern={{major}}.{{minor}}.{{patch}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}

      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: "{{defaultContext}}:web"
          platforms: linux/amd64,linux/arm64
          build-args: |
            COMMIT_SHA=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      - name: Deploy to server
        if: github.ref == 'refs/heads/deploy/dev'

.github/workflows/flake8.yml (vendored, deleted, 19 lines)
@@ -1,19 +0,0 @@
name: PEP8 Check
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  pep8:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Install flake8
        run: pip install flake8
      - name: Run flake8
        run: flake8 --ignore=E501 .
.github/workflows/stale.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests

on:
  schedule:
    - cron: '0 3 * * *'

jobs:
  stale:

    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write

    steps:
      - uses: actions/stale@v5
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
          stale-pr-message: "Close due to it's no longer active, if you have any questions, you can reopen it."
          stale-issue-label: 'no-issue-activity'
          stale-pr-label: 'no-pr-activity'
.gitignore (vendored, 4 lines changed)
@@ -130,7 +130,7 @@ dmypy.json
.idea/'

.DS_Store
.vscode
web/.vscode/settings.json

# Intellij IDEA Files
.idea/
@@ -139,7 +139,7 @@ dmypy.json
api/.env
api/storage/*

docker/volumes/app/storage/privkeys/*
docker/volumes/app/storage/*
docker/volumes/db/data/*
docker/volumes/redis/data/*
docker/volumes/weaviate/*

@@ -22,14 +22,14 @@ To set up a working development environment, just fork the project git repository

### Fork the repository

you need to fork the [repository](https://github.com/langgenius/langgenius-gateway).
you need to fork the [repository](https://github.com/langgenius/dify).

### Clone the repo

Clone your GitHub forked repository:

```
git clone git@github.com:<github_username>/langgenius-gateway.git
git clone git@github.com:<github_username>/dify.git
```

### Install backend

CONTRIBUTING_JA.md (new file, 55 lines)
@@ -0,0 +1,55 @@
# Contributing

Thank you for your interest in [Dify](https://dify.ai) and for wanting to contribute! Before getting started,
read the [Code of Conduct](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md) and
check the [existing issues](https://github.com/langgenius/langgenius-gateway/issues).
This document describes how to set up a development environment to build and test [Dify](https://dify.ai).

### Install dependencies

You need to install and configure the following dependencies on your machine to build [Dify](https://dify.ai):

- [Git](http://git-scm.com/)
- [Docker](https://www.docker.com/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Node.js v18.x (LTS)](http://nodejs.org)
- [npm](https://www.npmjs.com/) version 8.x.x or [Yarn](https://yarnpkg.com/)
- [Python](https://www.python.org/) version 3.10.x

## Local development

To set up a working development environment, fork the project git repository, install the backend and frontend dependencies using the appropriate package managers, and create the docker-compose stack to run.

### Fork the repository

You need to fork the [repository](https://github.com/langgenius/dify).

### Clone the repo

Clone your GitHub forked repository:

```
git clone git@github.com:<github_username>/dify.git
```

### Install backend

To learn how to install the backend application, see the [Backend README](api/README.md).

### Install frontend

To learn how to install the frontend application, see the [Frontend README](web/README.md).

### Visit dify in your browser

You can now view [Dify](https://dify.ai) in your local environment at [http://localhost:3000](http://localhost:3000).

## Create a pull request

After your changes, open a pull request (PR). Once you submit a pull request, others from the Dify team/community will review it with you.

Did you run into merge conflicts or other problems, or not know how to open a pull request? Check out [GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests) on how to resolve merge conflicts and other issues. Once your PR has been merged, you will be proudly listed as a contributor in the [contributor chart](https://github.com/langgenius/langgenius-gateway/graphs/contributors).

## Community channels

Stuck? Got questions? Join the [Discord Community Server](https://discord.gg/AhzKf7dNgk), where we are happy to help!
README.md (13 lines changed)
@@ -1,10 +1,11 @@

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_CN.md">简体中文</a>
  <a href="./README_CN.md">简体中文</a> |
  <a href="./README_JA.md">日本語</a>
</p>

[Website](http://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai)
[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)

**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. With visual orchestration for various application types, Dify offers out-of-the-box, ready-to-use applications that can also serve as Backend-as-a-Service APIs. Unify your development process with one API for plugins and datasets integration, and streamline your operations using a single interface for prompt engineering, visual analytics, and continuous improvement.

@@ -21,7 +22,7 @@ Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs

## Use Cloud Services

Visit [Dify.ai](http://dify.ai)
Visit [Dify.ai](https://dify.ai)

## Install the Community Edition

@@ -38,10 +39,10 @@ The easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml)

```bash
cd docker
docker-compose up -d
docker compose up -d
```

After running, you can access the Dify console in your browser at [http://localhost](http://localhost) and start the initialization operation.
After running, you can access the Dify dashboard in your browser at [http://localhost/install](http://localhost/install) and start the initialization installation process.

### Configuration

@@ -86,7 +87,7 @@ A: English and Chinese are currently supported, and you can contribute language
If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:

- Submit an Issue or PR on our GitHub Repo
- Join the discussion in our [Discord](https://discord.gg/AhzKf7dNgk) Community
- Join the discussion in our [Discord](https://discord.gg/FngNHpbcY7) Community
- Send an email to hello@dify.ai

We're eager to assist you and together create more fun and useful AI applications!

README_CN.md (13 lines changed)
@@ -1,11 +1,12 @@

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_CN.md">简体中文</a>
  <a href="./README_CN.md">简体中文</a> |
  <a href="./README_JA.md">日本語</a>
</p>


[Website](http://dify.ai) • [Docs](https://docs.dify.ai/v/zh-hans) • [Twitter](https://twitter.com/dify_ai)
[Website](https://dify.ai) • [Docs](https://docs.dify.ai/v/zh-hans) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)

**Dify** is an easy-to-use LLMOps platform designed to let more people create sustainably operated, AI-native applications. Dify provides visual orchestration for many application types; applications work out of the box and can also be served as Backend-as-a-Service APIs.

@@ -23,7 +24,7 @@ Dify is compatible with Langchain, meaning we will gradually support multiple LLMs; currently

## Use Cloud Services

Visit [Dify.ai](http://cloud.dify.ai)
Visit [Dify.ai](https://cloud.dify.ai)

## Install the Community Edition

@@ -40,10 +41,10 @@ Dify is compatible with Langchain, meaning we will gradually support multiple LLMs; currently

```bash
cd docker
docker-compose up -d
docker compose up -d
```

After running, you can visit [http://localhost](http://localhost) in your browser to enter the Dify console and start initialization.
After running, you can visit [http://localhost/install](http://localhost/install) in your browser to enter the Dify console and begin the initialization installation.

### Configuration

@@ -87,7 +88,7 @@ A: English and Chinese are currently supported, and you can contribute language packs to us.
If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:

- Submit an Issue or PR on our [GitHub Repo](https://github.com/langgenius/dify)
- Join the discussion in our [Discord](https://discord.gg/AhzKf7dNgk) community
- Join the discussion in our [Discord](https://discord.gg/FngNHpbcY7) community
- Send an email to hello@dify.ai

## Contributing

README_JA.md (new file, 117 lines)
@@ -0,0 +1,117 @@

<p align="center">
  <a href="./README.md">English</a> |
  <a href="./README_CN.md">简体中文</a> |
  <a href="./README_JA.md">日本語</a>
</p>

[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7)


**Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. With visual orchestration for various application types, Dify offers ready-to-use applications that can also serve as Backend-as-a-Service APIs. Unify your development process with one API for plugin and dataset integration, and streamline your operations using a single interface for prompt engineering, visual analytics, and continuous improvement.

Applications created with Dify include:

Ready-to-use websites supporting both form mode and chat conversation mode
A single API covering plugin capabilities, context enhancement, and more, saving you backend coding effort
Visual data analysis, log review, and annotation for applications
Dify is compatible with LangChain, and we will gradually support multiple LLMs:

- GPT 3 (text-davinci-003)
- GPT 3.5 Turbo (ChatGPT)
- GPT-4

## Use Cloud Services

Visit [Dify.ai](https://dify.ai)

## Install the Community Edition

### System requirements

Before installing Dify, make sure your machine meets the following minimum system requirements:

- CPU >= 1 Core
- RAM >= 4GB

### Quick start

The easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:

```bash
cd docker
docker compose up -d
```

After running, you can access [http://localhost/install](http://localhost/install) in your browser and start the initialization installation process.

### Configuration

If you need to customize the configuration, refer to the comments in the [docker-compose.yml](docker/docker-compose.yaml) file and set the environment variables manually. After making the changes, run 'docker-compose up -d' again.

## Roadmap

Features under development:

- **Datasets**: supporting more datasets, such as syncing content from Notion or web pages
We will support more datasets, including text, web pages, and even Notion content. Users will be able to build AI applications on top of their own data sources.
- **Plugins**: introducing ChatGPT-standard plugins for applications, or using Dify-made plugins
We will release plugins complying with the ChatGPT standard, as well as Dify's own plugins, to enable many more capabilities in applications.
- **Open-source models**: adopting Llama as a model provider, for example, or performing further fine-tuning
We will work with excellent open-source models like Llama, by providing them as model options on our platform or using them for further fine-tuning.


## Q&A

**Q: What can I do with Dify?**

A: Dify is a simple yet powerful LLM development and operations tool. You can use it to build commercial-grade applications and personal assistants. If you want to develop your own applications, LangDifyGenius saves you the backend work of integrating with OpenAI, offers visual operation capabilities, and lets you continuously improve and train your GPT model.

**Q: How do I "train" my own model with Dify?**

A: A valuable application consists of prompt engineering, context enhancement, and fine-tuning. With a hybrid programming approach combining prompts and programming languages (like a template engine), you can easily accomplish long-text embedding or capturing subtitles from a user-input YouTube video, all of which will be submitted as context for the LLM to process. We place great emphasis on application operability: data generated by users while using the application can be used for analysis, annotation, and continuous training. Without the right tools, these steps can be time-consuming.

**Q: What do I need to prepare to build my own application?**

A: We assume you already have an OpenAI API key; if not, please register for one. If you already have content that can serve as training context, that's great!

**Q: Which languages are available for the interface?**

A: English and Chinese are currently supported, and you can contribute language packs to us.

## Star History

[](https://star-history.com/#langgenius/dify&Date)

## Contact Us

If you have any questions, suggestions, or partnership inquiries, feel free to contact us through the following channels:

- Submit an Issue or PR on our GitHub Repo
- Join the discussion in our [Discord](https://discord.gg/FngNHpbcY7) community
- Send an email to hello@dify.ai

We're eager to assist you and together create more fun and useful AI applications!

## Contributing

To ensure proper review, all code contributions, including those from contributors with direct commit access, must be submitted as pull requests and approved by the core development team before being merged.

We welcome all pull requests! If you'd like to help, check out the [Contribution Guide](CONTRIBUTING.md).

## Security

To protect your privacy, please avoid posting security issues on GitHub. Instead, send your questions to security@dify.ai and we will provide a more detailed answer.

## Citation

This software uses the following open-source software:

- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain
- Liu, J. (2022). LlamaIndex [Computer software]. doi: 10.5281/zenodo.1234.

For details, please refer to the official website or license text of each software.

## License

This repository is available under the [Dify Open Source License](LICENSE).
@@ -14,7 +14,7 @@ CONSOLE_URL=http://127.0.0.1:5001
API_URL=http://127.0.0.1:5001

# Web APP base URL
APP_URL=http://127.0.0.1:5001
APP_URL=http://127.0.0.1:3000

# celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1

@@ -33,3 +33,4 @@
flask run --host 0.0.0.0 --port=5001 --debug
```
7. Setup your application by visiting http://localhost:5001/console/api/setup or other apis...
8. If you need to debug local async processing, you can run `celery -A app.celery worker`, celery can do dataset importing and other async tasks.
@@ -1,18 +1,21 @@
import datetime
import json
import random
import string

import click
from flask import current_app

from libs.password import password_pattern, valid_password, hash_password
from libs.helper import email as email_validate
from extensions.ext_database import db
from models.account import InvitationCode
from models.model import Account, AppModelConfig, ApiToken, Site, App, RecommendedApp
from libs.rsa import generate_key_pair
from models.account import InvitationCode, Tenant
from models.model import Account
import secrets
import base64

from models.provider import Provider


@click.command('reset-password', help='Reset the account password.')
@click.option('--email', prompt=True, help='The email address of the account whose password you need to reset')
@@ -74,6 +77,31 @@ def reset_email(email, new_email, email_confirm):
    click.echo(click.style('Congratulations!, email has been reset.', fg='green'))


@click.command('reset-encrypt-key-pair', help='Reset the asymmetric key pair of workspace for encrypt LLM credentials. '
                                              'After the reset, all LLM credentials will become invalid, '
                                              'requiring re-entry.'
                                              'Only support SELF_HOSTED mode.')
@click.confirmation_option(prompt=click.style('Are you sure you want to reset encrypt key pair?'
                                              ' this operation cannot be rolled back!', fg='red'))
def reset_encrypt_key_pair():
    if current_app.config['EDITION'] != 'SELF_HOSTED':
        click.echo(click.style('Sorry, only support SELF_HOSTED mode.', fg='red'))
        return

    tenant = db.session.query(Tenant).first()
    if not tenant:
        click.echo(click.style('Sorry, no workspace found. Please enter /install to initialize.', fg='red'))
        return

    tenant.encrypt_public_key = generate_key_pair(tenant.id)

    db.session.query(Provider).filter(Provider.provider_type == 'custom').delete()
    db.session.commit()

    click.echo(click.style('Congratulations! '
                           'the asymmetric key pair of workspace {} has been reset.'.format(tenant.id), fg='green'))


@click.command('generate-invitation-codes', help='Generate invitation codes.')
@click.option('--batch', help='The batch of invitation codes.')
@click.option('--count', prompt=True, help='Invitation codes count.')
@@ -131,30 +159,8 @@ def generate_upper_string():
    return result


@click.command('gen-recommended-apps', help='Number of records to generate')
def generate_recommended_apps():
    print('Generating recommended app data...')
    apps = App.query.all()
    for app in apps:
        recommended_app = RecommendedApp(
            app_id=app.id,
            description={
                'en': 'Description for ' + app.name,
                'zh': '描述 ' + app.name
            },
            copyright='Copyright ' + str(random.randint(1990, 2020)),
            privacy_policy='https://privacypolicy.example.com',
            category=random.choice(['Games', 'News', 'Music', 'Sports']),
            position=random.randint(1, 100),
            install_count=random.randint(100, 100000)
        )
        db.session.add(recommended_app)
    db.session.commit()
    print('Done!')


def register_commands(app):
    app.cli.add_command(reset_password)
    app.cli.add_command(reset_email)
    app.cli.add_command(generate_invitation_codes)
    app.cli.add_command(generate_recommended_apps)
    app.cli.add_command(reset_encrypt_key_pair)

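The `reset-encrypt-key-pair` command added above is registered on the Flask CLI via `register_commands`, so it would be invoked roughly as in the minimal sketch below, assuming a self-hosted deployment with the api environment already configured (the working directory is an assumption, not shown in this diff):

```bash
# Run from the api/ directory of a SELF_HOSTED deployment (assumed layout).
# click's confirmation_option prompts before proceeding; pass --yes to skip it.
cd api
flask reset-encrypt-key-pair
# Per the command body above, this generates a fresh RSA key pair for the
# first (only) workspace and deletes all Provider rows with
# provider_type='custom', so saved LLM credentials must be re-entered.
```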
@@ -21,9 +21,11 @@ DEFAULTS = {
    'REDIS_HOST': 'localhost',
    'REDIS_PORT': '6379',
    'REDIS_DB': '0',
    'REDIS_USE_SSL': 'False',
    'SESSION_REDIS_HOST': 'localhost',
    'SESSION_REDIS_PORT': '6379',
    'SESSION_REDIS_DB': '2',
    'SESSION_REDIS_USE_SSL': 'False',
    'OAUTH_REDIRECT_PATH': '/console/api/oauth/authorize',
    'OAUTH_REDIRECT_INDEX_PATH': '/',
    'CONSOLE_URL': 'https://cloud.dify.ai',
@@ -44,6 +46,8 @@
    'CELERY_BACKEND': 'database',
    'PDF_PREVIEW': 'True',
    'LOG_LEVEL': 'INFO',
    'DISABLE_PROVIDER_CONFIG_VALIDATION': 'False',
    'DEFAULT_LLM_PROVIDER': 'openai'
}

@@ -74,7 +78,7 @@ class Config:
        self.CONSOLE_URL = get_env('CONSOLE_URL')
        self.API_URL = get_env('API_URL')
        self.APP_URL = get_env('APP_URL')
        self.CURRENT_VERSION = "0.2.0"
        self.CURRENT_VERSION = "0.3.2"
        self.COMMIT_SHA = get_env('COMMIT_SHA')
        self.EDITION = "SELF_HOSTED"
        self.DEPLOY_ENV = get_env('DEPLOY_ENV')
@@ -105,14 +109,18 @@
        # redis settings
        self.REDIS_HOST = get_env('REDIS_HOST')
        self.REDIS_PORT = get_env('REDIS_PORT')
        self.REDIS_USERNAME = get_env('REDIS_USERNAME')
        self.REDIS_PASSWORD = get_env('REDIS_PASSWORD')
        self.REDIS_DB = get_env('REDIS_DB')
        self.REDIS_USE_SSL = get_bool_env('REDIS_USE_SSL')

        # session redis settings
        self.SESSION_REDIS_HOST = get_env('SESSION_REDIS_HOST')
        self.SESSION_REDIS_PORT = get_env('SESSION_REDIS_PORT')
        self.SESSION_REDIS_USERNAME = get_env('SESSION_REDIS_USERNAME')
        self.SESSION_REDIS_PASSWORD = get_env('SESSION_REDIS_PASSWORD')
        self.SESSION_REDIS_DB = get_env('SESSION_REDIS_DB')
        self.SESSION_REDIS_USE_SSL = get_bool_env('SESSION_REDIS_USE_SSL')

        # storage settings
        self.STORAGE_TYPE = get_env('STORAGE_TYPE')
@@ -165,10 +173,18 @@
        self.CELERY_BACKEND = get_env('CELERY_BACKEND')
        self.CELERY_RESULT_BACKEND = 'db+{}'.format(self.SQLALCHEMY_DATABASE_URI) \
            if self.CELERY_BACKEND == 'database' else self.CELERY_BROKER_URL
        self.BROKER_USE_SSL = self.CELERY_BROKER_URL.startswith('rediss://')

        # hosted provider credentials
        self.OPENAI_API_KEY = get_env('OPENAI_API_KEY')

        # By default it is False
        # You could disable it for compatibility with certain OpenAPI providers
        self.DISABLE_PROVIDER_CONFIG_VALIDATION = get_bool_env('DISABLE_PROVIDER_CONFIG_VALIDATION')

        # For temp use only
        # set default LLM provider, default is 'openai', support `azure_openai`
        self.DEFAULT_LLM_PROVIDER = get_env('DEFAULT_LLM_PROVIDER')

class CloudEditionConfig(Config):


@@ -5,8 +5,11 @@ from libs.external_api import ExternalApi
bp = Blueprint('console', __name__, url_prefix='/console/api')
api = ExternalApi(bp)

# Import other controllers
from . import setup, version, apikey, admin

# Import app controllers
from .app import app, site, explore, completion, model_config, statistic, conversation, message
from .app import app, site, completion, model_config, statistic, conversation, message, generator

# Import auth controllers
from .auth import login, oauth
@@ -14,7 +17,8 @@ from .auth import login, oauth
# Import datasets controllers
from .datasets import datasets, datasets_document, datasets_segments, file, hit_testing

# Import other controllers
from . import setup, version, apikey

# Import workspace controllers
from .workspace import workspace, members, providers, account

# Import explore controllers
from .explore import installed_app, recommended_app, completion, conversation, message, parameter, saved_message

api/controllers/console/admin.py (new file, 132 lines)
@@ -0,0 +1,132 @@
import os
from functools import wraps

from flask import request
from flask_restful import Resource, reqparse
from werkzeug.exceptions import NotFound, Unauthorized

from controllers.console import api
from controllers.console.wraps import only_edition_cloud
from extensions.ext_database import db
from models.model import RecommendedApp, App, InstalledApp


def admin_required(view):
    @wraps(view)
    def decorated(*args, **kwargs):
        if not os.getenv('ADMIN_API_KEY'):
            raise Unauthorized('API key is invalid.')

        auth_header = request.headers.get('Authorization')
        if auth_header is None:
            raise Unauthorized('Authorization header is missing.')

        if ' ' not in auth_header:
            raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')

        auth_scheme, auth_token = auth_header.split(None, 1)
        auth_scheme = auth_scheme.lower()

        if auth_scheme != 'bearer':
            raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')

        if os.getenv('ADMIN_API_KEY') != auth_token:
            raise Unauthorized('API key is invalid.')

        return view(*args, **kwargs)

    return decorated


class InsertExploreAppListApi(Resource):
    @only_edition_cloud
    @admin_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('app_id', type=str, required=True, nullable=False, location='json')
        parser.add_argument('desc', type=str, location='json')
        parser.add_argument('copyright', type=str, location='json')
        parser.add_argument('privacy_policy', type=str, location='json')
        parser.add_argument('language', type=str, required=True, nullable=False, choices=['en-US', 'zh-Hans'],
                            location='json')
        parser.add_argument('category', type=str, required=True, nullable=False, location='json')
        parser.add_argument('position', type=int, required=True, nullable=False, location='json')
        args = parser.parse_args()

        app = App.query.filter(App.id == args['app_id']).first()
        if not app:
            raise NotFound('App not found')

        site = app.site
        if not site:
            desc = args['desc'] if args['desc'] else ''
            copy_right = args['copyright'] if args['copyright'] else ''
            privacy_policy = args['privacy_policy'] if args['privacy_policy'] else ''
        else:
            desc = site.description if (site.description if not args['desc'] else args['desc']) else ''
            copy_right = site.copyright if (site.copyright if not args['copyright'] else args['copyright']) else ''
            privacy_policy = site.privacy_policy \
                if (site.privacy_policy if not args['privacy_policy'] else args['privacy_policy']) else ''

        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()

        if not recommended_app:
            recommended_app = RecommendedApp(
                app_id=app.id,
                description=desc,
                copyright=copy_right,
                privacy_policy=privacy_policy,
                language=args['language'],
                category=args['category'],
                position=args['position']
            )

            db.session.add(recommended_app)

            app.is_public = True
            db.session.commit()

            return {'result': 'success'}, 201
        else:
            recommended_app.description = desc
            recommended_app.copyright = copy_right
            recommended_app.privacy_policy = privacy_policy
            recommended_app.language = args['language']
            recommended_app.category = args['category']
            recommended_app.position = args['position']

            app.is_public = True

            db.session.commit()

            return {'result': 'success'}, 200


class InsertExploreAppApi(Resource):
    @only_edition_cloud
    @admin_required
    def delete(self, app_id):
        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == str(app_id)).first()
        if not recommended_app:
            return {'result': 'success'}, 204

        app = App.query.filter(App.id == recommended_app.app_id).first()
        if app:
            app.is_public = False

        installed_apps = InstalledApp.query.filter(
            InstalledApp.app_id == recommended_app.app_id,
            InstalledApp.tenant_id != InstalledApp.app_owner_tenant_id
        ).all()

        for installed_app in installed_apps:
            db.session.delete(installed_app)

        db.session.delete(recommended_app)
        db.session.commit()

        return {'result': 'success'}, 204


api.add_resource(InsertExploreAppListApi, '/admin/insert-explore-apps')
api.add_resource(InsertExploreAppApi, '/admin/insert-explore-apps/<uuid:app_id>')
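The `admin_required` decorator above authenticates these cloud-edition-only endpoints against the `ADMIN_API_KEY` environment variable, sent as a bearer token. A hypothetical invocation is sketched below; the host, key, and app UUID are placeholders, not taken from this diff:

```bash
# Insert or update an app in the Explore list (201 on insert, 200 on update).
curl -X POST 'https://example.com/console/api/admin/insert-explore-apps' \
  -H "Authorization: Bearer $ADMIN_API_KEY" \
  -H 'Content-Type: application/json' \
  -d '{"app_id": "00000000-0000-0000-0000-000000000000",
       "language": "en-US", "category": "Assistant", "position": 1}'

# Remove it again; per the code, this also uninstalls the app for every
# tenant other than the owning tenant (204 either way).
curl -X DELETE 'https://example.com/console/api/admin/insert-explore-apps/00000000-0000-0000-0000-000000000000' \
  -H "Authorization: Bearer $ADMIN_API_KEY"
```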
@@ -17,6 +17,6 @@ def _get_app(app_id, mode=None):
        raise NotFound("App not found")

    if mode and app.mode != mode:
        raise AppUnavailableError()
        raise NotFound("The {} app not found".format(mode))

    return app

@@ -9,18 +9,13 @@ from werkzeug.exceptions import Unauthorized, Forbidden

from constants.model_template import model_templates, demo_model_templates
from controllers.console import api
from controllers.console.app.error import AppNotFoundError, ProviderNotInitializeError, ProviderQuotaExceededError, \
    CompletionRequestError, ProviderModelCurrentlyNotSupportError
from controllers.console.app.error import AppNotFoundError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.generator.llm_generator import LLMGenerator
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, LLMBadRequestError, LLMAPIConnectionError, \
    LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, ModelCurrentlyNotSupportError
from events.app_event import app_was_created, app_was_deleted
from libs.helper import TimestampField
from extensions.ext_database import db
from models.model import App, AppModelConfig, Site, InstalledApp
from services.account_service import TenantService
from models.model import App, AppModelConfig, Site
from services.app_model_config_service import AppModelConfigService

model_config_fields = {
@@ -478,35 +473,6 @@ class AppExport(Resource):
    pass


class IntroductionGenerateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('prompt_template', type=str, required=True, location='json')
        args = parser.parse_args()

        account = current_user

        try:
            answer = LLMGenerator.generate_introduction(
                account.current_tenant_id,
                args['prompt_template']
            )
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))

        return {'introduction': answer}


api.add_resource(AppListApi, '/apps')
api.add_resource(AppTemplateApi, '/app-templates')
api.add_resource(AppApi, '/apps/<uuid:app_id>')
@@ -515,4 +481,3 @@ api.add_resource(AppNameApi, '/apps/<uuid:app_id>/name')
api.add_resource(AppSiteStatus, '/apps/<uuid:app_id>/site-enable')
api.add_resource(AppApiStatus, '/apps/<uuid:app_id>/api-enable')
api.add_resource(AppRateLimit, '/apps/<uuid:app_id>/rate-limit')
api.add_resource(IntroductionGenerateApi, '/introduction-generate')

@@ -45,7 +45,7 @@ message_detail_fields = {
    'message_tokens': fields.Integer,
    'answer': fields.String,
    'answer_tokens': fields.Integer,
    'provider_response_latency': fields.Integer,
    'provider_response_latency': fields.Float,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account_id': fields.String,

@@ -9,31 +9,33 @@ class AppNotFoundError(BaseHTTPException):

class ProviderNotInitializeError(BaseHTTPException):
    error_code = 'provider_not_initialize'
    description = "Provider Token not initialize."
    description = "No valid model provider credentials found. " \
                  "Please go to Settings -> Model Provider to complete your provider credentials."
    code = 400


class ProviderQuotaExceededError(BaseHTTPException):
    error_code = 'provider_quota_exceeded'
    description = "Provider quota exceeded."
    description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
                  "Please go to Settings -> Model Provider to complete your own provider credentials."
    code = 400


class ProviderModelCurrentlyNotSupportError(BaseHTTPException):
    error_code = 'model_currently_not_support'
    description = "GPT-4 currently not support."
    description = "Dify Hosted OpenAI trial currently not support the GPT-4 model."
    code = 400


class ConversationCompletedError(BaseHTTPException):
    error_code = 'conversation_completed'
    description = "Conversation was completed."
    description = "The conversation has ended. Please start a new conversation."
    code = 400


class AppUnavailableError(BaseHTTPException):
    error_code = 'app_unavailable'
    description = "App unavailable."
    description = "App unavailable, please check your app configurations."
    code = 400

@@ -45,5 +47,5 @@ class CompletionRequestError(BaseHTTPException):

class AppMoreLikeThisDisabledError(BaseHTTPException):
    error_code = 'app_more_like_this_disabled'
    description = "More like this disabled."
    description = "The 'More like this' feature is disabled. Please refresh your page."
    code = 403

@@ -1,209 +0,0 @@
# -*- coding:utf-8 -*-
from datetime import datetime

from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with, abort, inputs
from sqlalchemy import and_

from controllers.console import api
from extensions.ext_database import db
from models.model import Tenant, App, InstalledApp, RecommendedApp
from services.account_service import TenantService

app_fields = {
    'id': fields.String,
    'name': fields.String,
    'mode': fields.String,
    'icon': fields.String,
    'icon_background': fields.String
}

installed_app_fields = {
    'id': fields.String,
    'app': fields.Nested(app_fields, attribute='app'),
    'app_owner_tenant_id': fields.String,
    'is_pinned': fields.Boolean,
    'last_used_at': fields.DateTime,
    'editable': fields.Boolean
}

installed_app_list_fields = {
    'installed_apps': fields.List(fields.Nested(installed_app_fields))
}

recommended_app_fields = {
    'app': fields.Nested(app_fields, attribute='app'),
    'app_id': fields.String,
    'description': fields.String(attribute='description'),
    'copyright': fields.String,
    'privacy_policy': fields.String,
    'category': fields.String,
    'position': fields.Integer,
    'is_listed': fields.Boolean,
    'install_count': fields.Integer,
    'installed': fields.Boolean,
    'editable': fields.Boolean
}

recommended_app_list_fields = {
    'recommended_apps': fields.List(fields.Nested(recommended_app_fields)),
    'categories': fields.List(fields.String)
}


class InstalledAppsListResource(Resource):
    @login_required
    @marshal_with(installed_app_list_fields)
    def get(self):
        current_tenant_id = Tenant.query.first().id
        installed_apps = db.session.query(InstalledApp).filter(
            InstalledApp.tenant_id == current_tenant_id
        ).all()

        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        installed_apps = [
            {
                **installed_app,
                "editable": current_user.role in ["owner", "admin"],
            }
            for installed_app in installed_apps
        ]
        installed_apps.sort(key=lambda app: (-app.is_pinned, app.last_used_at))

        return {'installed_apps': installed_apps}

    @login_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('app_id', type=str, required=True, help='Invalid app_id')
        args = parser.parse_args()

        current_tenant_id = Tenant.query.first().id
        app = App.query.get(args['app_id'])
        if app is None:
            abort(404, message='App not found')
        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()
        if recommended_app is None:
            abort(404, message='App not found')
        if not app.is_public:
            abort(403, message="You can't install a non-public app")

        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.app_id == args['app_id'],
            InstalledApp.tenant_id == current_tenant_id
        )).first()

        if installed_app is None:
            # todo: position
            recommended_app.install_count += 1

            new_installed_app = InstalledApp(
                app_id=args['app_id'],
                tenant_id=current_tenant_id,
                is_pinned=False,
                last_used_at=datetime.utcnow()
            )
            db.session.add(new_installed_app)
            db.session.commit()

        return {'message': 'App installed successfully'}


class InstalledAppResource(Resource):

    @login_required
    def delete(self, installed_app_id):

        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.id == str(installed_app_id),
            InstalledApp.tenant_id == current_user.current_tenant_id
        )).first()

        if installed_app is None:
            abort(404, message='App not found')

        if installed_app.app_owner_tenant_id == current_user.current_tenant_id:
            abort(400, message="You can't uninstall an app owned by the current tenant")

        db.session.delete(installed_app)
        db.session.commit()

        return {'result': 'success', 'message': 'App uninstalled successfully'}

    @login_required
    def patch(self, installed_app_id):
        parser = reqparse.RequestParser()
        parser.add_argument('is_pinned', type=inputs.boolean)
        args = parser.parse_args()

        current_tenant_id = Tenant.query.first().id
        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.id == str(installed_app_id),
            InstalledApp.tenant_id == current_tenant_id
        )).first()

        if installed_app is None:
            abort(404, message='Installed app not found')

        commit_args = False
        if 'is_pinned' in args:
            installed_app.is_pinned = args['is_pinned']
            commit_args = True

        if commit_args:
            db.session.commit()

        return {'result': 'success', 'message': 'App info updated successfully'}


class RecommendedAppsResource(Resource):
    @login_required
    @marshal_with(recommended_app_list_fields)
    def get(self):
        recommended_apps = db.session.query(RecommendedApp).filter(
            RecommendedApp.is_listed == True
        ).all()

        categories = set()
        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        recommended_apps_result = []
        for recommended_app in recommended_apps:
            installed = db.session.query(InstalledApp).filter(
                and_(
                    InstalledApp.app_id == recommended_app.app_id,
                    InstalledApp.tenant_id == current_user.current_tenant_id
                )
            ).first() is not None

            language_prefix = current_user.interface_language.split('-')[0]
            desc = None
            if recommended_app.description:
                if language_prefix in recommended_app.description:
                    desc = recommended_app.description[language_prefix]
                elif 'en' in recommended_app.description:
                    desc = recommended_app.description['en']

            recommended_app_result = {
                'id': recommended_app.id,
                'app': recommended_app.app,
                'app_id': recommended_app.app_id,
                'description': desc,
                'copyright': recommended_app.copyright,
                'privacy_policy': recommended_app.privacy_policy,
                'category': recommended_app.category,
                'position': recommended_app.position,
                'is_listed': recommended_app.is_listed,
                'install_count': recommended_app.install_count,
                'installed': installed,
                'editable': current_user.role in ['owner', 'admin'],
            }
            recommended_apps_result.append(recommended_app_result)

            categories.add(recommended_app.category)  # add category to categories

        return {'recommended_apps': recommended_apps_result, 'categories': list(categories)}


api.add_resource(InstalledAppsListResource, '/installed-apps')
api.add_resource(InstalledAppResource, '/installed-apps/<uuid:installed_app_id>')
api.add_resource(RecommendedAppsResource, '/explore/apps')
75
api/controllers/console/app/generator.py
Normal file
75
api/controllers/console/app/generator.py
Normal file
@ -0,0 +1,75 @@
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse

from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    CompletionRequestError, ProviderModelCurrentlyNotSupportError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.generator.llm_generator import LLMGenerator
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, LLMBadRequestError, LLMAPIConnectionError, \
    LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, ModelCurrentlyNotSupportError


class IntroductionGenerateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('prompt_template', type=str, required=True, location='json')
        args = parser.parse_args()

        account = current_user

        try:
            answer = LLMGenerator.generate_introduction(
                account.current_tenant_id,
                args['prompt_template']
            )
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))

        return {'introduction': answer}


class RuleGenerateApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('audiences', type=str, required=True, nullable=False, location='json')
        parser.add_argument('hoping_to_solve', type=str, required=True, nullable=False, location='json')
        args = parser.parse_args()

        account = current_user

        try:
            rules = LLMGenerator.generate_rule_config(
                account.current_tenant_id,
                args['audiences'],
                args['hoping_to_solve']
            )
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))

        return rules


api.add_resource(IntroductionGenerateApi, '/introduction-generate')
api.add_resource(RuleGenerateApi, '/rule-generate')
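For reference, the two generator endpoints registered above can be exercised directly once the console API is running. A minimal sketch, assuming the console blueprint is mounted under /console/api on the default development port and that the session is already authenticated (both assumptions, not shown in this diff); the JSON keys mirror the reqparse arguments defined above:

import requests

BASE = 'http://localhost:5001/console/api'  # assumed mount point and port

session = requests.Session()
# ... perform console login here to obtain an authenticated session (omitted) ...

# generate an opening introduction from a prompt template
resp = session.post(f'{BASE}/introduction-generate',
                    json={'prompt_template': 'You are a helpful travel assistant.'})
print(resp.json())  # expected shape: {'introduction': '...'}

# generate a rule config from audience / problem statements
resp = session.post(f'{BASE}/rule-generate',
                    json={'audiences': 'indie developers',
                          'hoping_to_solve': 'drafting release notes quickly'})
print(resp.json())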
@@ -26,46 +26,46 @@ from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError
from services.message_service import MessageService

account_fields = {
    'id': fields.String,
    'name': fields.String,
    'email': fields.String
}

class ChatMessageApi(Resource):
    account_fields = {
        'id': fields.String,
        'name': fields.String,
        'email': fields.String
    }
    feedback_fields = {
        'rating': fields.String,
        'content': fields.String,
        'from_source': fields.String,
        'from_end_user_id': fields.String,
        'from_account': fields.Nested(account_fields, allow_null=True),
    }

feedback_fields = {
    'rating': fields.String,
    'content': fields.String,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account': fields.Nested(account_fields, allow_null=True),
}
annotation_fields = {
    'content': fields.String,
    'account': fields.Nested(account_fields, allow_null=True),
    'created_at': TimestampField
}

annotation_fields = {
    'content': fields.String,
    'account': fields.Nested(account_fields, allow_null=True),
    'created_at': TimestampField
}
message_detail_fields = {
    'id': fields.String,
    'conversation_id': fields.String,
    'inputs': fields.Raw,
    'query': fields.String,
    'message': fields.Raw,
    'message_tokens': fields.Integer,
    'answer': fields.String,
    'answer_tokens': fields.Integer,
    'provider_response_latency': fields.Float,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account_id': fields.String,
    'feedbacks': fields.List(fields.Nested(feedback_fields)),
    'annotation': fields.Nested(annotation_fields, allow_null=True),
    'created_at': TimestampField
}

message_detail_fields = {
    'id': fields.String,
    'conversation_id': fields.String,
    'inputs': fields.Raw,
    'query': fields.String,
    'message': fields.Raw,
    'message_tokens': fields.Integer,
    'answer': fields.String,
    'answer_tokens': fields.Integer,
    'provider_response_latency': fields.Integer,
    'from_source': fields.String,
    'from_end_user_id': fields.String,
    'from_account_id': fields.String,
    'feedbacks': fields.List(fields.Nested(feedback_fields)),
    'annotation': fields.Nested(annotation_fields, allow_null=True),
    'created_at': TimestampField
}

class ChatMessageListApi(Resource):
    message_infinite_scroll_pagination_fields = {
        'limit': fields.Integer,
        'has_more': fields.Boolean,
@@ -253,7 +253,8 @@ class MessageMoreLikeThisApi(Resource):
        message_id = str(message_id)

        parser = reqparse.RequestParser()
        parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'], location='args')
        parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'],
                            location='args')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

@@ -301,7 +302,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
                yield "data: " + json.dumps(
                    api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"

@@ -353,9 +355,33 @@ class MessageSuggestedQuestionApi(Resource):
        return {'data': questions}


class MessageApi(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    @marshal_with(message_detail_fields)
    def get(self, app_id, message_id):
        app_id = str(app_id)
        message_id = str(message_id)

        # get app info
        app_model = _get_app(app_id, 'chat')

        message = db.session.query(Message).filter(
            Message.id == message_id,
            Message.app_id == app_model.id
        ).first()

        if not message:
            raise NotFound("Message Not Exists.")

        return message


api.add_resource(MessageMoreLikeThisApi, '/apps/<uuid:app_id>/completion-messages/<uuid:message_id>/more-like-this')
api.add_resource(MessageSuggestedQuestionApi, '/apps/<uuid:app_id>/chat-messages/<uuid:message_id>/suggested-questions')
api.add_resource(ChatMessageApi, '/apps/<uuid:app_id>/chat-messages', endpoint='chat_messages')
api.add_resource(ChatMessageListApi, '/apps/<uuid:app_id>/chat-messages', endpoint='console_chat_messages')
api.add_resource(MessageFeedbackApi, '/apps/<uuid:app_id>/feedbacks')
api.add_resource(MessageAnnotationApi, '/apps/<uuid:app_id>/annotations')
api.add_resource(MessageAnnotationCountApi, '/apps/<uuid:app_id>/annotations/count')
api.add_resource(MessageApi, '/apps/<uuid:app_id>/messages/<uuid:message_id>', endpoint='console_message')
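Both the streamed chunks and the inline error frames emitted through compact_response above follow the server-sent-events convention of a "data: <json>" line followed by a blank line. A minimal client-side sketch for splitting such a stream back into JSON events; the endpoint URL and authentication are placeholders, and requests is an assumed client dependency:

import json

import requests  # assumed client dependency


def iter_sse_events(url, **kwargs):
    """Yield the JSON payload of every 'data: ...' frame in an event stream."""
    with requests.post(url, stream=True, **kwargs) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if line and line.startswith('data: '):
                yield json.loads(line[len('data: '):])


# hypothetical call against a streaming chat endpoint like the ones registered above
# for event in iter_sse_events('http://localhost:5001/console/api/apps/<app_id>/chat-messages',
#                              json={'inputs': {}, 'query': 'hi', 'response_mode': 'streaming'}):
#     print(event)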
@@ -1,4 +1,5 @@
# -*- coding:utf-8 -*-
from decimal import Decimal
from datetime import datetime

import pytz
@@ -59,18 +60,20 @@ class DailyConversationStatistic(Resource):
            arg_dict['end'] = end_datetime_utc

        sql_query += ' GROUP BY date order by date'
        rs = db.session.execute(sql_query, arg_dict)

        response_date = []
        with db.engine.begin() as conn:
            rs = conn.execute(db.text(sql_query), arg_dict)

        response_data = []

        for i in rs:
            response_date.append({
            response_data.append({
                'date': str(i.date),
                'conversation_count': i.conversation_count
            })

        return jsonify({
            'data': response_date
            'data': response_data
        })


@@ -119,18 +122,20 @@ class DailyTerminalsStatistic(Resource):
            arg_dict['end'] = end_datetime_utc

        sql_query += ' GROUP BY date order by date'
        rs = db.session.execute(sql_query, arg_dict)

        response_date = []
        with db.engine.begin() as conn:
            rs = conn.execute(db.text(sql_query), arg_dict)

        response_data = []

        for i in rs:
            response_date.append({
            response_data.append({
                'date': str(i.date),
                'terminal_count': i.terminal_count
            })

        return jsonify({
            'data': response_date
            'data': response_data
        })


@@ -180,12 +185,14 @@ class DailyTokenCostStatistic(Resource):
            arg_dict['end'] = end_datetime_utc

        sql_query += ' GROUP BY date order by date'
        rs = db.session.execute(sql_query, arg_dict)

        response_date = []
        with db.engine.begin() as conn:
            rs = conn.execute(db.text(sql_query), arg_dict)

        response_data = []

        for i in rs:
            response_date.append({
            response_data.append({
                'date': str(i.date),
                'token_count': i.token_count,
                'total_price': i.total_price,
@@ -193,10 +200,207 @@ class DailyTokenCostStatistic(Resource):
            })

        return jsonify({
            'data': response_date
            'data': response_data
        })


class AverageSessionInteractionStatistic(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, app_id):
        account = current_user
        app_id = str(app_id)
        app_model = _get_app(app_id, 'chat')

        parser = reqparse.RequestParser()
        parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
        parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
        args = parser.parse_args()

        sql_query = """SELECT date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
            AVG(subquery.message_count) AS interactions
            FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count
                FROM conversations c
                JOIN messages m ON c.id = m.conversation_id
                WHERE c.override_model_configs IS NULL AND c.app_id = :app_id"""
        arg_dict = {'tz': account.timezone, 'app_id': app_model.id}

        timezone = pytz.timezone(account.timezone)
        utc_timezone = pytz.utc

        if args['start']:
            start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
            start_datetime = start_datetime.replace(second=0)

            start_datetime_timezone = timezone.localize(start_datetime)
            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)

            sql_query += ' and c.created_at >= :start'
            arg_dict['start'] = start_datetime_utc

        if args['end']:
            end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
            end_datetime = end_datetime.replace(second=0)

            end_datetime_timezone = timezone.localize(end_datetime)
            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)

            sql_query += ' and c.created_at < :end'
            arg_dict['end'] = end_datetime_utc

        sql_query += """
            GROUP BY m.conversation_id) subquery
            LEFT JOIN conversations c on c.id=subquery.conversation_id
            GROUP BY date
            ORDER BY date"""

        with db.engine.begin() as conn:
            rs = conn.execute(db.text(sql_query), arg_dict)

        response_data = []

        for i in rs:
            response_data.append({
                'date': str(i.date),
                'interactions': float(i.interactions.quantize(Decimal('0.01')))
            })

        return jsonify({
            'data': response_data
        })


class UserSatisfactionRateStatistic(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, app_id):
        account = current_user
        app_id = str(app_id)
        app_model = _get_app(app_id)

        parser = reqparse.RequestParser()
        parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
        parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
        args = parser.parse_args()

        sql_query = '''
            SELECT date(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
                COUNT(m.id) as message_count, COUNT(mf.id) as feedback_count
            FROM messages m
            LEFT JOIN message_feedbacks mf on mf.message_id=m.id
            WHERE m.app_id = :app_id
        '''
        arg_dict = {'tz': account.timezone, 'app_id': app_model.id}

        timezone = pytz.timezone(account.timezone)
        utc_timezone = pytz.utc

        if args['start']:
            start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
            start_datetime = start_datetime.replace(second=0)

            start_datetime_timezone = timezone.localize(start_datetime)
            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)

            sql_query += ' and m.created_at >= :start'
            arg_dict['start'] = start_datetime_utc

        if args['end']:
            end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
            end_datetime = end_datetime.replace(second=0)

            end_datetime_timezone = timezone.localize(end_datetime)
            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)

            sql_query += ' and m.created_at < :end'
            arg_dict['end'] = end_datetime_utc

        sql_query += ' GROUP BY date order by date'

        with db.engine.begin() as conn:
            rs = conn.execute(db.text(sql_query), arg_dict)

        response_data = []

        for i in rs:
            response_data.append({
                'date': str(i.date),
                'rate': round((i.feedback_count * 1000 / i.message_count) if i.message_count > 0 else 0, 2),
            })

        return jsonify({
            'data': response_data
        })


class AverageResponseTimeStatistic(Resource):
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, app_id):
        account = current_user
        app_id = str(app_id)
        app_model = _get_app(app_id, 'completion')

        parser = reqparse.RequestParser()
        parser.add_argument('start', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
        parser.add_argument('end', type=datetime_string('%Y-%m-%d %H:%M'), location='args')
        args = parser.parse_args()

        sql_query = '''
            SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
                AVG(provider_response_latency) as latency
            FROM messages
            WHERE app_id = :app_id
        '''
        arg_dict = {'tz': account.timezone, 'app_id': app_model.id}

        timezone = pytz.timezone(account.timezone)
        utc_timezone = pytz.utc

        if args['start']:
            start_datetime = datetime.strptime(args['start'], '%Y-%m-%d %H:%M')
            start_datetime = start_datetime.replace(second=0)

            start_datetime_timezone = timezone.localize(start_datetime)
            start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)

            sql_query += ' and created_at >= :start'
            arg_dict['start'] = start_datetime_utc

        if args['end']:
            end_datetime = datetime.strptime(args['end'], '%Y-%m-%d %H:%M')
            end_datetime = end_datetime.replace(second=0)

            end_datetime_timezone = timezone.localize(end_datetime)
            end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)

            sql_query += ' and created_at < :end'
            arg_dict['end'] = end_datetime_utc

        sql_query += ' GROUP BY date order by date'

        with db.engine.begin() as conn:
            rs = conn.execute(db.text(sql_query), arg_dict)

        response_data = []

        for i in rs:
            response_data.append({
                'date': str(i.date),
                'latency': round(i.latency * 1000, 4)
            })

        return jsonify({
            'data': response_data
        })


api.add_resource(DailyConversationStatistic, '/apps/<uuid:app_id>/statistics/daily-conversations')
api.add_resource(DailyTerminalsStatistic, '/apps/<uuid:app_id>/statistics/daily-end-users')
api.add_resource(DailyTokenCostStatistic, '/apps/<uuid:app_id>/statistics/token-costs')
api.add_resource(AverageSessionInteractionStatistic, '/apps/<uuid:app_id>/statistics/average-session-interactions')
api.add_resource(UserSatisfactionRateStatistic, '/apps/<uuid:app_id>/statistics/user-satisfaction-rate')
api.add_resource(AverageResponseTimeStatistic, '/apps/<uuid:app_id>/statistics/average-response-time')
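A pattern worth noting in the statistics changes above: every raw SQL string is now executed through an explicit connection with db.text() rather than handed to db.session.execute() as a bare string. A minimal sketch of the same idiom outside Flask, assuming a plain SQLAlchemy engine; the DSN, table, and column names are illustrative only:

from sqlalchemy import create_engine, text

engine = create_engine('postgresql://user:pass@localhost/dify')  # illustrative DSN

sql = ('SELECT date(created_at) AS date, COUNT(*) AS n '
       'FROM messages WHERE app_id = :app_id GROUP BY date ORDER BY date')

# begin() opens a transaction that commits on successful exit;
# text() compiles the string so the named :app_id parameter is bound safely
with engine.begin() as conn:
    rows = conn.execute(text(sql), {'app_id': 'some-app-uuid'}).fetchall()

for row in rows:
    print(row.date, row.n)

Fetching inside the with block, as here, also avoids iterating a result set after its connection has been returned to the pool.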
@@ -50,8 +50,8 @@ def _validate_name(name):


def _validate_description_length(description):
    if len(description) > 200:
        raise ValueError('Description cannot exceed 200 characters.')
    if len(description) > 400:
        raise ValueError('Description cannot exceed 400 characters.')
    return description

@@ -10,13 +10,14 @@ from werkzeug.exceptions import NotFound, Forbidden

import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.console.datasets.error import DocumentAlreadyFinishedError, InvalidActionError, DocumentIndexingError, \
    InvalidMetadataError, ArchivedDocumentImmutableError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.indexing_runner import IndexingRunner
from core.llm.error import ProviderTokenNotInitError
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from extensions.ext_redis import redis_client
from libs.helper import TimestampField
from extensions.ext_database import db
@@ -207,9 +208,10 @@ class DatasetDocumentListApi(Resource):
        parser = reqparse.RequestParser()
        parser.add_argument('indexing_technique', type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False,
                            location='json')
        parser.add_argument('data_source', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('process_rule', type=dict, required=True, nullable=True, location='json')
        parser.add_argument('data_source', type=dict, required=False, location='json')
        parser.add_argument('process_rule', type=dict, required=False, location='json')
        parser.add_argument('duplicate', type=bool, nullable=False, location='json')
        parser.add_argument('original_document_id', type=str, required=False, location='json')
        args = parser.parse_args()

        if not dataset.indexing_technique and not args['indexing_technique']:
@@ -222,6 +224,10 @@ class DatasetDocumentListApi(Resource):
            document = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        return document

@@ -259,6 +265,10 @@ class DatasetInitApi(Resource):
            )
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()

        response = {
            'dataset': dataset,
@@ -338,10 +348,12 @@ class DocumentIndexingStatusApi(DocumentResource):

        completed_segments = DocumentSegment.query \
            .filter(DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.document_id == str(document_id)) \
                    DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()
        total_segments = DocumentSegment.query \
            .filter_by(document_id=str(document_id)) \
            .filter(DocumentSegment.document_id == str(document_id),
                    DocumentSegment.status != 're_segment') \
            .count()

        document.completed_segments = completed_segments

@@ -78,12 +78,14 @@ class DatasetDocumentSegmentListApi(Resource):
        parser.add_argument('hit_count_gte', type=int,
                            default=None, location='args')
        parser.add_argument('enabled', type=str, default='all', location='args')
        parser.add_argument('keyword', type=str, default=None, location='args')
        args = parser.parse_args()

        last_id = args['last_id']
        limit = min(args['limit'], 100)
        status_list = args['status']
        hit_count_gte = args['hit_count_gte']
        keyword = args['keyword']

        query = DocumentSegment.query.filter(
            DocumentSegment.document_id == str(document_id),
@@ -104,6 +106,9 @@ class DatasetDocumentSegmentListApi(Resource):
        if hit_count_gte is not None:
            query = query.filter(DocumentSegment.hit_count >= hit_count_gte)

        if keyword:
            query = query.where(DocumentSegment.content.ilike(f'%{keyword}%'))

        if args['enabled'].lower() != 'all':
            if args['enabled'].lower() == 'true':
                query = query.filter(DocumentSegment.enabled == True)
@@ -3,7 +3,7 @@ from libs.exception import BaseHTTPException

class NoFileUploadedError(BaseHTTPException):
    error_code = 'no_file_uploaded'
    description = "No file uploaded."
    description = "Please upload your file."
    code = 400


@@ -27,25 +27,25 @@ class UnsupportedFileTypeError(BaseHTTPException):

class HighQualityDatasetOnlyError(BaseHTTPException):
    error_code = 'high_quality_dataset_only'
    description = "High quality dataset only."
    description = "Current operation only supports 'high-quality' datasets."
    code = 400


class DatasetNotInitializedError(BaseHTTPException):
    error_code = 'dataset_not_initialized'
    description = "Dataset not initialized."
    description = "The dataset is still being initialized or indexing. Please wait a moment."
    code = 400


class ArchivedDocumentImmutableError(BaseHTTPException):
    error_code = 'archived_document_immutable'
    description = "Cannot process an archived document."
    description = "The archived document is not editable."
    code = 403


class DatasetNameDuplicateError(BaseHTTPException):
    error_code = 'dataset_name_duplicate'
    description = "Dataset name already exists."
    description = "The dataset name already exists. Please modify your dataset name."
    code = 409


@@ -57,17 +57,17 @@ class InvalidActionError(BaseHTTPException):

class DocumentAlreadyFinishedError(BaseHTTPException):
    error_code = 'document_already_finished'
    description = "Document already finished."
    description = "The document has been processed. Please refresh the page or go to the document details."
    code = 400


class DocumentIndexingError(BaseHTTPException):
    error_code = 'document_indexing'
    description = "Document indexing."
    description = "The document is being processed and cannot be edited."
    code = 400


class InvalidMetadataError(BaseHTTPException):
    error_code = 'invalid_metadata'
    description = "Invalid metadata."
    description = "The metadata content is incorrect. Please check and verify."
    code = 400
@@ -18,6 +18,7 @@ from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.index.readers.html_parser import HTMLParser
from core.index.readers.pdf_parser import PDFParser
from core.index.readers.xlsx_parser import XLSXParser
from extensions.ext_storage import storage
from libs.helper import TimestampField
from extensions.ext_database import db
@@ -26,7 +27,7 @@ from models.model import UploadFile
cache = TTLCache(maxsize=None, ttl=30)

FILE_SIZE_LIMIT = 15 * 1024 * 1024  # 15MB
ALLOWED_EXTENSIONS = ['txt', 'markdown', 'md', 'pdf', 'html', 'htm']
ALLOWED_EXTENSIONS = ['txt', 'markdown', 'md', 'pdf', 'html', 'htm', 'xlsx']
PREVIEW_WORDS_LIMIT = 3000


@@ -133,6 +134,9 @@ class FilePreviewApi(Resource):
            # Use BeautifulSoup to extract text
            parser = HTMLParser()
            text = parser.parse_file(Path(filepath))
        elif extension == 'xlsx':
            parser = XLSXParser()
            text = parser.parse_file(filepath)
        else:
            # ['txt', 'markdown', 'md']
            with open(filepath, "rb") as fp:
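The XLSXParser referenced above lives in core/index/readers/xlsx_parser.py, which this diff does not show. Purely as a hypothetical illustration of the shape implied by the call sites (a parse_file method returning plain text), a sketch using openpyxl; the class body here is assumed, not the project's actual implementation:

from openpyxl import load_workbook  # assumed dependency


class XLSXParser:
    # hypothetical sketch: flattens every sheet into tab-separated text lines
    def parse_file(self, file_path) -> str:
        workbook = load_workbook(filename=str(file_path), read_only=True, data_only=True)
        lines = []
        for sheet in workbook.worksheets:
            for row in sheet.iter_rows(values_only=True):
                cells = [str(cell) for cell in row if cell is not None]
                if cells:
                    lines.append('\t'.join(cells))
        return '\n'.join(lines)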
@@ -6,9 +6,12 @@ from werkzeug.exceptions import InternalServerError, NotFound, Forbidden

import services
from controllers.console import api
from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \
    ProviderModelCurrentlyNotSupportError
from controllers.console.datasets.error import HighQualityDatasetOnlyError, DatasetNotInitializedError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import TimestampField
from services.dataset_service import DatasetService
from services.hit_testing_service import HitTestingService
@@ -92,6 +95,12 @@ class HitTestingApi(Resource):
            return {"query": response['query'], 'records': marshal(response['records'], hit_testing_record_fields)}
        except services.errors.index.IndexNotInitializedError:
            raise DatasetNotInitializedError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except Exception as e:
            logging.exception("Hit testing failed.")
            raise InternalServerError(str(e))
@@ -3,13 +3,14 @@ from libs.exception import BaseHTTPException

class AlreadySetupError(BaseHTTPException):
    error_code = 'already_setup'
    description = "Application already setup."
    description = "Dify has been successfully installed. Please refresh the page or return to the dashboard homepage."
    code = 403


class NotSetupError(BaseHTTPException):
    error_code = 'not_setup'
    description = "Application not setup."
    description = "Dify has not been initialized and installed yet. " \
                  "Please proceed with the initialization and installation process first."
    code = 401

180 api/controllers/console/explore/completion.py Normal file
@@ -0,0 +1,180 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union

from flask import Response, stream_with_context
from flask_login import current_user
from flask_restful import reqparse
from werkzeug.exceptions import InternalServerError, NotFound

import services
from controllers.console import api
from controllers.console.app.error import ConversationCompletedError, AppUnavailableError, ProviderNotInitializeError, \
    ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
from controllers.console.explore.error import NotCompletionAppError, NotChatAppError
from controllers.console.explore.wraps import InstalledAppResource
from core.conversation_message_task import PubHandler
from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
    LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value
from services.completion_service import CompletionService


# define completion api for user
class CompletionApi(InstalledAppResource):

    def post(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=current_user,
                args=args,
                from_source='console',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception as e:
            logging.exception("internal server error.")
            raise InternalServerError()


class CompletionStopApi(InstalledAppResource):
    def post(self, installed_app, task_id):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        PubHandler.stop(current_user, task_id)

        return {'result': 'success'}, 200


class ChatApi(InstalledAppResource):
    def post(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('inputs', type=dict, required=True, location='json')
        parser.add_argument('query', type=str, required=True, location='json')
        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        parser.add_argument('conversation_id', type=uuid_value, location='json')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.completion(
                app_model=app_model,
                user=current_user,
                args=args,
                from_source='console',
                streaming=streaming
            )

            return compact_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception as e:
            logging.exception("internal server error.")
            raise InternalServerError()


class ChatStopApi(InstalledAppResource):
    def post(self, installed_app, task_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        PubHandler.stop(current_user, task_id)

        return {'result': 'success'}, 200


def compact_response(response: Union[dict | Generator]) -> Response:
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')
    else:
        def generate() -> Generator:
            try:
                for chunk in response:
                    yield chunk
            except services.errors.conversation.ConversationNotExistsError:
                yield "data: " + json.dumps(api.handle_error(NotFound("Conversation Not Exists.")).get_json()) + "\n\n"
            except services.errors.conversation.ConversationCompletedError:
                yield "data: " + json.dumps(api.handle_error(ConversationCompletedError()).get_json()) + "\n\n"
            except services.errors.app_model_config.AppModelConfigBrokenError:
                logging.exception("App model config broken.")
                yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
            except ProviderTokenNotInitError:
                yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
            except ValueError as e:
                yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
            except Exception:
                logging.exception("internal server error.")
                yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"

        return Response(stream_with_context(generate()), status=200,
                        mimetype='text/event-stream')


api.add_resource(CompletionApi, '/installed-apps/<uuid:installed_app_id>/completion-messages', endpoint='installed_app_completion')
api.add_resource(CompletionStopApi, '/installed-apps/<uuid:installed_app_id>/completion-messages/<string:task_id>/stop', endpoint='installed_app_stop_completion')
api.add_resource(ChatApi, '/installed-apps/<uuid:installed_app_id>/chat-messages', endpoint='installed_app_chat_completion')
api.add_resource(ChatStopApi, '/installed-apps/<uuid:installed_app_id>/chat-messages/<string:task_id>/stop', endpoint='installed_app_stop_chat_completion')
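One small typing oddity in the new file: compact_response is annotated as Union[dict | Generator], which mixes the typing.Union form with the PEP 604 | operator. On Python 3.10+ it still evaluates, because the | expression already builds the union and Union of a single member is a no-op, but the conventional spellings are either of these (ResponseBody is just an illustrative alias, not a name from the codebase):

from typing import Generator, Union

ResponseBody = Union[dict | Generator]  # as written in the diff; needs Python 3.10+
ResponseBody = Union[dict, Generator]   # classic typing form, works on older versions too
ResponseBody = dict | Generator         # plain PEP 604 form, Python 3.10+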
127 api/controllers/console/explore/conversation.py Normal file
@@ -0,0 +1,127 @@
# -*- coding:utf-8 -*-
from flask_login import current_user
from flask_restful import fields, reqparse, marshal_with
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound

from controllers.console import api
from controllers.console.explore.error import NotChatAppError
from controllers.console.explore.wraps import InstalledAppResource
from libs.helper import TimestampField, uuid_value
from services.conversation_service import ConversationService
from services.errors.conversation import LastConversationNotExistsError, ConversationNotExistsError
from services.web_conversation_service import WebConversationService

conversation_fields = {
    'id': fields.String,
    'name': fields.String,
    'inputs': fields.Raw,
    'status': fields.String,
    'introduction': fields.String,
    'created_at': TimestampField
}

conversation_infinite_scroll_pagination_fields = {
    'limit': fields.Integer,
    'has_more': fields.Boolean,
    'data': fields.List(fields.Nested(conversation_fields))
}


class ConversationListApi(InstalledAppResource):

    @marshal_with(conversation_infinite_scroll_pagination_fields)
    def get(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('last_id', type=uuid_value, location='args')
        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
        parser.add_argument('pinned', type=str, choices=['true', 'false', None], location='args')
        args = parser.parse_args()

        pinned = None
        if 'pinned' in args and args['pinned'] is not None:
            pinned = True if args['pinned'] == 'true' else False

        try:
            return WebConversationService.pagination_by_last_id(
                app_model=app_model,
                user=current_user,
                last_id=args['last_id'],
                limit=args['limit'],
                pinned=pinned
            )
        except LastConversationNotExistsError:
            raise NotFound("Last Conversation Not Exists.")


class ConversationApi(InstalledAppResource):
    def delete(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)
        ConversationService.delete(app_model, conversation_id, current_user)
        WebConversationService.unpin(app_model, conversation_id, current_user)

        return {"result": "success"}, 204


class ConversationRenameApi(InstalledAppResource):

    @marshal_with(conversation_fields)
    def post(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)

        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str, required=True, location='json')
        args = parser.parse_args()

        try:
            return ConversationService.rename(app_model, conversation_id, current_user, args['name'])
        except ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")


class ConversationPinApi(InstalledAppResource):

    def patch(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)

        try:
            WebConversationService.pin(app_model, conversation_id, current_user)
        except ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")

        return {"result": "success"}


class ConversationUnPinApi(InstalledAppResource):
    def patch(self, installed_app, c_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotChatAppError()

        conversation_id = str(c_id)
        WebConversationService.unpin(app_model, conversation_id, current_user)

        return {"result": "success"}


api.add_resource(ConversationRenameApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>/name', endpoint='installed_app_conversation_rename')
api.add_resource(ConversationListApi, '/installed-apps/<uuid:installed_app_id>/conversations', endpoint='installed_app_conversations')
api.add_resource(ConversationApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>', endpoint='installed_app_conversation')
api.add_resource(ConversationPinApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>/pin', endpoint='installed_app_conversation_pin')
api.add_resource(ConversationUnPinApi, '/installed-apps/<uuid:installed_app_id>/conversations/<uuid:c_id>/unpin', endpoint='installed_app_conversation_unpin')
20 api/controllers/console/explore/error.py Normal file
@@ -0,0 +1,20 @@
# -*- coding:utf-8 -*-
from libs.exception import BaseHTTPException


class NotCompletionAppError(BaseHTTPException):
    error_code = 'not_completion_app'
    description = "Not Completion App"
    code = 400


class NotChatAppError(BaseHTTPException):
    error_code = 'not_chat_app'
    description = "Not Chat App"
    code = 400


class AppSuggestedQuestionsAfterAnswerDisabledError(BaseHTTPException):
    error_code = 'app_suggested_questions_after_answer_disabled'
    description = "Function Suggested questions after answer disabled."
    code = 403
143 api/controllers/console/explore/installed_app.py Normal file
@@ -0,0 +1,143 @@
# -*- coding:utf-8 -*-
from datetime import datetime

from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with, inputs
from sqlalchemy import and_
from werkzeug.exceptions import NotFound, Forbidden, BadRequest

from controllers.console import api
from controllers.console.explore.wraps import InstalledAppResource
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.helper import TimestampField
from models.model import App, InstalledApp, RecommendedApp
from services.account_service import TenantService

app_fields = {
    'id': fields.String,
    'name': fields.String,
    'mode': fields.String,
    'icon': fields.String,
    'icon_background': fields.String
}

installed_app_fields = {
    'id': fields.String,
    'app': fields.Nested(app_fields),
    'app_owner_tenant_id': fields.String,
    'is_pinned': fields.Boolean,
    'last_used_at': TimestampField,
    'editable': fields.Boolean,
    'uninstallable': fields.Boolean,
}

installed_app_list_fields = {
    'installed_apps': fields.List(fields.Nested(installed_app_fields))
}


class InstalledAppsListApi(Resource):
    @login_required
    @account_initialization_required
    @marshal_with(installed_app_list_fields)
    def get(self):
        current_tenant_id = current_user.current_tenant_id
        installed_apps = db.session.query(InstalledApp).filter(
            InstalledApp.tenant_id == current_tenant_id
        ).all()

        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        installed_apps = [
            {
                'id': installed_app.id,
                'app': installed_app.app,
                'app_owner_tenant_id': installed_app.app_owner_tenant_id,
                'is_pinned': installed_app.is_pinned,
                'last_used_at': installed_app.last_used_at,
                "editable": current_user.role in ["owner", "admin"],
                "uninstallable": current_tenant_id == installed_app.app_owner_tenant_id
            }
            for installed_app in installed_apps
        ]
        installed_apps.sort(key=lambda app: (-app['is_pinned'], app['last_used_at']
                                             if app['last_used_at'] is not None else datetime.min))

        return {'installed_apps': installed_apps}

    @login_required
    @account_initialization_required
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('app_id', type=str, required=True, help='Invalid app_id')
        args = parser.parse_args()

        recommended_app = RecommendedApp.query.filter(RecommendedApp.app_id == args['app_id']).first()
        if recommended_app is None:
            raise NotFound('App not found')

        current_tenant_id = current_user.current_tenant_id
        app = db.session.query(App).filter(
            App.id == args['app_id']
        ).first()

        if app is None:
            raise NotFound('App not found')

        if not app.is_public:
            raise Forbidden('You can\'t install a non-public app')

        installed_app = InstalledApp.query.filter(and_(
            InstalledApp.app_id == args['app_id'],
            InstalledApp.tenant_id == current_tenant_id
        )).first()

        if installed_app is None:
            # todo: position
            recommended_app.install_count += 1

            new_installed_app = InstalledApp(
                app_id=args['app_id'],
                tenant_id=current_tenant_id,
                app_owner_tenant_id=app.tenant_id,
                is_pinned=False,
                last_used_at=datetime.utcnow()
            )
            db.session.add(new_installed_app)
            db.session.commit()

        return {'message': 'App installed successfully'}


class InstalledAppApi(InstalledAppResource):
    """
    update and delete an installed app
    use InstalledAppResource to apply default decorators and get installed_app
    """
    def delete(self, installed_app):
        if installed_app.app_owner_tenant_id == current_user.current_tenant_id:
            raise BadRequest('You can\'t uninstall an app owned by the current tenant')

        db.session.delete(installed_app)
        db.session.commit()

        return {'result': 'success', 'message': 'App uninstalled successfully'}

    def patch(self, installed_app):
        parser = reqparse.RequestParser()
        parser.add_argument('is_pinned', type=inputs.boolean)
        args = parser.parse_args()

        commit_args = False
        if 'is_pinned' in args:
            installed_app.is_pinned = args['is_pinned']
            commit_args = True

        if commit_args:
            db.session.commit()

        return {'result': 'success', 'message': 'App info updated successfully'}


api.add_resource(InstalledAppsListApi, '/installed-apps')
api.add_resource(InstalledAppApi, '/installed-apps/<uuid:installed_app_id>')
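The sort in InstalledAppsListApi.get above puts pinned apps first (negating is_pinned maps True to -1, which sorts before 0) and then orders by last_used_at ascending, substituting datetime.min for apps that have never been used. A small self-contained illustration of that key function:

from datetime import datetime

apps = [
    {'name': 'a', 'is_pinned': False, 'last_used_at': datetime(2023, 6, 1)},
    {'name': 'b', 'is_pinned': True, 'last_used_at': None},
    {'name': 'c', 'is_pinned': True, 'last_used_at': datetime(2023, 5, 1)},
]

# pinned first, then oldest usage first; None sorts ahead via datetime.min
apps.sort(key=lambda app: (-app['is_pinned'], app['last_used_at']
                           if app['last_used_at'] is not None else datetime.min))

print([app['name'] for app in apps])  # ['b', 'c', 'a']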
196 api/controllers/console/explore/message.py Normal file
@@ -0,0 +1,196 @@
# -*- coding:utf-8 -*-
import json
import logging
from typing import Generator, Union

from flask import stream_with_context, Response
from flask_login import current_user
from flask_restful import reqparse, fields, marshal_with
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound, InternalServerError

import services
from controllers.console import api
from controllers.console.app.error import AppMoreLikeThisDisabledError, ProviderNotInitializeError, \
    ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
# NotChatAppError is raised in MessageListApi.get below but was missing from the import as committed
from controllers.console.explore.error import NotCompletionAppError, NotChatAppError, \
    AppSuggestedQuestionsAfterAnswerDisabledError
from controllers.console.explore.wraps import InstalledAppResource
from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
    ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
from libs.helper import uuid_value, TimestampField
from services.completion_service import CompletionService
from services.errors.app import MoreLikeThisDisabledError
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
from services.message_service import MessageService


class MessageListApi(InstalledAppResource):
    feedback_fields = {
        'rating': fields.String
    }

    message_fields = {
        'id': fields.String,
        'conversation_id': fields.String,
        'inputs': fields.Raw,
        'query': fields.String,
        'answer': fields.String,
        'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
        'created_at': TimestampField
    }

    message_infinite_scroll_pagination_fields = {
        'limit': fields.Integer,
        'has_more': fields.Boolean,
        'data': fields.List(fields.Nested(message_fields))
    }

    @marshal_with(message_infinite_scroll_pagination_fields)
    def get(self, installed_app):
        app_model = installed_app.app

        if app_model.mode != 'chat':
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('conversation_id', required=True, type=uuid_value, location='args')
        parser.add_argument('first_id', type=uuid_value, location='args')
        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
        args = parser.parse_args()

        try:
            return MessageService.pagination_by_first_id(app_model, current_user,
                                                         args['conversation_id'], args['first_id'], args['limit'])
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.message.FirstMessageNotExistsError:
            raise NotFound("First Message Not Exists.")


class MessageFeedbackApi(InstalledAppResource):
    def post(self, installed_app, message_id):
        app_model = installed_app.app

        message_id = str(message_id)

        parser = reqparse.RequestParser()
        parser.add_argument('rating', type=str, choices=['like', 'dislike', None], location='json')
        args = parser.parse_args()

        try:
            MessageService.create_feedback(app_model, message_id, current_user, args['rating'])
        except services.errors.message.MessageNotExistsError:
            raise NotFound("Message Not Exists.")

        return {'result': 'success'}


class MessageMoreLikeThisApi(InstalledAppResource):
    def get(self, installed_app, message_id):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        message_id = str(message_id)

        parser = reqparse.RequestParser()
        parser.add_argument('response_mode', type=str, required=True, choices=['blocking', 'streaming'], location='args')
        args = parser.parse_args()

        streaming = args['response_mode'] == 'streaming'

        try:
            response = CompletionService.generate_more_like_this(app_model, current_user, message_id, streaming)
            return compact_response(response)
        except MessageNotExistsError:
            raise NotFound("Message Not Exists.")
        except MoreLikeThisDisabledError:
            raise AppMoreLikeThisDisabledError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except ValueError as e:
            raise e
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()


def compact_response(response: Union[dict | Generator]) -> Response:
    if isinstance(response, dict):
        return Response(response=json.dumps(response), status=200, mimetype='application/json')
    else:
        def generate() -> Generator:
            try:
                for chunk in response:
                    yield chunk
            except MessageNotExistsError:
                yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
            except MoreLikeThisDisabledError:
                yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
            except ProviderTokenNotInitError:
                yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
            except QuotaExceededError:
                yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
            except ModelCurrentlyNotSupportError:
                yield "data: " + json.dumps(api.handle_error(ProviderModelCurrentlyNotSupportError()).get_json()) + "\n\n"
            except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                    LLMRateLimitError, LLMAuthorizationError) as e:
                yield "data: " + json.dumps(api.handle_error(CompletionRequestError(str(e))).get_json()) + "\n\n"
            except ValueError as e:
                yield "data: " + json.dumps(api.handle_error(e).get_json()) + "\n\n"
            except Exception:
                logging.exception("internal server error.")
                yield "data: " + json.dumps(api.handle_error(InternalServerError()).get_json()) + "\n\n"

        return Response(stream_with_context(generate()), status=200,
                        mimetype='text/event-stream')


class MessageSuggestedQuestionApi(InstalledAppResource):
    def get(self, installed_app, message_id):
        app_model = installed_app.app
        if app_model.mode != 'chat':
            raise NotCompletionAppError()

        message_id = str(message_id)

        try:
            questions = MessageService.get_suggested_questions_after_answer(
                app_model=app_model,
                user=current_user,
                message_id=message_id
            )
        except MessageNotExistsError:
            raise NotFound("Message not found")
        except ConversationNotExistsError:
            raise NotFound("Conversation not found")
        except SuggestedQuestionsAfterAnswerDisabledError:
            raise AppSuggestedQuestionsAfterAnswerDisabledError()
        except ProviderTokenNotInitError:
            raise ProviderNotInitializeError()
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
                LLMRateLimitError, LLMAuthorizationError) as e:
            raise CompletionRequestError(str(e))
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()

        return {'data': questions}


api.add_resource(MessageListApi, '/installed-apps/<uuid:installed_app_id>/messages', endpoint='installed_app_messages')
api.add_resource(MessageFeedbackApi, '/installed-apps/<uuid:installed_app_id>/messages/<uuid:message_id>/feedbacks', endpoint='installed_app_message_feedback')
api.add_resource(MessageMoreLikeThisApi, '/installed-apps/<uuid:installed_app_id>/messages/<uuid:message_id>/more-like-this', endpoint='installed_app_more_like_this')
api.add_resource(MessageSuggestedQuestionApi, '/installed-apps/<uuid:installed_app_id>/messages/<uuid:message_id>/suggested-questions', endpoint='installed_app_suggested_question')
43 api/controllers/console/explore/parameter.py Normal file
@@ -0,0 +1,43 @@
# -*- coding:utf-8 -*-
from flask_restful import marshal_with, fields

from controllers.console import api
from controllers.console.explore.wraps import InstalledAppResource


class AppParameterApi(InstalledAppResource):
    """Resource for app variables."""
    variable_fields = {
        'key': fields.String,
        'name': fields.String,
        'description': fields.String,
        'type': fields.String,
        'default': fields.String,
        'max_length': fields.Integer,
        'options': fields.List(fields.String)
    }

    parameters_fields = {
        'opening_statement': fields.String,
        'suggested_questions': fields.Raw,
        'suggested_questions_after_answer': fields.Raw,
        'more_like_this': fields.Raw,
        'user_input_form': fields.Raw,
    }

    @marshal_with(parameters_fields)
    def get(self, installed_app):
        """Retrieve app parameters."""
        app_model = installed_app.app
        app_model_config = app_model.app_model_config

        return {
            'opening_statement': app_model_config.opening_statement,
            'suggested_questions': app_model_config.suggested_questions_list,
            'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
            'more_like_this': app_model_config.more_like_this_dict,
            'user_input_form': app_model_config.user_input_form_list
        }


api.add_resource(AppParameterApi, '/installed-apps/<uuid:installed_app_id>/parameters', endpoint='installed_app_parameters')
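For orientation, a minimal sketch of calling the route registered above; the base URL and session cookie are assumptions for a local deployment, only the path comes from this diff:

import requests

BASE = 'http://localhost:5001/console/api'  # assumed local console API prefix
installed_app_id = '00000000-0000-0000-0000-000000000000'  # placeholder UUID

resp = requests.get(
    f'{BASE}/installed-apps/{installed_app_id}/parameters',
    cookies={'session': '<console-session>'},  # assumed auth mechanism
)
print(resp.json())  # keys follow parameters_fields above: opening_statement, suggested_questions, ...
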
138  api/controllers/console/explore/recommended_app.py  Normal file
@@ -0,0 +1,138 @@
# -*- coding:utf-8 -*-
from flask_login import login_required, current_user
from flask_restful import Resource, fields, marshal_with
from sqlalchemy import and_

from controllers.console import api
from controllers.console.app.error import AppNotFoundError
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from models.model import App, InstalledApp, RecommendedApp
from services.account_service import TenantService

app_fields = {
    'id': fields.String,
    'name': fields.String,
    'mode': fields.String,
    'icon': fields.String,
    'icon_background': fields.String
}

recommended_app_fields = {
    'app': fields.Nested(app_fields, attribute='app'),
    'app_id': fields.String,
    'description': fields.String(attribute='description'),
    'copyright': fields.String,
    'privacy_policy': fields.String,
    'category': fields.String,
    'position': fields.Integer,
    'is_listed': fields.Boolean,
    'install_count': fields.Integer,
    'installed': fields.Boolean,
    'editable': fields.Boolean
}

recommended_app_list_fields = {
    'recommended_apps': fields.List(fields.Nested(recommended_app_fields)),
    'categories': fields.List(fields.String)
}


class RecommendedAppListApi(Resource):
    @login_required
    @account_initialization_required
    @marshal_with(recommended_app_list_fields)
    def get(self):
        language_prefix = current_user.interface_language if current_user.interface_language else 'en-US'

        recommended_apps = db.session.query(RecommendedApp).filter(
            RecommendedApp.is_listed == True,
            RecommendedApp.language == language_prefix
        ).all()

        categories = set()
        current_user.role = TenantService.get_user_role(current_user, current_user.current_tenant)
        recommended_apps_result = []
        for recommended_app in recommended_apps:
            installed = db.session.query(InstalledApp).filter(
                and_(
                    InstalledApp.app_id == recommended_app.app_id,
                    InstalledApp.tenant_id == current_user.current_tenant_id
                )
            ).first() is not None

            app = recommended_app.app
            if not app or not app.is_public:
                continue

            site = app.site
            if not site:
                continue

            recommended_app_result = {
                'id': recommended_app.id,
                'app': app,
                'app_id': recommended_app.app_id,
                'description': site.description,
                'copyright': site.copyright,
                'privacy_policy': site.privacy_policy,
                'category': recommended_app.category,
                'position': recommended_app.position,
                'is_listed': recommended_app.is_listed,
                'install_count': recommended_app.install_count,
                'installed': installed,
                'editable': current_user.role in ['owner', 'admin'],
            }
            recommended_apps_result.append(recommended_app_result)

            categories.add(recommended_app.category)  # add category to categories

        return {'recommended_apps': recommended_apps_result, 'categories': list(categories)}


class RecommendedAppApi(Resource):
    model_config_fields = {
        'opening_statement': fields.String,
        'suggested_questions': fields.Raw(attribute='suggested_questions_list'),
        'suggested_questions_after_answer': fields.Raw(attribute='suggested_questions_after_answer_dict'),
        'more_like_this': fields.Raw(attribute='more_like_this_dict'),
        'model': fields.Raw(attribute='model_dict'),
        'user_input_form': fields.Raw(attribute='user_input_form_list'),
        'pre_prompt': fields.String,
        'agent_mode': fields.Raw(attribute='agent_mode_dict'),
    }

    app_simple_detail_fields = {
        'id': fields.String,
        'name': fields.String,
        'icon': fields.String,
        'icon_background': fields.String,
        'mode': fields.String,
        'app_model_config': fields.Nested(model_config_fields),
    }

    @login_required
    @account_initialization_required
    @marshal_with(app_simple_detail_fields)
    def get(self, app_id):
        app_id = str(app_id)

        # is in public recommended list
        recommended_app = db.session.query(RecommendedApp).filter(
            RecommendedApp.is_listed == True,
            RecommendedApp.app_id == app_id
        ).first()

        if not recommended_app:
            raise AppNotFoundError

        # get app detail
        app = db.session.query(App).filter(App.id == app_id).first()
        if not app or not app.is_public:
            raise AppNotFoundError

        return app


api.add_resource(RecommendedAppListApi, '/explore/apps')
api.add_resource(RecommendedAppApi, '/explore/apps/<uuid:app_id>')
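A side note on the marshal_with pattern used in both resources above: fields.Nested reads attributes straight off the returned object, which is why RecommendedAppApi.get can return the ORM App instance and let flask_restful shape the JSON. A self-contained sketch of that mechanism with toy stand-ins for the real models:

from flask_restful import fields, marshal

class Config:  # toy stand-in for AppModelConfig
    opening_statement = 'Hi!'

class App:  # toy stand-in for the App model
    id = 'a1'
    name = 'Demo'
    app_model_config = Config()

spec = {
    'id': fields.String,
    'name': fields.String,
    'app_model_config': fields.Nested({'opening_statement': fields.String}),
}

print(marshal(App(), spec))  # an OrderedDict with keys id, name, app_model_config
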
79  api/controllers/console/explore/saved_message.py  Normal file
@@ -0,0 +1,79 @@
from flask_login import current_user
from flask_restful import reqparse, marshal_with, fields
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound

from controllers.console import api
from controllers.console.explore.error import NotCompletionAppError
from controllers.console.explore.wraps import InstalledAppResource
from libs.helper import uuid_value, TimestampField
from services.errors.message import MessageNotExistsError
from services.saved_message_service import SavedMessageService

feedback_fields = {
    'rating': fields.String
}

message_fields = {
    'id': fields.String,
    'inputs': fields.Raw,
    'query': fields.String,
    'answer': fields.String,
    'feedback': fields.Nested(feedback_fields, attribute='user_feedback', allow_null=True),
    'created_at': TimestampField
}


class SavedMessageListApi(InstalledAppResource):
    saved_message_infinite_scroll_pagination_fields = {
        'limit': fields.Integer,
        'has_more': fields.Boolean,
        'data': fields.List(fields.Nested(message_fields))
    }

    @marshal_with(saved_message_infinite_scroll_pagination_fields)
    def get(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('last_id', type=uuid_value, location='args')
        parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
        args = parser.parse_args()

        return SavedMessageService.pagination_by_last_id(app_model, current_user, args['last_id'], args['limit'])

    def post(self, installed_app):
        app_model = installed_app.app
        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument('message_id', type=uuid_value, required=True, location='json')
        args = parser.parse_args()

        try:
            SavedMessageService.save(app_model, current_user, args['message_id'])
        except MessageNotExistsError:
            raise NotFound("Message Not Exists.")

        return {'result': 'success'}


class SavedMessageApi(InstalledAppResource):
    def delete(self, installed_app, message_id):
        app_model = installed_app.app

        message_id = str(message_id)

        if app_model.mode != 'completion':
            raise NotCompletionAppError()

        SavedMessageService.delete(app_model, current_user, message_id)

        return {'result': 'success'}


api.add_resource(SavedMessageListApi, '/installed-apps/<uuid:installed_app_id>/saved-messages', endpoint='installed_app_saved_messages')
api.add_resource(SavedMessageApi, '/installed-apps/<uuid:installed_app_id>/saved-messages/<uuid:message_id>', endpoint='installed_app_saved_message')
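The save endpoint above expects a JSON body with a single message_id; a hedged request sketch (URL prefix and auth are assumptions, the route and body shape come from the code above):

import requests

BASE = 'http://localhost:5001/console/api'  # assumed prefix
installed_app_id = '00000000-0000-0000-0000-000000000000'  # placeholder UUID

requests.post(
    f'{BASE}/installed-apps/{installed_app_id}/saved-messages',
    json={'message_id': '11111111-1111-1111-1111-111111111111'},  # placeholder UUID
    cookies={'session': '<console-session>'},  # assumed auth
)
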
48  api/controllers/console/explore/wraps.py  Normal file
@@ -0,0 +1,48 @@
from flask_login import login_required, current_user
from flask_restful import Resource
from functools import wraps

from werkzeug.exceptions import NotFound

from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from models.model import InstalledApp


def installed_app_required(view=None):
    def decorator(view):
        @wraps(view)
        def decorated(*args, **kwargs):
            if not kwargs.get('installed_app_id'):
                raise ValueError('missing installed_app_id in path parameters')

            installed_app_id = kwargs.get('installed_app_id')
            installed_app_id = str(installed_app_id)

            del kwargs['installed_app_id']

            installed_app = db.session.query(InstalledApp).filter(
                InstalledApp.id == str(installed_app_id),
                InstalledApp.tenant_id == current_user.current_tenant_id
            ).first()

            if installed_app is None:
                raise NotFound('Installed app not found')

            if not installed_app.app:
                db.session.delete(installed_app)
                db.session.commit()

                raise NotFound('Installed app not found')

            return view(installed_app, *args, **kwargs)
        return decorated

    if view:
        return decorator(view)
    return decorator


class InstalledAppResource(Resource):
    # must be reversed if there are multiple decorators
    method_decorators = [installed_app_required, account_initialization_required, login_required]
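As the comment above notes, flask_restful applies method_decorators in list order, each wrapping the previous result, so the last entry runs first at request time: login_required first, then account_initialization_required, then installed_app_required, which is what lets the latter inject installed_app as the view's first argument. A minimal sketch of that composition rule:

from functools import wraps

def tag(name):
    """Decorator factory that prints when its wrapper runs."""
    def deco(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            print('entering', name)
            return fn(*args, **kwargs)
        return wrapper
    return deco

method_decorators = [tag('installed_app_required'), tag('account_initialization_required'), tag('login_required')]

def view():
    print('view body')

for d in method_decorators:  # mirrors flask_restful's dispatch loop
    view = d(view)

view()
# entering login_required
# entering account_initialization_required
# entering installed_app_required
# view body
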
@@ -19,6 +19,14 @@ class VersionApi(Resource):
         args = parser.parse_args()
         check_update_url = current_app.config['CHECK_UPDATE_URL']
 
+        if not check_update_url:
+            return {
+                'version': '0.0.0',
+                'release_date': '',
+                'release_notes': '',
+                'can_auto_update': False
+            }
+
         try:
             response = requests.get(check_update_url, {
                 'current_version': args.get('current_version')
@@ -21,11 +21,11 @@ class InvalidInvitationCodeError(BaseHTTPException):
 
 class AccountAlreadyInitedError(BaseHTTPException):
     error_code = 'account_already_inited'
-    description = "Account already inited."
+    description = "The account has been initialized. Please refresh the page."
     code = 400
 
 
 class AccountNotInitializedError(BaseHTTPException):
     error_code = 'account_not_initialized'
-    description = "Account not initialized."
+    description = "The account has not been initialized yet. Please proceed with the initialization process first."
     code = 400
 
@@ -82,29 +82,33 @@ class ProviderTokenApi(Resource):
 
         args = parser.parse_args()
 
-        if not args['token']:
-            raise ValueError('Token is empty')
+        if args['token']:
+            try:
+                ProviderService.validate_provider_configs(
+                    tenant=current_user.current_tenant,
+                    provider_name=ProviderName(provider),
+                    configs=args['token']
+                )
+                token_is_valid = True
+            except ValidateFailedError as ex:
+                raise ValueError(str(ex))
 
-        try:
-            ProviderService.validate_provider_configs(
+            base64_encrypted_token = ProviderService.get_encrypted_token(
                 tenant=current_user.current_tenant,
                 provider_name=ProviderName(provider),
                 configs=args['token']
             )
-            token_is_valid = True
-        except ValidateFailedError:
+        else:
+            base64_encrypted_token = None
             token_is_valid = False
 
         tenant = current_user.current_tenant
 
-        base64_encrypted_token = ProviderService.get_encrypted_token(
-            tenant=current_user.current_tenant,
-            provider_name=ProviderName(provider),
-            configs=args['token']
-        )
-
-        provider_model = Provider.query.filter_by(tenant_id=tenant.id, provider_name=provider,
-                                                  provider_type=ProviderType.CUSTOM.value).first()
+        provider_model = db.session.query(Provider).filter(
+            Provider.tenant_id == tenant.id,
+            Provider.provider_name == provider,
+            Provider.provider_type == ProviderType.CUSTOM.value
+        ).first()
 
         # Only allow updating token for CUSTOM provider type
         if provider_model:
@@ -117,6 +121,16 @@ class ProviderTokenApi(Resource):
                                       is_valid=token_is_valid)
             db.session.add(provider_model)
 
+        if provider_model.is_valid:
+            other_providers = db.session.query(Provider).filter(
+                Provider.tenant_id == tenant.id,
+                Provider.provider_name != provider,
+                Provider.provider_type == ProviderType.CUSTOM.value
+            ).all()
+
+            for other_provider in other_providers:
+                other_provider.is_valid = False
+
         db.session.commit()
 
         if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
@@ -143,7 +157,7 @@ class ProviderTokenValidateApi(Resource):
         args = parser.parse_args()
 
         # todo: remove this when the provider is supported
-        if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
+        if provider in [ProviderName.ANTHROPIC.value, ProviderName.COHERE.value,
                         ProviderName.HUGGINGFACEHUB.value]:
             return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}
 
@@ -4,43 +4,45 @@ from libs.exception import BaseHTTPException
 
 class AppUnavailableError(BaseHTTPException):
     error_code = 'app_unavailable'
-    description = "App unavailable."
+    description = "App unavailable, please check your app configurations."
     code = 400
 
 
 class NotCompletionAppError(BaseHTTPException):
     error_code = 'not_completion_app'
-    description = "Not Completion App"
+    description = "Please check if your Completion app mode matches the right API route."
     code = 400
 
 
 class NotChatAppError(BaseHTTPException):
     error_code = 'not_chat_app'
-    description = "Not Chat App"
+    description = "Please check if your Chat app mode matches the right API route."
     code = 400
 
 
 class ConversationCompletedError(BaseHTTPException):
     error_code = 'conversation_completed'
-    description = "Conversation Completed."
+    description = "The conversation has ended. Please start a new conversation."
     code = 400
 
 
 class ProviderNotInitializeError(BaseHTTPException):
     error_code = 'provider_not_initialize'
-    description = "Provider Token not initialize."
+    description = "No valid model provider credentials found. " \
+                  "Please go to Settings -> Model Provider to complete your provider credentials."
     code = 400
 
 
 class ProviderQuotaExceededError(BaseHTTPException):
     error_code = 'provider_quota_exceeded'
-    description = "Provider quota exceeded."
+    description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
+                  "Please go to Settings -> Model Provider to complete your own provider credentials."
     code = 400
 
 
 class ProviderModelCurrentlyNotSupportError(BaseHTTPException):
     error_code = 'model_currently_not_support'
-    description = "GPT-4 currently not support."
+    description = "Dify Hosted OpenAI trial currently not support the GPT-4 model."
     code = 400
 
 
@@ -16,5 +16,5 @@ class DocumentIndexingError(BaseHTTPException):
 
 class DatasetNotInitedError(BaseHTTPException):
     error_code = 'dataset_not_inited'
-    description = "Dataset not inited."
+    description = "The dataset is still being initialized or indexing. Please wait a moment."
     code = 403
@@ -47,7 +47,7 @@ class ConversationListApi(WebApiResource):
         try:
             return WebConversationService.pagination_by_last_id(
                 app_model=app_model,
-                end_user=end_user,
+                user=end_user,
                 last_id=args['last_id'],
                 limit=args['limit'],
                 pinned=pinned
@@ -4,43 +4,45 @@ from libs.exception import BaseHTTPException
 
 class AppUnavailableError(BaseHTTPException):
     error_code = 'app_unavailable'
-    description = "App unavailable."
+    description = "App unavailable, please check your app configurations."
     code = 400
 
 
 class NotCompletionAppError(BaseHTTPException):
     error_code = 'not_completion_app'
-    description = "Not Completion App"
+    description = "Please check if your Completion app mode matches the right API route."
     code = 400
 
 
 class NotChatAppError(BaseHTTPException):
     error_code = 'not_chat_app'
-    description = "Not Chat App"
+    description = "Please check if your Chat app mode matches the right API route."
     code = 400
 
 
 class ConversationCompletedError(BaseHTTPException):
     error_code = 'conversation_completed'
-    description = "Conversation Completed."
+    description = "The conversation has ended. Please start a new conversation."
     code = 400
 
 
 class ProviderNotInitializeError(BaseHTTPException):
     error_code = 'provider_not_initialize'
-    description = "Provider Token not initialize."
+    description = "No valid model provider credentials found. " \
+                  "Please go to Settings -> Model Provider to complete your provider credentials."
     code = 400
 
 
 class ProviderQuotaExceededError(BaseHTTPException):
     error_code = 'provider_quota_exceeded'
-    description = "Provider quota exceeded."
+    description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
+                  "Please go to Settings -> Model Provider to complete your own provider credentials."
     code = 400
 
 
 class ProviderModelCurrentlyNotSupportError(BaseHTTPException):
     error_code = 'model_currently_not_support'
-    description = "GPT-4 currently not support."
+    description = "Dify Hosted OpenAI trial currently not support the GPT-4 model."
     code = 400
 
 
@@ -52,11 +54,11 @@ class CompletionRequestError(BaseHTTPException):
 
 class AppMoreLikeThisDisabledError(BaseHTTPException):
     error_code = 'app_more_like_this_disabled'
-    description = "More like this disabled."
+    description = "The 'More like this' feature is disabled. Please refresh your page."
     code = 403
 
 
 class AppSuggestedQuestionsAfterAnswerDisabledError(BaseHTTPException):
     error_code = 'app_suggested_questions_after_answer_disabled'
-    description = "Function Suggested questions after answer disabled."
+    description = "The 'Suggested Questions After Answer' feature is disabled. Please refresh your page."
     code = 403
 
@@ -16,7 +16,7 @@ def validate_token(view=None):
         def decorated(*args, **kwargs):
             site = validate_and_get_site()
 
-            app_model = db.session.query(App).get(site.app_id)
+            app_model = db.session.query(App).filter(App.id == site.app_id).first()
             if not app_model:
                 raise NotFound()
 
@@ -42,13 +42,16 @@ def validate_and_get_site():
     """
    auth_header = request.headers.get('Authorization')
     if auth_header is None:
-        raise Unauthorized()
+        raise Unauthorized('Authorization header is missing.')
+
+    if ' ' not in auth_header:
+        raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
 
     auth_scheme, auth_token = auth_header.split(None, 1)
     auth_scheme = auth_scheme.lower()
 
     if auth_scheme != 'bearer':
-        raise Unauthorized()
+        raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
 
     site = db.session.query(Site).filter(
         Site.code == auth_token,
@@ -34,5 +34,9 @@ class DatasetIndexToolCallbackHandler(IndexToolCallbackHandler):
         db.session.query(DocumentSegment).filter(
             DocumentSegment.dataset_id == self.dataset_id,
             DocumentSegment.index_node_id == index_node_id
-        ).update({DocumentSegment.hit_count: DocumentSegment.hit_count + 1}, synchronize_session=False)
+        ).update(
+            {DocumentSegment.hit_count: DocumentSegment.hit_count + 1},
+            synchronize_session=False
+        )
 
         db.session.commit()
109  api/core/chain/llm_router_chain.py  Normal file
@@ -0,0 +1,109 @@
"""Base classes for LLM-powered router chains."""
from __future__ import annotations

import json
from typing import Any, Dict, List, Optional, Type, cast, NamedTuple

from langchain.chains.base import Chain
from pydantic import root_validator

from langchain.chains import LLMChain
from langchain.prompts import BasePromptTemplate
from langchain.schema import BaseOutputParser, OutputParserException, BaseLanguageModel

from libs.json_in_md_parser import parse_and_check_json_markdown


class Route(NamedTuple):
    destination: Optional[str]
    next_inputs: Dict[str, Any]


class LLMRouterChain(Chain):
    """A router chain that uses an LLM chain to perform routing."""

    llm_chain: LLMChain
    """LLM chain used to perform routing"""

    @root_validator()
    def validate_prompt(cls, values: dict) -> dict:
        prompt = values["llm_chain"].prompt
        if prompt.output_parser is None:
            raise ValueError(
                "LLMRouterChain requires base llm_chain prompt to have an output"
                " parser that converts LLM text output to a dictionary with keys"
                " 'destination' and 'next_inputs'. Received a prompt with no output"
                " parser."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the LLM chain prompt expects.

        :meta private:
        """
        return self.llm_chain.input_keys

    def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
        super()._validate_outputs(outputs)
        if not isinstance(outputs["next_inputs"], dict):
            raise ValueError

    def _call(
        self,
        inputs: Dict[str, Any]
    ) -> Dict[str, Any]:
        output = cast(
            Dict[str, Any],
            self.llm_chain.predict_and_parse(**inputs),
        )
        return output

    @classmethod
    def from_llm(
        cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, **kwargs: Any
    ) -> LLMRouterChain:
        """Convenience constructor."""
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(llm_chain=llm_chain, **kwargs)

    @property
    def output_keys(self) -> List[str]:
        return ["destination", "next_inputs"]

    def route(self, inputs: Dict[str, Any]) -> Route:
        result = self(inputs)
        return Route(result["destination"], result["next_inputs"])


class RouterOutputParser(BaseOutputParser[Dict[str, str]]):
    """Parser for output of the router chain in the multi-prompt chain."""

    default_destination: str = "DEFAULT"
    next_inputs_type: Type = str
    next_inputs_inner_key: str = "input"

    def parse(self, text: str) -> Dict[str, Any]:
        try:
            expected_keys = ["destination", "next_inputs"]
            parsed = parse_and_check_json_markdown(text, expected_keys)
            if not isinstance(parsed["destination"], str):
                raise ValueError("Expected 'destination' to be a string.")
            if not isinstance(parsed["next_inputs"], self.next_inputs_type):
                raise ValueError(
                    f"Expected 'next_inputs' to be {self.next_inputs_type}."
                )
            parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]}
            if (
                parsed["destination"].strip().lower()
                == self.default_destination.lower()
            ):
                parsed["destination"] = None
            else:
                parsed["destination"] = parsed["destination"].strip()
            return parsed
        except Exception as e:
            raise OutputParserException(
                f"Parsing text\n{text}\n of llm router raised following error:\n{e}"
            )
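To make the parser contract concrete, a hedged sketch of the round trip RouterOutputParser.parse performs on a typical LLM reply; the inline extraction stands in for the real parse_and_check_json_markdown helper:

import json

llm_reply = '''```json
{
    "destination": "dataset-1234",
    "next_inputs": "What does the refund policy say?"
}
```'''

payload = json.loads(llm_reply.split('```json')[1].split('```')[0])

# The parser then wraps next_inputs under its inner key and maps "DEFAULT" to None:
route = {
    'destination': None if payload['destination'].strip().lower() == 'default'
    else payload['destination'].strip(),
    'next_inputs': {'input': payload['next_inputs']},
}
print(route)
# {'destination': 'dataset-1234', 'next_inputs': {'input': 'What does the refund policy say?'}}
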
@@ -1,18 +1,18 @@
 from typing import Optional, List
 
-from langchain.callbacks import SharedCallbackManager
+from langchain.callbacks import SharedCallbackManager, CallbackManager
 from langchain.chains import SequentialChain
 from langchain.chains.base import Chain
 from langchain.memory.chat_memory import BaseChatMemory
 
-from core.agent.agent_builder import AgentBuilder
 from core.callback_handler.agent_loop_gather_callback_handler import AgentLoopGatherCallbackHandler
 from core.callback_handler.dataset_tool_callback_handler import DatasetToolCallbackHandler
 from core.callback_handler.main_chain_gather_callback_handler import MainChainGatherCallbackHandler
+from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler
 from core.chain.chain_builder import ChainBuilder
-from core.constant import llm_constant
+from core.chain.multi_dataset_router_chain import MultiDatasetRouterChain
 from core.conversation_message_task import ConversationMessageTask
-from core.tool.dataset_tool_builder import DatasetToolBuilder
+from extensions.ext_database import db
+from models.dataset import Dataset
 
 
 class MainChainBuilder:
@@ -31,8 +31,7 @@ class MainChainBuilder:
             tenant_id=tenant_id,
             agent_mode=agent_mode,
             memory=memory,
-            dataset_tool_callback_handler=DatasetToolCallbackHandler(conversation_message_task),
-            agent_loop_gather_callback_handler=chain_callback_handler.agent_loop_gather_callback_handler
+            conversation_message_task=conversation_message_task
         )
         chains += tool_chains
 
@@ -59,15 +58,15 @@ class MainChainBuilder:
 
     @classmethod
     def get_agent_chains(cls, tenant_id: str, agent_mode: dict, memory: Optional[BaseChatMemory],
-                         dataset_tool_callback_handler: DatasetToolCallbackHandler,
-                         agent_loop_gather_callback_handler: AgentLoopGatherCallbackHandler):
+                         conversation_message_task: ConversationMessageTask):
         # agent mode
         chains = []
         if agent_mode and agent_mode.get('enabled'):
             tools = agent_mode.get('tools', [])
 
             pre_fixed_chains = []
-            agent_tools = []
+            # agent_tools = []
+            datasets = []
             for tool in tools:
                 tool_type = list(tool.keys())[0]
                 tool_config = list(tool.values())[0]
@@ -76,34 +75,27 @@ class MainChainBuilder:
                 if chain:
                     pre_fixed_chains.append(chain)
                 elif tool_type == "dataset":
-                    dataset_tool = DatasetToolBuilder.build_dataset_tool(
-                        tenant_id=tenant_id,
-                        dataset_id=tool_config.get("id"),
-                        response_mode='no_synthesizer',  # "compact"
-                        callback_handler=dataset_tool_callback_handler
-                    )
+                    # get dataset from dataset id
+                    dataset = db.session.query(Dataset).filter(
+                        Dataset.tenant_id == tenant_id,
+                        Dataset.id == tool_config.get("id")
+                    ).first()
 
-                    if dataset_tool:
-                        agent_tools.append(dataset_tool)
+                    if dataset:
+                        datasets.append(dataset)
 
             # add pre-fixed chains
             chains += pre_fixed_chains
 
-            if len(agent_tools) == 1:
-                # tool to chain
-                tool_chain = ChainBuilder.to_tool_chain(tool=agent_tools[0], output_key='tool_output')
-                chains.append(tool_chain)
-            elif len(agent_tools) > 1:
-                # build agent config
-                agent_chain = AgentBuilder.to_agent_chain(
+            if len(datasets) > 0:
+                multi_dataset_router_chain = MultiDatasetRouterChain.from_datasets(
                     tenant_id=tenant_id,
-                    tools=agent_tools,
-                    memory=memory,
-                    dataset_tool_callback_handler=dataset_tool_callback_handler,
-                    agent_loop_gather_callback_handler=agent_loop_gather_callback_handler
+                    datasets=datasets,
+                    conversation_message_task=conversation_message_task,
+                    callback_manager=CallbackManager([DifyStdOutCallbackHandler()])
                 )
 
-                chains.append(agent_chain)
+                chains.append(multi_dataset_router_chain)
 
         final_output_key = cls.get_chains_output_key(chains)
 
144  api/core/chain/multi_dataset_router_chain.py  Normal file
@@ -0,0 +1,144 @@
from typing import Mapping, List, Dict, Any, Optional

from langchain import LLMChain, PromptTemplate, ConversationChain
from langchain.callbacks import CallbackManager
from langchain.chains.base import Chain
from langchain.schema import BaseLanguageModel
from pydantic import Extra

from core.callback_handler.dataset_tool_callback_handler import DatasetToolCallbackHandler
from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler
from core.chain.llm_router_chain import LLMRouterChain, RouterOutputParser
from core.conversation_message_task import ConversationMessageTask
from core.llm.llm_builder import LLMBuilder
from core.tool.dataset_tool_builder import DatasetToolBuilder
from core.tool.llama_index_tool import EnhanceLlamaIndexTool
from models.dataset import Dataset

MULTI_PROMPT_ROUTER_TEMPLATE = """
Given a raw text input to a language model select the model prompt best suited for \
the input. You will be given the names of the available prompts and a description of \
what the prompt is best suited for. You may also revise the original input if you \
think that revising it will ultimately lead to a better response from the language \
model.

<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like, \
no any other string out of markdown code snippet:
```json
{{{{
    "destination": string \\ name of the prompt to use or "DEFAULT"
    "next_inputs": string \\ a potentially modified version of the original input
}}}}
```

REMEMBER: "destination" MUST be one of the candidate prompt names specified below OR \
it can be "DEFAULT" if the input is not well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input if you don't think any \
modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT >>
"""


class MultiDatasetRouterChain(Chain):
    """Use a single chain to route an input to one of multiple candidate chains."""

    router_chain: LLMRouterChain
    """Chain for deciding a destination chain and the input to it."""
    dataset_tools: Mapping[str, EnhanceLlamaIndexTool]
    """Map of name to candidate chains that inputs can be routed to."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the router chain prompt expects.

        :meta private:
        """
        return self.router_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        return ["text"]

    @classmethod
    def from_datasets(
            cls,
            tenant_id: str,
            datasets: List[Dataset],
            conversation_message_task: ConversationMessageTask,
            **kwargs: Any,
    ):
        """Convenience constructor for instantiating from destination prompts."""
        llm_callback_manager = CallbackManager([DifyStdOutCallbackHandler()])
        llm = LLMBuilder.to_llm(
            tenant_id=tenant_id,
            model_name='gpt-3.5-turbo',
            temperature=0,
            max_tokens=1024,
            callback_manager=llm_callback_manager
        )

        destinations = ["{}: {}".format(d.id, d.description.replace('\n', ' ') if d.description
                        else ('useful for when you want to answer queries about the ' + d.name))
                        for d in datasets]
        destinations_str = "\n".join(destinations)
        router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
            destinations=destinations_str
        )
        router_prompt = PromptTemplate(
            template=router_template,
            input_variables=["input"],
            output_parser=RouterOutputParser(),
        )
        router_chain = LLMRouterChain.from_llm(llm, router_prompt)
        dataset_tools = {}
        for dataset in datasets:
            dataset_tool = DatasetToolBuilder.build_dataset_tool(
                dataset=dataset,
                response_mode='no_synthesizer',  # "compact"
                callback_handler=DatasetToolCallbackHandler(conversation_message_task)
            )

            if dataset_tool:
                dataset_tools[dataset.id] = dataset_tool

        return cls(
            router_chain=router_chain,
            dataset_tools=dataset_tools,
            **kwargs,
        )

    def _call(
            self,
            inputs: Dict[str, Any]
    ) -> Dict[str, Any]:
        if len(self.dataset_tools) == 0:
            return {"text": ''}
        elif len(self.dataset_tools) == 1:
            return {"text": next(iter(self.dataset_tools.values())).run(inputs['input'])}

        route = self.router_chain.route(inputs)

        if not route.destination:
            return {"text": ''}
        elif route.destination in self.dataset_tools:
            return {"text": self.dataset_tools[route.destination].run(
                route.next_inputs['input']
            )}
        else:
            raise ValueError(
                f"Received invalid destination chain name '{route.destination}'"
            )
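For intuition, the {destinations} block that from_datasets interpolates into the router template is one "id: description" line per dataset, with a generated fallback when a dataset has no description. A sketch with made-up datasets (only the formatting rule is taken from the code above):

datasets = [
    ('d1f4-refunds', 'Refund Docs', 'Company refund and returns policy documents'),
    ('9c2a-hr', 'HR Handbook', None),  # no description -> fallback built from the name
]

destinations = [
    '{}: {}'.format(d_id, desc.replace('\n', ' ') if desc
                    else ('useful for when you want to answer queries about the ' + name))
    for d_id, name, desc in datasets
]
print('\n'.join(destinations))
# d1f4-refunds: Company refund and returns policy documents
# 9c2a-hr: useful for when you want to answer queries about the HR Handbook
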
@@ -1,14 +1,17 @@
-from typing import Optional, List, Union
+import logging
+from typing import Optional, List, Union, Tuple
 
 from langchain.callbacks import CallbackManager
 from langchain.chat_models.base import BaseChatModel
 from langchain.llms import BaseLLM
 from langchain.schema import BaseMessage, BaseLanguageModel, HumanMessage
+from requests.exceptions import ChunkedEncodingError
 
 from core.constant import llm_constant
 from core.callback_handler.llm_callback_handler import LLMCallbackHandler
 from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \
     DifyStdOutCallbackHandler
-from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException
+from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException, PubHandler
+from core.llm.error import LLMBadRequestError
 from core.llm.llm_builder import LLMBuilder
 from core.chain.main_chain_builder import MainChainBuilder
@@ -39,7 +42,8 @@ class Completion:
             memory = cls.get_memory_from_conversation(
                 tenant_id=app.tenant_id,
                 app_model_config=app_model_config,
-                conversation=conversation
+                conversation=conversation,
+                return_messages=False
             )
 
             inputs = conversation.inputs
@@ -83,6 +87,11 @@ class Completion:
             )
         except ConversationTaskStoppedException:
             return
+        except ChunkedEncodingError as e:
+            # Interrupt by LLM (like OpenAI), handle it.
+            logging.warning(f'ChunkedEncodingError: {e}')
+            conversation_message_task.end()
+            return
 
     @classmethod
     def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict,
@@ -96,7 +105,7 @@ class Completion:
         )
 
         # get llm prompt
-        prompt = cls.get_main_llm_prompt(
+        prompt, stop_words = cls.get_main_llm_prompt(
             mode=mode,
             llm=final_llm,
             pre_prompt=app_model_config.pre_prompt,
@@ -114,30 +123,47 @@ class Completion:
             mode=mode
         )
 
-        response = final_llm.generate([prompt])
+        response = final_llm.generate([prompt], stop_words)
 
         return response
 
     @classmethod
-    def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict, chain_output: Optional[str],
+    def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict,
+                            chain_output: Optional[str],
                             memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
-            Union[str | List[BaseMessage]]:
+            Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]:
+        # disable template string in query
+        query_params = OutLinePromptTemplate.from_template(template=query).input_variables
+        if query_params:
+            for query_param in query_params:
+                if query_param not in inputs:
+                    inputs[query_param] = '{' + query_param + '}'
+
         pre_prompt = PromptBuilder.process_template(pre_prompt) if pre_prompt else pre_prompt
         if mode == 'completion':
             prompt_template = OutLinePromptTemplate.from_template(
-                template=("Use the following pieces of [CONTEXT] to answer the question at the end. "
-                          "If you don't know the answer, "
-                          "just say that you don't know, don't try to make up an answer. \n"
-                          "```\n"
-                          "[CONTEXT]\n"
-                          "{context}\n"
-                          "```\n" if chain_output else "")
+                template=("""Use the following CONTEXT as your learned knowledge:
+[CONTEXT]
+{context}
+[END CONTEXT]
+
+When answer to user:
+- If you don't know, just say that you don't know.
+- If you don't know when you are not sure, ask for clarification.
+Avoid mentioning that you obtained the information from the context.
+And answer according to the language of the user's question.
+""" if chain_output else "")
                           + (pre_prompt + "\n" if pre_prompt else "")
                          + "{query}\n"
             )
 
             if chain_output:
                 inputs['context'] = chain_output
+                context_params = OutLinePromptTemplate.from_template(template=chain_output).input_variables
+                if context_params:
+                    for context_param in context_params:
+                        if context_param not in inputs:
+                            inputs[context_param] = '{' + context_param + '}'
 
             prompt_inputs = {k: inputs[k] for k in prompt_template.input_variables if k in inputs}
             prompt_content = prompt_template.format(
@@ -147,64 +173,83 @@ class Completion:
 
         if isinstance(llm, BaseChatModel):
             # use chat llm as completion model
-            return [HumanMessage(content=prompt_content)]
+            return [HumanMessage(content=prompt_content)], None
         else:
-            return prompt_content
+            return prompt_content, None
     else:
         messages: List[BaseMessage] = []
 
-        system_message = None
-        if pre_prompt:
-            # append pre prompt as system message
-            system_message = PromptBuilder.to_system_message(pre_prompt, inputs)
-
-        if chain_output:
-            # append context as system message, currently only use simple stuff prompt
-            context_message = PromptBuilder.to_system_message(
-                """Use the following pieces of [CONTEXT] to answer the users question.
-If you don't know the answer, just say that you don't know, don't try to make up an answer.
-```
-[CONTEXT]
-{context}
-```""",
-                {'context': chain_output}
-            )
-
-            if not system_message:
-                system_message = context_message
-            else:
-                system_message.content = context_message.content + "\n\n" + system_message.content
-
-        if system_message:
-            messages.append(system_message)
-
         human_inputs = {
             "query": query
         }
 
-        # construct main prompt
-        human_message = PromptBuilder.to_human_message(
-            prompt_content="{query}",
-            inputs=human_inputs
-        )
+        human_message_prompt = ""
+
+        if pre_prompt:
+            pre_prompt_inputs = {k: inputs[k] for k in
+                                 OutLinePromptTemplate.from_template(template=pre_prompt).input_variables
+                                 if k in inputs}
+
+            if pre_prompt_inputs:
+                human_inputs.update(pre_prompt_inputs)
+
+        if chain_output:
+            human_inputs['context'] = chain_output
+            human_message_prompt += """Use the following CONTEXT as your learned knowledge.
+[CONTEXT]
+{context}
+[END CONTEXT]
+
+When answer to user:
+- If you don't know, just say that you don't know.
+- If you don't know when you are not sure, ask for clarification.
+Avoid mentioning that you obtained the information from the context.
+And answer according to the language of the user's question.
+"""
+
+        if pre_prompt:
+            human_message_prompt += pre_prompt
+
+        query_prompt = "\nHuman: {query}\nAI: "
 
         if memory:
             # append chat histories
-            tmp_messages = messages.copy() + [human_message]
-            curr_message_tokens = memory.llm.get_messages_tokens(tmp_messages)
-            rest_tokens = llm_constant.max_context_token_length[
-                memory.llm.model_name] - memory.llm.max_tokens - curr_message_tokens
+            tmp_human_message = PromptBuilder.to_human_message(
+                prompt_content=human_message_prompt + query_prompt,
+                inputs=human_inputs
+            )
+
+            curr_message_tokens = memory.llm.get_messages_tokens([tmp_human_message])
+            rest_tokens = llm_constant.max_context_token_length[memory.llm.model_name] \
+                - memory.llm.max_tokens - curr_message_tokens
             rest_tokens = max(rest_tokens, 0)
-            history_messages = cls.get_history_messages_from_memory(memory, rest_tokens)
-            messages += history_messages
+            histories = cls.get_history_messages_from_memory(memory, rest_tokens)
+
+            # disable template string in query
+            histories_params = OutLinePromptTemplate.from_template(template=histories).input_variables
+            if histories_params:
+                for histories_param in histories_params:
+                    if histories_param not in human_inputs:
+                        human_inputs[histories_param] = '{' + histories_param + '}'
+
+            human_message_prompt += "\n\n" + histories
+
+        human_message_prompt += query_prompt
+
+        # construct main prompt
+        human_message = PromptBuilder.to_human_message(
+            prompt_content=human_message_prompt,
+            inputs=human_inputs
+        )
 
         messages.append(human_message)
 
-        return messages
+        return messages, ['\nHuman:']
 
     @classmethod
     def get_llm_callback_manager(cls, llm: Union[StreamableOpenAI, StreamableChatOpenAI],
-                                 streaming: bool, conversation_message_task: ConversationMessageTask) -> CallbackManager:
+                                 streaming: bool,
+                                 conversation_message_task: ConversationMessageTask) -> CallbackManager:
         llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task)
         if streaming:
             callback_handlers = [llm_callback_handler, DifyStreamingStdOutCallbackHandler()]
@@ -216,7 +261,7 @@ class Completion:
     @classmethod
     def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory,
                                          max_token_limit: int) -> \
-            List[BaseMessage]:
+            str:
         """Get memory messages."""
         memory.max_token_limit = max_token_limit
         memory_key = memory.memory_variables[0]
@@ -286,7 +331,7 @@ class Completion:
         )
 
         # get llm prompt
-        original_prompt = cls.get_main_llm_prompt(
+        original_prompt, _ = cls.get_main_llm_prompt(
            mode="completion",
             llm=llm,
             pre_prompt=pre_prompt,
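Pulling the chat branch of get_main_llm_prompt together: the context block, pre_prompt, histories, and the "\nHuman: {query}\nAI: " tail are concatenated into one human message, and ['\nHuman:'] is returned as the stop-word list so the model stops before inventing the next human turn. A string-only sketch of the assembly order (illustrative values, no LLM involved):

pre_prompt = 'You are a support agent.'
chain_output = 'Refunds are issued within 14 days.'
histories = 'Human: hi\nAI: hello'
query_prompt = '\nHuman: {query}\nAI: '

human_message_prompt = ''
if chain_output:
    human_message_prompt += 'Use the following CONTEXT as your learned knowledge.\n[CONTEXT]\n{context}\n[END CONTEXT]\n\n'
if pre_prompt:
    human_message_prompt += pre_prompt
human_message_prompt += '\n\n' + histories
human_message_prompt += query_prompt

print(human_message_prompt.format(context=chain_output, query='Can I get a refund?'))
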
@@ -2,8 +2,6 @@ import decimal
 import json
 from typing import Optional, Union
 
-from gunicorn.config import User
-
 from core.callback_handler.entity.agent_loop import AgentLoop
 from core.callback_handler.entity.dataset_query import DatasetQueryObj
 from core.callback_handler.entity.llm_message import LLMMessage
@@ -58,6 +56,9 @@ class ConversationMessageTask:
         )
 
     def init(self):
+        provider_name = LLMBuilder.get_default_provider(self.app.tenant_id)
+        self.model_dict['provider'] = provider_name
+
         override_model_configs = None
         if self.is_override:
             override_model_configs = {
@@ -79,7 +80,10 @@ class ConversationMessageTask:
         if introduction:
             prompt_template = OutLinePromptTemplate.from_template(template=PromptBuilder.process_template(introduction))
             prompt_inputs = {k: self.inputs[k] for k in prompt_template.input_variables if k in self.inputs}
-            introduction = prompt_template.format(**prompt_inputs)
+            try:
+                introduction = prompt_template.format(**prompt_inputs)
+            except KeyError:
+                pass
 
         if self.app_model_config.pre_prompt:
             pre_prompt = PromptBuilder.process_template(self.app_model_config.pre_prompt)
@@ -170,7 +174,7 @@ class ConversationMessageTask:
         )
 
         if not by_stopped:
-            self._pub_handler.pub_end()
+            self.end()
 
     def update_provider_quota(self):
         llm_provider_service = LLMProviderService(
@@ -267,9 +271,12 @@ class ConversationMessageTask:
         total_price = message_tokens_per_1k * message_unit_price + answer_tokens_per_1k * answer_unit_price
         return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP)
 
+    def end(self):
+        self._pub_handler.pub_end()
+
 
 class PubHandler:
-    def __init__(self, user: Union[Account | User], task_id: str,
+    def __init__(self, user: Union[Account | EndUser], task_id: str,
                  message: Message, conversation: Conversation,
                  chain_pub: bool = False, agent_thought_pub: bool = False):
         self._channel = PubHandler.generate_channel_name(user, task_id)
@@ -282,12 +289,15 @@ class PubHandler:
         self._agent_thought_pub = agent_thought_pub
 
     @classmethod
-    def generate_channel_name(cls, user: Union[Account | User], task_id: str):
+    def generate_channel_name(cls, user: Union[Account | EndUser], task_id: str):
+        if not user:
+            raise ValueError("user is required")
+
         user_str = 'account-' + user.id if isinstance(user, Account) else 'end-user-' + user.id
         return "generate_result:{}-{}".format(user_str, task_id)
 
     @classmethod
-    def generate_stopped_cache_key(cls, user: Union[Account | User], task_id: str):
+    def generate_stopped_cache_key(cls, user: Union[Account | EndUser], task_id: str):
         user_str = 'account-' + user.id if isinstance(user, Account) else 'end-user-' + user.id
         return "generate_result_stopped:{}-{}".format(user_str, task_id)
 
@@ -366,7 +376,7 @@ class PubHandler:
         redis_client.publish(self._channel, json.dumps(content))
 
     @classmethod
-    def pub_error(cls, user: Union[Account | User], task_id: str, e):
+    def pub_error(cls, user: Union[Account | EndUser], task_id: str, e):
         content = {
             'error': type(e).__name__,
             'description': e.description if getattr(e, 'description', None) is not None else str(e)
@@ -379,7 +389,7 @@ class PubHandler:
         return redis_client.get(self._stopped_cache_key) is not None
 
     @classmethod
-    def stop(cls, user: Union[Account | User], task_id: str):
+    def stop(cls, user: Union[Account | EndUser], task_id: str):
        stopped_cache_key = cls.generate_stopped_cache_key(user, task_id)
         redis_client.setex(stopped_cache_key, 600, 1)
 
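So the publish channel and the stop-flag cache key produced by PubHandler differ only in prefix; illustrative values:

user_kind, user_id, task_id = 'account', 'u-123', 't-456'  # 'end-user' for an EndUser
user_str = '{}-{}'.format(user_kind, user_id)

channel = 'generate_result:{}-{}'.format(user_str, task_id)
stop_key = 'generate_result_stopped:{}-{}'.format(user_str, task_id)
print(channel)   # generate_result:account-u-123-t-456
print(stop_key)  # generate_result_stopped:account-u-123-t-456
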
@@ -11,9 +11,10 @@ from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_except
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
 def get_embedding(
-    text: str,
-    engine: Optional[str] = None,
-    openai_api_key: Optional[str] = None,
+        text: str,
+        engine: Optional[str] = None,
+        api_key: Optional[str] = None,
+        **kwargs
 ) -> List[float]:
     """Get embedding.
 
@@ -25,11 +26,12 @@ def get_embedding(
 
     """
     text = text.replace("\n", " ")
-    return openai.Embedding.create(input=[text], engine=engine, api_key=openai_api_key)["data"][0]["embedding"]
+    return openai.Embedding.create(input=[text], engine=engine, api_key=api_key, **kwargs)["data"][0]["embedding"]
 
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
-async def aget_embedding(text: str, engine: Optional[str] = None, openai_api_key: Optional[str] = None) -> List[float]:
+async def aget_embedding(text: str, engine: Optional[str] = None, api_key: Optional[str] = None, **kwargs) -> List[
+    float]:
     """Asynchronously get embedding.
 
     NOTE: Copied from OpenAI's embedding utils:
@@ -42,16 +44,17 @@ async def aget_embedding(text: str, engine: Optional[str] = None, openai_api_key
     # replace newlines, which can negatively affect performance.
     text = text.replace("\n", " ")
 
-    return (await openai.Embedding.acreate(input=[text], engine=engine, api_key=openai_api_key))["data"][0][
+    return (await openai.Embedding.acreate(input=[text], engine=engine, api_key=api_key, **kwargs))["data"][0][
         "embedding"
     ]
 
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
 def get_embeddings(
-    list_of_text: List[str],
-    engine: Optional[str] = None,
-    openai_api_key: Optional[str] = None
+        list_of_text: List[str],
+        engine: Optional[str] = None,
+        api_key: Optional[str] = None,
+        **kwargs
 ) -> List[List[float]]:
     """Get embeddings.
 
@@ -67,14 +70,14 @@ def get_embeddings(
     # replace newlines, which can negatively affect performance.
     list_of_text = [text.replace("\n", " ") for text in list_of_text]
 
-    data = openai.Embedding.create(input=list_of_text, engine=engine, api_key=openai_api_key).data
+    data = openai.Embedding.create(input=list_of_text, engine=engine, api_key=api_key, **kwargs).data
     data = sorted(data, key=lambda x: x["index"])  # maintain the same order as input.
     return [d["embedding"] for d in data]
 
 
 @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
 async def aget_embeddings(
-    list_of_text: List[str], engine: Optional[str] = None, openai_api_key: Optional[str] = None
+    list_of_text: List[str], engine: Optional[str] = None, api_key: Optional[str] = None, **kwargs
 ) -> List[List[float]]:
     """Asynchronously get embeddings.
 
@@ -90,7 +93,7 @@ async def aget_embeddings(
     # replace newlines, which can negatively affect performance.
     list_of_text = [text.replace("\n", " ") for text in list_of_text]
 
-    data = (await openai.Embedding.acreate(input=list_of_text, engine=engine, api_key=openai_api_key)).data
+    data = (await openai.Embedding.acreate(input=list_of_text, engine=engine, api_key=api_key, **kwargs)).data
     data = sorted(data, key=lambda x: x["index"])  # maintain the same order as input.
     return [d["embedding"] for d in data]
 
@@ -98,19 +101,30 @@ async def aget_embeddings(
 class OpenAIEmbedding(BaseEmbedding):
 
     def __init__(
-        self,
-        mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE,
-        model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
-        deployment_name: Optional[str] = None,
-        openai_api_key: Optional[str] = None,
-        **kwargs: Any,
+            self,
+            mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE,
+            model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002,
+            deployment_name: Optional[str] = None,
+            openai_api_key: Optional[str] = None,
+            **kwargs: Any,
     ) -> None:
         """Init params."""
-        super().__init__(**kwargs)
+        new_kwargs = {}
+
+        if 'embed_batch_size' in kwargs:
+            new_kwargs['embed_batch_size'] = kwargs['embed_batch_size']
+
+        if 'tokenizer' in kwargs:
+            new_kwargs['tokenizer'] = kwargs['tokenizer']
+
+        super().__init__(**new_kwargs)
         self.mode = OpenAIEmbeddingMode(mode)
         self.model = OpenAIEmbeddingModelType(model)
         self.deployment_name = deployment_name
         self.openai_api_key = openai_api_key
+        self.openai_api_type = kwargs.get('openai_api_type')
+        self.openai_api_version = kwargs.get('openai_api_version')
+        self.openai_api_base = kwargs.get('openai_api_base')
 
     @handle_llm_exceptions
     def _get_query_embedding(self, query: str) -> List[float]:
@@ -122,7 +136,9 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _QUERY_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _QUERY_MODE_MODEL_DICT[key]
-        return get_embedding(query, engine=engine, openai_api_key=self.openai_api_key)
+        return get_embedding(query, engine=engine, api_key=self.openai_api_key,
+                             api_type=self.openai_api_type, api_version=self.openai_api_version,
+                             api_base=self.openai_api_base)
 
     def _get_text_embedding(self, text: str) -> List[float]:
         """Get text embedding."""
@@ -133,7 +149,9 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        return get_embedding(text, engine=engine, openai_api_key=self.openai_api_key)
+        return get_embedding(text, engine=engine, api_key=self.openai_api_key,
+                             api_type=self.openai_api_type, api_version=self.openai_api_version,
+                             api_base=self.openai_api_base)
 
     async def _aget_text_embedding(self, text: str) -> List[float]:
         """Asynchronously get text embedding."""
@@ -144,7 +162,9 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        return await aget_embedding(text, engine=engine, openai_api_key=self.openai_api_key)
+        return await aget_embedding(text, engine=engine, api_key=self.openai_api_key,
+                                    api_type=self.openai_api_type, api_version=self.openai_api_version,
+                                    api_base=self.openai_api_base)
 
     def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
         """Get text embeddings.
@@ -153,6 +173,13 @@ class OpenAIEmbedding(BaseEmbedding):
         Can be overriden for batch queries.
 
         """
+        if self.openai_api_type and self.openai_api_type == 'azure':
+            embeddings = []
+            for text in texts:
+                embeddings.append(self._get_text_embedding(text))
+
+            return embeddings
+
         if self.deployment_name is not None:
             engine = self.deployment_name
         else:
@@ -160,11 +187,20 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        embeddings = get_embeddings(texts, engine=engine, openai_api_key=self.openai_api_key)
+        embeddings = get_embeddings(texts, engine=engine, api_key=self.openai_api_key,
+                                    api_type=self.openai_api_type, api_version=self.openai_api_version,
+                                    api_base=self.openai_api_base)
         return embeddings
 
     async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
         """Asynchronously get text embeddings."""
+        if self.openai_api_type and self.openai_api_type == 'azure':
+            embeddings = []
+            for text in texts:
+                embeddings.append(await self._aget_text_embedding(text))
+
+            return embeddings
+
         if self.deployment_name is not None:
             engine = self.deployment_name
         else:
@@ -172,5 +208,7 @@ class OpenAIEmbedding(BaseEmbedding):
         if key not in _TEXT_MODE_MODEL_DICT:
             raise ValueError(f"Invalid mode, model combination: {key}")
         engine = _TEXT_MODE_MODEL_DICT[key]
-        embeddings = await aget_embeddings(texts, engine=engine, openai_api_key=self.openai_api_key)
+        embeddings = await aget_embeddings(texts, engine=engine, api_key=self.openai_api_key,
+                                           api_type=self.openai_api_type, api_version=self.openai_api_version,
+                                           api_base=self.openai_api_base)
         return embeddings
 
@ -1,12 +1,13 @@
|
||||
import logging
|
||||
|
||||
from langchain.chat_models.base import BaseChatModel
|
||||
from langchain.schema import HumanMessage
|
||||
from langchain.schema import HumanMessage, OutputParserException
|
||||
|
||||
from core.constant import llm_constant
|
||||
from core.llm.llm_builder import LLMBuilder
|
||||
from core.llm.streamable_open_ai import StreamableOpenAI
|
||||
from core.llm.token_calculator import TokenCalculator
|
||||
from core.prompt.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
|
||||
|
||||
from core.prompt.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
|
||||
from core.prompt.prompt_template import OutLinePromptTemplate
|
||||
@ -118,3 +119,48 @@ class LLMGenerator:
|
||||
questions = []
|
||||
|
||||
return questions
|
||||
|
||||
@classmethod
|
||||
def generate_rule_config(cls, tenant_id: str, audiences: str, hoping_to_solve: str) -> dict:
|
||||
output_parser = RuleConfigGeneratorOutputParser()
|
||||
|
||||
prompt = OutLinePromptTemplate(
|
||||
template=output_parser.get_format_instructions(),
|
||||
input_variables=["audiences", "hoping_to_solve"],
|
||||
partial_variables={
|
||||
"variable": '{variable}',
|
||||
"lanA": '{lanA}',
|
||||
"lanB": '{lanB}',
|
||||
"topic": '{topic}'
|
||||
},
|
||||
validate_template=False
|
||||
)
|
||||
|
||||
_input = prompt.format_prompt(audiences=audiences, hoping_to_solve=hoping_to_solve)
|
||||
|
||||
llm: StreamableOpenAI = LLMBuilder.to_llm(
|
||||
tenant_id=tenant_id,
|
||||
model_name=generate_base_model,
|
||||
temperature=0,
|
||||
max_tokens=512
|
||||
)
|
||||
|
||||
if isinstance(llm, BaseChatModel):
|
||||
query = [HumanMessage(content=_input.to_string())]
|
||||
else:
|
||||
query = _input.to_string()
|
||||
|
||||
try:
|
||||
output = llm(query)
|
||||
rule_config = output_parser.parse(output)
|
||||
except OutputParserException:
|
||||
raise ValueError('Please give a valid input for intended audience or hoping to solve problems.')
|
||||
except Exception:
|
||||
logging.exception("Error generating prompt")
|
||||
rule_config = {
|
||||
"prompt": "",
|
||||
"variables": [],
|
||||
"opening_statement": ""
|
||||
}
|
||||
|
||||
return rule_config
|
||||
|
||||
@ -33,8 +33,11 @@ class IndexBuilder:
|
||||
max_chunk_overlap=20
|
||||
)
|
||||
|
||||
provider = LLMBuilder.get_default_provider(tenant_id)
|
||||
|
||||
model_credentials = LLMBuilder.get_model_credentials(
|
||||
tenant_id=tenant_id,
|
||||
model_provider=provider,
|
||||
model_name='text-embedding-ada-002'
|
||||
)
|
||||
|
||||
@ -43,3 +46,15 @@ class IndexBuilder:
|
||||
prompt_helper=prompt_helper,
|
||||
embed_model=OpenAIEmbedding(**model_credentials),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_fake_llm_service_context(cls, tenant_id: str) -> ServiceContext:
|
||||
llm = LLMBuilder.to_llm(
|
||||
tenant_id=tenant_id,
|
||||
model_name='fake'
|
||||
)
|
||||
|
||||
return ServiceContext.from_defaults(
|
||||
llm_predictor=LLMPredictor(llm=llm),
|
||||
embed_model=OpenAIEmbedding()
|
||||
)
|
||||
|
||||
111
api/core/index/readers/markdown_parser.py
Normal file
111
api/core/index/readers/markdown_parser.py
Normal file
@ -0,0 +1,111 @@
|
||||
"""Markdown parser.
|
||||
|
||||
Contains parser for md files.
|
||||
|
||||
"""
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple, Union, cast
|
||||
|
||||
from llama_index.readers.file.base_parser import BaseParser
|
||||
|
||||
|
||||
class MarkdownParser(BaseParser):
|
||||
"""Markdown parser.
|
||||
|
||||
Extract text from markdown files.
|
||||
Returns dictionary with keys as headers and values as the text between headers.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*args: Any,
|
||||
remove_hyperlinks: bool = True,
|
||||
remove_images: bool = True,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Init params."""
|
||||
super().__init__(*args, **kwargs)
|
||||
self._remove_hyperlinks = remove_hyperlinks
|
||||
self._remove_images = remove_images
|
||||
|
||||
def markdown_to_tups(self, markdown_text: str) -> List[Tuple[Optional[str], str]]:
|
||||
"""Convert a markdown file to a dictionary.
|
||||
|
||||
The keys are the headers and the values are the text under each header.
|
||||
|
||||
"""
|
||||
markdown_tups: List[Tuple[Optional[str], str]] = []
|
||||
lines = markdown_text.split("\n")
|
||||
|
||||
current_header = None
|
||||
current_text = ""
|
||||
|
||||
for line in lines:
|
||||
header_match = re.match(r"^#+\s", line)
|
||||
if header_match:
|
||||
if current_header is not None:
|
||||
markdown_tups.append((current_header, current_text))
|
||||
|
||||
current_header = line
|
||||
current_text = ""
|
||||
else:
|
||||
current_text += line + "\n"
|
||||
markdown_tups.append((current_header, current_text))
|
||||
|
||||
if current_header is not None:
|
||||
# pass linting, assert keys are defined
|
||||
markdown_tups = [
|
||||
(re.sub(r"#", "", cast(str, key)).strip(), re.sub(r"<.*?>", "", value))
|
||||
for key, value in markdown_tups
|
||||
]
|
||||
else:
|
||||
markdown_tups = [
|
||||
(key, re.sub("\n", "", value)) for key, value in markdown_tups
|
||||
]
|
||||
|
||||
return markdown_tups
|
||||
|
||||
def remove_images(self, content: str) -> str:
|
||||
"""Get a dictionary of a markdown file from its path."""
|
||||
pattern = r"!{1}\[\[(.*)\]\]"
|
||||
content = re.sub(pattern, "", content)
|
||||
return content
|
||||
|
||||
def remove_hyperlinks(self, content: str) -> str:
|
||||
"""Get a dictionary of a markdown file from its path."""
|
||||
pattern = r"\[(.*?)\]\((.*?)\)"
|
||||
content = re.sub(pattern, r"\1", content)
|
||||
return content
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Initialize the parser with the config."""
|
||||
return {}
|
||||
|
||||
def parse_tups(
|
||||
self, filepath: Path, errors: str = "ignore"
|
||||
) -> List[Tuple[Optional[str], str]]:
|
||||
"""Parse file into tuples."""
|
||||
with open(filepath, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
if self._remove_hyperlinks:
|
||||
content = self.remove_hyperlinks(content)
|
||||
if self._remove_images:
|
||||
content = self.remove_images(content)
|
||||
markdown_tups = self.markdown_to_tups(content)
|
||||
return markdown_tups
|
||||
|
||||
def parse_file(
|
||||
self, filepath: Path, errors: str = "ignore"
|
||||
) -> Union[str, List[str]]:
|
||||
"""Parse file into string."""
|
||||
tups = self.parse_tups(filepath, errors=errors)
|
||||
results = []
|
||||
# TODO: don't include headers right now
|
||||
for header, value in tups:
|
||||
if header is None:
|
||||
results.append(value)
|
||||
else:
|
||||
results.append(f"\n\n{header}\n{value}")
|
||||
return results
|
||||
31
api/core/index/readers/xlsx_parser.py
Normal file
31
api/core/index/readers/xlsx_parser.py
Normal file
@ -0,0 +1,31 @@
|
||||
from pathlib import Path
|
||||
import json
|
||||
from typing import Dict
|
||||
from openpyxl import load_workbook
|
||||
|
||||
from llama_index.readers.file.base_parser import BaseParser
|
||||
from flask import current_app
|
||||
|
||||
|
||||
class XLSXParser(BaseParser):
|
||||
"""XLSX parser."""
|
||||
|
||||
def _init_parser(self) -> Dict:
|
||||
"""Init parser"""
|
||||
return {}
|
||||
|
||||
def parse_file(self, file: Path, errors: str = "ignore") -> str:
|
||||
data = []
|
||||
keys = []
|
||||
with open(file, "r") as fp:
|
||||
wb = load_workbook(filename=file, read_only=True)
|
||||
# loop over all sheets
|
||||
for sheet in wb:
|
||||
for row in sheet.iter_rows(values_only=True):
|
||||
if all(v is None for v in row):
|
||||
continue
|
||||
if keys == []:
|
||||
keys = row
|
||||
else:
|
||||
data.append(json.dumps(dict(zip(keys, row)), ensure_ascii=False))
|
||||
return data
|
||||
68
api/core/index/spiltter/fixed_text_splitter.py
Normal file
68
api/core/index/spiltter/fixed_text_splitter.py
Normal file
@ -0,0 +1,68 @@
|
||||
"""Functionality for splitting text."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
List,
|
||||
Optional,
|
||||
)
|
||||
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
|
||||
|
||||
class FixedRecursiveCharacterTextSplitter(RecursiveCharacterTextSplitter):
|
||||
def __init__(self, fixed_separator: str = "\n\n", separators: Optional[List[str]] = None, **kwargs: Any):
|
||||
"""Create a new TextSplitter."""
|
||||
super().__init__(**kwargs)
|
||||
self._fixed_separator = fixed_separator
|
||||
self._separators = separators or ["\n\n", "\n", " ", ""]
|
||||
|
||||
def split_text(self, text: str) -> List[str]:
|
||||
"""Split incoming text and return chunks."""
|
||||
if self._fixed_separator:
|
||||
chunks = text.split(self._fixed_separator)
|
||||
else:
|
||||
chunks = list(text)
|
||||
|
||||
final_chunks = []
|
||||
for chunk in chunks:
|
||||
if self._length_function(chunk) > self._chunk_size:
|
||||
final_chunks.extend(self.recursive_split_text(chunk))
|
||||
else:
|
||||
final_chunks.append(chunk)
|
||||
|
||||
return final_chunks
|
||||
|
||||
def recursive_split_text(self, text: str) -> List[str]:
|
||||
"""Split incoming text and return chunks."""
|
||||
final_chunks = []
|
||||
# Get appropriate separator to use
|
||||
separator = self._separators[-1]
|
||||
for _s in self._separators:
|
||||
if _s == "":
|
||||
separator = _s
|
||||
break
|
||||
if _s in text:
|
||||
separator = _s
|
||||
break
|
||||
# Now that we have the separator, split the text
|
||||
if separator:
|
||||
splits = text.split(separator)
|
||||
else:
|
||||
splits = list(text)
|
||||
# Now go merging things, recursively splitting longer texts.
|
||||
_good_splits = []
|
||||
for s in splits:
|
||||
if self._length_function(s) < self._chunk_size:
|
||||
_good_splits.append(s)
|
||||
else:
|
||||
if _good_splits:
|
||||
merged_text = self._merge_splits(_good_splits, separator)
|
||||
final_chunks.extend(merged_text)
|
||||
_good_splits = []
|
||||
other_info = self.recursive_split_text(s)
|
||||
final_chunks.extend(other_info)
|
||||
if _good_splits:
|
||||
merged_text = self._merge_splits(_good_splits, separator)
|
||||
final_chunks.extend(merged_text)
|
||||
return final_chunks
|
||||
@ -83,7 +83,7 @@ class VectorIndex:
|
||||
if not self._dataset.index_struct_dict:
|
||||
return
|
||||
|
||||
service_context = IndexBuilder.get_default_service_context(tenant_id=self._dataset.tenant_id)
|
||||
service_context = IndexBuilder.get_fake_llm_service_context(tenant_id=self._dataset.tenant_id)
|
||||
|
||||
index = vector_store.get_index(
|
||||
service_context=service_context,
|
||||
@ -101,7 +101,7 @@ class VectorIndex:
|
||||
if not self._dataset.index_struct_dict:
|
||||
return
|
||||
|
||||
service_context = IndexBuilder.get_default_service_context(tenant_id=self._dataset.tenant_id)
|
||||
service_context = IndexBuilder.get_fake_llm_service_context(tenant_id=self._dataset.tenant_id)
|
||||
|
||||
index = vector_store.get_index(
|
||||
service_context=service_context,
|
||||
|
||||
@ -13,11 +13,13 @@ from llama_index.data_structs.node_v2 import DocumentRelationship
|
||||
from llama_index.node_parser import SimpleNodeParser, NodeParser
|
||||
from llama_index.readers.file.base import DEFAULT_FILE_EXTRACTOR
|
||||
from llama_index.readers.file.markdown_parser import MarkdownParser
|
||||
|
||||
from core.index.readers.xlsx_parser import XLSXParser
|
||||
from core.docstore.dataset_docstore import DatesetDocumentStore
|
||||
from core.index.keyword_table_index import KeywordTableIndex
|
||||
from core.index.readers.html_parser import HTMLParser
|
||||
from core.index.readers.markdown_parser import MarkdownParser
|
||||
from core.index.readers.pdf_parser import PDFParser
|
||||
from core.index.spiltter.fixed_text_splitter import FixedRecursiveCharacterTextSplitter
|
||||
from core.index.vector_index import VectorIndex
|
||||
from core.llm.token_calculator import TokenCalculator
|
||||
from extensions.ext_database import db
|
||||
@ -246,9 +248,11 @@ class IndexingRunner:
|
||||
|
||||
file_extractor = DEFAULT_FILE_EXTRACTOR.copy()
|
||||
file_extractor[".markdown"] = MarkdownParser()
|
||||
file_extractor[".md"] = MarkdownParser()
|
||||
file_extractor[".html"] = HTMLParser()
|
||||
file_extractor[".htm"] = HTMLParser()
|
||||
file_extractor[".pdf"] = PDFParser({'upload_file': upload_file})
|
||||
file_extractor[".xlsx"] = XLSXParser()
|
||||
|
||||
loader = SimpleDirectoryReader(input_files=[filepath], file_extractor=file_extractor)
|
||||
text_docs = loader.load_data()
|
||||
@ -267,16 +271,14 @@ class IndexingRunner:
|
||||
raise ValueError("Custom segment length should be between 50 and 1000.")
|
||||
|
||||
separator = segmentation["separator"]
|
||||
if not separator:
|
||||
separators = ["\n\n", "。", ".", " ", ""]
|
||||
else:
|
||||
if separator:
|
||||
separator = separator.replace('\\n', '\n')
|
||||
separators = [separator, ""]
|
||||
|
||||
character_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
|
||||
character_splitter = FixedRecursiveCharacterTextSplitter.from_tiktoken_encoder(
|
||||
chunk_size=segmentation["max_tokens"],
|
||||
chunk_overlap=0,
|
||||
separators=separators
|
||||
fixed_separator=separator,
|
||||
separators=["\n\n", "。", ".", " ", ""]
|
||||
)
|
||||
else:
|
||||
# Automatic segmentation
|
||||
@ -344,7 +346,7 @@ class IndexingRunner:
|
||||
|
||||
# parse document to nodes
|
||||
nodes = node_parser.get_nodes_from_documents([text_doc])
|
||||
|
||||
nodes = [node for node in nodes if node.text is not None and node.text.strip()]
|
||||
all_nodes.extend(nodes)
|
||||
|
||||
return all_nodes
|
||||
|
||||
@ -4,9 +4,14 @@ from langchain.callbacks import CallbackManager
|
||||
from langchain.llms.fake import FakeListLLM
|
||||
|
||||
from core.constant import llm_constant
|
||||
from core.llm.error import ProviderTokenNotInitError
|
||||
from core.llm.provider.base import BaseProvider
|
||||
from core.llm.provider.llm_provider_service import LLMProviderService
|
||||
from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI
|
||||
from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI
|
||||
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
|
||||
from core.llm.streamable_open_ai import StreamableOpenAI
|
||||
from models.provider import ProviderType
|
||||
|
||||
|
||||
class LLMBuilder:
|
||||
@ -31,16 +36,23 @@ class LLMBuilder:
|
||||
if model_name == 'fake':
|
||||
return FakeListLLM(responses=[])
|
||||
|
||||
provider = cls.get_default_provider(tenant_id)
|
||||
|
||||
mode = cls.get_mode_by_model(model_name)
|
||||
if mode == 'chat':
|
||||
# llm_cls = StreamableAzureChatOpenAI
|
||||
llm_cls = StreamableChatOpenAI
|
||||
if provider == 'openai':
|
||||
llm_cls = StreamableChatOpenAI
|
||||
else:
|
||||
llm_cls = StreamableAzureChatOpenAI
|
||||
elif mode == 'completion':
|
||||
llm_cls = StreamableOpenAI
|
||||
if provider == 'openai':
|
||||
llm_cls = StreamableOpenAI
|
||||
else:
|
||||
llm_cls = StreamableAzureOpenAI
|
||||
else:
|
||||
raise ValueError(f"model name {model_name} is not supported.")
|
||||
|
||||
model_credentials = cls.get_model_credentials(tenant_id, model_name)
|
||||
model_credentials = cls.get_model_credentials(tenant_id, provider, model_name)
|
||||
|
||||
return llm_cls(
|
||||
model_name=model_name,
|
||||
@ -86,18 +98,31 @@ class LLMBuilder:
|
||||
raise ValueError(f"model name {model_name} is not supported.")
|
||||
|
||||
@classmethod
|
||||
def get_model_credentials(cls, tenant_id: str, model_name: str) -> dict:
|
||||
def get_model_credentials(cls, tenant_id: str, model_provider: str, model_name: str) -> dict:
|
||||
"""
|
||||
Returns the API credentials for the given tenant_id and model_name, based on the model's provider.
|
||||
Raises an exception if the model_name is not found or if the provider is not found.
|
||||
"""
|
||||
if not model_name:
|
||||
raise Exception('model name not found')
|
||||
#
|
||||
# if model_name not in llm_constant.models:
|
||||
# raise Exception('model {} not found'.format(model_name))
|
||||
|
||||
if model_name not in llm_constant.models:
|
||||
raise Exception('model {} not found'.format(model_name))
|
||||
|
||||
model_provider = llm_constant.models[model_name]
|
||||
# model_provider = llm_constant.models[model_name]
|
||||
|
||||
provider_service = LLMProviderService(tenant_id=tenant_id, provider_name=model_provider)
|
||||
return provider_service.get_credentials(model_name)
|
||||
|
||||
@classmethod
|
||||
def get_default_provider(cls, tenant_id: str) -> str:
|
||||
provider = BaseProvider.get_valid_provider(tenant_id)
|
||||
if not provider:
|
||||
raise ProviderTokenNotInitError()
|
||||
|
||||
if provider.provider_type == ProviderType.SYSTEM.value:
|
||||
provider_name = 'openai'
|
||||
else:
|
||||
provider_name = provider.provider_name
|
||||
|
||||
return provider_name
|
||||
|
||||
@ -1,22 +1,24 @@
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional, Union
|
||||
|
||||
import requests
|
||||
|
||||
from core.llm.provider.base import BaseProvider
|
||||
from core.llm.provider.errors import ValidateFailedError
|
||||
from models.provider import ProviderName
|
||||
|
||||
|
||||
class AzureProvider(BaseProvider):
|
||||
def get_models(self, model_id: Optional[str] = None) -> list[dict]:
|
||||
credentials = self.get_credentials(model_id)
|
||||
def get_models(self, model_id: Optional[str] = None, credentials: Optional[dict] = None) -> list[dict]:
|
||||
credentials = self.get_credentials(model_id) if not credentials else credentials
|
||||
url = "{}/openai/deployments?api-version={}".format(
|
||||
credentials.get('openai_api_base'),
|
||||
credentials.get('openai_api_version')
|
||||
str(credentials.get('openai_api_base')),
|
||||
str(credentials.get('openai_api_version'))
|
||||
)
|
||||
|
||||
headers = {
|
||||
"api-key": credentials.get('openai_api_key'),
|
||||
"api-key": str(credentials.get('openai_api_key')),
|
||||
"content-type": "application/json; charset=utf-8"
|
||||
}
|
||||
|
||||
@ -29,17 +31,18 @@ class AzureProvider(BaseProvider):
|
||||
'name': '{} ({})'.format(deployment['id'], deployment['model'])
|
||||
} for deployment in result['data'] if deployment['status'] == 'succeeded']
|
||||
else:
|
||||
# TODO: optimize in future
|
||||
raise Exception('Failed to get deployments from Azure OpenAI. Status code: {}'.format(response.status_code))
|
||||
if response.status_code == 401:
|
||||
raise AzureAuthenticationError()
|
||||
else:
|
||||
raise AzureRequestFailedError('Failed to request Azure OpenAI. Status code: {}'.format(response.status_code))
|
||||
|
||||
def get_credentials(self, model_id: Optional[str] = None) -> dict:
|
||||
"""
|
||||
Returns the API credentials for Azure OpenAI as a dictionary.
|
||||
"""
|
||||
encrypted_config = self.get_provider_api_key(model_id=model_id)
|
||||
config = json.loads(encrypted_config)
|
||||
config = self.get_provider_api_key(model_id=model_id)
|
||||
config['openai_api_type'] = 'azure'
|
||||
config['deployment_name'] = model_id
|
||||
config['deployment_name'] = model_id.replace('.', '') if model_id else None
|
||||
return config
|
||||
|
||||
def get_provider_name(self):
|
||||
@ -51,12 +54,11 @@ class AzureProvider(BaseProvider):
|
||||
"""
|
||||
try:
|
||||
config = self.get_provider_api_key()
|
||||
config = json.loads(config)
|
||||
except:
|
||||
config = {
|
||||
'openai_api_type': 'azure',
|
||||
'openai_api_version': '2023-03-15-preview',
|
||||
'openai_api_base': 'https://foo.microsoft.com/bar',
|
||||
'openai_api_base': '',
|
||||
'openai_api_key': ''
|
||||
}
|
||||
|
||||
@ -65,7 +67,7 @@ class AzureProvider(BaseProvider):
|
||||
config = {
|
||||
'openai_api_type': 'azure',
|
||||
'openai_api_version': '2023-03-15-preview',
|
||||
'openai_api_base': 'https://foo.microsoft.com/bar',
|
||||
'openai_api_base': '',
|
||||
'openai_api_key': ''
|
||||
}
|
||||
|
||||
@ -76,14 +78,49 @@ class AzureProvider(BaseProvider):
|
||||
|
||||
def get_token_type(self):
|
||||
# TODO: change to dict when implemented
|
||||
return lambda value: value
|
||||
return dict
|
||||
|
||||
def config_validate(self, config: Union[dict | str]):
|
||||
"""
|
||||
Validates the given config.
|
||||
"""
|
||||
# TODO: implement
|
||||
pass
|
||||
try:
|
||||
if not isinstance(config, dict):
|
||||
raise ValueError('Config must be a object.')
|
||||
|
||||
if 'openai_api_version' not in config:
|
||||
config['openai_api_version'] = '2023-03-15-preview'
|
||||
|
||||
models = self.get_models(credentials=config)
|
||||
|
||||
if not models:
|
||||
raise ValidateFailedError("Please add deployments for 'text-davinci-003', "
|
||||
"'gpt-3.5-turbo', 'text-embedding-ada-002'.")
|
||||
|
||||
fixed_model_ids = [
|
||||
'text-davinci-003',
|
||||
'gpt-35-turbo',
|
||||
'text-embedding-ada-002'
|
||||
]
|
||||
|
||||
current_model_ids = [model['id'] for model in models]
|
||||
|
||||
missing_model_ids = [fixed_model_id for fixed_model_id in fixed_model_ids if
|
||||
fixed_model_id not in current_model_ids]
|
||||
|
||||
if missing_model_ids:
|
||||
raise ValidateFailedError("Please add deployments for '{}'.".format(", ".join(missing_model_ids)))
|
||||
except ValidateFailedError as e:
|
||||
raise e
|
||||
except AzureAuthenticationError:
|
||||
raise ValidateFailedError('Validation failed, please check your API Key.')
|
||||
except (requests.ConnectionError, requests.RequestException):
|
||||
raise ValidateFailedError('Validation failed, please check your API Base Endpoint.')
|
||||
except AzureRequestFailedError as ex:
|
||||
raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex)))
|
||||
except Exception as ex:
|
||||
logging.exception('Azure OpenAI Credentials validation failed')
|
||||
raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex)))
|
||||
|
||||
def get_encrypted_token(self, config: Union[dict | str]):
|
||||
"""
|
||||
@ -103,3 +140,11 @@ class AzureProvider(BaseProvider):
|
||||
config = json.loads(token)
|
||||
config['openai_api_key'] = self.decrypt_token(config['openai_api_key'])
|
||||
return config
|
||||
|
||||
|
||||
class AzureAuthenticationError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AzureRequestFailedError(Exception):
|
||||
pass
|
||||
|
||||
@ -14,7 +14,7 @@ class BaseProvider(ABC):
|
||||
def __init__(self, tenant_id: str):
|
||||
self.tenant_id = tenant_id
|
||||
|
||||
def get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> str:
|
||||
def get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> Union[str | dict]:
|
||||
"""
|
||||
Returns the decrypted API key for the given tenant_id and provider_name.
|
||||
If the provider is of type SYSTEM and the quota is exceeded, raises a QuotaExceededError.
|
||||
@ -43,23 +43,35 @@ class BaseProvider(ABC):
|
||||
Returns the Provider instance for the given tenant_id and provider_name.
|
||||
If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag.
|
||||
"""
|
||||
providers = db.session.query(Provider).filter(
|
||||
Provider.tenant_id == self.tenant_id,
|
||||
Provider.provider_name == self.get_provider_name().value
|
||||
).order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all()
|
||||
return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, prefer_custom)
|
||||
|
||||
@classmethod
|
||||
def get_valid_provider(cls, tenant_id: str, provider_name: str = None, prefer_custom: bool = False) -> Optional[Provider]:
|
||||
"""
|
||||
Returns the Provider instance for the given tenant_id and provider_name.
|
||||
If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag.
|
||||
"""
|
||||
query = db.session.query(Provider).filter(
|
||||
Provider.tenant_id == tenant_id
|
||||
)
|
||||
|
||||
if provider_name:
|
||||
query = query.filter(Provider.provider_name == provider_name)
|
||||
|
||||
providers = query.order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all()
|
||||
|
||||
custom_provider = None
|
||||
system_provider = None
|
||||
|
||||
for provider in providers:
|
||||
if provider.provider_type == ProviderType.CUSTOM.value:
|
||||
if provider.provider_type == ProviderType.CUSTOM.value and provider.is_valid and provider.encrypted_config:
|
||||
custom_provider = provider
|
||||
elif provider.provider_type == ProviderType.SYSTEM.value:
|
||||
elif provider.provider_type == ProviderType.SYSTEM.value and provider.is_valid:
|
||||
system_provider = provider
|
||||
|
||||
if custom_provider and custom_provider.is_valid and custom_provider.encrypted_config:
|
||||
if custom_provider:
|
||||
return custom_provider
|
||||
elif system_provider and system_provider.is_valid:
|
||||
elif system_provider:
|
||||
return system_provider
|
||||
else:
|
||||
return None
|
||||
@ -80,7 +92,7 @@ class BaseProvider(ABC):
|
||||
try:
|
||||
config = self.get_provider_api_key()
|
||||
except:
|
||||
config = 'THIS-IS-A-MOCK-TOKEN'
|
||||
config = ''
|
||||
|
||||
if obfuscated:
|
||||
return self.obfuscated_token(config)
|
||||
|
||||
@ -1,12 +1,50 @@
|
||||
import requests
|
||||
from langchain.schema import BaseMessage, ChatResult, LLMResult
|
||||
from langchain.chat_models import AzureChatOpenAI
|
||||
from typing import Optional, List
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
from pydantic import root_validator
|
||||
|
||||
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
|
||||
|
||||
|
||||
class StreamableAzureChatOpenAI(AzureChatOpenAI):
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
try:
|
||||
import openai
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import openai python package. "
|
||||
"Please install it with `pip install openai`."
|
||||
)
|
||||
try:
|
||||
values["client"] = openai.ChatCompletion
|
||||
except AttributeError:
|
||||
raise ValueError(
|
||||
"`openai` has no `ChatCompletion` attribute, this is likely "
|
||||
"due to an old version of the openai package. Try upgrading it "
|
||||
"with `pip install --upgrade openai`."
|
||||
)
|
||||
if values["n"] < 1:
|
||||
raise ValueError("n must be at least 1.")
|
||||
if values["n"] > 1 and values["streaming"]:
|
||||
raise ValueError("n must be 1 when streaming.")
|
||||
return values
|
||||
|
||||
@property
|
||||
def _default_params(self) -> Dict[str, Any]:
|
||||
"""Get the default parameters for calling OpenAI API."""
|
||||
return {
|
||||
**super()._default_params,
|
||||
"engine": self.deployment_name,
|
||||
"api_type": self.openai_api_type,
|
||||
"api_base": self.openai_api_base,
|
||||
"api_version": self.openai_api_version,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}
|
||||
|
||||
def get_messages_tokens(self, messages: List[BaseMessage]) -> int:
|
||||
"""Get the number of tokens in a list of messages.
|
||||
|
||||
|
||||
64
api/core/llm/streamable_azure_open_ai.py
Normal file
64
api/core/llm/streamable_azure_open_ai.py
Normal file
@ -0,0 +1,64 @@
|
||||
import os
|
||||
|
||||
from langchain.llms import AzureOpenAI
|
||||
from langchain.schema import LLMResult
|
||||
from typing import Optional, List, Dict, Mapping, Any
|
||||
|
||||
from pydantic import root_validator
|
||||
|
||||
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
|
||||
|
||||
|
||||
class StreamableAzureOpenAI(AzureOpenAI):
|
||||
openai_api_type: str = "azure"
|
||||
openai_api_version: str = ""
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
try:
|
||||
import openai
|
||||
|
||||
values["client"] = openai.Completion
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import openai python package. "
|
||||
"Please install it with `pip install openai`."
|
||||
)
|
||||
if values["streaming"] and values["n"] > 1:
|
||||
raise ValueError("Cannot stream results when n > 1.")
|
||||
if values["streaming"] and values["best_of"] > 1:
|
||||
raise ValueError("Cannot stream results when best_of > 1.")
|
||||
return values
|
||||
|
||||
@property
|
||||
def _invocation_params(self) -> Dict[str, Any]:
|
||||
return {**super()._invocation_params, **{
|
||||
"api_type": self.openai_api_type,
|
||||
"api_base": self.openai_api_base,
|
||||
"api_version": self.openai_api_version,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}}
|
||||
|
||||
@property
|
||||
def _identifying_params(self) -> Mapping[str, Any]:
|
||||
return {**super()._identifying_params, **{
|
||||
"api_type": self.openai_api_type,
|
||||
"api_base": self.openai_api_base,
|
||||
"api_version": self.openai_api_version,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}}
|
||||
|
||||
@handle_llm_exceptions
|
||||
def generate(
|
||||
self, prompts: List[str], stop: Optional[List[str]] = None
|
||||
) -> LLMResult:
|
||||
return super().generate(prompts, stop)
|
||||
|
||||
@handle_llm_exceptions_async
|
||||
async def agenerate(
|
||||
self, prompts: List[str], stop: Optional[List[str]] = None
|
||||
) -> LLMResult:
|
||||
return await super().agenerate(prompts, stop)
|
||||
@ -1,12 +1,52 @@
|
||||
import os
|
||||
|
||||
from langchain.schema import BaseMessage, ChatResult, LLMResult
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
from typing import Optional, List
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
from pydantic import root_validator
|
||||
|
||||
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
|
||||
|
||||
|
||||
class StreamableChatOpenAI(ChatOpenAI):
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
try:
|
||||
import openai
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import openai python package. "
|
||||
"Please install it with `pip install openai`."
|
||||
)
|
||||
try:
|
||||
values["client"] = openai.ChatCompletion
|
||||
except AttributeError:
|
||||
raise ValueError(
|
||||
"`openai` has no `ChatCompletion` attribute, this is likely "
|
||||
"due to an old version of the openai package. Try upgrading it "
|
||||
"with `pip install --upgrade openai`."
|
||||
)
|
||||
if values["n"] < 1:
|
||||
raise ValueError("n must be at least 1.")
|
||||
if values["n"] > 1 and values["streaming"]:
|
||||
raise ValueError("n must be 1 when streaming.")
|
||||
return values
|
||||
|
||||
@property
|
||||
def _default_params(self) -> Dict[str, Any]:
|
||||
"""Get the default parameters for calling OpenAI API."""
|
||||
return {
|
||||
**super()._default_params,
|
||||
"api_type": 'openai',
|
||||
"api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
|
||||
"api_version": None,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}
|
||||
|
||||
def get_messages_tokens(self, messages: List[BaseMessage]) -> int:
|
||||
"""Get the number of tokens in a list of messages.
|
||||
|
||||
|
||||
@ -1,12 +1,54 @@
|
||||
import os
|
||||
|
||||
from langchain.schema import LLMResult
|
||||
from typing import Optional, List
|
||||
from typing import Optional, List, Dict, Any, Mapping
|
||||
from langchain import OpenAI
|
||||
from pydantic import root_validator
|
||||
|
||||
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
|
||||
|
||||
|
||||
class StreamableOpenAI(OpenAI):
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
try:
|
||||
import openai
|
||||
|
||||
values["client"] = openai.Completion
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import openai python package. "
|
||||
"Please install it with `pip install openai`."
|
||||
)
|
||||
if values["streaming"] and values["n"] > 1:
|
||||
raise ValueError("Cannot stream results when n > 1.")
|
||||
if values["streaming"] and values["best_of"] > 1:
|
||||
raise ValueError("Cannot stream results when best_of > 1.")
|
||||
return values
|
||||
|
||||
@property
|
||||
def _invocation_params(self) -> Dict[str, Any]:
|
||||
return {**super()._invocation_params, **{
|
||||
"api_type": 'openai',
|
||||
"api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
|
||||
"api_version": None,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}}
|
||||
|
||||
@property
|
||||
def _identifying_params(self) -> Mapping[str, Any]:
|
||||
return {**super()._identifying_params, **{
|
||||
"api_type": 'openai',
|
||||
"api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"),
|
||||
"api_version": None,
|
||||
"api_key": self.openai_api_key,
|
||||
"organization": self.openai_organization if self.openai_organization else None,
|
||||
}}
|
||||
|
||||
|
||||
@handle_llm_exceptions
|
||||
def generate(
|
||||
self, prompts: List[str], stop: Optional[List[str]] = None
|
||||
|
||||
32
api/core/prompt/output_parser/rule_config_generator.py
Normal file
32
api/core/prompt/output_parser/rule_config_generator.py
Normal file
@ -0,0 +1,32 @@
|
||||
from typing import Any
|
||||
|
||||
from langchain.schema import BaseOutputParser, OutputParserException
|
||||
from core.prompt.prompts import RULE_CONFIG_GENERATE_TEMPLATE
|
||||
from libs.json_in_md_parser import parse_and_check_json_markdown
|
||||
|
||||
|
||||
class RuleConfigGeneratorOutputParser(BaseOutputParser):
|
||||
|
||||
def get_format_instructions(self) -> str:
|
||||
return RULE_CONFIG_GENERATE_TEMPLATE
|
||||
|
||||
def parse(self, text: str) -> Any:
|
||||
try:
|
||||
expected_keys = ["prompt", "variables", "opening_statement"]
|
||||
parsed = parse_and_check_json_markdown(text, expected_keys)
|
||||
if not isinstance(parsed["prompt"], str):
|
||||
raise ValueError("Expected 'prompt' to be a string.")
|
||||
if not isinstance(parsed["variables"], list):
|
||||
raise ValueError(
|
||||
f"Expected 'variables' to be a list."
|
||||
)
|
||||
if not isinstance(parsed["opening_statement"], str):
|
||||
raise ValueError(
|
||||
f"Expected 'opening_statement' to be a str."
|
||||
)
|
||||
return parsed
|
||||
except Exception as e:
|
||||
raise OutputParserException(
|
||||
f"Parsing text\n{text}\n of rule config generator raised following error:\n{e}"
|
||||
)
|
||||
|
||||
@ -32,6 +32,6 @@ class PromptBuilder:
|
||||
|
||||
@classmethod
|
||||
def process_template(cls, template: str):
|
||||
processed_template = re.sub(r'\{(.+?)\}', r'\1', template)
|
||||
processed_template = re.sub(r'\{\{(.+?)\}\}', r'{\1}', processed_template)
|
||||
processed_template = re.sub(r'\{([a-zA-Z_]\w+?)\}', r'\1', template)
|
||||
processed_template = re.sub(r'\{\{([a-zA-Z_]\w+?)\}\}', r'{\1}', processed_template)
|
||||
return processed_template
|
||||
|
||||
@ -61,3 +61,60 @@ QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
|
||||
QUERY_KEYWORD_EXTRACT_TEMPLATE = QueryKeywordExtractPrompt(
|
||||
QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL
|
||||
)
|
||||
|
||||
RULE_CONFIG_GENERATE_TEMPLATE = """Given MY INTENDED AUDIENCES and HOPING TO SOLVE using a language model, please select \
|
||||
the model prompt that best suits the input.
|
||||
You will be provided with the prompt, variables, and an opening statement.
|
||||
Only the content enclosed in double curly braces, such as {{variable}}, in the prompt can be considered as a variable; \
|
||||
otherwise, it cannot exist as a variable in the variables.
|
||||
If you believe revising the original input will result in a better response from the language model, you may \
|
||||
suggest revisions.
|
||||
|
||||
<< FORMATTING >>
|
||||
Return a markdown code snippet with a JSON object formatted to look like, \
|
||||
no any other string out of markdown code snippet:
|
||||
```json
|
||||
{{{{
|
||||
"prompt": string \\ generated prompt
|
||||
"variables": list of string \\ variables
|
||||
"opening_statement": string \\ an opening statement to guide users on how to ask questions with generated prompt \
|
||||
and fill in variables, with a welcome sentence, and keep TLDR.
|
||||
}}}}
|
||||
```
|
||||
|
||||
<< EXAMPLES >>
|
||||
[EXAMPLE A]
|
||||
```json
|
||||
{
|
||||
"prompt": "Write a letter about love",
|
||||
"variables": [],
|
||||
"opening_statement": "Hi! I'm your love letter writer AI."
|
||||
}
|
||||
```
|
||||
|
||||
[EXAMPLE B]
|
||||
```json
|
||||
{
|
||||
"prompt": "Translate from {{lanA}} to {{lanB}}",
|
||||
"variables": ["lanA", "lanB"],
|
||||
"opening_statement": "Welcome to use translate app"
|
||||
}
|
||||
```
|
||||
|
||||
[EXAMPLE C]
|
||||
```json
|
||||
{
|
||||
"prompt": "Write a story about {{topic}}",
|
||||
"variables": ["topic"],
|
||||
"opening_statement": "I'm your story writer"
|
||||
}
|
||||
```
|
||||
|
||||
<< MY INTENDED AUDIENCES >>
|
||||
{audiences}
|
||||
|
||||
<< HOPING TO SOLVE >>
|
||||
{hoping_to_solve}
|
||||
|
||||
<< OUTPUT >>
|
||||
"""
|
||||
@ -10,24 +10,14 @@ from core.index.keyword_table_index import KeywordTableIndex
|
||||
from core.index.vector_index import VectorIndex
|
||||
from core.prompt.prompts import QUERY_KEYWORD_EXTRACT_TEMPLATE
|
||||
from core.tool.llama_index_tool import EnhanceLlamaIndexTool
|
||||
from extensions.ext_database import db
|
||||
from models.dataset import Dataset
|
||||
|
||||
|
||||
class DatasetToolBuilder:
|
||||
@classmethod
|
||||
def build_dataset_tool(cls, tenant_id: str, dataset_id: str,
|
||||
def build_dataset_tool(cls, dataset: Dataset,
|
||||
response_mode: str = "no_synthesizer",
|
||||
callback_handler: Optional[DatasetToolCallbackHandler] = None):
|
||||
# get dataset from dataset id
|
||||
dataset = db.session.query(Dataset).filter(
|
||||
Dataset.tenant_id == tenant_id,
|
||||
Dataset.id == dataset_id
|
||||
).first()
|
||||
|
||||
if not dataset:
|
||||
return None
|
||||
|
||||
if dataset.indexing_technique == "economy":
|
||||
# use keyword table query
|
||||
index = KeywordTableIndex(dataset=dataset).query_index
|
||||
@ -65,7 +55,7 @@ class DatasetToolBuilder:
|
||||
|
||||
index_tool_config = IndexToolConfig(
|
||||
index=index,
|
||||
name=f"dataset-{dataset_id}",
|
||||
name=f"dataset-{dataset.id}",
|
||||
description=description,
|
||||
index_query_kwargs=query_kwargs,
|
||||
tool_kwargs={
|
||||
@ -75,7 +65,7 @@ class DatasetToolBuilder:
|
||||
# return_direct: Whether to return LLM results directly or process the output data with an Output Parser
|
||||
)
|
||||
|
||||
index_callback_handler = DatasetIndexToolCallbackHandler(dataset_id=dataset_id)
|
||||
index_callback_handler = DatasetIndexToolCallbackHandler(dataset_id=dataset.id)
|
||||
|
||||
return EnhanceLlamaIndexTool.from_tool_config(
|
||||
tool_config=index_tool_config,
|
||||
|
||||
@ -29,7 +29,7 @@ class WeaviateVectorStoreClient(BaseVectorStoreClient):
|
||||
return weaviate.Client(
|
||||
url=endpoint,
|
||||
auth_client_secret=auth_config,
|
||||
timeout_config=(5, 15),
|
||||
timeout_config=(5, 60),
|
||||
startup_period=None
|
||||
)
|
||||
|
||||
|
||||
@ -15,9 +15,24 @@ def init_app(app: Flask) -> Celery:
|
||||
backend=app.config["CELERY_BACKEND"],
|
||||
task_ignore_result=True,
|
||||
)
|
||||
|
||||
# Add SSL options to the Celery configuration
|
||||
ssl_options = {
|
||||
"ssl_cert_reqs": None,
|
||||
"ssl_ca_certs": None,
|
||||
"ssl_certfile": None,
|
||||
"ssl_keyfile": None,
|
||||
}
|
||||
|
||||
celery_app.conf.update(
|
||||
result_backend=app.config["CELERY_RESULT_BACKEND"],
|
||||
)
|
||||
|
||||
if app.config["BROKER_USE_SSL"]:
|
||||
celery_app.conf.update(
|
||||
broker_use_ssl=ssl_options, # Add the SSL options to the broker configuration
|
||||
)
|
||||
|
||||
celery_app.set_default()
|
||||
app.extensions["celery"] = celery_app
|
||||
return celery_app
|
||||
|
||||
@ -1,18 +1,23 @@
|
||||
import redis
|
||||
|
||||
from redis.connection import SSLConnection, Connection
|
||||
|
||||
redis_client = redis.Redis()
|
||||
|
||||
|
||||
def init_app(app):
|
||||
connection_class = Connection
|
||||
if app.config.get('REDIS_USE_SSL', False):
|
||||
connection_class = SSLConnection
|
||||
|
||||
redis_client.connection_pool = redis.ConnectionPool(**{
|
||||
'host': app.config.get('REDIS_HOST', 'localhost'),
|
||||
'port': app.config.get('REDIS_PORT', 6379),
|
||||
'username': app.config.get('REDIS_USERNAME', None),
|
||||
'password': app.config.get('REDIS_PASSWORD', None),
|
||||
'db': app.config.get('REDIS_DB', 0),
|
||||
'encoding': 'utf-8',
|
||||
'encoding_errors': 'strict',
|
||||
'decode_responses': False
|
||||
})
|
||||
}, connection_class=connection_class)
|
||||
|
||||
app.extensions['redis'] = redis_client
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import redis
|
||||
from redis.connection import SSLConnection, Connection
|
||||
from flask import request
|
||||
from flask_session import Session, SqlAlchemySessionInterface, RedisSessionInterface
|
||||
from flask_session.sessions import total_seconds
|
||||
@ -23,16 +24,21 @@ def init_app(app):
|
||||
if session_type == 'sqlalchemy':
|
||||
app.session_interface = sqlalchemy_session_interface
|
||||
elif session_type == 'redis':
|
||||
connection_class = Connection
|
||||
if app.config.get('SESSION_REDIS_USE_SSL', False):
|
||||
connection_class = SSLConnection
|
||||
|
||||
sess_redis_client = redis.Redis()
|
||||
sess_redis_client.connection_pool = redis.ConnectionPool(**{
|
||||
'host': app.config.get('SESSION_REDIS_HOST', 'localhost'),
|
||||
'port': app.config.get('SESSION_REDIS_PORT', 6379),
|
||||
'username': app.config.get('SESSION_REDIS_USERNAME', None),
|
||||
'password': app.config.get('SESSION_REDIS_PASSWORD', None),
|
||||
'db': app.config.get('SESSION_REDIS_DB', 2),
|
||||
'encoding': 'utf-8',
|
||||
'encoding_errors': 'strict',
|
||||
'decode_responses': False
|
||||
})
|
||||
}, connection_class=connection_class)
|
||||
|
||||
app.extensions['session_redis'] = sess_redis_client
|
||||
|
||||
|
||||
@ -21,7 +21,7 @@ class TimestampField(fields.Raw):
|
||||
|
||||
def email(email):
|
||||
# Define a regex pattern for email addresses
|
||||
pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
|
||||
pattern = r"^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$"
|
||||
# Check if the email matches the pattern
|
||||
if re.match(pattern, email) is not None:
|
||||
return email
|
||||
|
||||
44
api/libs/json_in_md_parser.py
Normal file
44
api/libs/json_in_md_parser.py
Normal file
@ -0,0 +1,44 @@
|
||||
import json
|
||||
from typing import List
|
||||
|
||||
from langchain.schema import OutputParserException
|
||||
|
||||
|
||||
def parse_json_markdown(json_string: str) -> dict:
|
||||
# Remove the triple backticks if present
|
||||
json_string = json_string.strip()
|
||||
start_index = json_string.find("```json")
|
||||
end_index = json_string.find("```", start_index + len("```json"))
|
||||
|
||||
if start_index != -1 and end_index != -1:
|
||||
extracted_content = json_string[start_index + len("```json"):end_index].strip()
|
||||
|
||||
# Parse the JSON string into a Python dictionary
|
||||
parsed = json.loads(extracted_content)
|
||||
elif start_index != -1 and end_index == -1 and json_string.endswith("``"):
|
||||
end_index = json_string.find("``", start_index + len("```json"))
|
||||
extracted_content = json_string[start_index + len("```json"):end_index].strip()
|
||||
|
||||
# Parse the JSON string into a Python dictionary
|
||||
parsed = json.loads(extracted_content)
|
||||
elif json_string.startswith("{"):
|
||||
# Parse the JSON string into a Python dictionary
|
||||
parsed = json.loads(json_string)
|
||||
else:
|
||||
raise Exception("Could not find JSON block in the output.")
|
||||
|
||||
return parsed
|
||||
|
||||
|
||||
def parse_and_check_json_markdown(text: str, expected_keys: List[str]) -> dict:
|
||||
try:
|
||||
json_obj = parse_json_markdown(text)
|
||||
except json.JSONDecodeError as e:
|
||||
raise OutputParserException(f"Got invalid JSON object. Error: {e}")
|
||||
for key in expected_keys:
|
||||
if key not in json_obj:
|
||||
raise OutputParserException(
|
||||
f"Got invalid return object. Expected key `{key}` "
|
||||
f"to be present, but got {json_obj}"
|
||||
)
|
||||
return json_obj
|
||||
@ -18,6 +18,8 @@ depends_on = None
|
||||
|
||||
def upgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";')
|
||||
|
||||
op.create_table('account_integrates',
|
||||
sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
|
||||
sa.Column('account_id', postgresql.UUID(), nullable=False),
|
||||
@ -790,4 +792,6 @@ def downgrade():
|
||||
|
||||
op.drop_table('accounts')
|
||||
op.drop_table('account_integrates')
|
||||
|
||||
op.execute('DROP EXTENSION IF EXISTS "uuid-ossp";')
|
||||
# ### end Alembic commands ###
|
||||
|
||||
46
api/migrations/versions/9f4e3427ea84_add_created_by_role.py
Normal file
46
api/migrations/versions/9f4e3427ea84_add_created_by_role.py
Normal file
@ -0,0 +1,46 @@
|
||||
"""add created by role
|
||||
|
||||
Revision ID: 9f4e3427ea84
|
||||
Revises: 64b051264f32
|
||||
Create Date: 2023-05-17 17:29:01.060435
|
||||
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '9f4e3427ea84'
|
||||
down_revision = '64b051264f32'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
with op.batch_alter_table('pinned_conversations', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('created_by_role', sa.String(length=255), server_default=sa.text("'end_user'::character varying"), nullable=False))
|
||||
batch_op.drop_index('pinned_conversation_conversation_idx')
|
||||
batch_op.create_index('pinned_conversation_conversation_idx', ['app_id', 'conversation_id', 'created_by_role', 'created_by'], unique=False)
|
||||
|
||||
with op.batch_alter_table('saved_messages', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('created_by_role', sa.String(length=255), server_default=sa.text("'end_user'::character varying"), nullable=False))
|
||||
batch_op.drop_index('saved_message_message_idx')
|
||||
batch_op.create_index('saved_message_message_idx', ['app_id', 'message_id', 'created_by_role', 'created_by'], unique=False)
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
with op.batch_alter_table('saved_messages', schema=None) as batch_op:
|
||||
batch_op.drop_index('saved_message_message_idx')
|
||||
batch_op.create_index('saved_message_message_idx', ['app_id', 'message_id', 'created_by'], unique=False)
|
||||
batch_op.drop_column('created_by_role')
|
||||
|
||||
with op.batch_alter_table('pinned_conversations', schema=None) as batch_op:
|
||||
batch_op.drop_index('pinned_conversation_conversation_idx')
|
||||
batch_op.create_index('pinned_conversation_conversation_idx', ['app_id', 'conversation_id', 'created_by'], unique=False)
|
||||
batch_op.drop_column('created_by_role')
|
||||
|
||||
# ### end Alembic commands ###
|
||||
@ -0,0 +1,36 @@
|
||||
"""add language to recommend apps
|
||||
|
||||
Revision ID: a45f4dfde53b
|
||||
Revises: 9f4e3427ea84
|
||||
Create Date: 2023-05-25 17:50:32.052335
|
||||
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = 'a45f4dfde53b'
|
||||
down_revision = '9f4e3427ea84'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
with op.batch_alter_table('recommended_apps', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('language', sa.String(length=255), server_default=sa.text("'en-US'::character varying"), nullable=False))
|
||||
batch_op.drop_index('recommended_app_is_listed_idx')
|
||||
batch_op.create_index('recommended_app_is_listed_idx', ['is_listed', 'language'], unique=False)
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
with op.batch_alter_table('recommended_apps', schema=None) as batch_op:
|
||||
batch_op.drop_index('recommended_app_is_listed_idx')
|
||||
batch_op.create_index('recommended_app_is_listed_idx', ['is_listed'], unique=False)
|
||||
batch_op.drop_column('language')
|
||||
|
||||
# ### end Alembic commands ###
|
||||
@ -1,6 +1,6 @@
|
||||
import json
|
||||
|
||||
from flask import current_app
|
||||
from flask import current_app, request
|
||||
from flask_login import UserMixin
|
||||
from sqlalchemy.dialects.postgresql import UUID
|
||||
|
||||
@ -56,7 +56,7 @@ class App(db.Model):
|
||||
|
||||
@property
|
||||
def api_base_url(self):
|
||||
return current_app.config['API_URL'] + '/v1'
|
||||
return (current_app.config['API_URL'] if current_app.config['API_URL'] else request.host_url.rstrip('/')) + '/v1'
|
||||
|
||||
@property
|
||||
def tenant(self):
|
||||
@ -123,7 +123,7 @@ class RecommendedApp(db.Model):
|
||||
__table_args__ = (
|
||||
db.PrimaryKeyConstraint('id', name='recommended_app_pkey'),
|
||||
db.Index('recommended_app_app_id_idx', 'app_id'),
|
||||
db.Index('recommended_app_is_listed_idx', 'is_listed')
|
||||
db.Index('recommended_app_is_listed_idx', 'is_listed', 'language')
|
||||
)
|
||||
|
||||
id = db.Column(UUID, primary_key=True, server_default=db.text('uuid_generate_v4()'))
|
||||
@ -135,6 +135,7 @@ class RecommendedApp(db.Model):
|
||||
position = db.Column(db.Integer, nullable=False, default=0)
|
||||
is_listed = db.Column(db.Boolean, nullable=False, default=True)
|
||||
install_count = db.Column(db.Integer, nullable=False, default=0)
|
||||
language = db.Column(db.String(255), nullable=False, server_default=db.text("'en-US'::character varying"))
|
||||
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
|
||||
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
|
||||
|
||||
@ -143,17 +144,6 @@ class RecommendedApp(db.Model):
|
||||
app = db.session.query(App).filter(App.id == self.app_id).first()
|
||||
return app
|
||||
|
||||
# def set_description(self, lang, desc):
|
||||
# if self.description is None:
|
||||
# self.description = {}
|
||||
# self.description[lang] = desc
|
||||
|
||||
def get_description(self, lang):
|
||||
if self.description and lang in self.description:
|
||||
return self.description[lang]
|
||||
else:
|
||||
return self.description.get('en')
|
||||
|
||||
|
||||
class InstalledApp(db.Model):
|
||||
__tablename__ = 'installed_apps'
|
||||
@ -314,6 +304,10 @@ class Conversation(db.Model):
|
||||
def app(self):
|
||||
return db.session.query(App).filter(App.id == self.app_id).first()
|
||||
|
||||
@property
|
||||
def in_debug_mode(self):
|
||||
return self.override_model_configs is not None
|
||||
|
||||
|
||||
class Message(db.Model):
|
||||
__tablename__ = 'messages'
|
||||
@ -380,6 +374,10 @@ class Message(db.Model):
|
||||
|
||||
return None
|
||||
|
||||
@property
|
||||
def in_debug_mode(self):
|
||||
return self.override_model_configs is not None
|
||||
|
||||
|
||||
class MessageFeedback(db.Model):
|
||||
__tablename__ = 'message_feedbacks'
|
||||
@ -505,7 +503,7 @@ class Site(db.Model):
|
||||
|
||||
@property
|
||||
def app_base_url(self):
|
||||
return current_app.config['APP_URL']
|
||||
return (current_app.config['APP_URL'] if current_app.config['APP_URL'] else request.host_url.rstrip('/'))
|
||||
|
||||
|
||||
class ApiToken(db.Model):
|
||||
|
||||
@ -8,12 +8,13 @@ class SavedMessage(db.Model):
|
||||
__tablename__ = 'saved_messages'
|
||||
__table_args__ = (
|
||||
db.PrimaryKeyConstraint('id', name='saved_message_pkey'),
|
||||
db.Index('saved_message_message_idx', 'app_id', 'message_id', 'created_by'),
|
||||
db.Index('saved_message_message_idx', 'app_id', 'message_id', 'created_by_role', 'created_by'),
|
||||
)
|
||||
|
||||
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
|
||||
app_id = db.Column(UUID, nullable=False)
|
||||
message_id = db.Column(UUID, nullable=False)
|
||||
created_by_role = db.Column(db.String(255), nullable=False, server_default=db.text("'end_user'::character varying"))
|
||||
created_by = db.Column(UUID, nullable=False)
|
||||
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
|
||||
|
||||
@ -26,11 +27,12 @@ class PinnedConversation(db.Model):
|
||||
__tablename__ = 'pinned_conversations'
|
||||
__table_args__ = (
|
||||
db.PrimaryKeyConstraint('id', name='pinned_conversation_pkey'),
|
||||
db.Index('pinned_conversation_conversation_idx', 'app_id', 'conversation_id', 'created_by'),
|
||||
db.Index('pinned_conversation_conversation_idx', 'app_id', 'conversation_id', 'created_by_role', 'created_by'),
|
||||
)
|
||||
|
||||
id = db.Column(UUID, server_default=db.text('uuid_generate_v4()'))
|
||||
app_id = db.Column(UUID, nullable=False)
|
||||
conversation_id = db.Column(UUID, nullable=False)
|
||||
created_by_role = db.Column(db.String(255), nullable=False, server_default=db.text("'end_user'::character varying"))
|
||||
created_by = db.Column(UUID, nullable=False)
|
||||
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
|
||||
|
||||
@ -29,4 +29,5 @@ sentry-sdk[flask]~=1.21.1
|
||||
jieba==0.42.1
|
||||
celery==5.2.7
|
||||
redis~=4.5.4
|
||||
pypdf==3.8.1
|
||||
pypdf==3.8.1
|
||||
openpyxl==3.1.2
|
||||
@ -267,9 +267,10 @@ class TenantService:
|
||||
}
|
||||
if action not in ['add', 'remove', 'update']:
|
||||
raise InvalidActionError("Invalid action.")
|
||||
|
||||
if operator.id == member.id:
|
||||
raise CannotOperateSelfError("Cannot operate self.")
|
||||
|
||||
if member:
|
||||
if operator.id == member.id:
|
||||
raise CannotOperateSelfError("Cannot operate self.")
|
||||
|
||||
ta_operator = TenantAccountJoin.query.filter_by(
|
||||
tenant_id=tenant.id,
|
||||
@@ -365,6 +366,7 @@ class RegisterService:
        account = Account.query.filter_by(email=email).first()

        if not account:
+            TenantService.check_member_permission(tenant, inviter, None, 'add')
            name = email.split('@')[0]
            account = AccountService.create_account(email, name)
            account.status = AccountStatus.PENDING.value
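These two hunks belong together: the invite flow now calls `check_member_permission` with `member=None` before the invited account exists, so the self-operation check is wrapped in `if member:` rather than dereferencing `None`. A condensed sketch of the call order, simplified from the diff:

```python
# Inviting an email with no existing account: permission is checked
# against the inviter alone, so member is passed as None.
account = Account.query.filter_by(email=email).first()
if not account:
    TenantService.check_member_permission(tenant, inviter, None, 'add')
    account = AccountService.create_account(email, email.split('@')[0])
```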
@@ -33,6 +33,10 @@ class CompletionService:
        # is streaming mode
        inputs = args['inputs']
        query = args['query']
+
+        if not query:
+            raise ValueError('query is required')
+
        conversation_id = args['conversation_id'] if 'conversation_id' in args else None

        conversation = None
@@ -12,13 +12,15 @@ from events.dataset_event import dataset_was_deleted
from events.document_event import document_was_deleted
from extensions.ext_database import db
from models.account import Account
-from models.dataset import Dataset, Document, DatasetQuery, DatasetProcessRule, AppDatasetJoin
+from models.dataset import Dataset, Document, DatasetQuery, DatasetProcessRule, AppDatasetJoin, DocumentSegment
from models.model import UploadFile
from services.errors.account import NoPermissionError
from services.errors.dataset import DatasetNameDuplicateError
from services.errors.document import DocumentIndexingError
from services.errors.file import FileNotExistsError
+from tasks.deal_dataset_vector_index_task import deal_dataset_vector_index_task
from tasks.document_indexing_task import document_indexing_task
+from tasks.document_indexing_update_task import document_indexing_update_task


class DatasetService:

@@ -97,7 +99,12 @@ class DatasetService:
    def update_dataset(dataset_id, data, user):
        dataset = DatasetService.get_dataset(dataset_id)
        DatasetService.check_dataset_permission(dataset, user)
+
+        if dataset.indexing_technique != data['indexing_technique']:
+            # if update indexing_technique
+            if data['indexing_technique'] == 'economy':
+                deal_dataset_vector_index_task.delay(dataset_id, 'remove')
+            elif data['indexing_technique'] == 'high_quality':
+                deal_dataset_vector_index_task.delay(dataset_id, 'add')
        filtered_data = {k: v for k, v in data.items() if v is not None or k == 'description'}

        filtered_data['updated_by'] = user.id
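Changing `indexing_technique` now fans out to the new Celery task: moving to `economy` queues removal of the dataset's vectors, while moving to `high_quality` queues a rebuild. A sketch of triggering the switch (payload fields other than `indexing_technique` are illustrative):

```python
# Downgrade a dataset to keyword-only indexing; the service queues
# deal_dataset_vector_index_task.delay(dataset_id, 'remove') as a side effect.
DatasetService.update_dataset(
    dataset_id,
    {'indexing_technique': 'economy', 'description': None},
    user,
)
```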
@@ -270,6 +277,14 @@ class DocumentService:

        return document

+    @staticmethod
+    def get_document_by_id(document_id: str) -> Optional[Document]:
+        document = db.session.query(Document).filter(
+            Document.id == document_id
+        ).first()
+
+        return document
+
    @staticmethod
    def get_document_file_detail(file_id: str):
        file_detail = db.session.query(UploadFile). \
@@ -349,8 +364,79 @@ class DocumentService:
        if dataset.indexing_technique == 'high_quality':
            IndexBuilder.get_default_service_context(dataset.tenant_id)

+        if 'original_document_id' in document_data and document_data["original_document_id"]:
+            document = DocumentService.update_document_with_dataset_id(dataset, document_data, account)
+        else:
+            # save process rule
+            if not dataset_process_rule:
+                process_rule = document_data["process_rule"]
+                if process_rule["mode"] == "custom":
+                    dataset_process_rule = DatasetProcessRule(
+                        dataset_id=dataset.id,
+                        mode=process_rule["mode"],
+                        rules=json.dumps(process_rule["rules"]),
+                        created_by=account.id
+                    )
+                elif process_rule["mode"] == "automatic":
+                    dataset_process_rule = DatasetProcessRule(
+                        dataset_id=dataset.id,
+                        mode=process_rule["mode"],
+                        rules=json.dumps(DatasetProcessRule.AUTOMATIC_RULES),
+                        created_by=account.id
+                    )
+                db.session.add(dataset_process_rule)
+                db.session.commit()
+
+            file_name = ''
+            data_source_info = {}
+            if document_data["data_source"]["type"] == "upload_file":
+                file_id = document_data["data_source"]["info"]
+                file = db.session.query(UploadFile).filter(
+                    UploadFile.tenant_id == dataset.tenant_id,
+                    UploadFile.id == file_id
+                ).first()
+
+                # raise error if file not found
+                if not file:
+                    raise FileNotExistsError()
+
+                file_name = file.name
+                data_source_info = {
+                    "upload_file_id": file_id,
+                }
+
+            # save document
+            position = DocumentService.get_documents_position(dataset.id)
+            document = Document(
+                tenant_id=dataset.tenant_id,
+                dataset_id=dataset.id,
+                position=position,
+                data_source_type=document_data["data_source"]["type"],
+                data_source_info=json.dumps(data_source_info),
+                dataset_process_rule_id=dataset_process_rule.id,
+                batch=time.strftime('%Y%m%d%H%M%S') + str(random.randint(100000, 999999)),
+                name=file_name,
+                created_from=created_from,
+                created_by=account.id,
+                # created_api_request_id = db.Column(UUID, nullable=True)
+            )
+
+            db.session.add(document)
+            db.session.commit()
+
+            # trigger async task
+            document_indexing_task.delay(document.dataset_id, document.id)
+
+        return document
+
+    @staticmethod
+    def update_document_with_dataset_id(dataset: Dataset, document_data: dict,
+                                        account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None,
+                                        created_from: str = 'web'):
+        document = DocumentService.get_document(dataset.id, document_data["original_document_id"])
+        if document.display_status != 'available':
+            raise ValueError("Document is not available")
+        # save process rule
+        if not dataset_process_rule:
+            if 'process_rule' in document_data and document_data['process_rule']:
+                process_rule = document_data["process_rule"]
+                if process_rule["mode"] == "custom":
+                    dataset_process_rule = DatasetProcessRule(
@@ -368,46 +454,48 @@ class DocumentService:
                    )
                db.session.add(dataset_process_rule)
                db.session.commit()
+        document.dataset_process_rule_id = dataset_process_rule.id
+        # update document data source
+        if 'data_source' in document_data and document_data['data_source']:
+            file_name = ''
+            data_source_info = {}
+            if document_data["data_source"]["type"] == "upload_file":
+                file_id = document_data["data_source"]["info"]
+                file = db.session.query(UploadFile).filter(
+                    UploadFile.tenant_id == dataset.tenant_id,
+                    UploadFile.id == file_id
+                ).first()
-        file_name = ''
-        data_source_info = {}
-        if document_data["data_source"]["type"] == "upload_file":
-            file_id = document_data["data_source"]["info"]
-            file = db.session.query(UploadFile).filter(
-                UploadFile.tenant_id == dataset.tenant_id,
-                UploadFile.id == file_id
-            ).first()
-
-            # raise error if file not found
-            if not file:
-                raise FileNotExistsError()
-
-            file_name = file.name
-            data_source_info = {
-                "upload_file_id": file_id,
-            }
-
-        # save document
-        position = DocumentService.get_documents_position(dataset.id)
-        document = Document(
-            tenant_id=dataset.tenant_id,
-            dataset_id=dataset.id,
-            position=position,
-            data_source_type=document_data["data_source"]["type"],
-            data_source_info=json.dumps(data_source_info),
-            dataset_process_rule_id=dataset_process_rule.id,
-            batch=time.strftime('%Y%m%d%H%M%S') + str(random.randint(100000, 999999)),
-            name=file_name,
-            created_from=created_from,
-            created_by=account.id,
-            # created_api_request_id = db.Column(UUID, nullable=True)
-        )
+
+                # raise error if file not found
+                if not file:
+                    raise FileNotExistsError()
+
+                file_name = file.name
+                data_source_info = {
+                    "upload_file_id": file_id,
+                }
+            document.data_source_type = document_data["data_source"]["type"]
+            document.data_source_info = json.dumps(data_source_info)
+            document.name = file_name
+        # update document to be waiting
+        document.indexing_status = 'waiting'
+        document.completed_at = None
+        document.processing_started_at = None
+        document.parsing_completed_at = None
+        document.cleaning_completed_at = None
+        document.splitting_completed_at = None
+        document.updated_at = datetime.datetime.utcnow()
+        document.created_from = created_from
        db.session.add(document)
        db.session.commit()
+        # update document segment
+        update_params = {
+            DocumentSegment.status: 're_segment'
+        }
+        DocumentSegment.query.filter_by(document_id=document.id).update(update_params)
+        db.session.commit()
        # trigger async task
-        document_indexing_task.delay(document.dataset_id, document.id)
+        document_indexing_update_task.delay(document.dataset_id, document.id)

        return document
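Taken together, the two hunks above split ingestion into a create path and an update path: when `original_document_id` is present, the call is rerouted to `update_document_with_dataset_id`, which swaps the data source, resets the indexing timestamps, marks existing segments `re_segment`, and queues `document_indexing_update_task` instead of the create-time task. A sketch of the two payload shapes (keys as in the diff; values and variable names are illustrative):

```python
# Create: no original_document_id, so process_rule is required.
create_payload = {
    "data_source": {"type": "upload_file", "info": upload_file_id},
    "process_rule": {"mode": "automatic"},
}

# Update: original_document_id flips the same entry point onto the
# update path; data_source and process_rule become optional.
update_payload = {
    "original_document_id": existing_document_id,
    "data_source": {"type": "upload_file", "info": new_upload_file_id},
}
```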
@@ -437,6 +525,21 @@ class DocumentService:

    @classmethod
    def document_create_args_validate(cls, args: dict):
+        if 'original_document_id' not in args or not args['original_document_id']:
+            DocumentService.data_source_args_validate(args)
+            DocumentService.process_rule_args_validate(args)
+        else:
+            if ('data_source' not in args and not args['data_source'])\
+                    and ('process_rule' not in args and not args['process_rule']):
+                raise ValueError("Data source or Process rule is required")
+            else:
+                if 'data_source' in args and args['data_source']:
+                    DocumentService.data_source_args_validate(args)
+                if 'process_rule' in args and args['process_rule']:
+                    DocumentService.process_rule_args_validate(args)

    @classmethod
    def data_source_args_validate(cls, args: dict):
        if 'data_source' not in args or not args['data_source']:
            raise ValueError("Data source is required")
@@ -453,6 +556,8 @@ class DocumentService:
        if 'info' not in args['data_source'] or not args['data_source']['info']:
            raise ValueError("Data source info is required")

    @classmethod
    def process_rule_args_validate(cls, args: dict):
        if 'process_rule' not in args or not args['process_rule']:
            raise ValueError("Process rule is required")
@@ -127,7 +127,7 @@ class MessageService:
            message_id=message_id
        )

-        feedback = message.user_feedback
+        feedback = message.user_feedback if isinstance(user, EndUser) else message.admin_feedback

        if not rating and feedback:
            db.session.delete(feedback)
@@ -62,6 +62,8 @@ class ProviderService:

    @staticmethod
    def validate_provider_configs(tenant, provider_name: ProviderName, configs: Union[dict | str]):
+        if current_app.config['DISABLE_PROVIDER_CONFIG_VALIDATION']:
+            return
        llm_provider_service = LLMProviderService(tenant.id, provider_name.value)
        return llm_provider_service.config_validate(configs)
@@ -1,7 +1,8 @@
-from typing import Optional
+from typing import Optional, Union

from libs.infinite_scroll_pagination import InfiniteScrollPagination
from extensions.ext_database import db
+from models.account import Account
from models.model import App, EndUser
from models.web import SavedMessage
from services.message_service import MessageService

@@ -9,27 +10,29 @@ from services.message_service import MessageService


class SavedMessageService:
    @classmethod
-    def pagination_by_last_id(cls, app_model: App, end_user: Optional[EndUser],
+    def pagination_by_last_id(cls, app_model: App, user: Optional[Union[Account | EndUser]],
                              last_id: Optional[str], limit: int) -> InfiniteScrollPagination:
        saved_messages = db.session.query(SavedMessage).filter(
            SavedMessage.app_id == app_model.id,
-            SavedMessage.created_by == end_user.id
+            SavedMessage.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
+            SavedMessage.created_by == user.id
        ).order_by(SavedMessage.created_at.desc()).all()
        message_ids = [sm.message_id for sm in saved_messages]

        return MessageService.pagination_by_last_id(
            app_model=app_model,
-            user=end_user,
+            user=user,
            last_id=last_id,
            limit=limit,
            include_ids=message_ids
        )

    @classmethod
-    def save(cls, app_model: App, user: Optional[EndUser], message_id: str):
+    def save(cls, app_model: App, user: Optional[Union[Account | EndUser]], message_id: str):
        saved_message = db.session.query(SavedMessage).filter(
            SavedMessage.app_id == app_model.id,
            SavedMessage.message_id == message_id,
+            SavedMessage.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
            SavedMessage.created_by == user.id
        ).first()

@@ -45,6 +48,7 @@ class SavedMessageService:
        saved_message = SavedMessage(
            app_id=app_model.id,
            message_id=message.id,
+            created_by_role='account' if isinstance(user, Account) else 'end_user',
            created_by=user.id
        )

@@ -52,10 +56,11 @@ class SavedMessageService:
        db.session.commit()

    @classmethod
-    def delete(cls, app_model: App, user: Optional[EndUser], message_id: str):
+    def delete(cls, app_model: App, user: Optional[Union[Account | EndUser]], message_id: str):
        saved_message = db.session.query(SavedMessage).filter(
            SavedMessage.app_id == app_model.id,
            SavedMessage.message_id == message_id,
+            SavedMessage.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
            SavedMessage.created_by == user.id
        ).first()
@@ -2,6 +2,7 @@ from typing import Optional, Union

from libs.infinite_scroll_pagination import InfiniteScrollPagination
from extensions.ext_database import db
+from models.account import Account
from models.model import App, EndUser
from models.web import PinnedConversation
from services.conversation_service import ConversationService

@@ -9,14 +10,15 @@ from services.conversation_service import ConversationService


class WebConversationService:
    @classmethod
-    def pagination_by_last_id(cls, app_model: App, end_user: Optional[EndUser],
+    def pagination_by_last_id(cls, app_model: App, user: Optional[Union[Account | EndUser]],
                              last_id: Optional[str], limit: int, pinned: Optional[bool] = None) -> InfiniteScrollPagination:
        include_ids = None
        exclude_ids = None
        if pinned is not None:
            pinned_conversations = db.session.query(PinnedConversation).filter(
                PinnedConversation.app_id == app_model.id,
-                PinnedConversation.created_by == end_user.id
+                PinnedConversation.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
+                PinnedConversation.created_by == user.id
            ).order_by(PinnedConversation.created_at.desc()).all()
            pinned_conversation_ids = [pc.conversation_id for pc in pinned_conversations]
            if pinned:

@@ -26,7 +28,7 @@ class WebConversationService:

        return ConversationService.pagination_by_last_id(
            app_model=app_model,
-            user=end_user,
+            user=user,
            last_id=last_id,
            limit=limit,
            include_ids=include_ids,

@@ -34,10 +36,11 @@ class WebConversationService:
        )

    @classmethod
-    def pin(cls, app_model: App, conversation_id: str, user: Optional[EndUser]):
+    def pin(cls, app_model: App, conversation_id: str, user: Optional[Union[Account | EndUser]]):
        pinned_conversation = db.session.query(PinnedConversation).filter(
            PinnedConversation.app_id == app_model.id,
            PinnedConversation.conversation_id == conversation_id,
+            PinnedConversation.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
            PinnedConversation.created_by == user.id
        ).first()

@@ -53,6 +56,7 @@ class WebConversationService:
        pinned_conversation = PinnedConversation(
            app_id=app_model.id,
            conversation_id=conversation.id,
+            created_by_role='account' if isinstance(user, Account) else 'end_user',
            created_by=user.id
        )

@@ -60,10 +64,11 @@ class WebConversationService:
        db.session.commit()

    @classmethod
-    def unpin(cls, app_model: App, conversation_id: str, user: Optional[EndUser]):
+    def unpin(cls, app_model: App, conversation_id: str, user: Optional[Union[Account | EndUser]]):
        pinned_conversation = db.session.query(PinnedConversation).filter(
            PinnedConversation.app_id == app_model.id,
            PinnedConversation.conversation_id == conversation_id,
+            PinnedConversation.created_by_role == ('account' if isinstance(user, Account) else 'end_user'),
            PinnedConversation.created_by == user.id
        ).first()
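Both services above repeat the `'account' if isinstance(user, Account) else 'end_user'` expression at every query and constructor. A hedged sketch of a helper that would centralize it (this helper is not part of the diff):

```python
from typing import Union

from models.account import Account
from models.model import EndUser


def created_by_role(user: Union[Account, EndUser]) -> str:
    # Console operators write rows as 'account'; web-app visitors as 'end_user'.
    return 'account' if isinstance(user, Account) else 'end_user'
```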
@@ -35,8 +35,7 @@ def clean_document_task(document_id: str, dataset_id: str):
        index_node_ids = [segment.index_node_id for segment in segments]

        # delete from vector index
-        if dataset.indexing_technique == "high_quality":
-            vector_index.del_nodes(index_node_ids)
+        vector_index.del_nodes(index_node_ids)

        # delete from keyword index
        if index_node_ids:

@@ -44,7 +43,7 @@ def clean_document_task(document_id: str, dataset_id: str):

        for segment in segments:
            db.session.delete(segment)

        db.session.commit()
        end_at = time.perf_counter()
        logging.info(
            click.style('Cleaned document when document deleted: {} latency: {}'.format(document_id, end_at - start_at), fg='green'))
api/tasks/deal_dataset_vector_index_task.py (new file, 75 lines)
@@ -0,0 +1,75 @@
import logging
import time

import click
from celery import shared_task
from llama_index.data_structs.node_v2 import DocumentRelationship, Node

from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from models.dataset import DocumentSegment, Document, Dataset


@shared_task
def deal_dataset_vector_index_task(dataset_id: str, action: str):
    """
    Async deal dataset from index
    :param dataset_id: dataset_id
    :param action: action
    Usage: deal_dataset_vector_index_task.delay(dataset_id, action)
    """
    logging.info(click.style('Start deal dataset vector index: {}'.format(dataset_id), fg='green'))
    start_at = time.perf_counter()

    try:
        dataset = Dataset.query.filter_by(
            id=dataset_id
        ).first()

        if not dataset:
            raise Exception('Dataset not found')

        documents = Document.query.filter_by(dataset_id=dataset_id).all()
        if documents:
            vector_index = VectorIndex(dataset=dataset)
            for document in documents:
                # delete from vector index
                if action == "remove":
                    vector_index.del_doc(document.id)
                elif action == "add":
                    segments = db.session.query(DocumentSegment).filter(
                        DocumentSegment.document_id == document.id,
                        DocumentSegment.enabled == True
                    ).order_by(DocumentSegment.position.asc()).all()

                    nodes = []
                    previous_node = None
                    for segment in segments:
                        relationships = {
                            DocumentRelationship.SOURCE: document.id
                        }

                        if previous_node:
                            relationships[DocumentRelationship.PREVIOUS] = previous_node.doc_id
                            previous_node.relationships[DocumentRelationship.NEXT] = segment.index_node_id

                        node = Node(
                            doc_id=segment.index_node_id,
                            doc_hash=segment.index_node_hash,
                            text=segment.content,
                            extra_info=None,
                            node_info=None,
                            relationships=relationships
                        )

                        previous_node = node
                        nodes.append(node)

                    # save vector index
                    vector_index.add_nodes(
                        nodes=nodes,
                        duplicate_check=True
                    )

        end_at = time.perf_counter()
        logging.info(
            click.style('Deal dataset vector index: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("Deal dataset vector index failed")
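In the `add` branch above, segments are rebuilt as a doubly linked chain of llama_index nodes, each pointing to its source document and its neighbors. Conceptually, for three segments the loop yields the following relationships (plain dicts here stand in for the `DocumentRelationship` enum keys and the real `index_node_id` values):

```python
# Conceptual result of the chaining loop for segments s1, s2, s3.
relationships = {
    "s1": {"SOURCE": "doc", "NEXT": "s2"},
    "s2": {"SOURCE": "doc", "PREVIOUS": "s1", "NEXT": "s3"},
    "s3": {"SOURCE": "doc", "PREVIOUS": "s2"},
}
```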
api/tasks/document_indexing_update_task.py (new file, 85 lines)
@@ -0,0 +1,85 @@
import datetime
import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.index.keyword_table_index import KeywordTableIndex
from core.index.vector_index import VectorIndex
from core.indexing_runner import IndexingRunner, DocumentIsPausedException
from core.llm.error import ProviderTokenNotInitError
from extensions.ext_database import db
from models.dataset import Document, Dataset, DocumentSegment


@shared_task
def document_indexing_update_task(dataset_id: str, document_id: str):
    """
    Async update document
    :param dataset_id:
    :param document_id:

    Usage: document_indexing_update_task.delay(dataset_id, document_id)
    """
    logging.info(click.style('Start update document: {}'.format(document_id), fg='green'))
    start_at = time.perf_counter()

    document = db.session.query(Document).filter(
        Document.id == document_id,
        Document.dataset_id == dataset_id
    ).first()

    if not document:
        raise NotFound('Document not found')

    document.indexing_status = 'parsing'
    document.processing_started_at = datetime.datetime.utcnow()
    db.session.commit()

    # delete all document segment and index
    try:
        dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
        if not dataset:
            raise Exception('Dataset not found')

        vector_index = VectorIndex(dataset=dataset)
        keyword_table_index = KeywordTableIndex(dataset=dataset)

        segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
        index_node_ids = [segment.index_node_id for segment in segments]

        # delete from vector index
        vector_index.del_nodes(index_node_ids)

        # delete from keyword index
        if index_node_ids:
            keyword_table_index.del_nodes(index_node_ids)

        for segment in segments:
            db.session.delete(segment)
        db.session.commit()

        end_at = time.perf_counter()
        logging.info(
            click.style('Cleaned document when document update data source or process rule: {} latency: {}'.format(document_id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("Cleaned document when document update data source or process rule failed")

    try:
        indexing_runner = IndexingRunner()
        indexing_runner.run(document)
        end_at = time.perf_counter()
        logging.info(click.style('update document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
    except DocumentIsPausedException:
        logging.info(click.style('Document update paused, document id: {}'.format(document.id), fg='yellow'))
    except ProviderTokenNotInitError as e:
        document.indexing_status = 'error'
        document.error = str(e.description)
        document.stopped_at = datetime.datetime.utcnow()
        db.session.commit()
    except Exception as e:
        logging.exception("consume update document failed")
        document.indexing_status = 'error'
        document.error = str(e)
        document.stopped_at = datetime.datetime.utcnow()
        db.session.commit()
Some files were not shown because too many files have changed in this diff.