Compare commits

...

50 Commits

SHA1 Message Date
96008f1f3d feat: remove the maas platform from the whitelist 2024-06-27 17:49:43 +08:00
c106a896a5 feat: optimize whitelist for model providers 2024-06-21 10:56:20 +08:00
87a4776272 feat(model/tools): filter unregistered tools and models 2024-06-19 17:34:11 +08:00
7d5ebbb611 docs(readme): Optimize the content in the readme file (#5364)
Co-authored-by: 开坦克的贝塔 <k@aircode.io>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-06-18 18:33:22 +08:00
85eee0dfbb Update README.md (#5359) 2024-06-18 18:21:45 +08:00
369a395ee9 fix: resolve issue with cot_agent_runner not analyzing user-uploaded images correctly (#5360)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-06-18 18:15:41 +08:00
4e3d76a1d1 chore: add novita_client to pyproject.toml (#5349) 2024-06-18 14:52:20 +08:00
7450b9acf3 dep: bump chromadb from 0.5.0 to 0.5.1 (#5345) 2024-06-18 14:05:14 +08:00
c7d378555a chore: set build system to Poetry and remove unnecessary settings with package mode disabled (#5263) 2024-06-18 13:27:03 +08:00
5f0ce5811a feat: add flask upgrade-db command for running db upgrade with redis lock (#5333) 2024-06-18 13:26:01 +08:00
9b7fdadce4 fix: wrong token usage in iteration node for streaming result (#5336) 2024-06-18 13:08:40 +08:00
132f5fb3de feat: add Novita AI image generation tool, implemented model search, text-to-image and create tile functionalities (#5308)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-06-18 11:08:25 +08:00
3828d4cd22 feat: support Latex (#5001)
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-06-18 10:43:47 +08:00
c7641be093 fix: workflow results in FAIL status due to null reference error (#5332) 2024-06-18 09:33:33 +08:00
8266842809 chore: update llm.py (#5335) 2024-06-18 09:29:14 +08:00
d7213b12cc fix: extract params by function calling for models supporting tool call (#5334) 2024-06-17 23:25:29 +08:00
c163521b9e Update and fix the model param of Deepseek (#5329) 2024-06-17 21:40:04 +08:00
7305713b97 fix: allow special characters in email (#5327)
Co-authored-by: crazywoola <427733928@qq.com>
2024-06-17 21:32:59 +08:00
edffa5666d fix: got unknown type of prompt message in multi-round ReAct agent chat (#5245) 2024-06-17 21:20:17 +08:00
54756cd3b2 chore(core/workflow/utils/variable_template_parser): Refactor VariableTemplateParser class for better readability and maintainability. (#5328) 2024-06-17 21:18:56 +08:00
b73ec87afc fix(core/workflow): Handle special values in node run result outputs (#5321) 2024-06-17 20:41:57 +08:00
61f4f08744 Add bedrock command r models (#4521)
Co-authored-by: Justin Wu <justin.wu@ringcentral.com>
Co-authored-by: Chenhe Gu <guchenhe@gmail.com>
2024-06-17 20:37:46 +08:00
07387e9586 add the filename length limit (#5326) 2024-06-17 20:36:54 +08:00
147a39b984 feat: support tencent cos storage (#5297) 2024-06-17 19:18:52 +08:00
7a758a35fe fix: pin tenacity to 8.3.0 (#5319) 2024-06-17 18:03:42 +08:00
f146bebe5a fix: update Member field error (#5295) 2024-06-17 17:22:16 +08:00
be3512aa57 fix: unable to reindex documents (#5276) 2024-06-17 17:19:43 +08:00
cc4a4ec796 feat: permission and security fixes (#5266) 2024-06-17 16:06:32 +08:00
a1d8c86ee3 chore: upgrade next to 14.1.1 (#5310) 2024-06-17 15:50:41 +08:00
61ebcd8adb Fix: workflow result display (#5299) 2024-06-17 14:36:17 +08:00
24282236f0 fix: not checked require_summary of duckduckgo search raise error (#5303) 2024-06-17 14:18:49 +08:00
5a99aeb864 fix(core): Reorder field_validator and classmethod to fit Pydantic V2. (#5257) 2024-06-17 10:04:28 +08:00
e95f8fa3dc Dalle3 add seed (#5288)
Co-authored-by: luowei <glpat-EjySCyNjWiLqAED-YmwM>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
2024-06-17 09:27:27 +08:00
9a64aa76c1 fix: typo and check (#5287) 2024-06-17 09:15:43 +08:00
42029791e4 fix: add event handler to delete the site when the related app deleted (#5282) 2024-06-17 08:47:26 +08:00
4f60fe7bc6 Fixed wrong /text-to-audio curl example (#5286) 2024-06-17 08:45:51 +08:00
baf5490504 Fix: z-index of delete account modal (#5277) 2024-06-16 20:42:47 +08:00
013bffc161 fix: copyright with latest time (#5271) 2024-06-16 14:39:29 +08:00
c03e6ee41b Feat: support delete account (#5208)
Co-authored-by: crazywoola <427733928@qq.com>
2024-06-16 10:26:39 +08:00
d94279ae75 fix: casting non-string type value for tool parameter options (#5267) 2024-06-16 09:47:20 +08:00
3a423e8ce7 fix: vision model always with low quality (#5253) 2024-06-16 09:46:17 +08:00
37c87164dc fix: respect the interface language specified by the user on the activation success screen (#5258) 2024-06-16 09:37:19 +08:00
4b54843ed7 fix: run agent with Vertex AI Gemini models (#5260)
Co-authored-by: Wenming Pan <pwm@google.com>
2024-06-16 09:36:31 +08:00
ef55d0da78 chore: add icon in .idea (#5259)
Signed-off-by: Gallardot <gallardot@apache.org>
2024-06-16 09:25:11 +08:00
9961cdd7c8 fix: modal z-index cleanup (#5234) 2024-06-15 21:09:19 +08:00
2e842333b1 fix: correct typos in the icons for microsoft (#5243) 2024-06-15 21:02:47 +08:00
6ccde0452a feat: Added hindi translation i18n (#5240) 2024-06-15 21:01:03 +08:00
795714bc2f feat(Tools): Add Serply Web/Job/Scholar/News Search tool for more options (#5186)
Co-authored-by: teampen <136991215+teampen@users.noreply.github.com>
2024-06-15 20:09:33 +08:00
d9bee03ff6 fix: embedding job fails using IAM role (#5252) 2024-06-15 18:57:54 +08:00
4f0488abb5 fix: wrong order of history prompts in ReAct agent mode (#5236) 2024-06-15 10:53:30 +08:00
171 changed files with 7301 additions and 445 deletions

View File

@ -38,13 +38,14 @@ jobs:
- name: Install dependencies
run: poetry install -C api
- name: Set up Middleware
- name: Set up Middlewares
uses: hoverkraft-tech/compose-action@v2.0.0
with:
compose-file: |
docker/docker-compose.middleware.yaml
services: |
db
redis
- name: Prepare configs
run: |
@ -54,4 +55,4 @@ jobs:
- name: Run DB Migration
run: |
cd api
poetry run python -m flask db upgrade
poetry run python -m flask upgrade-db

View File

@ -39,7 +39,7 @@ jobs:
- name: Ruff check
if: steps.changed-files.outputs.any_changed == 'true'
run: poetry run -C api ruff check --preview ./api
run: poetry run -C api ruff check ./api
- name: Dotenv check
if: steps.changed-files.outputs.any_changed == 'true'

.gitignore vendored (1 change)
View File

@ -136,6 +136,7 @@ web/.vscode/settings.json
# Intellij IDEA Files
.idea/*
!.idea/vcs.xml
!.idea/icon.png
.ideaDataSources/
api/.env

BIN .idea/icon.png (new binary file, not shown; 1.7 KiB)

View File

@ -65,6 +65,13 @@ ALIYUN_OSS_REGION=your-region
GOOGLE_STORAGE_BUCKET_NAME=yout-bucket-name
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON=your-google-service-account-json-base64-string
# Tencent COS Storage configuration
TENCENT_COS_BUCKET_NAME=your-bucket-name
TENCENT_COS_SECRET_KEY=your-secret-key
TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme
# CORS configuration
WEB_API_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*
CONSOLE_CORS_ALLOW_ORIGINS=http://127.0.0.1:3000,*

View File

@ -11,24 +11,34 @@
docker-compose -f docker-compose.middleware.yaml -p dify up -d
cd ../api
```
2. Copy `.env.example` to `.env`
3. Generate a `SECRET_KEY` in the `.env` file.
```bash
```bash for Linux
sed -i "/^SECRET_KEY=/c\SECRET_KEY=$(openssl rand -base64 42)" .env
```
```bash for Mac
secret_key=$(openssl rand -base64 42)
sed -i '' "/^SECRET_KEY=/c\\
SECRET_KEY=${secret_key}" .env
```
4. Create environment.
Dify API service uses [Poetry](https://python-poetry.org/docs/) to manage dependencies. You can execute `poetry shell` to activate the environment.
> Using pip can be found [below](#usage-with-pip).
6. Install dependencies
> Using pip can be found [below](#usage-with-pip).
5. Install dependencies
=======
```bash
poetry env use 3.10
poetry install
```
In case of contributors missing to update dependencies for `pyproject.toml`, you can perform the following shell instead.
```bash
@ -37,23 +47,23 @@
poetry add $(cat requirements-dev.txt) --group dev # install dependencies of development and update pyproject.toml
```
7. Run migrate
6. Run migrate
Before the first launch, migrate the database to the latest version.
```bash
poetry run python -m flask db upgrade
```
8. Start backend
7. Start backend
```bash
poetry run python -m flask run --host 0.0.0.0 --port=5001 --debug
```
9. Start Dify [web](../web) service.
10. Setup your application by visiting `http://localhost:3000`...
11. If you need to debug local async processing, please start the worker service.
8. Start Dify [web](../web) service.
9. Setup your application by visiting `http://localhost:3000`...
10. If you need to debug local async processing, please start the worker service.
```bash
poetry run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail
@ -61,23 +71,21 @@
The started celery app handles the async tasks, e.g. dataset importing and documents indexing.
## Testing
1. Install dependencies for both the backend and the test environment
```bash
poetry install --with dev
```
```
2. Run the tests locally with mocked system environment variables in `tool.pytest_env` section in `pyproject.toml`
```bash
cd ../
poetry run -C api bash dev/pytest/pytest_all_tests.sh
```
## Usage with pip
> [!NOTE]
@ -92,7 +100,7 @@
docker-compose -f docker-compose.middleware.yaml -p dify up -d
cd ../api
```
2. Copy `.env.example` to `.env`
3. Generate a `SECRET_KEY` in the `.env` file.
@ -101,21 +109,21 @@
```
4. Create environment.
If you use Anaconda, create a new environment and activate it
```bash
conda create --name dify python=3.10
conda activate dify
```
6. Install dependencies
5. Install dependencies
```bash
pip install -r requirements.txt
```
7. Run migrate
6. Run migrate
Before the first launch, migrate the database to the latest version.
@ -123,27 +131,17 @@
flask db upgrade
```
8. Start backend:
7. Start backend:
```bash
flask run --host 0.0.0.0 --port=5001 --debug
```
9. Setup your application by visiting http://localhost:5001/console/api/setup or other apis...
10. If you need to debug local async processing, please start the worker service.
8. Setup your application by visiting <http://localhost:5001/console/api/setup> or other apis...
9. If you need to debug local async processing, please start the worker service.
```bash
celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail
```
The started celery app handles the async tasks, e.g. dataset importing and documents indexing.
## Testing
1. Install dependencies for both the backend and the test environment
```bash
pip install -r requirements.txt -r requirements-dev.txt
```
2. Run the tests locally with mocked system environment variables in `tool.pytest_env` section in `pyproject.toml`
```bash
dev/pytest/pytest_all_tests.sh
```

View File

@ -1,5 +1,6 @@
import base64
import json
import logging
import secrets
from typing import Optional
@ -12,6 +13,7 @@ from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.datasource.vdb.vector_type import VectorType
from core.rag.models.document import Document
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from libs.helper import email as email_validate
from libs.password import hash_password, password_pattern, valid_password
from libs.rsa import generate_key_pair
@ -553,6 +555,28 @@ def create_tenant(email: str, language: Optional[str] = None):
'Account: {}\nPassword: {}'.format(email, new_password), fg='green'))
@click.command('upgrade-db', help='upgrade the database')
def upgrade_db():
click.echo('Preparing database migration...')
lock = redis_client.lock(name='db_upgrade_lock', timeout=60)
if lock.acquire(blocking=False):
try:
click.echo(click.style('Start database migration.', fg='green'))
# run db migration
import flask_migrate
flask_migrate.upgrade()
click.echo(click.style('Database migration successful!', fg='green'))
except Exception as e:
logging.exception(f'Database migration failed, error: {e}')
finally:
lock.release()
else:
click.echo('Database migration skipped')
def register_commands(app):
app.cli.add_command(reset_password)
app.cli.add_command(reset_email)
@ -561,4 +585,4 @@ def register_commands(app):
app.cli.add_command(convert_to_agent_apps)
app.cli.add_command(add_qdrant_doc_id_index)
app.cli.add_command(create_tenant)
app.cli.add_command(upgrade_db)
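The new command above guards `flask_migrate.upgrade()` with a non-blocking Redis lock so that only one instance runs the migration when several containers start at once; the CI workflow invokes it as `poetry run python -m flask upgrade-db`. A minimal sketch of the same lock pattern, assuming a local Redis server and using illustrative names rather than the app's configuration:

```python
import redis

# Assumes a Redis server on localhost; names below are illustrative only.
redis_client = redis.Redis(host='localhost', port=6379)
lock = redis_client.lock(name='db_upgrade_lock', timeout=60)

if lock.acquire(blocking=False):
    try:
        print('running migration...')  # stands in for flask_migrate.upgrade()
    finally:
        lock.release()
else:
    print('another instance holds the lock; skipping migration')
```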

View File

@ -253,6 +253,13 @@ class Config:
self.GOOGLE_STORAGE_BUCKET_NAME = get_env('GOOGLE_STORAGE_BUCKET_NAME')
self.GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 = get_env('GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64')
# Tencent Cos Storage settings
self.TENCENT_COS_BUCKET_NAME = get_env('TENCENT_COS_BUCKET_NAME')
self.TENCENT_COS_REGION = get_env('TENCENT_COS_REGION')
self.TENCENT_COS_SECRET_ID = get_env('TENCENT_COS_SECRET_ID')
self.TENCENT_COS_SECRET_KEY = get_env('TENCENT_COS_SECRET_KEY')
self.TENCENT_COS_SCHEME = get_env('TENCENT_COS_SCHEME')
# ------------------------
# Vector Store Configurations.
# Currently, only support: qdrant, milvus, zilliz, weaviate, relyt, pgvector
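The settings above mirror the new `TENCENT_COS_*` variables added to `.env.example`. A hedged sketch of how they could be wired to the official `cos-python-sdk-v5` client, shown only to illustrate what each value is for; it is not the repository's storage extension, and the object key is hypothetical:

```python
import os

from qcloud_cos import CosConfig, CosS3Client

# Illustrative wiring only; the app reads these values through its Config class.
config = CosConfig(
    Region=os.getenv('TENCENT_COS_REGION'),
    SecretId=os.getenv('TENCENT_COS_SECRET_ID'),
    SecretKey=os.getenv('TENCENT_COS_SECRET_KEY'),
    Scheme=os.getenv('TENCENT_COS_SCHEME', 'https'),
)
client = CosS3Client(config)
client.put_object(
    Bucket=os.getenv('TENCENT_COS_BUCKET_NAME'),
    Key='example/hello.txt',  # hypothetical object key
    Body=b'hello',
)
```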

View File

@ -1,6 +1,6 @@
languages = ['en-US', 'zh-Hans', 'zh-Hant', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN', 'pl-PL']
languages = ['en-US', 'zh-Hans', 'zh-Hant', 'pt-BR', 'es-ES', 'fr-FR', 'de-DE', 'ja-JP', 'ko-KR', 'ru-RU', 'it-IT', 'uk-UA', 'vi-VN', 'pl-PL', 'hi-IN']
language_timezone_mapping = {
'en-US': 'America/New_York',
@ -18,6 +18,7 @@ language_timezone_mapping = {
'vi-VN': 'Asia/Ho_Chi_Minh',
'ro-RO': 'Europe/Bucharest',
'pl-PL': 'Europe/Warsaw',
'hi-IN': 'Asia/Kolkata'
}

View File

@ -129,6 +129,10 @@ class AppApi(Resource):
@marshal_with(app_detail_fields_with_site)
def put(self, app_model):
"""Update app"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, nullable=False, location='json')
parser.add_argument('description', type=str, location='json')
@ -147,6 +151,7 @@ class AppApi(Resource):
@get_app_model
def delete(self, app_model):
"""Delete app"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
@ -203,6 +208,10 @@ class AppNameApi(Resource):
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_model):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, location='json')
args = parser.parse_args()
@ -220,6 +229,10 @@ class AppIconApi(Resource):
@get_app_model
@marshal_with(app_detail_fields)
def post(self, app_model):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('icon', type=str, location='json')
parser.add_argument('icon_background', type=str, location='json')
@ -241,6 +254,7 @@ class AppSiteStatus(Resource):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('enable_site', type=bool, required=True, location='json')
args = parser.parse_args()
@ -261,6 +275,7 @@ class AppApiStatus(Resource):
# The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('enable_api', type=bool, required=True, location='json')
args = parser.parse_args()

View File

@ -3,7 +3,7 @@ import logging
from flask import abort, request
from flask_restful import Resource, marshal_with, reqparse
from werkzeug.exceptions import InternalServerError, NotFound
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
import services
from controllers.console import api
@ -36,6 +36,10 @@ class DraftWorkflowApi(Resource):
"""
Get draft workflow
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
# fetch draft workflow by app_model
workflow_service = WorkflowService()
workflow = workflow_service.get_draft_workflow(app_model=app_model)
@ -54,6 +58,10 @@ class DraftWorkflowApi(Resource):
"""
Sync draft workflow
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
content_type = request.headers.get('Content-Type')
if 'application/json' in content_type:
@ -110,6 +118,10 @@ class AdvancedChatDraftWorkflowRunApi(Resource):
"""
Run draft workflow
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, location='json')
parser.add_argument('query', type=str, required=True, location='json', default='')
@ -146,6 +158,10 @@ class AdvancedChatDraftRunIterationNodeApi(Resource):
"""
Run draft workflow iteration node
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, location='json')
args = parser.parse_args()
@ -179,6 +195,10 @@ class WorkflowDraftRunIterationNodeApi(Resource):
"""
Run draft workflow iteration node
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, location='json')
args = parser.parse_args()
@ -212,6 +232,10 @@ class DraftWorkflowRunApi(Resource):
"""
Run draft workflow
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
parser.add_argument('files', type=list, required=False, location='json')
@ -243,6 +267,10 @@ class WorkflowTaskStopApi(Resource):
"""
Stop workflow task
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
AppQueueManager.set_stop_flag(task_id, InvokeFrom.DEBUGGER, current_user.id)
return {
@ -260,6 +288,10 @@ class DraftWorkflowNodeRunApi(Resource):
"""
Run draft workflow node
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('inputs', type=dict, required=True, nullable=False, location='json')
args = parser.parse_args()
@ -286,6 +318,10 @@ class PublishedWorkflowApi(Resource):
"""
Get published workflow
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
# fetch published workflow by app_model
workflow_service = WorkflowService()
workflow = workflow_service.get_published_workflow(app_model=app_model)
@ -301,6 +337,10 @@ class PublishedWorkflowApi(Resource):
"""
Publish workflow
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
workflow_service = WorkflowService()
workflow = workflow_service.publish_workflow(app_model=app_model, account=current_user)
@ -319,6 +359,10 @@ class DefaultBlockConfigsApi(Resource):
"""
Get default block config
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
# Get default block configs
workflow_service = WorkflowService()
return workflow_service.get_default_block_configs()
@ -333,6 +377,10 @@ class DefaultBlockConfigApi(Resource):
"""
Get default block config
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
parser.add_argument('q', type=str, location='args')
args = parser.parse_args()
@ -363,6 +411,10 @@ class ConvertToWorkflowApi(Resource):
Convert expert mode of chatbot app to workflow mode
Convert Completion App to Workflow App
"""
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_editor:
raise Forbidden()
if request.data:
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=False, nullable=True, location='json')

View File

@ -16,15 +16,21 @@ class ApiKeyAuthDataSource(Resource):
@login_required
@account_initialization_required
def get(self):
# The role of the current user in the table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
data_source_api_key_bindings = ApiKeyAuthService.get_provider_auth_list(current_user.current_tenant_id)
if data_source_api_key_bindings:
return {
'settings': [data_source_api_key_binding.to_dict() for data_source_api_key_binding in
data_source_api_key_bindings]}
return {'settings': []}
'sources': [{
'id': data_source_api_key_binding.id,
'category': data_source_api_key_binding.category,
'provider': data_source_api_key_binding.provider,
'disabled': data_source_api_key_binding.disabled,
'created_at': int(data_source_api_key_binding.created_at.timestamp()),
'updated_at': int(data_source_api_key_binding.updated_at.timestamp()),
}
for data_source_api_key_binding in
data_source_api_key_bindings]
}
return {'sources': []}
class ApiKeyAuthDataSourceBinding(Resource):
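The GET handler now returns bindings under a `sources` key with explicit fields and integer timestamps instead of the previous `settings`/`to_dict()` shape. A hedged sketch of the payload a client receives after this change, with hypothetical values:

```python
example_response = {
    "sources": [
        {
            "id": "9f8c1e2a-0000-0000-0000-000000000000",  # hypothetical binding id
            "category": "website",                          # hypothetical category
            "provider": "example-provider",                  # hypothetical provider name
            "disabled": False,
            "created_at": 1718600000,  # int(created_at.timestamp())
            "updated_at": 1718600000,  # int(updated_at.timestamp())
        }
    ]
}
```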

View File

@ -32,9 +32,9 @@ class CotAgentRunner(BaseAgentRunner, ABC):
_prompt_messages_tools: list[PromptMessage] = None
def run(self, message: Message,
query: str,
inputs: dict[str, str],
) -> Union[Generator, LLMResult]:
query: str,
inputs: dict[str, str],
) -> Union[Generator, LLMResult]:
"""
Run Cot agent application
"""
@ -52,7 +52,8 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# init instruction
inputs = inputs or {}
instruction = app_config.prompt_template.simple_prompt_template
self._instruction = self._fill_in_inputs_from_external_data_tools(instruction, inputs)
self._instruction = self._fill_in_inputs_from_external_data_tools(
instruction, inputs)
iteration_step = 1
max_iteration_steps = min(app_config.agent.max_iteration, 5) + 1
@ -60,8 +61,6 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# convert tools into ModelRuntime Tool format
tool_instances, self._prompt_messages_tools = self._init_prompt_tools()
prompt_messages = self._organize_prompt_messages()
function_call_state = True
llm_usage = {
'usage': None
@ -120,9 +119,10 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# check llm result
if not chunks:
raise ValueError("failed to invoke llm")
usage_dict = {}
react_chunks = CotAgentOutputParser.handle_react_stream_output(chunks, usage_dict)
react_chunks = CotAgentOutputParser.handle_react_stream_output(
chunks, usage_dict)
scratchpad = AgentScratchpadUnit(
agent_response='',
thought='',
@ -160,15 +160,16 @@ class CotAgentRunner(BaseAgentRunner, ABC):
)
)
scratchpad.thought = scratchpad.thought.strip() or 'I am thinking about how to help you'
scratchpad.thought = scratchpad.thought.strip(
) or 'I am thinking about how to help you'
self._agent_scratchpad.append(scratchpad)
# get llm usage
if 'usage' in usage_dict:
increase_usage(llm_usage, usage_dict['usage'])
else:
usage_dict['usage'] = LLMUsage.empty_usage()
self.save_agent_thought(
agent_thought=agent_thought,
tool_name=scratchpad.action.action_name if scratchpad.action else '',
@ -182,7 +183,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
messages_ids=[],
llm_usage=usage_dict['usage']
)
if not scratchpad.is_final():
self.queue_manager.publish(QueueAgentThoughtEvent(
agent_thought_id=agent_thought.id
@ -196,7 +197,8 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# action is final answer, return final answer directly
try:
if isinstance(scratchpad.action.action_input, dict):
final_answer = json.dumps(scratchpad.action.action_input)
final_answer = json.dumps(
scratchpad.action.action_input)
elif isinstance(scratchpad.action.action_input, str):
final_answer = scratchpad.action.action_input
else:
@ -207,7 +209,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
function_call_state = True
# action is tool call, invoke tool
tool_invoke_response, tool_invoke_meta = self._handle_invoke_action(
action=scratchpad.action,
action=scratchpad.action,
tool_instances=tool_instances,
message_file_ids=message_file_ids
)
@ -217,10 +219,13 @@ class CotAgentRunner(BaseAgentRunner, ABC):
self.save_agent_thought(
agent_thought=agent_thought,
tool_name=scratchpad.action.action_name,
tool_input={scratchpad.action.action_name: scratchpad.action.action_input},
tool_input={
scratchpad.action.action_name: scratchpad.action.action_input},
thought=scratchpad.thought,
observation={scratchpad.action.action_name: tool_invoke_response},
tool_invoke_meta={scratchpad.action.action_name: tool_invoke_meta.to_dict()},
observation={
scratchpad.action.action_name: tool_invoke_response},
tool_invoke_meta={
scratchpad.action.action_name: tool_invoke_meta.to_dict()},
answer=scratchpad.agent_response,
messages_ids=message_file_ids,
llm_usage=usage_dict['usage']
@ -232,7 +237,8 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# update prompt tool message
for prompt_tool in self._prompt_messages_tools:
self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool)
self.update_prompt_message_tool(
tool_instances[prompt_tool.name], prompt_tool)
iteration_step += 1
@ -251,12 +257,12 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# save agent thought
self.save_agent_thought(
agent_thought=agent_thought,
agent_thought=agent_thought,
tool_name='',
tool_input={},
tool_invoke_meta={},
thought=final_answer,
observation={},
observation={},
answer=final_answer,
messages_ids=[]
)
@ -269,11 +275,12 @@ class CotAgentRunner(BaseAgentRunner, ABC):
message=AssistantPromptMessage(
content=final_answer
),
usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage(),
usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage(
),
system_fingerprint=''
)), PublishFrom.APPLICATION_MANAGER)
def _handle_invoke_action(self, action: AgentScratchpadUnit.Action,
def _handle_invoke_action(self, action: AgentScratchpadUnit.Action,
tool_instances: dict[str, Tool],
message_file_ids: list[str]) -> tuple[str, ToolInvokeMeta]:
"""
@ -290,7 +297,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
if not tool_instance:
answer = f"there is not a tool named {tool_call_name}"
return answer, ToolInvokeMeta.error_instance(answer)
if isinstance(tool_call_args, str):
try:
tool_call_args = json.loads(tool_call_args)
@ -311,7 +318,8 @@ class CotAgentRunner(BaseAgentRunner, ABC):
# publish files
for message_file, save_as in message_files:
if save_as:
self.variables_pool.set_file(tool_name=tool_call_name, value=message_file.id, name=save_as)
self.variables_pool.set_file(
tool_name=tool_call_name, value=message_file.id, name=save_as)
# publish message file
self.queue_manager.publish(QueueMessageFileEvent(
@ -342,7 +350,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
continue
return instruction
def _init_react_state(self, query) -> None:
"""
init agent scratchpad
@ -350,7 +358,7 @@ class CotAgentRunner(BaseAgentRunner, ABC):
self._query = query
self._agent_scratchpad = []
self._historic_prompt_messages = self._organize_historic_prompt_messages()
@abstractmethod
def _organize_prompt_messages(self) -> list[PromptMessage]:
"""
@ -379,54 +387,54 @@ class CotAgentRunner(BaseAgentRunner, ABC):
organize historic prompt messages
"""
result: list[PromptMessage] = []
scratchpad: list[AgentScratchpadUnit] = []
scratchpads: list[AgentScratchpadUnit] = []
current_scratchpad: AgentScratchpadUnit = None
self.history_prompt_messages = AgentHistoryPromptTransform(
model_config=self.model_config,
prompt_messages=current_session_messages or [],
history_messages=self.history_prompt_messages,
memory=self.memory
).get_prompt()
for message in self.history_prompt_messages:
if isinstance(message, AssistantPromptMessage):
current_scratchpad = AgentScratchpadUnit(
agent_response=message.content,
thought=message.content or 'I am thinking about how to help you',
action_str='',
action=None,
observation=None,
)
if not current_scratchpad:
current_scratchpad = AgentScratchpadUnit(
agent_response=message.content,
thought=message.content or 'I am thinking about how to help you',
action_str='',
action=None,
observation=None,
)
scratchpads.append(current_scratchpad)
if message.tool_calls:
try:
current_scratchpad.action = AgentScratchpadUnit.Action(
action_name=message.tool_calls[0].function.name,
action_input=json.loads(message.tool_calls[0].function.arguments)
action_input=json.loads(
message.tool_calls[0].function.arguments)
)
current_scratchpad.action_str = json.dumps(
current_scratchpad.action.to_dict()
)
except:
pass
scratchpad.append(current_scratchpad)
elif isinstance(message, ToolPromptMessage):
if current_scratchpad:
current_scratchpad.observation = message.content
elif isinstance(message, UserPromptMessage):
if scratchpads:
result.append(AssistantPromptMessage(
content=self._format_assistant_message(scratchpads)
))
scratchpads = []
current_scratchpad = None
result.append(message)
if scratchpad:
result.append(AssistantPromptMessage(
content=self._format_assistant_message(scratchpad)
))
scratchpad = []
if scratchpad:
if scratchpads:
result.append(AssistantPromptMessage(
content=self._format_assistant_message(scratchpad)
content=self._format_assistant_message(scratchpads)
))
return result
historic_prompts = AgentHistoryPromptTransform(
model_config=self.model_config,
prompt_messages=current_session_messages or [],
history_messages=result,
memory=self.memory
).get_prompt()
return historic_prompts

View File

@ -5,6 +5,7 @@ from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessage,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.utils.encoders import jsonable_encoder
@ -25,6 +26,21 @@ class CotChatAgentRunner(CotAgentRunner):
return SystemPromptMessage(content=system_prompt)
def _organize_user_query(self, query, prompt_messages: list[PromptMessage] = None) -> list[PromptMessage]:
"""
Organize user query
"""
if self.files:
prompt_message_contents = [TextPromptMessageContent(data=query)]
for file_obj in self.files:
prompt_message_contents.append(file_obj.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:
prompt_messages.append(UserPromptMessage(content=query))
return prompt_messages
def _organize_prompt_messages(self) -> list[PromptMessage]:
"""
Organize
@ -51,27 +67,27 @@ class CotChatAgentRunner(CotAgentRunner):
assistant_messages = [assistant_message]
# query messages
query_messages = UserPromptMessage(content=self._query)
query_messages = self._organize_user_query(self._query, [])
if assistant_messages:
# organize historic prompt messages
historic_messages = self._organize_historic_prompt_messages([
system_message,
query_messages,
*query_messages,
*assistant_messages,
UserPromptMessage(content='continue')
])
])
messages = [
system_message,
*historic_messages,
query_messages,
*query_messages,
*assistant_messages,
UserPromptMessage(content='continue')
]
else:
# organize historic prompt messages
historic_messages = self._organize_historic_prompt_messages([system_message, query_messages])
messages = [system_message, *historic_messages, query_messages]
historic_messages = self._organize_historic_prompt_messages([system_message, *query_messages])
messages = [system_message, *historic_messages, *query_messages]
# join all messages
return messages
return messages

View File

@ -17,6 +17,10 @@ class CotAgentOutputParser:
action_name = None
action_input = None
# cohere always returns a list
if isinstance(action, list) and len(action) == 1:
action = action[0]
for key, value in action.items():
if 'input' in key.lower():
action_input = value

View File

@ -77,8 +77,8 @@ class QueueIterationNextEvent(AppQueueEvent):
node_run_index: int
output: Optional[Any] = None # output for the current iteration
@classmethod
@field_validator('output', mode='before')
@classmethod
def set_output(cls, v):
"""
Set output
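This reorder (and the matching ones in the message and tool entities further down) places `@field_validator` above `@classmethod`, which is the decorator stacking Pydantic V2 expects. A minimal sketch with an illustrative model, not the repository's actual entity:

```python
from typing import Any, Optional

from pydantic import BaseModel, field_validator


class IterationEvent(BaseModel):  # hypothetical stand-in for the event models above
    output: Optional[Any] = None

    @field_validator('output', mode='before')  # validator decorator on top
    @classmethod                                # classmethod directly above the function
    def set_output(cls, v: Any) -> Any:
        return v  # passthrough for brevity; real models normalize values here


print(IterationEvent(output=123).output)  # -> 123
```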

View File

@ -367,7 +367,7 @@ class IterationNodeNextStreamResponse(StreamResponse):
class IterationNodeCompletedStreamResponse(StreamResponse):
"""
NodeStartStreamResponse entity
NodeCompletedStreamResponse entity
"""
class Data(BaseModel):
"""
@ -385,6 +385,7 @@ class IterationNodeCompletedStreamResponse(StreamResponse):
error: Optional[str] = None
elapsed_time: float
total_tokens: int
execution_metadata: Optional[dict] = None
finished_at: int
steps: int
@ -545,4 +546,4 @@ class WorkflowIterationState(BaseModel):
total_tokens: int = 0
node_data: BaseNodeData
current_iterations: dict[str, Data] = None
current_iterations: dict[str, Data] = None

View File

@ -17,6 +17,7 @@ from core.app.entities.task_entities import (
)
from core.app.task_pipeline.workflow_cycle_state_manager import WorkflowCycleStateManager
from core.workflow.entities.node_entities import NodeType
from core.workflow.workflow_engine_manager import WorkflowEngineManager
from extensions.ext_database import db
from models.workflow import (
WorkflowNodeExecution,
@ -94,6 +95,9 @@ class WorkflowIterationCycleManage(WorkflowCycleStateManager):
error=None,
elapsed_time=time.perf_counter() - current_iteration.started_at,
total_tokens=current_iteration.total_tokens,
execution_metadata={
'total_tokens': current_iteration.total_tokens,
},
finished_at=int(time.time()),
steps=current_iteration.current_index
)
@ -205,7 +209,7 @@ class WorkflowIterationCycleManage(WorkflowCycleStateManager):
db.session.close()
def _handle_iteration_completed(self, event: QueueIterationCompletedEvent) -> WorkflowNodeExecution:
def _handle_iteration_completed(self, event: QueueIterationCompletedEvent):
if event.node_id not in self._iteration_state.current_iterations:
return
@ -215,9 +219,9 @@ class WorkflowIterationCycleManage(WorkflowCycleStateManager):
).first()
workflow_node_execution.status = WorkflowNodeExecutionStatus.SUCCEEDED.value
workflow_node_execution.outputs = json.dumps(event.outputs) if event.outputs else None
workflow_node_execution.outputs = json.dumps(WorkflowEngineManager.handle_special_values(event.outputs)) if event.outputs else None
workflow_node_execution.elapsed_time = time.perf_counter() - current_iteration.started_at
original_node_execution_metadata = workflow_node_execution.execution_metadata_dict
if original_node_execution_metadata:
original_node_execution_metadata['steps_boundary'] = current_iteration.iteration_steps_boundary
@ -275,7 +279,10 @@ class WorkflowIterationCycleManage(WorkflowCycleStateManager):
error=error,
elapsed_time=time.perf_counter() - current_iteration.started_at,
total_tokens=current_iteration.total_tokens,
execution_metadata={
'total_tokens': current_iteration.total_tokens,
},
finished_at=int(time.time()),
steps=current_iteration.current_index
)
)
)

View File

@ -124,6 +124,7 @@ class AssistantPromptMessage(PromptMessage):
function: ToolCallFunction
@field_validator('id', mode='before')
@classmethod
def transform_id_to_str(cls, value) -> str:
if not isinstance(value, str):
return str(value)

View File

@ -1,5 +1,5 @@
<svg width="21" height="22" viewBox="0 0 21 22" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="Microsfot">
<g id="Microsoft">
<rect id="Rectangle 1010" y="0.5" width="10" height="10" fill="#EF4F21"/>
<rect id="Rectangle 1012" y="11.5" width="10" height="10" fill="#03A4EE"/>
<rect id="Rectangle 1011" x="11" y="0.5" width="10" height="10" fill="#7EB903"/>


View File

@ -23,7 +23,7 @@ provider_credential_schema:
- variable: aws_access_key_id
required: false
label:
en_US: Access Key (If not provided, credentials are obtained from your running environment. e.g. IAM role)
en_US: Access Key (If not provided, credentials are obtained from the running environment.)
zh_Hans: Access Key
type: secret-input
placeholder:

View File

@ -8,6 +8,8 @@
- anthropic.claude-3-haiku-v1:0
- cohere.command-light-text-v14
- cohere.command-text-v14
- cohere.command-r-plus-v1.0
- cohere.command-r-v1.0
- meta.llama3-8b-instruct-v1:0
- meta.llama3-70b-instruct-v1:0
- meta.llama2-13b-chat-v1

View File

@ -0,0 +1,45 @@
model: cohere.command-r-plus-v1:0
label:
en_US: Command R+
model_type: llm
features:
#- multi-tool-call
- agent-thought
#- stream-tool-call
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
max: 5.0
- name: p
use_template: top_p
default: 0.75
min: 0.01
max: 0.99
- name: k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
default: 0
min: 0
max: 500
- name: presence_penalty
use_template: presence_penalty
- name: frequency_penalty
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 1024
max: 4096
pricing:
input: '3'
output: '15'
unit: '0.000001'
currency: USD

View File

@ -0,0 +1,45 @@
model: cohere.command-r-v1:0
label:
en_US: Command R
model_type: llm
features:
#- multi-tool-call
- agent-thought
#- stream-tool-call
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
max: 5.0
- name: p
use_template: top_p
default: 0.75
min: 0.01
max: 0.99
- name: k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
default: 0
min: 0
max: 500
- name: presence_penalty
use_template: presence_penalty
- name: frequency_penalty
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 1024
max: 4096
pricing:
input: '0.5'
output: '1.5'
unit: '0.000001'
currency: USD

View File

@ -25,6 +25,7 @@ from botocore.exceptions import (
ServiceNotInRegionError,
UnknownServiceError,
)
from cohere import ChatMessage
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
@ -48,6 +49,7 @@ from core.model_runtime.errors.invoke import (
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.model_providers.cohere.llm.llm import CohereLargeLanguageModel
logger = logging.getLogger(__name__)
@ -75,8 +77,86 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
# invoke anthropic models via anthropic official SDK
if "anthropic" in model:
return self._generate_anthropic(model, credentials, prompt_messages, model_parameters, stop, stream, user)
# invoke Cohere models via boto3 client
if "cohere.command-r" in model:
return self._generate_cohere_chat(model, credentials, prompt_messages, model_parameters, stop, stream, user, tools)
# invoke other models via boto3 client
return self._generate(model, credentials, prompt_messages, model_parameters, stop, stream, user)
def _generate_cohere_chat(
self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict,
stop: Optional[list[str]] = None, stream: bool = True, user: Optional[str] = None,
tools: Optional[list[PromptMessageTool]] = None,) -> Union[LLMResult, Generator]:
cohere_llm = CohereLargeLanguageModel()
client_config = Config(
region_name=credentials["aws_region"]
)
runtime_client = boto3.client(
service_name='bedrock-runtime',
config=client_config,
aws_access_key_id=credentials["aws_access_key_id"],
aws_secret_access_key=credentials["aws_secret_access_key"]
)
extra_model_kwargs = {}
if stop:
extra_model_kwargs['stop_sequences'] = stop
if tools:
tools = cohere_llm._convert_tools(tools)
model_parameters['tools'] = tools
message, chat_histories, tool_results \
= cohere_llm._convert_prompt_messages_to_message_and_chat_histories(prompt_messages)
if tool_results:
model_parameters['tool_results'] = tool_results
payload = {
**model_parameters,
"message": message,
"chat_history": chat_histories,
}
# need workaround for ai21 models which doesn't support streaming
if stream:
invoke = runtime_client.invoke_model_with_response_stream
else:
invoke = runtime_client.invoke_model
def serialize(obj):
if isinstance(obj, ChatMessage):
return obj.__dict__
raise TypeError(f"Type {type(obj)} not serializable")
try:
body_jsonstr=json.dumps(payload, default=serialize)
response = invoke(
modelId=model,
contentType="application/json",
accept="*/*",
body=body_jsonstr
)
except ClientError as ex:
error_code = ex.response['Error']['Code']
full_error_msg = f"{error_code}: {ex.response['Error']['Message']}"
raise self._map_client_to_invoke_error(error_code, full_error_msg)
except (EndpointConnectionError, NoRegionError, ServiceNotInRegionError) as ex:
raise InvokeConnectionError(str(ex))
except UnknownServiceError as ex:
raise InvokeServerUnavailableError(str(ex))
except Exception as ex:
raise InvokeError(str(ex))
if stream:
return self._handle_generate_stream_response(model, credentials, response, prompt_messages)
return self._handle_generate_response(model, credentials, response, prompt_messages)
def _generate_anthropic(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict,
stop: Optional[list[str]] = None, stream: bool = True, user: Optional[str] = None) -> Union[LLMResult, Generator]:

View File

@ -49,8 +49,8 @@ class BedrockTextEmbeddingModel(TextEmbeddingModel):
bedrock_runtime = boto3.client(
service_name='bedrock-runtime',
config=client_config,
aws_access_key_id=credentials["aws_access_key_id"],
aws_secret_access_key=credentials["aws_secret_access_key"]
aws_access_key_id=credentials.get("aws_access_key_id", None),
aws_secret_access_key=credentials.get("aws_secret_access_key", None)
)
embeddings = []
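Switching to `credentials.get(..., None)` lets boto3 fall back to its default credential chain (environment variables, shared config, or an attached IAM role) when no access key is configured, which is what the "embedding job fails using IAM role" fix relies on. A hedged sketch of that behaviour, with an assumed region:

```python
import boto3

# Passing None for both keys makes botocore resolve credentials from its
# default chain (env vars, ~/.aws config, or an attached IAM role) instead.
client = boto3.client(
    service_name='bedrock-runtime',
    region_name='us-east-1',  # assumed region for the example
    aws_access_key_id=None,
    aws_secret_access_key=None,
)
```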

View File

@ -23,7 +23,7 @@ parameter_rules:
type: int
default: 4096
min: 1
max: 32000
max: 4096
help:
zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.

View File

@ -7,7 +7,7 @@ features:
- agent-thought
model_properties:
mode: chat
context_size: 16000
context_size: 32000
parameter_rules:
- name: temperature
use_template: temperature
@ -22,5 +22,5 @@ parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 32000
max: 4096
default: 1024

View File

@ -73,7 +73,7 @@ class LocalAILanguageModel(LargeLanguageModel):
def tokens(text: str):
"""
We cloud not determine which tokenizer to use, cause the model is customized.
We could not determine which tokenizer to use, cause the model is customized.
So we use gpt2 tokenizer to calculate the num tokens for convenience.
"""
return self._get_num_tokens_by_gpt2(text)

View File

@ -201,10 +201,15 @@ class ModelProviderFactory:
model_providers_path = os.path.dirname(current_path)
# get all folders path under model_providers_path that do not start with __
whitelist = [
"baichuan", "chatglm", "deepseek", "hunyuan", "minimax", "moonshot",
"tongyi",
"wenxin", "yi", "zhipuai"
]
model_provider_dir_paths = [
os.path.join(model_providers_path, model_provider_dir)
for model_provider_dir in os.listdir(model_providers_path)
if not model_provider_dir.startswith('__')
if model_provider_dir in whitelist
and os.path.isdir(os.path.join(model_providers_path, model_provider_dir))
]

View File

@ -3,9 +3,8 @@ label:
en_US: Gemini 1.0 Pro Vision
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 16384

View File

@ -4,8 +4,6 @@ label:
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 32760

View File

@ -1,11 +1,10 @@
model: gemini-1.5-flash-preview-0514
model: gemini-1.5-flash-001
label:
en_US: Gemini 1.5 Flash
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 1048576

View File

@ -1,12 +1,10 @@
model: gemini-1.5-pro-preview-0514
model: gemini-1.5-pro-001
label:
en_US: Gemini 1.5 Pro
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 1048576

View File

@ -99,7 +99,7 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
credentials.refresh(request)
token = credentials.token
# Vertex AI Anthropic Claude3 Opus model avaiable in us-east5 region, Sonnet and Haiku avaiable in us-central1 region
# Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available in us-central1 region
if 'opus' in model:
location = 'us-east5'
else:

View File

@ -174,10 +174,15 @@ class WeaviateVector(BaseVector):
schema = self._default_schema(self._collection_name)
if self._client.schema.contains(schema):
for uuid in ids:
self._client.data_object.delete(
class_name=self._collection_name,
uuid=uuid,
)
try:
self._client.data_object.delete(
class_name=self._collection_name,
uuid=uuid,
)
except weaviate.UnexpectedStatusCodeException as e:
# tolerate not found error
if e.status_code != 404:
raise e
def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:
"""Look up similar documents by embedding vector in Weaviate."""

View File

@ -116,10 +116,10 @@ class ToolParameterOption(BaseModel):
value: str = Field(..., description="The value of the option")
label: I18nObject = Field(..., description="The label of the option")
@classmethod
@field_validator('value', mode='before')
@classmethod
def transform_id_to_str(cls, value) -> str:
if isinstance(value, bool):
if not isinstance(value, str):
return str(value)
else:
return value

View File

@ -17,4 +17,7 @@ class BuiltinToolProviderSort:
sorted_providers = sort_by_position_map(cls._position, providers, name_func)
return sorted_providers
blacklist = ['duckduckgo', 'brave', 'dalle', 'github', 'google', 'jina', 'slack', 'stablediffusion', 'youtube']
filtered_providers = [provider for provider in sorted_providers if provider.name not in blacklist]
return filtered_providers

View File

@ -1,3 +1,4 @@
import random
from base64 import b64decode
from typing import Any, Union

View File

@ -54,7 +54,6 @@ parameters:
form: form
- name: require_summary
type: boolean
required: true
default: false
label:
en_US: Require Summary

Binary file not shown (new binary file, 9.1 KiB)

View File

@ -0,0 +1,30 @@
from typing import Any
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.novitaai.tools.novitaai_txt2img import NovitaAiTxt2ImgTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class NovitaAIProvider(BuiltinToolProviderController):
def _validate_credentials(self, credentials: dict[str, Any]) -> None:
try:
result = NovitaAiTxt2ImgTool().fork_tool_runtime(
runtime={
"credentials": credentials,
}
).invoke(
user_id='',
tool_parameters={
'model_name': 'cinenautXLATRUE_cinenautV10_392434.safetensors',
'prompt': 'a futuristic city with flying cars',
'negative_prompt': '',
'width': 128,
'height': 128,
'image_num': 1,
'guidance_scale': 7.5,
'seed': -1,
'steps': 1,
},
)
except Exception as e:
raise ToolProviderCredentialValidationError(str(e))

View File

@ -0,0 +1,32 @@
identity:
author: Xiao Ley
name: novitaai
label:
en_US: Novita AI
zh_Hans: Novita AI
pt_BR: Novita AI
description:
en_US: Innovative AI for Image Generation
zh_Hans: 用于图像生成的创新人工智能。
pt_BR: Innovative AI for Image Generation
icon: icon.ico
tags:
- image
- productivity
credentials_for_provider:
api_key:
type: secret-input
required: true
label:
en_US: API Key
zh_Hans: API 密钥
pt_BR: Chave API
placeholder:
en_US: Please enter your Novita AI API key
zh_Hans: 请输入你的 Novita AI API 密钥
pt_BR: Por favor, insira sua chave de API do Novita AI
help:
en_US: Get your Novita AI API key from Novita AI
zh_Hans: 从 Novita AI 获取您的 Novita AI API 密钥
pt_BR: Obtenha sua chave de API do Novita AI na Novita AI
url: https://novita.ai

View File

@ -0,0 +1,51 @@
from base64 import b64decode
from copy import deepcopy
from typing import Any, Union
from novita_client import (
NovitaClient,
)
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.tool.builtin_tool import BuiltinTool
class NovitaAiCreateTileTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
invoke tools
"""
if 'api_key' not in self.runtime.credentials or not self.runtime.credentials.get('api_key'):
raise ToolProviderCredentialValidationError("Novita AI API Key is required.")
api_key = self.runtime.credentials.get('api_key')
client = NovitaClient(api_key=api_key)
param = self._process_parameters(tool_parameters)
client_result = client.create_tile(**param)
results = []
results.append(
self.create_blob_message(blob=b64decode(client_result.image_file),
meta={'mime_type': f'image/{client_result.image_type}'},
save_as=self.VARIABLE_KEY.IMAGE.value)
)
return results
def _process_parameters(self, parameters: dict[str, Any]) -> dict[str, Any]:
"""
process parameters
"""
res_parameters = deepcopy(parameters)
# delete none and empty
keys_to_delete = [k for k, v in res_parameters.items() if v is None or v == '']
for k in keys_to_delete:
del res_parameters[k]
return res_parameters

View File

@ -0,0 +1,80 @@
identity:
name: novitaai_createtile
author: Xiao Ley
label:
en_US: Novita AI Create Tile
zh_Hans: Novita AI 创建平铺图案
description:
human:
en_US: This feature produces images designed for seamless tiling, ideal for creating continuous patterns in fabrics, wallpapers, and various textures.
zh_Hans: 该功能生成设计用于无缝平铺的图像,非常适合用于制作连续图案的织物、壁纸和各种纹理。
llm: A tool for create images designed for seamless tiling, ideal for creating continuous patterns in fabrics, wallpapers, and various textures.
parameters:
- name: prompt
type: string
required: true
label:
en_US: prompt
zh_Hans: 提示
human_description:
en_US: Positive prompt word of the created tile, divided by `,`, Range [1, 512]. Only English input is allowed.
zh_Hans: 生成平铺图案的正向提示,用 `,` 分隔,范围 [1, 512]。仅允许输入英文。
llm_description: Image prompt of Novita AI, you should describe the image you want to generate as a list of words as possible as detailed, divided by `,`, Range [1, 512]. Only English input is allowed.
form: llm
- name: negative_prompt
type: string
required: false
label:
en_US: negative prompt
zh_Hans: 负向提示
human_description:
en_US: Negtive prompt word of the created tile, divided by `,`, Range [1, 512]. Only English input is allowed.
zh_Hans: 生成平铺图案的负向提示,用 `,` 分隔,范围 [1, 512]。仅允许输入英文。
llm_description: Image negative prompt of Novita AI, divided by `,`, Range [1, 512]. Only English input is allowed.
form: llm
- name: width
type: number
default: 256
min: 128
max: 1024
required: true
label:
en_US: width
zh_Hans:
human_description:
en_US: Image width, Range [128, 1024].
zh_Hans: 图像宽度,范围 [128, 1024]
form: form
- name: height
type: number
default: 256
min: 128
max: 1024
required: true
label:
en_US: height
zh_Hans:
human_description:
en_US: Image height, Range [128, 1024].
zh_Hans: 图像高度,范围 [128, 1024]
form: form
- name: response_image_type
type: select
default: jpeg
required: false
label:
en_US: response image type
zh_Hans: 响应图像类型
human_description:
en_US: Response image type, png or jpeg
zh_Hans: 响应图像类型png 或 jpeg
form: form
options:
- value: jpeg
label:
en_US: jpeg
zh_Hans: jpeg
- value: png
label:
en_US: png
zh_Hans: png

View File

@ -0,0 +1,137 @@
import json
from copy import deepcopy
from typing import Any, Union
from pandas import DataFrame
from yarl import URL
from core.helper import ssrf_proxy
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.tool.builtin_tool import BuiltinTool
class NovitaAiModelQueryTool(BuiltinTool):
_model_query_endpoint = 'https://api.novita.ai/v3/model'
def _invoke(self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
invoke tools
"""
if 'api_key' not in self.runtime.credentials or not self.runtime.credentials.get('api_key'):
raise ToolProviderCredentialValidationError("Novita AI API Key is required.")
api_key = self.runtime.credentials.get('api_key')
headers = {
'Content-Type': 'application/json',
'Authorization': "Bearer " + api_key
}
params = self._process_parameters(tool_parameters)
result_type = params.get('result_type')
del params['result_type']
models_data = self._query_models(
models_data=[],
headers=headers,
params=params,
recursive=False if result_type == 'first sd_name' or result_type == 'first name sd_name pair' else True
)
result_str = ''
if result_type == 'first sd_name':
result_str = models_data[0]['sd_name_in_api']
elif result_type == 'first name sd_name pair':
result_str = json.dumps({'name': models_data[0]['name'], 'sd_name': models_data[0]['sd_name_in_api']})
elif result_type == 'sd_name array':
sd_name_array = [model['sd_name_in_api'] for model in models_data]
result_str = json.dumps(sd_name_array)
elif result_type == 'name array':
name_array = [model['name'] for model in models_data]
result_str = json.dumps(name_array)
elif result_type == 'name sd_name pair array':
name_sd_name_pair_array = [{'name': model['name'], 'sd_name': model['sd_name_in_api']} for model in models_data]
result_str = json.dumps(name_sd_name_pair_array)
elif result_type == 'whole info array':
result_str = json.dumps(models_data)
else:
raise NotImplementedError
return self.create_text_message(result_str)
def _query_models(self, models_data: list, headers: dict[str, Any],
params: dict[str, Any], pagination_cursor: str = '', recursive: bool = True) -> list:
"""
query models
"""
inside_params = deepcopy(params)
if pagination_cursor != '':
inside_params['pagination.cursor'] = pagination_cursor
response = ssrf_proxy.get(
url=str(URL(self._model_query_endpoint)),
headers=headers,
params=params,
timeout=(10, 60)
)
res_data = response.json()
models_data.extend(res_data['models'])
res_data_len = len(res_data['models'])
if res_data_len == 0 or res_data_len < int(params['pagination.limit']) or recursive is False:
# deduplicate
df = DataFrame.from_dict(models_data)
df_unique = df.drop_duplicates(subset=['id'])
models_data = df_unique.to_dict('records')
return models_data
return self._query_models(
models_data=models_data,
headers=headers,
params=inside_params,
pagination_cursor=res_data['pagination']['next_cursor']
)
def _process_parameters(self, parameters: dict[str, Any]) -> dict[str, Any]:
"""
process parameters
"""
process_parameters = deepcopy(parameters)
res_parameters = {}
# delete none or empty
keys_to_delete = [k for k, v in process_parameters.items() if v is None or v == '']
for k in keys_to_delete:
del process_parameters[k]
if 'query' in process_parameters and process_parameters.get('query') != 'unspecified':
res_parameters['filter.query'] = process_parameters['query']
if 'visibility' in process_parameters and process_parameters.get('visibility') != 'unspecified':
res_parameters['filter.visibility'] = process_parameters['visibility']
if 'source' in process_parameters and process_parameters.get('source') != 'unspecified':
res_parameters['filter.source'] = process_parameters['source']
if 'type' in process_parameters and process_parameters.get('type') != 'unspecified':
res_parameters['filter.types'] = process_parameters['type']
if 'is_sdxl' in process_parameters:
if process_parameters['is_sdxl'] == 'true':
res_parameters['filter.is_sdxl'] = True
elif process_parameters['is_sdxl'] == 'false':
res_parameters['filter.is_sdxl'] = False
res_parameters['result_type'] = process_parameters.get('result_type', 'first sd_name')
res_parameters['pagination.limit'] = 1 \
if res_parameters.get('result_type') == 'first sd_name' \
or res_parameters.get('result_type') == 'first name sd_name pair'\
else 100
return res_parameters

View File

@ -0,0 +1,174 @@
identity:
name: novitaai_modelquery
author: Xiao Ley
label:
en_US: Novita AI Model Query
zh_Hans: Novita AI 模型查询
description:
human:
en_US: Retrieve information on both public and private models. It allows users to access details such as model specifications, status, and usage guidelines, ensuring comprehensive insight into the available modeling resources.
zh_Hans: 检索公开和私有模型信息。它允许用户访问模型规范、状态和使用指南等详细信息,确保了解可用的建模资源。
  llm: A tool for retrieving information on both public and private Novita AI models.
parameters:
- name: query
type: string
required: false
label:
en_US: query
zh_Hans: 查询
human_description:
      en_US: Searching the content of sd_name, name, and tags.
zh_Hans: 搜索 sd_name、name、tags 中的内容
form: form
- name: result_type
type: select
default: "first sd_name"
required: true
label:
en_US: result format
zh_Hans: 结果格式
human_description:
en_US: The format of result
zh_Hans: 请求结果的格式
form: form
options:
- value: "first sd_name"
label:
en_US: "first sd_name"
zh_Hans: "第一个 sd_name"
- value: "first name sd_name pair"
label:
en_US: "first name and sd_name pair: {name, sd_name}"
zh_Hans: "第一个 name sd_name 组合:{name, sd_name}"
- value: "sd_name array"
label:
en_US: "sd_name array: [sd_name]"
zh_Hans: "sd_name 数组:[sd_name]"
- value: "name array"
label:
en_US: "name array: [name]"
zh_Hans: "name 数组:[name]"
- value: "name sd_name pair array"
label:
en_US: "name and sd_name pair array: [{name, sd_name}]"
zh_Hans: "name sd_name 组合数组:[{name, sd_name}]"
- value: "whole info array"
label:
en_US: whole info array
zh_Hans: 完整信息数组
- name: visibility
type: select
default: unspecified
required: false
label:
en_US: visibility
zh_Hans: 可见性
human_description:
en_US: Whether the model is public or private
zh_Hans: 模型是否公开或私有
form: form
options:
- value: unspecified
label:
en_US: Unspecified
zh_Hans: 未指定
- value: public
label:
en_US: Public
zh_Hans: 公开
- value: private
label:
en_US: Private
zh_Hans: 私有
- name: source
type: select
default: unspecified
required: false
label:
en_US: source
zh_Hans: 来源
human_description:
en_US: Source of the model
zh_Hans: 模型来源
form: form
options:
- value: unspecified
label:
en_US: Unspecified
zh_Hans: 未指定
- value: civitai
label:
en_US: Civitai
zh_Hans: Civitai
- value: training
label:
en_US: Training
zh_Hans: 训练
- value: uploading
label:
en_US: Uploading
zh_Hans: 上传
- name: type
type: select
default: unspecified
required: false
label:
en_US: type
zh_Hans: 类型
human_description:
en_US: Specifies the type of models to include in the query.
zh_Hans: 指定要查询的模型类型
form: form
options:
- value: unspecified
label:
en_US: Unspecified
zh_Hans: 未指定
- value: checkpoint
label:
en_US: Checkpoint
zh_Hans: Checkpoint
- value: lora
label:
en_US: LoRA
zh_Hans: LoRA
- value: vae
label:
en_US: VAE
zh_Hans: VAE
- value: controlnet
label:
en_US: ControlNet
zh_Hans: ControlNet
- value: upscaler
label:
en_US: Upscaler
zh_Hans: Upscaler
- value: textualinversion
label:
en_US: Textual inversion
zh_Hans: Textual Inversion
- name: is_sdxl
type: select
default: unspecified
required: false
label:
en_US: is sdxl
zh_Hans: 是否是 SDXL
human_description:
en_US: Whether sdxl model or not. Setting this parameter to `true` includes only sdxl models in the query results, which are typically large-scale, high-performance models designed for extensive data processing tasks. Conversely, setting it to `false` excludes these models from the results. If left unspecified, the filter will not discriminate based on the sdxl classification, including all model types in the search results.
zh_Hans: 是否是 SDXL 模型。设置此参数为 `是`,只查询 SDXL 模型,并包含大规模,高性能的模型。相反,设置为 `否`,将排除这些模型。如果未指定,将不会根据 SDXL 分类进行区分,包括查询结果中的所有模型类型。
form: form
options:
- value: unspecified
label:
en_US: Unspecified
zh_Hans: 未指定
- value: "true"
label:
en_US: "True"
          zh_Hans: "是"
- value: "false"
label:
en_US: "False"
          zh_Hans: "否"

View File

@ -0,0 +1,137 @@
from base64 import b64decode
from copy import deepcopy
from typing import Any, Union
from novita_client import (
NovitaClient,
Txt2ImgV3Embedding,
Txt2ImgV3HiresFix,
Txt2ImgV3LoRA,
Txt2ImgV3Refiner,
V3TaskImage,
)
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.tool.builtin_tool import BuiltinTool
class NovitaAiTxt2ImgTool(BuiltinTool):
def _invoke(self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
invoke tools
"""
if 'api_key' not in self.runtime.credentials or not self.runtime.credentials.get('api_key'):
raise ToolProviderCredentialValidationError("Novita AI API Key is required.")
api_key = self.runtime.credentials.get('api_key')
client = NovitaClient(api_key=api_key)
param = self._process_parameters(tool_parameters)
client_result = client.txt2img_v3(**param)
results = []
for image_encoded, image in zip(client_result.images_encoded, client_result.images):
if self._is_hit_nsfw_detection(image, 0.8):
results = self.create_text_message(text='NSFW detected!')
break
results.append(
self.create_blob_message(blob=b64decode(image_encoded),
meta={'mime_type': f'image/{image.image_type}'},
save_as=self.VARIABLE_KEY.IMAGE.value)
)
return results
def _process_parameters(self, parameters: dict[str, Any]) -> dict[str, Any]:
"""
process parameters
"""
res_parameters = deepcopy(parameters)
# delete none and empty
keys_to_delete = [k for k, v in res_parameters.items() if v is None or v == '']
for k in keys_to_delete:
del res_parameters[k]
if 'clip_skip' in res_parameters and res_parameters.get('clip_skip') == 0:
del res_parameters['clip_skip']
if 'refiner_switch_at' in res_parameters and res_parameters.get('refiner_switch_at') == 0:
del res_parameters['refiner_switch_at']
if 'enabled_enterprise_plan' in res_parameters:
res_parameters['enterprise_plan'] = {'enabled': res_parameters['enabled_enterprise_plan']}
del res_parameters['enabled_enterprise_plan']
if 'nsfw_detection_level' in res_parameters:
res_parameters['nsfw_detection_level'] = int(res_parameters['nsfw_detection_level'])
# process loras
if 'loras' in res_parameters:
loras_ori_list = res_parameters.get('loras').strip().split(';')
locals_list = []
for lora_str in loras_ori_list:
lora_info = lora_str.strip().split(',')
lora = Txt2ImgV3LoRA(
model_name=lora_info[0].strip(),
strength=float(lora_info[1]),
)
locals_list.append(lora)
res_parameters['loras'] = locals_list
# process embeddings
if 'embeddings' in res_parameters:
embeddings_ori_list = res_parameters.get('embeddings').strip().split(';')
locals_list = []
for embedding_str in embeddings_ori_list:
embedding = Txt2ImgV3Embedding(
model_name=embedding_str.strip()
)
locals_list.append(embedding)
res_parameters['embeddings'] = locals_list
# process hires_fix
if 'hires_fix' in res_parameters:
hires_fix_ori = res_parameters.get('hires_fix')
hires_fix_info = hires_fix_ori.strip().split(',')
            if len(hires_fix_info) >= 4:  # the optional 4th field is the upscaler name
hires_fix = Txt2ImgV3HiresFix(
target_width=int(hires_fix_info[0]),
target_height=int(hires_fix_info[1]),
strength=float(hires_fix_info[2]),
upscaler=hires_fix_info[3].strip()
)
else:
hires_fix = Txt2ImgV3HiresFix(
target_width=int(hires_fix_info[0]),
target_height=int(hires_fix_info[1]),
strength=float(hires_fix_info[2])
)
res_parameters['hires_fix'] = hires_fix
if 'refiner_switch_at' in res_parameters:
refiner = Txt2ImgV3Refiner(
switch_at=float(res_parameters.get('refiner_switch_at'))
)
del res_parameters['refiner_switch_at']
res_parameters['refiner'] = refiner
return res_parameters
def _is_hit_nsfw_detection(self, image: V3TaskImage, confidence_threshold: float) -> bool:
"""
is hit nsfw
"""
if image.nsfw_detection_result is None:
return False
if image.nsfw_detection_result.valid and image.nsfw_detection_result.confidence >= confidence_threshold:
return True
return False
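As a quick illustration of the string templates that _process_parameters above splits apart (example values only, borrowed from the parameter descriptions further below, not part of this diff):

# loras: '<sd_name>,<strength>;<sd_name>,<strength>;...'
loras_input = 'Film Grain style_331903,0.5;DoggystylePOV_9600,0.5'
lora_pairs = [(part.split(',')[0].strip(), float(part.split(',')[1]))
              for part in loras_input.split(';')]
# -> [('Film Grain style_331903', 0.5), ('DoggystylePOV_9600', 0.5)]

# embeddings: '<sd_name>;<sd_name>;...'
embeddings_input = 'EasyNegativeV2_75525;AS-YoungerV2'
embedding_names = [name.strip() for name in embeddings_input.split(';')]

# hires_fix: '<target_width>,<target_height>,<strength>[,<upscaler>]'
hires_fix_input = '1024,1024,0.8,RealESRGAN_x4plus_anime_6B'
target_width, target_height, strength, *upscaler = [p.strip() for p in hires_fix_input.split(',')]
# -> '1024', '1024', '0.8', ['RealESRGAN_x4plus_anime_6B']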

View File

@ -0,0 +1,341 @@
identity:
name: novitaai_txt2img
author: Xiao Ley
label:
en_US: Novita AI Text to Image
zh_Hans: Novita AI 文字转图像
description:
human:
en_US: Generate images from text prompts using Stable Diffusion models
zh_Hans: 通过 Stable Diffusion 模型根据文字提示生成图像
  llm: A tool for generating images from English text prompts.
parameters:
- name: model_name
type: string
required: true
label:
en_US: model name
      zh_Hans: 模型名称
human_description:
en_US: Specify the name of the model checkpoint. You can use the "Novita AI Model Query" tool to query the corresponding "sd_name" value (type select "Checkpoint").
      zh_Hans: 指定 Model Checkpoint 名称。可通过“Novita AI 模型请求”工具查询对应的“sd_name”值(类型选择“Checkpoint”)。
form: form
- name: prompt
type: string
required: true
label:
en_US: prompt
zh_Hans: 提示
human_description:
en_US: Text input required to guide the image generation, divided by `,`, Range [1, 1024]. Only English input is allowed.
zh_Hans: 生成图像的正向提示,用 `,` 分隔,范围 [1, 1024]。仅允许输入英文。
    llm_description: Image prompt for Novita AI. Describe the image you want to generate as a list of words, as detailed as possible, divided by `,`, Range [1, 1024]. Only English input is allowed.
form: llm
- name: negative_prompt
type: string
required: false
label:
en_US: negative prompt
zh_Hans: 负向提示
human_description:
en_US: Text input that will not guide the image generation, divided by `,`, Range [1, 1024]. Only English input is allowed.
zh_Hans: 生成图像的负向提示,用 `,` 分隔,范围 [1, 1024]。仅允许输入英文。
llm_description: Image negative prompt of Novita AI, divided by `,`, Range [1, 1024]. Only English input is allowed.
form: llm
- name: width
type: number
default: 512
min: 128
max: 2048
required: true
label:
en_US: width
      zh_Hans: 宽度
human_description:
en_US: Image width, Range [128, 2048].
zh_Hans: 图像宽度,范围 [128, 2048]
form: form
- name: height
type: number
default: 512
min: 128
max: 2048
required: true
label:
en_US: height
      zh_Hans: 高度
human_description:
en_US: Image height, Range [128, 2048].
zh_Hans: 图像高度,范围 [128, 2048]
form: form
- name: image_num
type: number
default: 1
min: 1
max: 8
required: true
label:
en_US: image num
zh_Hans: 图片数
human_description:
en_US: Image num, Range [1, 8].
zh_Hans: 图片数,范围 [1, 8]
form: form
- name: steps
type: number
default: 20
min: 1
max: 100
required: true
label:
en_US: steps
zh_Hans: 步数
human_description:
en_US: The number of denoising steps. More steps usually can produce higher quality images, but take more time to generate, Range [1, 100].
zh_Hans: 生成步数。更多步数可能会产生更好的图像,但生成时间更长,范围 [1, 100]
form: form
- name: seed
type: number
default: -1
required: true
label:
en_US: seed
zh_Hans: 种子
human_description:
      en_US: A seed is a number from which Stable Diffusion generates noise, which makes generation deterministic. Using the same seed and set of parameters will produce an identical image each time. Minimum -1.
      zh_Hans: 种子是 Stable Diffusion 生成噪声所用的数字,它使生成具有确定性。使用相同的种子和参数设置,每次都会生成相同的图像,最小值 -1。
form: form
- name: clip_skip
type: number
min: 1
max: 12
required: false
label:
en_US: clip skip
zh_Hans: 层跳过数
human_description:
      en_US: This parameter indicates the number of layers to stop from the bottom during optimization, so clip_skip of 2 means that in an SD1.x model, where CLIP has 12 layers, you would stop at the 10th layer. Range [1, 12]; get reference at https://novita.ai/get-started/Misc.html#what-s-clip-skip.
      zh_Hans: 此参数表示优化过程中从底部停止的层数,因此 clip_skip 的值为 2,表示在 SD1.x 模型中(CLIP 有 12 层),你将停止在第 10 层,范围 [1, 12],参考 https://novita.ai/get-started/Misc.html#what-s-clip-skip。
form: form
- name: guidance_scale
type: number
default: "7.5"
min: 1.0
max: 30.0
required: true
label:
en_US: guidance scale
zh_Hans: 提示词遵守程度
human_description:
      en_US: This setting controls how closely Stable Diffusion follows your prompt; higher guidance forces the model to follow the prompt more strictly, but can result in lower-quality output. Range [1.0, 30.0].
zh_Hans: 此设置表明 Stable Diffusion 如何听从您的提示,较高的 guidance_scale 会强制模型更好跟随提示,但结果会更低质量输出。范围 [1.0, 30.0]。
form: form
- name: sampler_name
type: select
required: true
label:
en_US: sampler name
zh_Hans: 采样器名称
human_description:
      en_US: This parameter determines the denoising algorithm employed during the sampling phase of Stable Diffusion. Get reference at https://novita.ai/get-started/Misc.html#what-is-samplers.
      zh_Hans: 此参数决定了在 Stable Diffusion 采样阶段使用的去噪算法。参考 https://novita.ai/get-started/Misc.html#what-is-samplers。
form: form
options:
- value: "Euler a"
label:
en_US: Euler a
zh_Hans: Euler a
- value: "Euler"
label:
en_US: Euler
zh_Hans: Euler
- value: "LMS"
label:
en_US: LMS
zh_Hans: LMS
- value: "Heun"
label:
en_US: Heun
zh_Hans: Heun
- value: "DPM2"
label:
en_US: DPM2
zh_Hans: DPM2
- value: "DPM2 a"
label:
en_US: DPM2 a
zh_Hans: DPM2 a
- value: "DPM++ 2S a"
label:
en_US: DPM++ 2S a
zh_Hans: DPM++ 2S a
- value: "DPM++ 2M"
label:
en_US: DPM++ 2M
zh_Hans: DPM++ 2M
- value: "DPM++ SDE"
label:
en_US: DPM++ SDE
zh_Hans: DPM++ SDE
- value: "DPM fast"
label:
en_US: DPM fast
zh_Hans: DPM fast
- value: "DPM adaptive"
label:
en_US: DPM adaptive
zh_Hans: DPM adaptive
- value: "LMS Karras"
label:
en_US: LMS Karras
zh_Hans: LMS Karras
- value: "DPM2 Karras"
label:
en_US: DPM2 Karras
zh_Hans: DPM2 Karras
- value: "DPM2 a Karras"
label:
en_US: DPM2 a Karras
zh_Hans: DPM2 a Karras
- value: "DPM++ 2S a Karras"
label:
en_US: DPM++ 2S a Karras
zh_Hans: DPM++ 2S a Karras
- value: "DPM++ 2M Karras"
label:
en_US: DPM++ 2M Karras
zh_Hans: DPM++ 2M Karras
- value: "DPM++ SDE Karras"
label:
en_US: DPM++ SDE Karras
zh_Hans: DPM++ SDE Karras
- value: "DDIM"
label:
en_US: DDIM
zh_Hans: DDIM
- value: "PLMS"
label:
en_US: PLMS
zh_Hans: PLMS
- value: "UniPC"
label:
en_US: UniPC
zh_Hans: UniPC
- name: sd_vae
type: string
required: false
label:
en_US: sd vae
zh_Hans: sd vae
human_description:
en_US: VAE(Variational Autoencoder), get reference at https://novita.ai/get-started/Misc.html#what-s-variational-autoencoders-vae. You can use the "Novita AI Model Query" tool to query the corresponding "sd_name" value (type select "VAE").
      zh_Hans: VAE(变分自编码器),参考 https://novita.ai/get-started/Misc.html#what-s-variational-autoencoders-vae。可通过“Novita AI 模型请求”工具查询对应的“sd_name”值(类型选择“VAE”)。
form: form
- name: loras
type: string
required: false
label:
en_US: loRAs
zh_Hans: loRAs
human_description:
      en_US: LoRA models. Currently supports up to 5 LoRAs. You can use the "Novita AI Model Query" tool to query the corresponding "sd_name" value (type select "LoRA"). The input template is "<sd_name>,<strength [0-1.0]>;<sd_name>,<strength [0-1.0]>;...", e.g. "Film Grain style_331903,0.5;DoggystylePOV_9600,0.5".
      zh_Hans: LoRA 模型。目前最多支持 5 个 LoRA。可通过“Novita AI 模型请求”工具查询对应的“sd_name”值(类型选择“LoRA”)。输入模板“<sd_name>,<strength [0-1.0]>;<sd_name>,<strength [0-1.0]>;...”,例如“Film Grain style_331903,0.5;DoggystylePOV_9600,0.5”。
form: form
- name: embeddings
type: string
required: false
label:
en_US: text embeddings
zh_Hans: 文本嵌入
human_description:
      en_US: Textual Inversion is a training method for personalizing models by learning new text embeddings from a few example images. Currently supports up to 5 embeddings. You can use the "Novita AI Model Query" tool to query the corresponding "sd_name" value (type select "Textual Inversion"). The input template is "<sd_name>;<sd_name>;...", e.g. "EasyNegativeV2_75525;AS-YoungerV2".
      zh_Hans: 文本反转(Textual Inversion)是一种通过从少量示例图像中学习新的文本嵌入来个性化模型的训练方法,目前最多支持 5 个嵌入。可通过“Novita AI 模型请求”工具查询对应的“sd_name”值(类型选择“Textual Inversion”)。输入模板“<sd_name>;<sd_name>;...”,例如“EasyNegativeV2_75525;AS-YoungerV2”。
form: form
- name: hires_fix
type: string
required: false
label:
en_US: hires fix
zh_Hans: 高分辨率修复
human_description:
en_US: Use high resolution image fix. Input template is "<target_width [128, 4096]>,<target_height [128, 4096]>,<strength [0, 1.0]>,<upscaler (optional, selec type, `RealESRGAN_x4plus_anime_6B`, `RealESRNet_x4plus` or `Latent`)>". Such as "1024,1024,0.8", "1024,1024,0.8,RealESRGAN_x4plus_anime_6B"
zh_Hans: 使用高分辨率修复。输入模板 “<target_width [128, 4096]>,<target_height [128, 4096]>,<strength [0, 1.0]>,<upscaler (可选, 选项类型, `RealESRGAN_x4plus_anime_6B`, `RealESRNet_x4plus` 或 `Latent`)>”。例如 “1024,1024,0.8”、“1024,1024,0.8,RealESRGAN_x4plus_anime_6B”
form: form
- name: refiner_switch_at
type: number
min: 0.0
max: 1.0
required: false
label:
en_US: refiner switch at
zh_Hans: 重采样参与时刻
human_description:
      en_US: This parameter in the context of a refiner allows you to set the extent to which the refiner alters the output of a model. When set to 0, the refiner has no effect; at 1, it's fully active. Intermediate values like 0.5 provide a balanced effect, where the refiner is moderately engaged, enhancing or adjusting the output without dominating the original model's characteristics. This setting is particularly useful for fine-tuning the output to achieve a desired balance between refinement and the original generative features. Range [0, 1.0]. Note that not all models support refiners!
      zh_Hans: 此参数允许您设置重采样更改模型输出的程度。当设置为 0 时,重采样不起作用;设置为 1 时,它处于完全活动状态。像 0.5 这样的中间值提供了一种平衡效果,其中重采样适度参与,增强或调整输出,而不会主导原始模型的特性。此设置对于微调输出特别有用,范围 [0, 1.0]。并非所有模型都支持重采样!
form: form
- name: response_image_type
type: select
default: jpeg
required: false
label:
en_US: response image type
zh_Hans: 响应图像类型
human_description:
en_US: Response image type, png or jpeg
zh_Hans: 响应图像类型png 或 jpeg
form: form
options:
- value: jpeg
label:
en_US: jpeg
zh_Hans: jpeg
- value: png
label:
en_US: png
zh_Hans: png
- name: enabled_enterprise_plan
type: boolean
default: false
required: false
label:
en_US: enterprise plan enabled
zh_Hans: 企业版计划启用
human_description:
en_US: Enable enterprise plan
zh_Hans: 启用企业版计划
form: form
- name: enable_nsfw_detection
type: boolean
default: false
required: false
label:
en_US: enable nsfw detection
zh_Hans: 启用 NSFW 检测
human_description:
en_US: Enable nsfw detection
zh_Hans: 启用 NSFW 检测
form: form
- name: nsfw_detection_level
type: select
default: "2"
required: false
label:
en_US: nsfw detection level
zh_Hans: NSFW 检测级别
human_description:
en_US: Nsfw detection level, from low to high
zh_Hans: NSFW 检测级别,越高越严格
form: form
options:
- value: "0"
label:
en_US: low
          zh_Hans: 低
- value: "1"
label:
en_US: middle
          zh_Hans: 中
- value: "2"
label:
en_US: high
          zh_Hans: 高
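Putting the schema above together with the tool implementation earlier, a minimal tool_parameters payload covering just the required fields might look like the sketch below; every value is illustrative, and the model_name in particular is an assumed sd_name you would normally look up with the Novita AI Model Query tool.

tool_parameters = {
    'model_name': 'sd_xl_base_1.0.safetensors',   # assumed sd_name, not a guaranteed identifier
    'prompt': 'a cat sitting on a windowsill, golden hour lighting, detailed fur',
    'width': 512,
    'height': 512,
    'image_num': 1,
    'steps': 20,
    'seed': -1,
    'guidance_scale': 7.5,
    'sampler_name': 'Euler a',
}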

View File

@ -32,8 +32,8 @@ class TwilioAPIWrapper(BaseModel):
must be empty.
"""
@classmethod
@field_validator('client', mode='before')
@classmethod
def set_validator(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
try:

View File

@ -0,0 +1,23 @@
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="100%" viewBox="0 0 176 323" enable-background="new 0 0 176 323" xml:space="preserve">
<path fill="#F03F30" opacity="1.000000" stroke="none"
d="
M41.044968,257.626282
C27.696184,236.155075 31.957315,212.441483 52.045750,197.344925
C79.002586,177.086716 106.149689,157.081665 133.645813,136.647858
C139.760117,144.272156 142.374191,152.876511 142.340073,162.386032
C142.284485,177.881165 135.226303,189.665787 122.998329,198.722351
C97.096687,217.906250 71.232124,237.140259 45.308189,256.293945
C44.162773,257.140198 42.580475,257.395203 41.044968,257.626282
z"/>
<path fill="#F03F30" opacity="1.000000" stroke="none"
d="
M99.096405,138.076599
C79.500099,152.629852 60.200359,166.985321 40.905945,181.336838
C27.259842,163.327515 29.469128,137.268982 46.741867,123.517693
C67.654190,106.868828 89.408379,91.276695 110.817230,75.252617
C117.976021,69.894424 125.193146,64.614182 132.376923,59.303246
C147.358932,78.560143 142.926590,105.166771 122.208939,120.964401
C114.681282,126.704391 107.002701,132.246460 99.096405,138.076599
z"/>
</svg>


View File

@ -0,0 +1,51 @@
from typing import Any, Union
import requests
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
BASE_URL = "https://api.serply.io/v1/request"
class SerplyApi:
"""
SerplyAPI tool provider.
"""
def __init__(self, api_key: str) -> None:
"""Initialize SerplyAPI tool provider."""
self.serply_api_key = api_key
def run(self, url: str, **kwargs: Any) -> str:
"""Run query through SerplyAPI and parse result."""
location = kwargs.get("location", "US")
headers = {
"X-API-KEY": self.serply_api_key,
"X-User-Agent": kwargs.get("device", "desktop"),
"X-Proxy-Location": location,
"User-Agent": "Dify",
}
data = {"url": url, "method": "GET", "response_type": "markdown"}
        res = requests.post(BASE_URL, headers=headers, json=data)  # the target url travels in the JSON body
return res.text
class GetMarkdownTool(BuiltinTool):
def _invoke(
self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
Invoke the SerplyApi tool.
"""
url = tool_parameters["url"]
location = tool_parameters.get("location", None)
api_key = self.runtime.credentials["serply_api_key"]
result = SerplyApi(api_key).run(url, location=location)
return self.create_text_message(text=result)
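For reference, the request this tool ends up issuing is a POST to the Serply request endpoint with the target page carried in the JSON body; below is a self-contained sketch of the same call, with a placeholder API key and an assumed target URL.

import requests

SERPLY_API_KEY = "YOUR_SERPLY_API_KEY"            # placeholder credential
target = "https://example.com/article"            # assumed page to convert

response = requests.post(
    "https://api.serply.io/v1/request",           # BASE_URL used by the tool above
    headers={
        "X-API-KEY": SERPLY_API_KEY,
        "X-User-Agent": "desktop",
        "X-Proxy-Location": "US",
        "User-Agent": "Dify",
    },
    json={"url": target, "method": "GET", "response_type": "markdown"},
)
print(response.text)                              # markdown rendering of the page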

View File

@ -0,0 +1,96 @@
identity:
name: get_markdown
author: Dify
label:
en_US: Get Markdown API
zh_Hans: Get Markdown API
description:
human:
    en_US: A tool to convert a webpage to Markdown, making it easier for LLMs to understand.
    zh_Hans: 一个将网页转换为 Markdown 的工具,以便模型更容易理解。
  llm: A tool to convert a webpage to Markdown, making it easier for LLMs to understand.
parameters:
- name: url
type: string
required: true
label:
en_US: URL
zh_Hans: URL
human_description:
en_US: URL that you want to grab the content from
zh_Hans: 您要从中获取内容的 URL
    llm_description: Defines the link you want to grab content from.
form: llm
- name: location
type: string
required: false
default: US
label:
en_US: Location
      zh_Hans: 位置
human_description:
en_US: Defines from where you want the search to originate. (For example - New York)
zh_Hans: 定义您想要搜索的起始位置。 (例如 - 纽约)
llm_description: Defines from where you want the search to originate. (For example - New York)
form: form
options:
- value: AU
label:
en_US: Australia
zh_Hans: 澳大利亚
pt_BR: Australia
- value: BR
label:
en_US: Brazil
zh_Hans: 巴西
pt_BR: Brazil
- value: CA
label:
en_US: Canada
zh_Hans: 加拿大
pt_BR: Canada
- value: DE
label:
en_US: Germany
zh_Hans: 德国
pt_BR: Germany
- value: FR
label:
en_US: France
zh_Hans: 法国
pt_BR: France
- value: GB
label:
en_US: United Kingdom
zh_Hans: 英国
pt_BR: United Kingdom
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States
- value: JP
label:
en_US: Japan
zh_Hans: 日本
pt_BR: Japan
- value: IN
label:
en_US: India
zh_Hans: 印度
pt_BR: India
- value: KR
label:
en_US: Korea
zh_Hans: 韩国
pt_BR: Korea
- value: SG
label:
en_US: Singapore
zh_Hans: 新加坡
pt_BR: Singapore
- value: SE
label:
en_US: Sweden
zh_Hans: 瑞典
pt_BR: Sweden

View File

@ -0,0 +1,86 @@
from typing import Any, Union
from urllib.parse import urlencode
import requests
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
BASE_URL = "https://api.serply.io/v1/news/"
class SerplyApi:
"""
SerplyAPI tool provider.
"""
def __init__(self, api_key: str) -> None:
"""Initialize SerplyAPI tool provider."""
self.serply_api_key = api_key
def run(self, query: str, **kwargs: Any) -> str:
"""Run query through SerplyAPI and parse result."""
params = {"q": query, "hl": kwargs.get("hl", "en"), "gl": kwargs.get("gl", "US"), "num": kwargs.get("num", 10)}
location = kwargs.get("location", "US")
headers = {
"X-API-KEY": self.serply_api_key,
"X-User-Agent": kwargs.get("device", "desktop"),
"X-Proxy-Location": location,
"User-Agent": "Dify",
}
url = f"{BASE_URL}{urlencode(params)}"
res = requests.get(
url,
headers=headers,
)
res = res.json()
return self.parse_results(res)
@staticmethod
def parse_results(res: dict) -> str:
"""Process response from Serply Job Search."""
jobs = res.get("jobs", [])
if not jobs:
raise ValueError(f"Got error from Serply: {res}")
string = []
for job in jobs[:10]:
try:
string.append(
"\n".join([
f"Position: {job['position']}",
f"Employer: {job['employer']}",
f"Location: {job['location']}",
f"Link: {job['link']}",
f"""Highest: {", ".join([h for h in job["highlights"]])}""",
"---",
])
)
except KeyError:
continue
content = "\n".join(string)
return f"\nJobs results:\n {content}\n"
class JobSearchTool(BuiltinTool):
def _invoke(
self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
Invoke the SerplyApi tool.
"""
query = tool_parameters["query"]
gl = tool_parameters.get("gl", "us")
hl = tool_parameters.get("hl", "en")
location = tool_parameters.get("location", None)
api_key = self.runtime.credentials["serply_api_key"]
result = SerplyApi(api_key).run(query, gl=gl, hl=hl, location=location)
return self.create_text_message(text=result)

View File

@ -0,0 +1,41 @@
identity:
name: job_search
author: Dify
label:
en_US: Job Search API
zh_Hans: Job Search API
description:
human:
    en_US: A tool to retrieve job titles, company names and descriptions from the Google Jobs engine.
    zh_Hans: 一个从 Google 招聘引擎检索职位名称、公司名称和描述的工具。
  llm: A tool to retrieve job titles, company names and descriptions from the Google Jobs engine.
parameters:
- name: query
type: string
required: true
label:
en_US: Query
zh_Hans: 询问
human_description:
en_US: Defines the query you want to search.
zh_Hans: 定义您要搜索的查询。
llm_description: Defines the search query you want to search.
form: llm
- name: location
type: string
required: false
default: US
label:
en_US: Location
      zh_Hans: 位置
human_description:
en_US: Defines from where you want the search to originate. (For example - New York)
zh_Hans: 定义您想要搜索的起始位置。 (例如 - 纽约)
llm_description: Defines from where you want the search to originate. (For example - New York)
form: form
options:
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States

View File

@ -0,0 +1,88 @@
from typing import Any, Union
from urllib.parse import urlencode
import requests
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
BASE_URL = "https://api.serply.io/v1/news/"
class SerplyApi:
"""
SerplyApi tool provider.
"""
def __init__(self, api_key: str) -> None:
"""Initialize SerplyApi tool provider."""
self.serply_api_key = api_key
def run(self, query: str, **kwargs: Any) -> str:
"""Run query through SerplyApi and parse result."""
params = {"q": query, "hl": kwargs.get("hl", "en"), "gl": kwargs.get("gl", "US"), "num": kwargs.get("num", 10)}
location = kwargs.get("location", "US")
headers = {
"X-API-KEY": self.serply_api_key,
"X-User-Agent": kwargs.get("device", "desktop"),
"X-Proxy-Location": location,
"User-Agent": "Dify",
}
url = f"{BASE_URL}{urlencode(params)}"
res = requests.get(
url,
headers=headers,
)
res = res.json()
return self.parse_results(res)
@staticmethod
def parse_results(res: dict) -> str:
"""Process response from Serply News Search."""
news = res.get("entries", [])
if not news:
raise ValueError(f"Got error from Serply: {res}")
string = []
for entry in news:
try:
# follow url
r = requests.get(entry["link"])
final_link = r.history[-1].headers["Location"]
string.append(
"\n".join([
f"Title: {entry['title']}",
f"Link: {final_link}",
f"Source: {entry['source']['title']}",
f"Published: {entry['published']}",
"---",
])
)
except KeyError:
continue
content = "\n".join(string)
return f"\nNews:\n {content}\n"
class NewsSearchTool(BuiltinTool):
def _invoke(
self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
Invoke the SerplyApi tool.
"""
query = tool_parameters["query"]
gl = tool_parameters.get("gl", "us")
hl = tool_parameters.get("hl", "en")
location = tool_parameters.get("location", None)
api_key = self.runtime.credentials["serply_api_key"]
result = SerplyApi(api_key).run(query, gl=gl, hl=hl, location=location)
return self.create_text_message(text=result)
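The parse_results loop above resolves each Google News entry to its final URL by reading the redirect history; a more defensive variant (an editor's sketch under the assumption that some entries may not redirect at all, not the author's implementation) would be:

import requests

def resolve_final_link(link: str, timeout: float = 10.0) -> str:
    """Follow redirects and return the final URL, falling back to the original link."""
    try:
        r = requests.get(link, timeout=timeout, allow_redirects=True)
        return r.url  # requests has already followed the whole redirect chain
    except requests.RequestException:
        return link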

View File

@ -0,0 +1,501 @@
identity:
name: news_search
author: Dify
label:
en_US: News Search API
zh_Hans: News Search API
description:
human:
    en_US: A tool to retrieve organic search result snippets and links from the Google News engine.
    zh_Hans: 一种从 Google 新闻引擎检索自然搜索结果片段和链接的工具。
  llm: A tool to retrieve organic search result snippets and links from the Google News engine.
parameters:
- name: query
type: string
required: true
label:
en_US: Query
zh_Hans: 询问
human_description:
en_US: Defines the query you want to search.
zh_Hans: 定义您要搜索的查询。
llm_description: Defines the search query you want to search.
form: llm
- name: location
type: string
required: false
default: US
label:
en_US: Location
      zh_Hans: 位置
human_description:
en_US: Defines from where you want the search to originate. (For example - New York)
zh_Hans: 定义您想要搜索的起始位置。 (例如 - 纽约)
llm_description: Defines from where you want the search to originate. (For example - New York)
form: form
options:
- value: AU
label:
en_US: Australia
zh_Hans: 澳大利亚
pt_BR: Australia
- value: BR
label:
en_US: Brazil
zh_Hans: 巴西
pt_BR: Brazil
- value: CA
label:
en_US: Canada
zh_Hans: 加拿大
pt_BR: Canada
- value: DE
label:
en_US: Germany
zh_Hans: 德国
pt_BR: Germany
- value: FR
label:
en_US: France
zh_Hans: 法国
pt_BR: France
- value: GB
label:
en_US: United Kingdom
zh_Hans: 英国
pt_BR: United Kingdom
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States
- value: JP
label:
en_US: Japan
zh_Hans: 日本
pt_BR: Japan
- value: IN
label:
en_US: India
zh_Hans: 印度
pt_BR: India
- value: KR
label:
en_US: Korea
zh_Hans: 韩国
pt_BR: Korea
- value: SG
label:
en_US: Singapore
zh_Hans: 新加坡
pt_BR: Singapore
- value: SE
label:
en_US: Sweden
zh_Hans: 瑞典
pt_BR: Sweden
- name: gl
type: select
label:
en_US: Country
zh_Hans: 国家/地区
required: false
human_description:
en_US: Defines the country of the search. Default is "US".
zh_Hans: 定义搜索的国家/地区。默认为“美国”。
llm_description: Defines the gl parameter of the Google search.
form: form
default: US
options:
- value: AR
label:
en_US: Argentina
zh_Hans: 阿根廷
pt_BR: Argentina
- value: AU
label:
en_US: Australia
zh_Hans: 澳大利亚
pt_BR: Australia
- value: AT
label:
en_US: Austria
zh_Hans: 奥地利
pt_BR: Austria
- value: BE
label:
en_US: Belgium
zh_Hans: 比利时
pt_BR: Belgium
- value: BR
label:
en_US: Brazil
zh_Hans: 巴西
pt_BR: Brazil
- value: CA
label:
en_US: Canada
zh_Hans: 加拿大
pt_BR: Canada
- value: CL
label:
en_US: Chile
zh_Hans: 智利
pt_BR: Chile
- value: CO
label:
en_US: Colombia
zh_Hans: 哥伦比亚
pt_BR: Colombia
- value: CN
label:
en_US: China
zh_Hans: 中国
pt_BR: China
- value: CZ
label:
en_US: Czech Republic
zh_Hans: 捷克共和国
pt_BR: Czech Republic
- value: DK
label:
en_US: Denmark
zh_Hans: 丹麦
pt_BR: Denmark
- value: FI
label:
en_US: Finland
zh_Hans: 芬兰
pt_BR: Finland
- value: FR
label:
en_US: France
zh_Hans: 法国
pt_BR: France
- value: DE
label:
en_US: Germany
zh_Hans: 德国
pt_BR: Germany
- value: HK
label:
en_US: Hong Kong
zh_Hans: 香港
pt_BR: Hong Kong
- value: IN
label:
en_US: India
zh_Hans: 印度
pt_BR: India
- value: ID
label:
en_US: Indonesia
zh_Hans: 印度尼西亚
pt_BR: Indonesia
- value: IT
label:
en_US: Italy
zh_Hans: 意大利
pt_BR: Italy
- value: JP
label:
en_US: Japan
zh_Hans: 日本
pt_BR: Japan
- value: KR
label:
en_US: Korea
zh_Hans: 韩国
pt_BR: Korea
- value: MY
label:
en_US: Malaysia
zh_Hans: 马来西亚
pt_BR: Malaysia
- value: MX
label:
en_US: Mexico
zh_Hans: 墨西哥
pt_BR: Mexico
- value: NL
label:
en_US: Netherlands
zh_Hans: 荷兰
pt_BR: Netherlands
- value: NZ
label:
en_US: New Zealand
zh_Hans: 新西兰
pt_BR: New Zealand
- value: NO
label:
en_US: Norway
zh_Hans: 挪威
pt_BR: Norway
- value: PH
label:
en_US: Philippines
zh_Hans: 菲律宾
pt_BR: Philippines
- value: PL
label:
en_US: Poland
zh_Hans: 波兰
pt_BR: Poland
- value: PT
label:
en_US: Portugal
zh_Hans: 葡萄牙
pt_BR: Portugal
- value: RU
label:
en_US: Russia
zh_Hans: 俄罗斯
pt_BR: Russia
- value: SA
label:
en_US: Saudi Arabia
zh_Hans: 沙特阿拉伯
pt_BR: Saudi Arabia
- value: SG
label:
en_US: Singapore
zh_Hans: 新加坡
pt_BR: Singapore
- value: ZA
label:
en_US: South Africa
zh_Hans: 南非
pt_BR: South Africa
- value: ES
label:
en_US: Spain
zh_Hans: 西班牙
pt_BR: Spain
- value: SE
label:
en_US: Sweden
zh_Hans: 瑞典
pt_BR: Sweden
- value: CH
label:
en_US: Switzerland
zh_Hans: 瑞士
pt_BR: Switzerland
- value: TW
label:
en_US: Taiwan
zh_Hans: 台湾
pt_BR: Taiwan
- value: TH
label:
en_US: Thailand
zh_Hans: 泰国
pt_BR: Thailand
- value: TR
label:
en_US: Turkey
zh_Hans: 土耳其
pt_BR: Turkey
- value: GB
label:
en_US: United Kingdom
zh_Hans: 英国
pt_BR: United Kingdom
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States
- name: hl
type: select
label:
en_US: Language
zh_Hans: 语言
human_description:
en_US: Defines the interface language of the search. Default is "en".
zh_Hans: 定义搜索的界面语言。默认为“en”。
required: false
default: en
form: form
options:
- value: ar
label:
en_US: Arabic
zh_Hans: 阿拉伯语
- value: bg
label:
en_US: Bulgarian
zh_Hans: 保加利亚语
- value: ca
label:
en_US: Catalan
zh_Hans: 加泰罗尼亚语
- value: zh-cn
label:
en_US: Chinese (Simplified)
zh_Hans: 中文(简体)
- value: zh-tw
label:
en_US: Chinese (Traditional)
zh_Hans: 中文(繁体)
- value: cs
label:
en_US: Czech
zh_Hans: 捷克语
- value: da
label:
en_US: Danish
zh_Hans: 丹麦语
- value: nl
label:
en_US: Dutch
zh_Hans: 荷兰语
- value: en
label:
en_US: English
zh_Hans: 英语
- value: et
label:
en_US: Estonian
zh_Hans: 爱沙尼亚语
- value: fi
label:
en_US: Finnish
zh_Hans: 芬兰语
- value: fr
label:
en_US: French
zh_Hans: 法语
- value: de
label:
en_US: German
zh_Hans: 德语
- value: el
label:
en_US: Greek
zh_Hans: 希腊语
- value: iw
label:
en_US: Hebrew
zh_Hans: 希伯来语
- value: hi
label:
en_US: Hindi
zh_Hans: 印地语
- value: hu
label:
en_US: Hungarian
zh_Hans: 匈牙利语
- value: id
label:
en_US: Indonesian
zh_Hans: 印尼语
- value: it
label:
en_US: Italian
zh_Hans: 意大利语
- value: ja
label:
en_US: Japanese
zh_Hans: 日语
- value: kn
label:
en_US: Kannada
zh_Hans: 卡纳达语
- value: ko
label:
en_US: Korean
zh_Hans: 韩语
- value: lv
label:
en_US: Latvian
zh_Hans: 拉脱维亚语
- value: lt
label:
en_US: Lithuanian
zh_Hans: 立陶宛语
- value: my
label:
en_US: Malay
zh_Hans: 马来语
- value: ml
label:
en_US: Malayalam
zh_Hans: 马拉雅拉姆语
- value: mr
label:
en_US: Marathi
zh_Hans: 马拉地语
- value: "no"
label:
en_US: Norwegian
zh_Hans: 挪威语
- value: pl
label:
en_US: Polish
zh_Hans: 波兰语
- value: pt-br
label:
en_US: Portuguese (Brazil)
zh_Hans: 葡萄牙语(巴西)
- value: pt-pt
label:
en_US: Portuguese (Portugal)
zh_Hans: 葡萄牙语(葡萄牙)
- value: pa
label:
en_US: Punjabi
zh_Hans: 旁遮普语
- value: ro
label:
en_US: Romanian
zh_Hans: 罗马尼亚语
- value: ru
label:
en_US: Russian
zh_Hans: 俄语
- value: sr
label:
en_US: Serbian
zh_Hans: 塞尔维亚语
- value: sk
label:
en_US: Slovak
zh_Hans: 斯洛伐克语
- value: sl
label:
en_US: Slovenian
zh_Hans: 斯洛文尼亚语
- value: es
label:
en_US: Spanish
zh_Hans: 西班牙语
- value: sv
label:
en_US: Swedish
zh_Hans: 瑞典语
- value: ta
label:
en_US: Tamil
zh_Hans: 泰米尔语
- value: te
label:
en_US: Telugu
zh_Hans: 泰卢固语
- value: th
label:
en_US: Thai
zh_Hans: 泰语
- value: tr
label:
en_US: Turkish
zh_Hans: 土耳其语
- value: uk
label:
en_US: Ukrainian
zh_Hans: 乌克兰语
- value: vi
label:
en_US: Vietnamese
zh_Hans: 越南语

View File

@ -0,0 +1,91 @@
from typing import Any, Union
from urllib.parse import urlencode
import requests
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
BASE_URL = "https://api.serply.io/v1/scholar/"
class SerplyApi:
"""
SerplyApi tool provider.
"""
def __init__(self, api_key: str) -> None:
"""Initialize SerplyApi tool provider."""
self.serply_api_key = api_key
def run(self, query: str, **kwargs: Any) -> str:
"""Run query through SerplyApi and parse result."""
params = {"q": query, "hl": kwargs.get("hl", "en"), "gl": kwargs.get("gl", "US"), "num": kwargs.get("num", 10)}
location = kwargs.get("location", "US")
headers = {
"X-API-KEY": self.serply_api_key,
"X-User-Agent": kwargs.get("device", "desktop"),
"X-Proxy-Location": location,
"User-Agent": "Dify",
}
url = f"{BASE_URL}{urlencode(params)}"
res = requests.get(
url,
headers=headers,
)
res = res.json()
return self.parse_results(res)
@staticmethod
def parse_results(res: dict) -> str:
"""Process response from Serply News Search."""
articles = res.get("articles", [])
if not articles:
raise ValueError(f"Got error from Serply: {res}")
string = []
for article in articles:
try:
if "doc" in article:
link = article["doc"]["link"]
else:
link = article["link"]
authors = [author["name"] for author in article["author"]["authors"]]
string.append(
"\n".join([
f"Title: {article['title']}",
f"Link: {link}",
f"Description: {article['description']}",
f"Cite: {article['cite']}",
f"Authors: {', '.join(authors)}",
"---",
])
)
except KeyError:
continue
content = "\n".join(string)
return f"\nScholar results:\n {content}\n"
class ScholarSearchTool(BuiltinTool):
def _invoke(
self,
user_id: str,
tool_parameters: dict[str, Any],
) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
Invoke the SerplyApi tool.
"""
query = tool_parameters["query"]
gl = tool_parameters.get("gl", "us")
hl = tool_parameters.get("hl", "en")
location = tool_parameters.get("location", None)
api_key = self.runtime.credentials["serply_api_key"]
result = SerplyApi(api_key).run(query, gl=gl, hl=hl, location=location)
return self.create_text_message(text=result)

View File

@ -0,0 +1,501 @@
identity:
name: scholar_search
author: Dify
label:
en_US: Scholar API
zh_Hans: Scholar API
description:
human:
en_US: A tool to retrieve scholarly literature.
zh_Hans: 学术文献检索工具
llm: A tool to retrieve scholarly literature.
parameters:
- name: query
type: string
required: true
label:
en_US: Query
zh_Hans: 询问
human_description:
en_US: Defines the query you want to search.
zh_Hans: 定义您要搜索的查询。
llm_description: Defines the search query you want to search.
form: llm
- name: location
type: string
required: false
default: US
label:
en_US: Location
      zh_Hans: 位置
human_description:
en_US: Defines from where you want the search to originate. (For example - New York)
zh_Hans: 定义您想要搜索的起始位置。 (例如 - 纽约)
llm_description: Defines from where you want the search to originate. (For example - New York)
form: form
options:
- value: AU
label:
en_US: Australia
zh_Hans: 澳大利亚
pt_BR: Australia
- value: BR
label:
en_US: Brazil
zh_Hans: 巴西
pt_BR: Brazil
- value: CA
label:
en_US: Canada
zh_Hans: 加拿大
pt_BR: Canada
- value: DE
label:
en_US: Germany
zh_Hans: 德国
pt_BR: Germany
- value: FR
label:
en_US: France
zh_Hans: 法国
pt_BR: France
- value: GB
label:
en_US: United Kingdom
zh_Hans: 英国
pt_BR: United Kingdom
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States
- value: JP
label:
en_US: Japan
zh_Hans: 日本
pt_BR: Japan
- value: IN
label:
en_US: India
zh_Hans: 印度
pt_BR: India
- value: KR
label:
en_US: Korea
zh_Hans: 韩国
pt_BR: Korea
- value: SG
label:
en_US: Singapore
zh_Hans: 新加坡
pt_BR: Singapore
- value: SE
label:
en_US: Sweden
zh_Hans: 瑞典
pt_BR: Sweden
- name: gl
type: select
label:
en_US: Country
zh_Hans: 国家/地区
required: false
human_description:
en_US: Defines the country of the search. Default is "US".
zh_Hans: 定义搜索的国家/地区。默认为“美国”。
llm_description: Defines the gl parameter of the Google search.
form: form
default: US
options:
- value: AR
label:
en_US: Argentina
zh_Hans: 阿根廷
pt_BR: Argentina
- value: AU
label:
en_US: Australia
zh_Hans: 澳大利亚
pt_BR: Australia
- value: AT
label:
en_US: Austria
zh_Hans: 奥地利
pt_BR: Austria
- value: BE
label:
en_US: Belgium
zh_Hans: 比利时
pt_BR: Belgium
- value: BR
label:
en_US: Brazil
zh_Hans: 巴西
pt_BR: Brazil
- value: CA
label:
en_US: Canada
zh_Hans: 加拿大
pt_BR: Canada
- value: CL
label:
en_US: Chile
zh_Hans: 智利
pt_BR: Chile
- value: CO
label:
en_US: Colombia
zh_Hans: 哥伦比亚
pt_BR: Colombia
- value: CN
label:
en_US: China
zh_Hans: 中国
pt_BR: China
- value: CZ
label:
en_US: Czech Republic
zh_Hans: 捷克共和国
pt_BR: Czech Republic
- value: DK
label:
en_US: Denmark
zh_Hans: 丹麦
pt_BR: Denmark
- value: FI
label:
en_US: Finland
zh_Hans: 芬兰
pt_BR: Finland
- value: FR
label:
en_US: France
zh_Hans: 法国
pt_BR: France
- value: DE
label:
en_US: Germany
zh_Hans: 德国
pt_BR: Germany
- value: HK
label:
en_US: Hong Kong
zh_Hans: 香港
pt_BR: Hong Kong
- value: IN
label:
en_US: India
zh_Hans: 印度
pt_BR: India
- value: ID
label:
en_US: Indonesia
zh_Hans: 印度尼西亚
pt_BR: Indonesia
- value: IT
label:
en_US: Italy
zh_Hans: 意大利
pt_BR: Italy
- value: JP
label:
en_US: Japan
zh_Hans: 日本
pt_BR: Japan
- value: KR
label:
en_US: Korea
zh_Hans: 韩国
pt_BR: Korea
- value: MY
label:
en_US: Malaysia
zh_Hans: 马来西亚
pt_BR: Malaysia
- value: MX
label:
en_US: Mexico
zh_Hans: 墨西哥
pt_BR: Mexico
- value: NL
label:
en_US: Netherlands
zh_Hans: 荷兰
pt_BR: Netherlands
- value: NZ
label:
en_US: New Zealand
zh_Hans: 新西兰
pt_BR: New Zealand
- value: "NO"
label:
en_US: Norway
zh_Hans: 挪威
pt_BR: Norway
- value: PH
label:
en_US: Philippines
zh_Hans: 菲律宾
pt_BR: Philippines
- value: PL
label:
en_US: Poland
zh_Hans: 波兰
pt_BR: Poland
- value: PT
label:
en_US: Portugal
zh_Hans: 葡萄牙
pt_BR: Portugal
- value: RU
label:
en_US: Russia
zh_Hans: 俄罗斯
pt_BR: Russia
- value: SA
label:
en_US: Saudi Arabia
zh_Hans: 沙特阿拉伯
pt_BR: Saudi Arabia
- value: SG
label:
en_US: Singapore
zh_Hans: 新加坡
pt_BR: Singapore
- value: ZA
label:
en_US: South Africa
zh_Hans: 南非
pt_BR: South Africa
- value: ES
label:
en_US: Spain
zh_Hans: 西班牙
pt_BR: Spain
- value: SE
label:
en_US: Sweden
zh_Hans: 瑞典
pt_BR: Sweden
- value: CH
label:
en_US: Switzerland
zh_Hans: 瑞士
pt_BR: Switzerland
- value: TW
label:
en_US: Taiwan
zh_Hans: 台湾
pt_BR: Taiwan
- value: TH
label:
en_US: Thailand
zh_Hans: 泰国
pt_BR: Thailand
- value: TR
label:
en_US: Turkey
zh_Hans: 土耳其
pt_BR: Turkey
- value: GB
label:
en_US: United Kingdom
zh_Hans: 英国
pt_BR: United Kingdom
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States
- name: hl
type: select
label:
en_US: Language
zh_Hans: 语言
human_description:
en_US: Defines the interface language of the search. Default is "en".
zh_Hans: 定义搜索的界面语言。默认为“en”。
required: false
default: en
form: form
options:
- value: ar
label:
en_US: Arabic
zh_Hans: 阿拉伯语
- value: bg
label:
en_US: Bulgarian
zh_Hans: 保加利亚语
- value: ca
label:
en_US: Catalan
zh_Hans: 加泰罗尼亚语
- value: zh-cn
label:
en_US: Chinese (Simplified)
zh_Hans: 中文(简体)
- value: zh-tw
label:
en_US: Chinese (Traditional)
zh_Hans: 中文(繁体)
- value: cs
label:
en_US: Czech
zh_Hans: 捷克语
- value: da
label:
en_US: Danish
zh_Hans: 丹麦语
- value: nl
label:
en_US: Dutch
zh_Hans: 荷兰语
- value: en
label:
en_US: English
zh_Hans: 英语
- value: et
label:
en_US: Estonian
zh_Hans: 爱沙尼亚语
- value: fi
label:
en_US: Finnish
zh_Hans: 芬兰语
- value: fr
label:
en_US: French
zh_Hans: 法语
- value: de
label:
en_US: German
zh_Hans: 德语
- value: el
label:
en_US: Greek
zh_Hans: 希腊语
- value: iw
label:
en_US: Hebrew
zh_Hans: 希伯来语
- value: hi
label:
en_US: Hindi
zh_Hans: 印地语
- value: hu
label:
en_US: Hungarian
zh_Hans: 匈牙利语
- value: id
label:
en_US: Indonesian
zh_Hans: 印尼语
- value: it
label:
en_US: Italian
zh_Hans: 意大利语
- value: ja
label:
en_US: Japanese
zh_Hans: 日语
- value: kn
label:
en_US: Kannada
zh_Hans: 卡纳达语
- value: ko
label:
en_US: Korean
zh_Hans: 韩语
- value: lv
label:
en_US: Latvian
zh_Hans: 拉脱维亚语
- value: lt
label:
en_US: Lithuanian
zh_Hans: 立陶宛语
- value: my
label:
en_US: Malay
zh_Hans: 马来语
- value: ml
label:
en_US: Malayalam
zh_Hans: 马拉雅拉姆语
- value: mr
label:
en_US: Marathi
zh_Hans: 马拉地语
- value: "no"
label:
en_US: Norwegian
zh_Hans: 挪威语
- value: pl
label:
en_US: Polish
zh_Hans: 波兰语
- value: pt-br
label:
en_US: Portuguese (Brazil)
zh_Hans: 葡萄牙语(巴西)
- value: pt-pt
label:
en_US: Portuguese (Portugal)
zh_Hans: 葡萄牙语(葡萄牙)
- value: pa
label:
en_US: Punjabi
zh_Hans: 旁遮普语
- value: ro
label:
en_US: Romanian
zh_Hans: 罗马尼亚语
- value: ru
label:
en_US: Russian
zh_Hans: 俄语
- value: sr
label:
en_US: Serbian
zh_Hans: 塞尔维亚语
- value: sk
label:
en_US: Slovak
zh_Hans: 斯洛伐克语
- value: sl
label:
en_US: Slovenian
zh_Hans: 斯洛文尼亚语
- value: es
label:
en_US: Spanish
zh_Hans: 西班牙语
- value: sv
label:
en_US: Swedish
zh_Hans: 瑞典语
- value: ta
label:
en_US: Tamil
zh_Hans: 泰米尔语
- value: te
label:
en_US: Telugu
zh_Hans: 泰卢固语
- value: th
label:
en_US: Thai
zh_Hans: 泰语
- value: tr
label:
en_US: Turkish
zh_Hans: 土耳其语
- value: uk
label:
en_US: Ukrainian
zh_Hans: 乌克兰语
- value: vi
label:
en_US: Vietnamese
zh_Hans: 越南语

View File

@ -0,0 +1,88 @@
import typing
from urllib.parse import urlencode
import requests
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
class SerplyApi:
"""
SerplyApi tool provider.
"""
def __init__(self, api_key: str) -> None:
"""Initialize Serply Web Search Tool provider."""
self.serply_api_key = api_key
self.base_url = "https://api.serply.io/v1/search/"
def run(self, query: str, **kwargs: typing.Any) -> str:
"""Run query through Serply and parse result."""
params = {"q": query, "hl": kwargs.get("hl", "en"), "gl": kwargs.get("gl", "US"), "num": kwargs.get("num", 10)}
location = kwargs.get("location", "US")
headers = {
"X-API-KEY": self.serply_api_key,
"X-User-Agent": kwargs.get("device", "desktop"),
"X-Proxy-Location": location,
"User-Agent": "Dify",
}
url = f"{self.base_url}{urlencode(params)}"
res = requests.get(
url,
headers=headers,
)
res = res.json()
return self.parse_results(res)
@staticmethod
def parse_results(res: dict) -> str:
"""Process response from Serply Web Search."""
results = res.get("results", [])
if not results:
raise ValueError(f"Got error from Serply: {res}")
string = []
for result in results:
try:
string.append(
"\n".join([
f"Title: {result['title']}",
f"Link: {result['link']}",
f"Description: {result['description'].strip()}",
"---",
])
)
except KeyError:
continue
if related_questions := res.get("related_questions", []):
string.append("---")
string.append("Related Questions: ")
string.append("\n".join(related_questions))
content = "\n".join(string)
return f"\nSearch results:\n {content}\n"
class WebSearchTool(BuiltinTool):
def _invoke(
self,
user_id: str,
tool_parameters: dict[str, typing.Any],
) -> typing.Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
"""
Invoke the SerplyApi tool.
"""
query = tool_parameters["query"]
num = tool_parameters.get("num", 10)
gl = tool_parameters.get("gl", "us")
hl = tool_parameters.get("hl", "en")
location = tool_parameters.get("location", "None")
api_key = self.runtime.credentials["serply_api_key"]
result = SerplyApi(api_key).run(query=query, num=num, gl=gl, hl=hl, location=location)
return self.create_text_message(text=result)
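To make the formatting concrete, here is a small usage sketch that feeds SerplyApi.parse_results above a hand-made response shaped like the fields the code reads; the dict is invented for illustration and is not a real Serply payload.

sample_response = {
    "results": [
        {
            "title": "What is an LLM?",
            "link": "https://example.com/llm",
            "description": "  A short introduction to large language models.  ",
        }
    ],
    "related_questions": ["How are LLMs trained?"],
}
print(SerplyApi.parse_results(sample_response))
# Prints a "Search results" block with the title, link and trimmed description,
# followed by the related questions.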

View File

@ -0,0 +1,376 @@
identity:
name: web_search
author: Dify
label:
en_US: Web Search API
zh_Hans: Web Search API
description:
human:
    en_US: A tool to retrieve answer boxes, knowledge graphs, snippets, and webpages from the Google Search engine.
    zh_Hans: 一种从 Google 搜索引擎检索答案框、知识图、片段和网页的工具。
  llm: A tool to retrieve answer boxes, knowledge graphs, snippets, and webpages from the Google Search engine.
parameters:
- name: query
type: string
required: true
label:
en_US: Query
zh_Hans: 询问
human_description:
en_US: Defines the query you want to search.
zh_Hans: 定义您要搜索的查询。
llm_description: Defines the search query you want to search.
form: llm
- name: location
type: string
required: false
default: US
label:
en_US: Location
      zh_Hans: 位置
human_description:
en_US: Defines from where you want the search to originate. (For example - New York)
zh_Hans: 定义您想要搜索的起始位置。 (例如 - 纽约)
llm_description: Defines from where you want the search to originate. (For example - New York)
form: form
options:
- value: AU
label:
en_US: Australia
zh_Hans: 澳大利亚
pt_BR: Australia
- value: BR
label:
en_US: Brazil
zh_Hans: 巴西
pt_BR: Brazil
- value: CA
label:
en_US: Canada
zh_Hans: 加拿大
pt_BR: Canada
- value: DE
label:
en_US: Germany
zh_Hans: 德国
pt_BR: Germany
- value: FR
label:
en_US: France
zh_Hans: 法国
pt_BR: France
- value: GB
label:
en_US: United Kingdom
zh_Hans: 英国
pt_BR: United Kingdom
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States
- value: JP
label:
en_US: Japan
zh_Hans: 日本
pt_BR: Japan
- value: IN
label:
en_US: India
zh_Hans: 印度
pt_BR: India
- value: KR
label:
en_US: Korea
zh_Hans: 韩国
pt_BR: Korea
- value: SG
label:
en_US: Singapore
zh_Hans: 新加坡
pt_BR: Singapore
- value: SE
label:
en_US: Sweden
zh_Hans: 瑞典
pt_BR: Sweden
- name: device
type: select
label:
en_US: Device Type
      zh_Hans: 设备类型
human_description:
      en_US: Defines the device used to perform the search. Default is "desktop".
      zh_Hans: 定义执行搜索时使用的设备类型。默认为“桌面”。
required: false
default: desktop
form: form
options:
- value: desktop
label:
en_US: Desktop
zh_Hans: 桌面
- value: mobile
label:
en_US: Mobile
          zh_Hans: 移动端
- name: gl
type: select
label:
en_US: Country
zh_Hans: 国家/地区
required: false
human_description:
en_US: Defines the country of the search. Default is "US".
zh_Hans: 定义搜索的国家/地区。默认为“美国”。
llm_description: Defines the gl parameter of the Google search.
form: form
default: US
options:
- value: AU
label:
en_US: Australia
zh_Hans: 澳大利亚
pt_BR: Australia
- value: BR
label:
en_US: Brazil
zh_Hans: 巴西
pt_BR: Brazil
- value: CA
label:
en_US: Canada
zh_Hans: 加拿大
pt_BR: Canada
- value: DE
label:
en_US: Germany
zh_Hans: 德国
pt_BR: Germany
- value: FR
label:
en_US: France
zh_Hans: 法国
pt_BR: France
- value: GB
label:
en_US: United Kingdom
zh_Hans: 英国
pt_BR: United Kingdom
- value: IN
label:
en_US: India
zh_Hans: 印度
pt_BR: India
- value: KR
label:
en_US: Korea
zh_Hans: 韩国
pt_BR: Korea
- value: SE
label:
en_US: Sweden
zh_Hans: 瑞典
pt_BR: Sweden
- value: SG
label:
en_US: Singapore
zh_Hans: 新加坡
pt_BR: Singapore
- value: US
label:
en_US: United States
zh_Hans: 美国
pt_BR: United States
- name: hl
type: select
label:
en_US: Language
zh_Hans: 语言
human_description:
en_US: Defines the interface language of the search. Default is "en".
zh_Hans: 定义搜索的界面语言。默认为“en”。
required: false
default: en
form: form
options:
- value: ar
label:
en_US: Arabic
zh_Hans: 阿拉伯语
- value: bg
label:
en_US: Bulgarian
zh_Hans: 保加利亚语
- value: ca
label:
en_US: Catalan
zh_Hans: 加泰罗尼亚语
- value: zh-cn
label:
en_US: Chinese (Simplified)
zh_Hans: 中文(简体)
- value: zh-tw
label:
en_US: Chinese (Traditional)
zh_Hans: 中文(繁体)
- value: cs
label:
en_US: Czech
zh_Hans: 捷克语
- value: da
label:
en_US: Danish
zh_Hans: 丹麦语
- value: nl
label:
en_US: Dutch
zh_Hans: 荷兰语
- value: en
label:
en_US: English
zh_Hans: 英语
- value: et
label:
en_US: Estonian
zh_Hans: 爱沙尼亚语
- value: fi
label:
en_US: Finnish
zh_Hans: 芬兰语
- value: fr
label:
en_US: French
zh_Hans: 法语
- value: de
label:
en_US: German
zh_Hans: 德语
- value: el
label:
en_US: Greek
zh_Hans: 希腊语
- value: iw
label:
en_US: Hebrew
zh_Hans: 希伯来语
- value: hi
label:
en_US: Hindi
zh_Hans: 印地语
- value: hu
label:
en_US: Hungarian
zh_Hans: 匈牙利语
- value: id
label:
en_US: Indonesian
zh_Hans: 印尼语
- value: it
label:
en_US: Italian
zh_Hans: 意大利语
- value: ja
label:
en_US: Japanese
zh_Hans: 日语
- value: kn
label:
en_US: Kannada
zh_Hans: 卡纳达语
- value: ko
label:
en_US: Korean
zh_Hans: 韩语
- value: lv
label:
en_US: Latvian
zh_Hans: 拉脱维亚语
- value: lt
label:
en_US: Lithuanian
zh_Hans: 立陶宛语
- value: my
label:
en_US: Malay
zh_Hans: 马来语
- value: ml
label:
en_US: Malayalam
zh_Hans: 马拉雅拉姆语
- value: mr
label:
en_US: Marathi
zh_Hans: 马拉地语
- value: "no"
label:
en_US: Norwegian
zh_Hans: 挪威语
- value: pl
label:
en_US: Polish
zh_Hans: 波兰语
- value: pt-br
label:
en_US: Portuguese (Brazil)
zh_Hans: 葡萄牙语(巴西)
- value: pt-pt
label:
en_US: Portuguese (Portugal)
zh_Hans: 葡萄牙语(葡萄牙)
- value: pa
label:
en_US: Punjabi
zh_Hans: 旁遮普语
- value: ro
label:
en_US: Romanian
zh_Hans: 罗马尼亚语
- value: ru
label:
en_US: Russian
zh_Hans: 俄语
- value: sr
label:
en_US: Serbian
zh_Hans: 塞尔维亚语
- value: sk
label:
en_US: Slovak
zh_Hans: 斯洛伐克语
- value: sl
label:
en_US: Slovenian
zh_Hans: 斯洛文尼亚语
- value: es
label:
en_US: Spanish
zh_Hans: 西班牙语
- value: sv
label:
en_US: Swedish
zh_Hans: 瑞典语
- value: ta
label:
en_US: Tamil
zh_Hans: 泰米尔语
- value: te
label:
en_US: Telugu
zh_Hans: 泰卢固语
- value: th
label:
en_US: Thai
zh_Hans: 泰语
- value: tr
label:
en_US: Turkish
zh_Hans: 土耳其语
- value: uk
label:
en_US: Ukrainian
zh_Hans: 乌克兰语
- value: vi
label:
en_US: Vietnamese
zh_Hans: 越南语

View File

@ -0,0 +1,21 @@
from typing import Any
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.websearch.tools.web_search import WebSearchTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class WebSearchAPIProvider(BuiltinToolProviderController):
# validate when saving the api_key
def _validate_credentials(self, credentials: dict[str, Any]) -> None:
try:
WebSearchTool().fork_tool_runtime(
runtime={
"credentials": credentials,
}
).invoke(
user_id="",
tool_parameters={"query": "what is llm"},
)
except Exception as e:
raise ToolProviderCredentialValidationError(str(e))

View File

@ -0,0 +1,34 @@
identity:
name: websearch
author: Serply.io
label:
en_US: Serply.io
zh_Hans: Serply.io
pt_BR: Serply.io
description:
en_US: Serply.io is a robust real-time SERP API delivering structured data from a collection of search engines including Web Search, Jobs, News, and many more.
    zh_Hans: Serply.io 是一个强大的实时 SERP API,可提供来自网页搜索、招聘、新闻等搜索引擎集合的结构化数据。
pt_BR: Serply.io is a robust real-time SERP API delivering structured data from a collection of search engines including Web Search, Jobs, News, and many more.
icon: icon.svg
tags:
- search
- business
- news
- productivity
credentials_for_provider:
serply_api_key:
type: secret-input
required: true
label:
en_US: Serply.io API key
zh_Hans: Serply.io API key
pt_BR: Serply.io API key
placeholder:
en_US: Please input your Serply.io API key
zh_Hans: 请输入你的 Serply.io API key
pt_BR: Please input your Serply.io API key
help:
en_US: Get your Serply.io API key from https://Serply.io/
zh_Hans: 从 Serply.io 获取您的 Serply.io API key
pt_BR: Get your Serply.io API key from Serply.io
url: https://Serply.io/

View File

@ -66,7 +66,7 @@ class BuiltinTool(Tool):
tenant_id=self.runtime.tenant_id,
prompt_messages=prompt_messages
)
def summary(self, user_id: str, content: str) -> str:
max_tokens = self.get_max_tokens()

View File

@ -32,8 +32,8 @@ class Tool(BaseModel, ABC):
# pydantic configs
model_config = ConfigDict(protected_namespaces=())
@classmethod
@field_validator('parameters', mode='before')
@classmethod
def set_parameters(cls, v, validation_info: ValidationInfo) -> list[ToolParameter]:
return v or []
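Several hunks in this compare apply the same Pydantic V2 adjustment: @field_validator becomes the outer decorator with @classmethod beneath it, and previously validated fields are read from ValidationInfo.data. A minimal, self-contained sketch of the pattern (the model and field names are invented for illustration):

from typing import Optional
from pydantic import BaseModel, ValidationInfo, field_validator

class ExampleNodeData(BaseModel):
    type: str
    config: Optional[dict] = None

    @field_validator('config', mode='before')
    @classmethod
    def check_config(cls, v, values: ValidationInfo):
        # In Pydantic V2, earlier fields are exposed on values.data, not on values itself.
        if values.data.get('type') == 'no-auth':
            return None
        return v or {}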

View File

@ -1,7 +1,7 @@
import os
from typing import Literal, Optional, Union
from pydantic import BaseModel, field_validator
from pydantic import BaseModel, ValidationInfo, field_validator
from core.workflow.entities.base_node_data_entities import BaseNodeData
@ -24,13 +24,13 @@ class HttpRequestNodeData(BaseNodeData):
type: Literal['no-auth', 'api-key']
config: Optional[Config]
@classmethod
@field_validator('config', mode='before')
def check_config(cls, v, values):
@classmethod
def check_config(cls, v: Config, values: ValidationInfo):
"""
Check config, if type is no-auth, config should be None, otherwise it should be a dict.
"""
if values['type'] == 'no-auth':
if values.data['type'] == 'no-auth':
return None
else:
if not v or not isinstance(v, dict):

View File

@ -12,7 +12,11 @@ from core.file.file_obj import FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance, ModelManager
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageContentType
from core.model_runtime.entities.message_entities import (
ImagePromptMessageContent,
PromptMessage,
PromptMessageContentType,
)
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.model_runtime.utils.encoders import jsonable_encoder
@ -548,6 +552,7 @@ class LLMNode(BaseNode):
stop = model_config.stop
vision_enabled = node_data.vision.enabled
vision_detail = node_data.vision.configs.detail if node_data.vision.configs else None
filtered_prompt_messages = []
for prompt_message in prompt_messages:
if prompt_message.is_empty():
@ -556,7 +561,10 @@ class LLMNode(BaseNode):
if not isinstance(prompt_message.content, str):
prompt_message_content = []
for content_item in prompt_message.content:
if vision_enabled and content_item.type == PromptMessageContentType.IMAGE:
if vision_enabled and content_item.type == PromptMessageContentType.IMAGE and isinstance(content_item, ImagePromptMessageContent):
# Override vision config if LLM node has vision config
if vision_detail:
content_item.detail = ImagePromptMessageContent.DETAIL(vision_detail)
prompt_message_content.append(content_item)
elif content_item.type == PromptMessageContentType.TEXT:
prompt_message_content.append(content_item)

View File

@ -25,8 +25,8 @@ class ParameterConfig(BaseModel):
description: str
required: bool
@classmethod
@field_validator('name', mode='before')
@classmethod
def validate_name(cls, value) -> str:
if not value:
raise ValueError('Parameter name is required')
@ -45,8 +45,8 @@ class ParameterExtractorNodeData(BaseNodeData):
memory: Optional[MemoryConfig] = None
reasoning_mode: Literal['function_call', 'prompt']
@classmethod
@field_validator('reasoning_mode', mode='before')
@classmethod
def set_reasoning_mode(cls, v) -> str:
return v or 'function_call'

View File

@ -95,7 +95,7 @@ class ParameterExtractorNode(LLMNode):
# fetch memory
memory = self._fetch_memory(node_data.memory, variable_pool, model_instance)
if set(model_schema.features or []) & set([ModelFeature.MULTI_TOOL_CALL, ModelFeature.MULTI_TOOL_CALL]) \
if set(model_schema.features or []) & set([ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL]) \
and node_data.reasoning_mode == 'function_call':
# use function call
prompt_messages, prompt_message_tools = self._generate_function_call_prompt(

View File

@ -14,9 +14,9 @@ class ToolEntity(BaseModel):
tool_label: str # redundancy
tool_configurations: dict[str, Any]
@classmethod
@field_validator('tool_configurations', mode='before')
def validate_tool_configurations(cls, value, values: ValidationInfo) -> dict[str, Any]:
@classmethod
def validate_tool_configurations(cls, value, values: ValidationInfo):
if not isinstance(value, dict):
raise ValueError('tool_configurations must be a dictionary')
@ -32,8 +32,8 @@ class ToolNodeData(BaseNodeData, ToolEntity):
value: Union[Any, list[str]]
type: Literal['mixed', 'variable', 'constant']
@classmethod
@field_validator('type', mode='before')
@classmethod
def check_type(cls, value, validation_info: ValidationInfo):
typ = value
value = validation_info.data.get('value')

View File

@ -7,11 +7,35 @@ REGEX = re.compile(r"\{\{(#[a-zA-Z0-9_]{1,50}(\.[a-zA-Z_][a-zA-Z0-9_]{0,29}){1,1
class VariableTemplateParser:
"""
A class for parsing and manipulating template variables in a string.
Rules:
1. Template variables must be enclosed in `{{}}`.
2. The template variable Key can only be: #node_id.var1.var2#.
3. The template variable Key cannot contain new lines or spaces, and must comply with rule 2.
Example usage:
template = "Hello, {{#node_id.query.name#}}! Your age is {{#node_id.query.age#}}."
parser = VariableTemplateParser(template)
# Extract template variable keys
variable_keys = parser.extract()
print(variable_keys)
# Output: ['#node_id.query.name#', '#node_id.query.age#']
# Extract variable selectors
variable_selectors = parser.extract_variable_selectors()
print(variable_selectors)
# Output: [VariableSelector(variable='#node_id.query.name#', value_selector=['node_id', 'query', 'name']),
# VariableSelector(variable='#node_id.query.age#', value_selector=['node_id', 'query', 'age'])]
# Format the template string
inputs = {'#node_id.query.name#': 'John', '#node_id.query.age#': 25}
formatted_string = parser.format(inputs)
print(formatted_string)
# Output: "Hello, John! Your age is 25."
"""
def __init__(self, template: str):
@ -19,6 +43,12 @@ class VariableTemplateParser:
self.variable_keys = self.extract()
def extract(self) -> list:
"""
Extracts all the template variable keys from the template string.
Returns:
A list of template variable keys.
"""
# Regular expression to match the template rules
matches = re.findall(REGEX, self.template)
@ -27,6 +57,12 @@ class VariableTemplateParser:
return list(set(first_group_matches))
def extract_variable_selectors(self) -> list[VariableSelector]:
"""
Extracts the variable selectors from the template variable keys.
Returns:
A list of VariableSelector objects representing the variable selectors.
"""
variable_selectors = []
for variable_key in self.variable_keys:
remove_hash = variable_key.replace('#', '')
@ -42,6 +78,16 @@ class VariableTemplateParser:
return variable_selectors
def format(self, inputs: dict, remove_template_variables: bool = True) -> str:
"""
Formats the template string by replacing the template variables with their corresponding values.
Args:
inputs: A dictionary containing the values for the template variables.
remove_template_variables: A boolean indicating whether to remove the template variables from the output.
Returns:
The formatted string with template variables replaced by their values.
"""
def replacer(match):
key = match.group(1)
value = inputs.get(key, match.group(0)) # return original matched string if key not found
@ -59,4 +105,13 @@ class VariableTemplateParser:
@classmethod
def remove_template_variables(cls, text: str):
"""
Removes the template variables from the given text.
Args:
text: The text from which to remove the template variables.
Returns:
The text with template variables removed.
"""
return re.sub(REGEX, r'{\1}', text)
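The new docstring documents the parser's three responsibilities: extracting `{{#node_id.var#}}` keys, turning them into variable selectors, and formatting a template from an input mapping. A standalone sketch of the extract/format behaviour is shown below; the regex is an approximation of the (truncated) `REGEX` constant in the hunk header above, so treat the exact bounds as an assumption.

```python
import re

# Approximation of the REGEX constant shown (truncated) above:
# keys look like {{#node_id.var1.var2#}}.
TEMPLATE_REGEX = re.compile(r"\{\{(#[a-zA-Z0-9_]{1,50}(?:\.[a-zA-Z_][a-zA-Z0-9_]{0,29})+#)\}\}")


def extract_keys(template: str) -> list[str]:
    """Return the distinct template variable keys, e.g. '#node_id.query.name#'."""
    return list({match.group(1) for match in TEMPLATE_REGEX.finditer(template)})


def format_template(template: str, inputs: dict) -> str:
    """Replace each {{#...#}} occurrence with its value from `inputs`."""
    def replacer(match: re.Match) -> str:
        key = match.group(1)
        return str(inputs.get(key, match.group(0)))  # keep the original text if the key is missing
    return TEMPLATE_REGEX.sub(replacer, template)


template = "Hello, {{#node_id.query.name#}}! Your age is {{#node_id.query.age#}}."
inputs = {'#node_id.query.name#': 'John', '#node_id.query.age#': 25}
print(extract_keys(template))
print(format_template(template, inputs))  # Hello, John! Your age is 25.
```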

View File

@ -4,7 +4,7 @@ set -e
if [[ "${MIGRATION_ENABLED}" == "true" ]]; then
echo "Running migrations"
flask db upgrade
flask upgrade-db
fi
if [[ "${MODE}" == "worker" ]]; then

View File

@ -5,6 +5,7 @@ from .create_installed_app_when_app_created import handle
from .create_site_record_when_app_created import handle
from .deduct_quota_when_messaeg_created import handle
from .delete_installed_app_when_app_deleted import handle
from .delete_site_record_when_app_deleted import handle
from .delete_tool_parameters_cache_when_sync_draft_workflow import handle
from .delete_workflow_as_tool_when_app_deleted import handle
from .update_app_dataset_join_when_app_model_config_updated import handle

View File

@ -0,0 +1,11 @@
from events.app_event import app_was_deleted
from extensions.ext_database import db
from models.model import Site
@app_was_deleted.connect
def handle(sender, **kwargs):
app = sender
site = db.session.query(Site).filter(Site.app_id == app.id).first()
db.session.delete(site)
db.session.commit()
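This new handler subscribes to the `app_was_deleted` signal and removes the app's `Site` row when the app is deleted. A minimal sketch of how such a signal is typically declared and fired, assuming a blinker-style signal as the `.connect` decorator suggests (the actual `events/app_event.py` is not shown in this diff):

```python
from blinker import signal

# Hypothetical declaration; the real signal lives in events/app_event.py.
app_was_deleted = signal('app-was-deleted')


def delete_app(app) -> None:
    # ... delete the app record itself ...
    # Notify every connected handler (such as the one above) with the deleted
    # app as the sender.
    app_was_deleted.send(app)
```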

View File

@ -8,6 +8,7 @@ from extensions.storage.azure_storage import AzureStorage
from extensions.storage.google_storage import GoogleStorage
from extensions.storage.local_storage import LocalStorage
from extensions.storage.s3_storage import S3Storage
from extensions.storage.tencent_storage import TencentStorage
class Storage:
@ -32,6 +33,10 @@ class Storage:
self.storage_runner = GoogleStorage(
app=app
)
elif storage_type == 'tencent-cos':
self.storage_runner = TencentStorage(
app=app
)
else:
self.storage_runner = LocalStorage(app=app)

View File

@ -0,0 +1,48 @@
from collections.abc import Generator
from flask import Flask
from qcloud_cos import CosConfig, CosS3Client
from extensions.storage.base_storage import BaseStorage
class TencentStorage(BaseStorage):
"""Implementation for tencent cos storage.
"""
def __init__(self, app: Flask):
super().__init__(app)
app_config = self.app.config
self.bucket_name = app_config.get('TENCENT_COS_BUCKET_NAME')
config = CosConfig(
Region=app_config.get('TENCENT_COS_REGION'),
SecretId=app_config.get('TENCENT_COS_SECRET_ID'),
SecretKey=app_config.get('TENCENT_COS_SECRET_KEY'),
Scheme=app_config.get('TENCENT_COS_SCHEME'),
)
self.client = CosS3Client(config)
def save(self, filename, data):
self.client.put_object(Bucket=self.bucket_name, Body=data, Key=filename)
def load_once(self, filename: str) -> bytes:
data = self.client.get_object(Bucket=self.bucket_name, Key=filename)['Body'].get_raw_stream().read()
return data
def load_stream(self, filename: str) -> Generator:
def generate(filename: str = filename) -> Generator:
response = self.client.get_object(Bucket=self.bucket_name, Key=filename)
while chunk := response['Body'].get_stream(chunk_size=4096):
yield chunk
return generate()
def download(self, filename, target_filepath):
response = self.client.get_object(Bucket=self.bucket_name, Key=filename)
response['Body'].get_stream_to_file(target_filepath)
def exists(self, filename):
return self.client.object_exists(Bucket=self.bucket_name, Key=filename)
def delete(self, filename):
self.client.delete_object(Bucket=self.bucket_name, Key=filename)
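A short usage sketch for the new backend, assuming it runs inside the API codebase: the `STORAGE_TYPE` switch and the `TENCENT_COS_*` keys match the dispatcher hunk above and the docker-compose additions later in this diff, while the bucket values are placeholders.

```python
from flask import Flask

from extensions.storage.tencent_storage import TencentStorage

app = Flask(__name__)
app.config.update(
    STORAGE_TYPE='tencent-cos',
    TENCENT_COS_BUCKET_NAME='your-bucket-name',
    TENCENT_COS_REGION='your-region',
    TENCENT_COS_SECRET_ID='your-secret-id',
    TENCENT_COS_SECRET_KEY='your-secret-key',
    TENCENT_COS_SCHEME='https',
)

storage = TencentStorage(app)
storage.save('hello.txt', b'hello')        # put_object
print(storage.exists('hello.txt'))         # object_exists -> True
print(storage.load_once('hello.txt'))      # b'hello'
storage.delete('hello.txt')                # delete_object
```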

View File

@ -25,7 +25,7 @@ class TimestampField(fields.Raw):
def email(email):
# Define a regex pattern for email addresses
pattern = r"^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$"
pattern = r"^[\w\.!#$%&'*+\-/=?^_`{|}~]+@([\w-]+\.)+[\w-]{2,}$"
# Check if the email matches the pattern
if re.match(pattern, email) is not None:
return email
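The widened character class accepts the special characters commonly allowed in the local part of an address (`+`, `!`, `#`, and so on) while still rejecting characters such as parentheses. A quick check of the new pattern against a few addresses; the dedicated unit test added later in this diff covers the same behaviour.

```python
import re

# The updated pattern from the hunk above.
pattern = r"^[\w\.!#$%&'*+\-/=?^_`{|}~]+@([\w-]+\.)+[\w-]{2,}$"

for addr in ("test@example.com", "test+test@example.com", "user!#$@example.com", "()@example.com"):
    print(addr, bool(re.match(pattern, addr)))
# The first three match; '()@example.com' does not, because parentheses stay disallowed.
```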

View File

@ -1,2 +1 @@
Single-database configuration for Flask.

api/poetry.lock generated
View File

@ -1088,13 +1088,13 @@ numpy = "*"
[[package]]
name = "chromadb"
version = "0.5.0"
version = "0.5.1"
description = "Chroma."
optional = false
python-versions = ">=3.8"
files = [
{file = "chromadb-0.5.0-py3-none-any.whl", hash = "sha256:8193dc65c143b61d8faf87f02c44ecfa778d471febd70de517f51c5d88a06009"},
{file = "chromadb-0.5.0.tar.gz", hash = "sha256:7954af614a9ff7b2902ddbd0a162f33f7ec0669e2429903905c4f7876d1f766f"},
{file = "chromadb-0.5.1-py3-none-any.whl", hash = "sha256:61f1f75a672b6edce7f1c8875c67e2aaaaf130dc1c1684431fbc42ad7240d01d"},
{file = "chromadb-0.5.1.tar.gz", hash = "sha256:e2b2b6a34c2a949bedcaa42fa7775f40c7f6667848fc8094dcbf97fc0d30bee7"},
]
[package.dependencies]
@ -1103,10 +1103,11 @@ build = ">=1.0.3"
chroma-hnswlib = "0.7.3"
fastapi = ">=0.95.2"
grpcio = ">=1.58.0"
httpx = ">=0.27.0"
importlib-resources = "*"
kubernetes = ">=28.1.0"
mmh3 = ">=4.0.1"
numpy = ">=1.22.5"
numpy = ">=1.22.5,<2.0.0"
onnxruntime = ">=1.14.1"
opentelemetry-api = ">=1.2.0"
opentelemetry-exporter-otlp-proto-grpc = ">=1.2.0"
@ -1441,12 +1442,12 @@ test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"]
[[package]]
name = "cos-python-sdk-v5"
version = "1.9.29"
version = "1.9.30"
description = "cos-python-sdk-v5"
optional = false
python-versions = "*"
files = [
{file = "cos-python-sdk-v5-1.9.29.tar.gz", hash = "sha256:1bb07022368d178e7a50a3cc42e0d6cbf4b0bef2af12a3bb8436904339cdec8e"},
{file = "cos-python-sdk-v5-1.9.30.tar.gz", hash = "sha256:a23fd090211bf90883066d90cd74317860aa67c6d3aa80fe5e44b18c7e9b2a81"},
]
[package.dependencies]
@ -1636,6 +1637,22 @@ tiktoken = {version = "*", optional = true, markers = "extra == \"tokenizer\""}
[package.extras]
tokenizer = ["tiktoken"]
[[package]]
name = "dataclass-wizard"
version = "0.22.3"
description = "Marshal dataclasses to/from JSON. Use field properties with initial values. Construct a dataclass schema with JSON input."
optional = false
python-versions = "*"
files = [
{file = "dataclass-wizard-0.22.3.tar.gz", hash = "sha256:4c46591782265058f1148cfd1f54a3a91221e63986fdd04c9d59f4ced61f4424"},
{file = "dataclass_wizard-0.22.3-py2.py3-none-any.whl", hash = "sha256:63751203e54b9b9349212cc185331da73c1adc99c51312575eb73bb5c00c1962"},
]
[package.extras]
dev = ["Sphinx (==5.3.0)", "bump2version (==1.0.1)", "coverage (>=6.2)", "dataclass-factory (==2.12)", "dataclasses-json (==0.5.6)", "flake8 (>=3)", "jsons (==1.6.1)", "pip (>=21.3.1)", "pytest (==7.0.1)", "pytest-cov (==3.0.0)", "pytest-mock (>=3.6.1)", "pytimeparse (==1.1.8)", "sphinx-issues (==3.0.1)", "sphinx-issues (==4.0.0)", "tox (==3.24.5)", "twine (==3.8.0)", "watchdog[watchmedo] (==2.1.6)", "wheel (==0.37.1)", "wheel (==0.42.0)"]
timedelta = ["pytimeparse (>=1.1.7)"]
yaml = ["PyYAML (>=5.3)"]
[[package]]
name = "dataclasses"
version = "0.6"
@ -3381,24 +3398,24 @@ lxml = ["lxml"]
[[package]]
name = "httpcore"
version = "0.17.3"
version = "1.0.5"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"},
{file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"},
{file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
{file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
]
[package.dependencies]
anyio = ">=3.0,<5.0"
certifi = "*"
h11 = ">=0.13,<0.15"
sniffio = "==1.*"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<0.26.0)"]
[[package]]
name = "httplib2"
@ -3464,19 +3481,20 @@ test = ["Cython (>=0.29.24,<0.30.0)"]
[[package]]
name = "httpx"
version = "0.24.1"
version = "0.27.0"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"},
{file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"},
{file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
{file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""}
httpcore = ">=0.15.0,<0.18.0"
httpcore = "==1.*"
idna = "*"
sniffio = "*"
socksio = {version = "==1.*", optional = true, markers = "extra == \"socks\""}
@ -4615,6 +4633,22 @@ plot = ["matplotlib"]
tgrep = ["pyparsing"]
twitter = ["twython"]
[[package]]
name = "novita-client"
version = "0.5.6"
description = "novita SDK for Python"
optional = false
python-versions = ">=3.6"
files = [
{file = "novita_client-0.5.6-py3-none-any.whl", hash = "sha256:9fa6cfd12f13a75c7da42b27f811a560b0320da24cf256480f517bde479bc57c"},
{file = "novita_client-0.5.6.tar.gz", hash = "sha256:2e4d956903d5da39d43127a41dcb020ae40322d2a6196413071b94b3d6988b98"},
]
[package.dependencies]
dataclass-wizard = ">=0.22.2"
pillow = ">=10.2.0"
requests = ">=2.27.1"
[[package]]
name = "numba"
version = "0.59.1"
@ -6380,7 +6414,6 @@ files = [
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
{file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
{file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
{file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
{file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
{file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@ -6388,16 +6421,8 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
{file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
{file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
{file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
{file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@ -6414,7 +6439,6 @@ files = [
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
{file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
{file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
{file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
{file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
{file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@ -6422,7 +6446,6 @@ files = [
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
{file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
{file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
{file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
{file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
{file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@ -8921,4 +8944,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "e967aa4b61dc7c40f2f50eb325038da1dc0ff633d8f778e7a7560bdabce744dc"
content-hash = "21360e271c46e0368b8e3bd26287caca73145a73ee73287669f91e7eac6f05b9"

View File

@ -1,12 +1,17 @@
[project]
requires-python = ">=3.10"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.ruff]
exclude = [
]
line-length = 120
[tool.ruff.lint]
preview = true
select = [
"B", # flake8-bugbear rules
"F", # pyflakes rules
@ -77,17 +82,10 @@ MOCK_SWITCH = "true"
CODE_MAX_STRING_LENGTH = "80000"
CODE_EXECUTION_ENDPOINT="http://127.0.0.1:8194"
CODE_EXECUTION_API_KEY="dify-sandbox"
FIRECRAWL_API_KEY = "fc-"
[tool.poetry]
name = "dify-api"
version = "0.6.11"
description = ""
authors = ["Dify <hello@dify.ai>"]
readme = "README.md"
package-mode = false
[tool.poetry.dependencies]
@ -155,7 +153,7 @@ numpy = "~1.26.4"
unstructured = {version = "~0.10.27", extras = ["docx", "epub", "md", "msg", "ppt", "pptx"]}
bs4 = "~0.0.1"
markdown = "~3.5.1"
httpx = {version = "~0.24.1", extras = ["socks"]}
httpx = {version = "~0.27.0", extras = ["socks"]}
matplotlib = "~3.8.2"
yfinance = "~0.2.40"
pydub = "~0.25.1"
@ -183,7 +181,10 @@ vanna = {version = "0.5.5", extras = ["postgres", "mysql", "clickhouse", "duckdb
kaleido = "0.2.1"
tencentcloud-sdk-python-hunyuan = "~3.0.1158"
tcvectordb = "1.3.2"
chromadb = "~0.5.0"
chromadb = "~0.5.1"
tenacity = "~8.3.0"
cos-python-sdk-v5 = "1.9.30"
novita-client = "^0.5.6"
[tool.poetry.group.dev]
optional = true

View File

@ -61,7 +61,7 @@ numpy~=1.26.4
unstructured[docx,pptx,msg,md,ppt,epub]~=0.10.27
bs4~=0.0.1
markdown~=3.5.1
httpx[socks]~=0.24.1
httpx[socks]~=0.27.0
matplotlib~=3.8.2
yfinance~=0.2.40
pydub~=0.25.1
@ -87,4 +87,7 @@ tidb-vector==0.0.9
google-cloud-aiplatform==1.49.0
vanna[postgres,mysql,clickhouse,duckdb]==0.5.5
tencentcloud-sdk-python-hunyuan~=3.0.1158
chromadb~=0.5.0
chromadb~=0.5.1
novita_client~=0.5.6
tenacity~=8.3.0
cos-python-sdk-v5==1.9.30

View File

@ -31,7 +31,10 @@ class FileService:
@staticmethod
def upload_file(file: FileStorage, user: Union[Account, EndUser], only_image: bool = False) -> UploadFile:
filename = file.filename
extension = file.filename.split('.')[-1]
if len(filename) > 200:
filename = filename.split('.')[0][:200] + '.' + extension
etl_type = current_app.config['ETL_TYPE']
allowed_extensions = UNSTRUSTURED_ALLOWED_EXTENSIONS + IMAGE_EXTENSIONS if etl_type == 'Unstructured' \
else ALLOWED_EXTENSIONS + IMAGE_EXTENSIONS
@ -75,7 +78,7 @@ class FileService:
tenant_id=current_tenant_id,
storage_type=config['STORAGE_TYPE'],
key=file_key,
name=file.filename,
name=filename,
size=file_size,
extension=extension,
mime_type=file.mimetype,
@ -93,6 +96,8 @@ class FileService:
@staticmethod
def upload_text(text: str, text_name: str) -> UploadFile:
if len(text_name) > 200:
text_name = text_name[:200]
# user uuid as file name
file_uuid = str(uuid.uuid4())
file_key = 'upload_files/' + current_user.current_tenant_id + '/' + file_uuid + '.txt'
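The upload path now caps the stored filename at 200 characters while keeping the extension, and `upload_text` applies the same cap to `text_name`. A standalone mirror of that truncation, purely for illustration:

```python
def truncate_filename(filename: str, limit: int = 200) -> str:
    """Keep the extension and cap the stem at `limit` characters, mirroring the hunk above."""
    extension = filename.split('.')[-1]
    if len(filename) > limit:
        filename = filename.split('.')[0][:limit] + '.' + extension
    return filename


print(truncate_filename('a' * 250 + '.txt'))  # 200 'a' characters followed by '.txt'
```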

View File

@ -0,0 +1,25 @@
from libs.helper import email
def test_email_with_valid_email():
assert email("test@example.com") == "test@example.com"
assert email("TEST12345@example.com") == "TEST12345@example.com"
assert email("test+test@example.com") == "test+test@example.com"
assert email("!#$%&'*+-/=?^_{|}~`@example.com") == "!#$%&'*+-/=?^_{|}~`@example.com"
def test_email_with_invalid_email():
try:
email("invalid_email")
except ValueError as e:
assert str(e) == "invalid_email is not a valid email."
try:
email("@example.com")
except ValueError as e:
assert str(e) == "@example.com is not a valid email."
try:
email("()@example.com")
except ValueError as e:
assert str(e) == "()@example.com is not a valid email."

View File

@ -9,7 +9,7 @@ if ! command -v ruff &> /dev/null; then
fi
# run ruff linter
ruff check --fix --preview ./api
ruff check --fix ./api
# env files linting relies on `dotenv-linter` in path
if ! command -v dotenv-linter &> /dev/null; then

View File

@ -2,7 +2,7 @@ version: '3'
services:
# Chroma vector store.
chroma:
image: ghcr.io/chroma-core/chroma:0.5.0
image: ghcr.io/chroma-core/chroma:0.5.1
restart: always
volumes:
- ./volumes/chroma:/chroma/chroma

View File

@ -96,6 +96,12 @@ services:
GOOGLE_STORAGE_BUCKET_NAME: 'your-bucket-name'
# if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
# The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
TENCENT_COS_SECRET_KEY: 'your-secret-key'
TENCENT_COS_SECRET_ID: 'your-secret-id'
TENCENT_COS_REGION: 'your-region'
TENCENT_COS_SCHEME: 'your-scheme'
# The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`.
VECTOR_STORE: weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
@ -252,6 +258,12 @@ services:
GOOGLE_STORAGE_BUCKET_NAME: 'your-bucket-name'
# if you want to use Application Default Credentials, you can leave GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64 empty.
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: 'your-google-service-account-json-base64-string'
# The Tencent COS storage configurations, only available when STORAGE_TYPE is `tencent-cos`.
TENCENT_COS_BUCKET_NAME: 'your-bucket-name'
TENCENT_COS_SECRET_KEY: 'your-secret-key'
TENCENT_COS_SECRET_ID: 'your-secret-id'
TENCENT_COS_REGION: 'your-region'
TENCENT_COS_SCHEME: 'your-scheme'
# The type of vector store to use. Supported values are `weaviate`, `qdrant`, `milvus`, `relyt`, `pgvector`.
VECTOR_STORE: weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.

View File

@ -1,8 +1,11 @@
# Dify Frontend
This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
## Getting Started
### Run by source code
To start the web frontend service, you will need [Node.js v18.x (LTS)](https://nodejs.org/en) and [NPM version 8.x.x](https://www.npmjs.com/) or [Yarn](https://yarnpkg.com/).
First, install the dependencies:
@ -14,6 +17,7 @@ yarn install --frozen-lockfile
```
Then, configure the environment variables. Create a file named `.env.local` in the current directory and copy the contents from `.env.example`. Modify the values of these environment variables according to your requirements:
```
# For production release, change this to PRODUCTION
NEXT_PUBLIC_DEPLOY_ENV=DEVELOPMENT
@ -45,27 +49,35 @@ Open [http://localhost:3000](http://localhost:3000) with your browser to see the
You can start editing the file under folder `app`. The page auto-updates as you edit the file.
## Deploy
### Deploy on server
First, build the app for production:
```bash
npm run build
```
Then, start the server:
```bash
npm run start
```
If you want to customize the host and port:
```bash
npm run start --port=3001 --host=0.0.0.0
```
## Lint Code
If your IDE is VSCode, rename `web/.vscode/settings.example.json` to `web/.vscode/settings.json` for lint code setting.
## Documentation
Visit https://docs.dify.ai/getting-started/readme to view the full documentation.
Visit <https://docs.dify.ai/getting-started/readme> to view the full documentation.
## Community
The Dify community can be found on [Discord community](https://discord.gg/5AEfbxcd9k), where you can ask questions, voice ideas, and share your projects.

View File

@ -279,27 +279,27 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
'items-center shrink-0 mt-1 pt-1 pl-[14px] pr-[6px] pb-[6px] h-[42px]',
tags.length ? 'flex' : '!hidden group-hover:!flex',
)}>
<div className={cn('grow flex items-center gap-1 w-0')} onClick={(e) => {
e.stopPropagation()
e.preventDefault()
}}>
<div className={cn(
'group-hover:!block group-hover:!mr-0 mr-[41px] grow w-full',
tags.length ? '!block' : '!hidden',
)}>
<TagSelector
position='bl'
type='app'
targetID={app.id}
value={tags.map(tag => tag.id)}
selectedTags={tags}
onCacheUpdate={setTags}
onChange={onRefresh}
/>
</div>
</div>
{isCurrentWorkspaceEditor && (
<>
<div className={cn('grow flex items-center gap-1 w-0')} onClick={(e) => {
e.stopPropagation()
e.preventDefault()
}}>
<div className={cn(
'group-hover:!block group-hover:!mr-0 mr-[41px] grow w-full',
tags.length ? '!block' : '!hidden',
)}>
<TagSelector
position='bl'
type='app'
targetID={app.id}
value={tags.map(tag => tag.id)}
selectedTags={tags}
onCacheUpdate={setTags}
onChange={onRefresh}
/>
</div>
</div>
<div className='!hidden group-hover:!flex shrink-0 mx-1 w-[1px] h-[14px] bg-gray-200'/>
<div className='!hidden group-hover:!flex shrink-0'>
<CustomPopover

View File

@ -86,7 +86,7 @@ const ActivateForm = () => {
timezone,
},
})
setLocaleOnClient(language.startsWith('en') ? 'en-US' : 'zh-Hans', false)
setLocaleOnClient(language, false)
setShowSuccess(true)
}
catch {

View File

@ -16,7 +16,7 @@ import Divider from '@/app/components/base/divider'
import Confirm from '@/app/components/base/confirm'
import { useStore as useAppStore } from '@/app/components/app/store'
import { ToastContext } from '@/app/components/base/toast'
import AppsContext from '@/context/app-context'
import AppsContext, { useAppContext } from '@/context/app-context'
import { useProviderContext } from '@/context/provider-context'
import { copyApp, deleteApp, exportAppConfig, updateAppInfo } from '@/service/apps'
import DuplicateAppModal from '@/app/components/app/duplicate-modal'
@ -142,6 +142,8 @@ const AppInfo = ({ expand }: IAppInfoProps) => {
setShowConfirmDelete(false)
}, [appDetail, mutateApps, notify, onPlanInfoChanged, replace, t])
const { isCurrentWorkspaceEditor } = useAppContext()
if (!appDetail)
return null
@ -154,10 +156,13 @@ const AppInfo = ({ expand }: IAppInfoProps) => {
>
<div className='relative'>
<PortalToFollowElemTrigger
onClick={() => setOpen(v => !v)}
onClick={() => {
if (isCurrentWorkspaceEditor)
setOpen(v => !v)
}}
className='block'
>
<div className={cn('flex cursor-pointer p-1 rounded-lg hover:bg-gray-100', open && 'bg-gray-100')}>
<div className={cn('flex p-1 rounded-lg', open && 'bg-gray-100', isCurrentWorkspaceEditor && 'hover:bg-gray-100 cursor-pointer')}>
<div className='relative shrink-0 mr-2'>
<AppIcon size={expand ? 'large' : 'small'} icon={appDetail.icon} background={appDetail.icon_background} />
<span className={cn(
@ -185,7 +190,7 @@ const AppInfo = ({ expand }: IAppInfoProps) => {
<div className="grow w-0">
<div className='flex justify-between items-center text-sm leading-5 font-medium text-gray-900'>
<div className='truncate' title={appDetail.name}>{appDetail.name}</div>
<ChevronDown className='shrink-0 ml-[2px] w-3 h-3 text-gray-500' />
{isCurrentWorkspaceEditor && <ChevronDown className='shrink-0 ml-[2px] w-3 h-3 text-gray-500' />}
</div>
<div className='flex items-center text-[10px] leading-[18px] font-medium text-gray-500 gap-1'>
{appDetail.mode === 'advanced-chat' && (

View File

@ -87,7 +87,7 @@ const BatchModal: FC<IBatchModalProps> = ({
}
return (
<Modal isShow={isShow} onClose={() => { }} wrapperClassName='!z-[20]' className='px-8 py-6 !max-w-[520px] !rounded-xl'>
<Modal isShow={isShow} onClose={() => { }} className='px-8 py-6 !max-w-[520px] !rounded-xl'>
<div className='relative pb-1 text-xl font-medium leading-[30px] text-gray-900'>{t('appAnnotation.batchModal.title')}</div>
<div className='absolute right-4 top-4 p-2 cursor-pointer' onClick={onCancel}>
<XClose className='w-4 h-4 text-gray-500' />

View File

@ -27,7 +27,6 @@ const EditModal: FC<Props> = ({
title={t('appDebug.feature.conversationHistory.editModal.title')}
isShow={isShow}
onClose={onClose}
wrapperClassName='!z-[101]'
>
<div className={'mt-6 font-medium text-sm leading-[21px] text-gray-900'}>{t('appDebug.feature.conversationHistory.editModal.userPrefix')}</div>
<input className={'mt-2 w-full rounded-lg h-10 box-border px-3 text-sm leading-10 bg-gray-100'}

View File

@ -132,7 +132,6 @@ const ConfigModal: FC<IConfigModalProps> = ({
title={t(`appDebug.variableConig.${isCreate ? 'addModalTitle' : 'editModalTitle'}`)}
isShow={isShow}
onClose={onClose}
wrapperClassName='!z-[100]'
>
<div className='mb-8'>
<div className='space-y-2'>

View File

@ -94,7 +94,6 @@ const SelectDataSet: FC<ISelectDataSetProps> = ({
isShow={isShow}
onClose={onClose}
className='w-[400px]'
wrapperClassName='!z-[101]'
title={t('appDebug.feature.dataSet.selectTitle')}
>
{!loaded && (

View File

@ -186,7 +186,6 @@ const ExternalDataToolModal: FC<ExternalDataToolModalProps> = ({
<Modal
isShow
onClose={() => { }}
wrapperClassName='z-[101]'
className='!p-8 !pb-6 !max-w-none !w-[640px]'
>
<div className='mb-2 text-xl font-semibold text-gray-900'>
@ -287,7 +286,6 @@ const ExternalDataToolModal: FC<ExternalDataToolModalProps> = ({
{
showEmojiPicker && (
<EmojiPicker
className='!z-[200]'
onSelect={(icon, icon_background) => {
handleValueChange({ icon, icon_background })
setShowEmojiPicker(false)

View File

@ -83,7 +83,6 @@ const CreateAppModal = ({ show, onSuccess, onClose }: CreateAppDialogProps) => {
return (
<Modal
overflowVisible
wrapperClassName='z-20'
className='!p-0 !max-w-[720px] !w-[720px] rounded-xl'
isShow={show}
onClose={() => {}}

View File

@ -78,7 +78,6 @@ const CreateFromDSLModal = ({ show, onSuccess, onClose }: CreateFromDSLModalProp
return (
<Modal
wrapperClassName='z-20'
className='px-8 py-6 max-w-[520px] w-[520px] rounded-xl'
isShow={show}
onClose={() => {}}

View File

@ -87,7 +87,6 @@ const SwitchAppModal = ({ show, appDetail, inAppDetail = false, onSuccess, onClo
return (
<>
<Modal
wrapperClassName='z-20'
className={cn('p-8 max-w-[600px] w-[600px]', s.bg)}
isShow={show}
onClose={() => {}}

View File

@ -86,7 +86,7 @@ const EmojiPicker: FC<IEmojiPickerProps> = ({
onClose={() => { }}
isShow
closable={false}
wrapperClassName={`!z-50 ${className}`}
wrapperClassName={className}
className={cn(s.container, '!w-[362px] !p-0')}
>
<div className='flex flex-col items-center w-full p-3'>

View File

@ -1,5 +1,5 @@
<svg width="21" height="22" viewBox="0 0 21 22" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="Microsfot">
<g id="Microsoft">
<rect id="Rectangle 1010" y="0.5" width="10" height="10" fill="#EF4F21"/>
<rect id="Rectangle 1012" y="11.5" width="10" height="10" fill="#03A4EE"/>
<rect id="Rectangle 1011" x="11" y="0.5" width="10" height="10" fill="#7EB903"/>


Some files were not shown because too many files have changed in this diff.