Compare commits


6 Commits

SHA1 Message Date
8c1bca3119 fix: eslint run failed 2025-02-14 15:01:02 +08:00
a8982a98f4 chore: update libs 2025-02-14 14:13:44 +08:00
130964d9a7 update eslint.config.mjs 2025-02-14 14:00:59 +08:00
1a8a1a9574 fix: ignore .storybook folder 2025-02-08 17:52:10 +08:00
20bcb49932 fix: ignore rule no-explicit-any 2025-02-08 17:50:35 +08:00
91e411bbaa wip: update eslint config and stash 2025-02-08 15:45:16 +08:00
112 changed files with 5982 additions and 6351 deletions

View File

@ -5,7 +5,6 @@ on:
branches:
- "main"
- "deploy/dev"
- "fix/dataset-admin"
release:
types: [published]

View File

@ -1,47 +0,0 @@
name: Build docker image
on:
pull_request:
branches:
- "main"
paths:
- api/Dockerfile
- web/Dockerfile
concurrency:
group: docker-build-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build-docker:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- service_name: "api-amd64"
platform: linux/amd64
context: "api"
- service_name: "api-arm64"
platform: linux/arm64
context: "api"
- service_name: "web-amd64"
platform: linux/amd64
context: "web"
- service_name: "web-arm64"
platform: linux/arm64
context: "web"
steps:
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker Image
uses: docker/build-push-action@v6
with:
push: false
context: "{{defaultContext}}:${{ matrix.context }}"
platforms: ${{ matrix.platform }}
cache-from: type=gha
cache-to: type=gha,mode=max

View File

@ -25,9 +25,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="seguir en X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="seguir en LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Descargas de Docker" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="suivre sur X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="suivre sur LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Tirages Docker" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="X(Twitter)でフォロー"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="LinkedInでフォロー"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -25,9 +25,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -22,9 +22,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="follow on X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="follow on LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="X(Twitter)'da takip et"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="LinkedIn'da takip et"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Çekmeleri" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">
@ -65,6 +62,8 @@ Görsel bir arayüz üzerinde güçlü AI iş akışları oluşturun ve test edi
![providers-v5](https://github.com/langgenius/dify/assets/13230914/5a17bdbe-097a-4100-8363-40255b70f6e3)
Özür dilerim, haklısınız. Daha anlamlı ve akıcı bir çeviri yapmaya çalışayım. İşte güncellenmiş çeviri:
**3. Prompt IDE**:
Komut istemlerini oluşturmak, model performansını karşılaştırmak ve sohbet tabanlı uygulamalara metin-konuşma gibi ek özellikler eklemek için kullanıcı dostu bir arayüz.
@ -151,6 +150,8 @@ Görsel bir arayüz üzerinde güçlü AI iş akışları oluşturun ve test edi
## Dify'ı Kullanma
- **Cloud </br>**
İşte verdiğiniz metnin Türkçe çevirisi, kod bloğu içinde:
-
Herkesin sıfır kurulumla denemesi için bir [Dify Cloud](https://dify.ai) hizmeti sunuyoruz. Bu hizmet, kendi kendine dağıtılan versiyonun tüm yeteneklerini sağlar ve sandbox planında 200 ücretsiz GPT-4 çağrısı içerir.
- **Dify Topluluk Sürümünü Kendi Sunucunuzda Barındırma</br>**
@ -176,6 +177,8 @@ GitHub'da Dify'a yıldız verin ve yeni sürümlerden anında haberdar olun.
>- RAM >= 4GB
</br>
İşte verdiğiniz metnin Türkçe çevirisi, kod bloğu içinde:
Dify sunucusunu başlatmanın en kolay yolu, [docker-compose.yml](docker/docker-compose.yaml) dosyamızı çalıştırmaktır. Kurulum komutunu çalıştırmadan önce, makinenizde [Docker](https://docs.docker.com/get-docker/) ve [Docker Compose](https://docs.docker.com/compose/install/)'un kurulu olduğundan emin olun:
```bash

View File

@ -21,9 +21,6 @@
<a href="https://twitter.com/intent/follow?screen_name=dify_ai" target="_blank">
<img src="https://img.shields.io/twitter/follow/dify_ai?logo=X&color=%20%23f5f5f5"
alt="theo dõi trên X(Twitter)"></a>
<a href="https://www.linkedin.com/company/langgenius/" target="_blank">
<img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff"
alt="theo dõi trên LinkedIn"></a>
<a href="https://hub.docker.com/u/langgenius" target="_blank">
<img alt="Docker Pulls" src="https://img.shields.io/docker/pulls/langgenius/dify-web?labelColor=%20%23FDB062&color=%20%23f79009"></a>
<a href="https://github.com/langgenius/dify/graphs/commit-activity" target="_blank">

View File

@ -48,18 +48,18 @@ ENV TZ=UTC
WORKDIR /app/api
RUN \
apt-get update \
# Install dependencies
&& apt-get install -y --no-install-recommends \
# basic environment
curl nodejs libgmp-dev libmpfr-dev libmpc-dev \
# For Security
expat libldap-2.5-0 perl libsqlite3-0 zlib1g \
# install a chinese font to support the use of tools like matplotlib
fonts-noto-cjk \
# install libmagic to support the use of python-magic guess MIMETYPE
libmagic1 \
RUN apt-get update \
&& apt-get install -y --no-install-recommends curl nodejs libgmp-dev libmpfr-dev libmpc-dev \
# if you located in China, you can use aliyun mirror to speed up
# && echo "deb http://mirrors.aliyun.com/debian testing main" > /etc/apt/sources.list \
&& echo "deb http://deb.debian.org/debian bookworm main" > /etc/apt/sources.list \
&& apt-get update \
# For Security
&& apt-get install -y --no-install-recommends expat libldap-2.5-0 perl libsqlite3-0 zlib1g \
# install a chinese font to support the use of tools like matplotlib
&& apt-get install -y fonts-noto-cjk \
# install libmagic to support the use of python-magic guess MIMETYPE
&& apt-get install -y libmagic1 \
&& apt-get autoremove -y \
&& rm -rf /var/lib/apt/lists/*
@ -78,6 +78,7 @@ COPY . /app/api/
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ARG COMMIT_SHA
ENV COMMIT_SHA=${COMMIT_SHA}

View File

@ -1,40 +1,9 @@
from typing import Optional
from pydantic import Field, NonNegativeInt, computed_field
from pydantic import Field, NonNegativeInt
from pydantic_settings import BaseSettings
class HostedCreditConfig(BaseSettings):
HOSTED_MODEL_CREDIT_CONFIG: str = Field(
description="Model credit configuration in format 'model:credits,model:credits', e.g., 'gpt-4:20,gpt-4o:10'",
default="",
)
def get_model_credits(self, model_name: str) -> int:
"""
Get credit value for a specific model name.
Returns 1 if model is not found in configuration (default credit).
:param model_name: The name of the model to search for
:return: The credit value for the model
"""
if not self.HOSTED_MODEL_CREDIT_CONFIG:
return 1
try:
credit_map = dict(
item.strip().split(":", 1) for item in self.HOSTED_MODEL_CREDIT_CONFIG.split(",") if ":" in item
)
# Search for matching model pattern
for pattern, credit in credit_map.items():
if pattern.strip() == model_name:
return int(credit)
return 1 # Default quota if no match found
except (ValueError, AttributeError):
return 1 # Return default quota if parsing fails
class HostedOpenAiConfig(BaseSettings):
"""
Configuration for hosted OpenAI service
@ -233,7 +202,5 @@ class HostedServiceConfig(
HostedZhipuAIConfig,
# moderation
HostedModerationConfig,
# credit config
HostedCreditConfig,
):
pass
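
Note: the `HostedCreditConfig.get_model_credits` helper removed above parses a `'model:credits,model:credits'` string and falls back to a credit of 1 when the model is missing or the string is malformed. A standalone sketch of that parsing logic (a plain function here, not Dify's Pydantic settings class):

```python
def get_model_credits(config: str, model_name: str) -> int:
    """Parse 'model:credits,model:credits' (e.g. 'gpt-4:20,gpt-4o:10') and return the credit for model_name."""
    if not config:
        return 1
    try:
        credit_map = dict(
            item.strip().split(":", 1) for item in config.split(",") if ":" in item
        )
        for pattern, credit in credit_map.items():
            if pattern.strip() == model_name:
                return int(credit)
        return 1  # no matching model: default credit
    except (ValueError, AttributeError):
        return 1  # malformed config string: default credit


assert get_model_credits("gpt-4:20,gpt-4o:10", "gpt-4") == 20
assert get_model_credits("gpt-4:20,gpt-4o:10", "gpt-3.5-turbo") == 1
```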

View File

@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description="Dify version",
default="0.15.3",
default="0.15.2",
)
COMMIT_SHA: str = Field(

View File

@ -310,7 +310,7 @@ class DatasetInitApi(Resource):
@cloud_edition_billing_resource_check("vector_space")
def post(self):
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
parser = reqparse.RequestParser()
@ -684,7 +684,7 @@ class DocumentProcessingApi(DocumentResource):
document = self.get_document(dataset_id, document_id)
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
if action == "pause":
@ -748,7 +748,7 @@ class DocumentMetadataApi(DocumentResource):
doc_metadata = req_data.get("doc_metadata")
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
if doc_type is None or doc_metadata is None:

View File

@ -122,7 +122,7 @@ class DatasetDocumentSegmentListApi(Resource):
segment_ids = request.args.getlist("segment_id")
# The role of the current user in the ta table must be admin or owner
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
@ -149,7 +149,7 @@ class DatasetDocumentSegmentApi(Resource):
# check user's model setting
DatasetService.check_dataset_model_setting(dataset)
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
try:
@ -202,7 +202,7 @@ class DatasetDocumentSegmentAddApi(Resource):
document = DocumentService.get_document(dataset_id, document_id)
if not document:
raise NotFound("Document not found.")
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
# check embedding model setting
if dataset.indexing_technique == "high_quality":
@ -277,7 +277,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
if not segment:
raise NotFound("Segment not found.")
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
@ -320,7 +320,7 @@ class DatasetDocumentSegmentUpdateApi(Resource):
if not segment:
raise NotFound("Segment not found.")
# The role of the current user in the ta table must be admin or owner
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
@ -420,7 +420,7 @@ class ChildChunkAddApi(Resource):
).first()
if not segment:
raise NotFound("Segment not found.")
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
# check embedding model setting
if dataset.indexing_technique == "high_quality":
@ -520,7 +520,7 @@ class ChildChunkAddApi(Resource):
if not segment:
raise NotFound("Segment not found.")
# The role of the current user in the ta table must be admin, owner, or editor
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
@ -570,7 +570,7 @@ class ChildChunkUpdateApi(Resource):
if not child_chunk:
raise NotFound("Child chunk not found.")
# The role of the current user in the ta table must be admin or owner
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)
@ -614,7 +614,7 @@ class ChildChunkUpdateApi(Resource):
if not child_chunk:
raise NotFound("Child chunk not found.")
# The role of the current user in the ta table must be admin or owner
if not current_user.is_dataset_editor:
if not current_user.is_editor:
raise Forbidden()
try:
DatasetService.check_dataset_permission(dataset, current_user)

View File

@ -11,6 +11,15 @@ from configs import dify_config
SSRF_DEFAULT_MAX_RETRIES = dify_config.SSRF_DEFAULT_MAX_RETRIES
proxy_mounts = (
{
"http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL),
"https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL),
}
if dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL
else None
)
BACKOFF_FACTOR = 0.5
STATUS_FORCELIST = [429, 500, 502, 503, 504]
@ -42,11 +51,7 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
if dify_config.SSRF_PROXY_ALL_URL:
with httpx.Client(proxy=dify_config.SSRF_PROXY_ALL_URL) as client:
response = client.request(method=method, url=url, **kwargs)
elif dify_config.SSRF_PROXY_HTTP_URL and dify_config.SSRF_PROXY_HTTPS_URL:
proxy_mounts = {
"http://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTP_URL),
"https://": httpx.HTTPTransport(proxy=dify_config.SSRF_PROXY_HTTPS_URL),
}
elif proxy_mounts:
with httpx.Client(mounts=proxy_mounts) as client:
response = client.request(method=method, url=url, **kwargs)
else:
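
Note: the refactor above builds the httpx proxy transports once at module import instead of recreating the mapping on every call. A condensed sketch of the resulting flow, with placeholder proxy URLs standing in for the `dify_config` values:

```python
import httpx

# Placeholder values; in Dify these come from dify_config.SSRF_PROXY_*.
SSRF_PROXY_HTTP_URL = "http://ssrf-proxy.internal:3128"
SSRF_PROXY_HTTPS_URL = "http://ssrf-proxy.internal:3128"

# Built once at import time, as in the hoisted proxy_mounts above.
proxy_mounts = (
    {
        "http://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTP_URL),
        "https://": httpx.HTTPTransport(proxy=SSRF_PROXY_HTTPS_URL),
    }
    if SSRF_PROXY_HTTP_URL and SSRF_PROXY_HTTPS_URL
    else None
)


def make_request(method: str, url: str, **kwargs) -> httpx.Response:
    """Route the request through the pre-built proxy mounts when they are configured."""
    if proxy_mounts:
        with httpx.Client(mounts=proxy_mounts) as client:
            return client.request(method=method, url=url, **kwargs)
    with httpx.Client() as client:
        return client.request(method=method, url=url, **kwargs)
```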

View File

@ -1,4 +1,4 @@
from .llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from .llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from .message_entities import (
AssistantPromptMessage,
AudioPromptMessageContent,
@ -23,7 +23,6 @@ __all__ = [
"AudioPromptMessageContent",
"DocumentPromptMessageContent",
"ImagePromptMessageContent",
"LLMMode",
"LLMResult",
"LLMResultChunk",
"LLMResultChunkDelta",

View File

@ -1,5 +1,5 @@
from decimal import Decimal
from enum import StrEnum
from enum import Enum
from typing import Optional
from pydantic import BaseModel
@ -8,7 +8,7 @@ from core.model_runtime.entities.message_entities import AssistantPromptMessage,
from core.model_runtime.entities.model_entities import ModelUsage, PriceInfo
class LLMMode(StrEnum):
class LLMMode(Enum):
"""
Enum class for large language model mode.
"""

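Side note on the `StrEnum` → `Enum` swap above: a `StrEnum` member is itself a `str`, while a plain `Enum` member is not, which is why call sites such as the Deepseek provider below write `LLMMode.CHAT.value` when storing the mode as a string. A minimal illustration (assumes Python 3.11+, where `StrEnum` was added):

```python
from enum import Enum, StrEnum


class ModeStr(StrEnum):
    CHAT = "chat"


class ModePlain(Enum):
    CHAT = "chat"


# A StrEnum member compares and formats like the string it wraps...
assert ModeStr.CHAT == "chat"
assert f"{ModeStr.CHAT}" == "chat"

# ...whereas a plain Enum member needs an explicit .value to get the string out.
assert ModePlain.CHAT != "chat"
assert ModePlain.CHAT.value == "chat"
```
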
View File

@ -30,11 +30,6 @@ from core.model_runtime.model_providers.__base.ai_model import AIModel
logger = logging.getLogger(__name__)
HTML_THINKING_TAG = (
'<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> '
"<summary> Thinking... </summary>"
)
class LargeLanguageModel(AIModel):
"""
@ -405,40 +400,6 @@ if you are not sure about the structure.
),
)
def _wrap_thinking_by_reasoning_content(self, delta: dict, is_reasoning: bool) -> tuple[str, bool]:
"""
If the reasoning response is from delta.get("reasoning_content"), we wrap
it with HTML details tag.
:param delta: delta dictionary from LLM streaming response
:param is_reasoning: is reasoning
:return: tuple of (processed_content, is_reasoning)
"""
content = delta.get("content") or ""
reasoning_content = delta.get("reasoning_content")
if reasoning_content:
if not is_reasoning:
content = HTML_THINKING_TAG + reasoning_content
is_reasoning = True
else:
content = reasoning_content
elif is_reasoning:
content = "</details>" + content
is_reasoning = False
return content, is_reasoning
def _wrap_thinking_by_tag(self, content: str) -> str:
"""
if the reasoning response is a <think>...</think> block from delta.get("content"),
we replace <think> to <detail>.
:param content: delta.get("content")
:return: processed_content
"""
return content.replace("<think>", HTML_THINKING_TAG).replace("</think>", "</details>")
def _invoke_result_generator(
self,
model: str,
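
Note: the two helpers removed above wrap streamed reasoning output in an HTML `<details>` block. The tag-based variant is easy to show in isolation; this sketch simply replays the removed string replacement outside the model class:

```python
HTML_THINKING_TAG = (
    '<details style="color:gray;background-color: #f8f8f8;padding: 8px;border-radius: 4px;" open> '
    "<summary> Thinking... </summary>"
)


def wrap_thinking_by_tag(content: str) -> str:
    """Rewrite a <think>...</think> span (as emitted by some reasoning models) into an open <details> block."""
    return content.replace("<think>", HTML_THINKING_TAG).replace("</think>", "</details>")


print(wrap_thinking_by_tag("<think>check the edge cases first</think>The answer is 42."))
# The reasoning ends up inside a collapsible, pre-opened details element; the answer stays outside it.
```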

View File

@ -1,5 +1,4 @@
- openai
- deepseek
- anthropic
- azure_openai
- google
@ -33,6 +32,7 @@
- localai
- volcengine_maas
- openai_api_compatible
- deepseek
- hunyuan
- siliconflow
- perfxcloud

View File

@ -51,40 +51,6 @@ model_credential_schema:
show_on:
- variable: __model_type
value: llm
- variable: mode
show_on:
- variable: __model_type
value: llm
label:
en_US: Completion mode
type: select
required: false
default: chat
placeholder:
zh_Hans: 选择对话类型
en_US: Select completion mode
options:
- value: completion
label:
en_US: Completion
zh_Hans: 补全
- value: chat
label:
en_US: Chat
zh_Hans: 对话
- variable: context_size
label:
zh_Hans: 模型上下文长度
en_US: Model context size
required: true
show_on:
- variable: __model_type
value: llm
type: text-input
default: "4096"
placeholder:
zh_Hans: 在此输入您的模型上下文长度
en_US: Enter your Model context size
- variable: jwt_token
required: true
label:

View File

@ -1,9 +1,9 @@
import logging
from collections.abc import Generator, Sequence
from collections.abc import Generator
from typing import Any, Optional, Union
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import StreamingChatCompletionsUpdate, SystemMessage, UserMessage
from azure.ai.inference.models import StreamingChatCompletionsUpdate
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import (
ClientAuthenticationError,
@ -20,7 +20,7 @@ from azure.core.exceptions import (
)
from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessage,
@ -30,7 +30,6 @@ from core.model_runtime.entities.model_entities import (
AIModelEntity,
FetchFrom,
I18nObject,
ModelPropertyKey,
ModelType,
ParameterRule,
ParameterType,
@ -61,10 +60,10 @@ class AzureAIStudioLargeLanguageModel(LargeLanguageModel):
self,
model: str,
credentials: dict,
prompt_messages: Sequence[PromptMessage],
prompt_messages: list[PromptMessage],
model_parameters: dict,
tools: Optional[Sequence[PromptMessageTool]] = None,
stop: Optional[Sequence[str]] = None,
tools: Optional[list[PromptMessageTool]] = None,
stop: Optional[list[str]] = None,
stream: bool = True,
user: Optional[str] = None,
) -> Union[LLMResult, Generator]:
@ -83,8 +82,8 @@ class AzureAIStudioLargeLanguageModel(LargeLanguageModel):
"""
if not self.client:
endpoint = str(credentials.get("endpoint"))
api_key = str(credentials.get("api_key"))
endpoint = credentials.get("endpoint")
api_key = credentials.get("api_key")
self.client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
messages = [{"role": msg.role.value, "content": msg.content} for msg in prompt_messages]
@ -95,7 +94,6 @@ class AzureAIStudioLargeLanguageModel(LargeLanguageModel):
"temperature": model_parameters.get("temperature", 0),
"top_p": model_parameters.get("top_p", 1),
"stream": stream,
"model": model,
}
if stop:
@ -257,16 +255,10 @@ class AzureAIStudioLargeLanguageModel(LargeLanguageModel):
:return:
"""
try:
endpoint = str(credentials.get("endpoint"))
api_key = str(credentials.get("api_key"))
endpoint = credentials.get("endpoint")
api_key = credentials.get("api_key")
client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(api_key))
client.complete(
messages=[
SystemMessage(content="I say 'ping', you say 'pong'"),
UserMessage(content="ping"),
],
model=model,
)
client.get_model_info()
except Exception as ex:
raise CredentialsValidateFailedError(str(ex))
@ -335,10 +327,7 @@ class AzureAIStudioLargeLanguageModel(LargeLanguageModel):
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_type=ModelType.LLM,
features=[],
model_properties={
ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "4096")),
ModelPropertyKey.MODE: credentials.get("mode", LLMMode.CHAT),
},
model_properties={},
parameter_rules=rules,
)

View File

@ -138,18 +138,6 @@ model_credential_schema:
show_on:
- variable: __model_type
value: llm
- label:
en_US: o3-mini
value: o3-mini
show_on:
- variable: __model_type
value: llm
- label:
en_US: o3-mini-2025-01-31
value: o3-mini-2025-01-31
show_on:
- variable: __model_type
value: llm
- label:
en_US: o1-preview
value: o1-preview

View File

@ -123,15 +123,6 @@ provider_credential_schema:
en_US: AWS GovCloud (US-West)
zh_Hans: AWS GovCloud (US-West)
ja_JP: AWS GovCloud (米国西部)
- variable: bedrock_endpoint_url
label:
zh_Hans: Bedrock Endpoint URL
en_US: Bedrock Endpoint URL
type: text-input
required: false
placeholder:
zh_Hans: 在此输入您的 Bedrock Endpoint URL, 如https://123456.cloudfront.net
en_US: Enter your Bedrock Endpoint URL, e.g. https://123456.cloudfront.net
- variable: model_for_validation
required: false
label:

View File

@ -13,7 +13,6 @@ def get_bedrock_client(service_name: str, credentials: Mapping[str, str]):
client_config = Config(region_name=region_name)
aws_access_key_id = credentials.get("aws_access_key_id")
aws_secret_access_key = credentials.get("aws_secret_access_key")
bedrock_endpoint_url = credentials.get("bedrock_endpoint_url")
if aws_access_key_id and aws_secret_access_key:
# use aksk to call bedrock
@ -22,7 +21,6 @@ def get_bedrock_client(service_name: str, credentials: Mapping[str, str]):
config=client_config,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
**({"endpoint_url": bedrock_endpoint_url} if bedrock_endpoint_url else {}),
)
else:
# use iam without aksk to call
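
Note: the removed `endpoint_url` handling above relies on conditional keyword expansion — `**({...} if value else {})` passes the argument only when it is set. The same idiom in isolation, with a stand-in function instead of `boto3.client`:

```python
def make_client(service_name: str, **kwargs) -> dict:
    # Stand-in for a real client factory such as boto3.client(); it just records its arguments.
    return {"service_name": service_name, **kwargs}


bedrock_endpoint_url = ""  # e.g. "https://123456.cloudfront.net" when a private endpoint is configured

client = make_client(
    "bedrock-runtime",
    **({"endpoint_url": bedrock_endpoint_url} if bedrock_endpoint_url else {}),
)
assert "endpoint_url" not in client  # nothing was forwarded because the URL is empty
```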

View File

@ -1,10 +1,13 @@
import json
from collections.abc import Generator
from typing import Optional, Union
import requests
from yarl import URL
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessage,
PromptMessageTool,
)
@ -36,3 +39,208 @@ class DeepseekLargeLanguageModel(OAIAPICompatLargeLanguageModel):
credentials["mode"] = LLMMode.CHAT.value
credentials["function_calling_type"] = "tool_call"
credentials["stream_function_calling"] = "support"
def _handle_generate_stream_response(
self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage]
) -> Generator:
"""
Handle llm stream response
:param model: model name
:param credentials: model credentials
:param response: streamed response
:param prompt_messages: prompt messages
:return: llm response chunk generator
"""
full_assistant_content = ""
chunk_index = 0
is_reasoning_started = False # Add flag to track reasoning state
def create_final_llm_result_chunk(
id: Optional[str], index: int, message: AssistantPromptMessage, finish_reason: str, usage: dict
) -> LLMResultChunk:
# calculate num tokens
prompt_tokens = usage and usage.get("prompt_tokens")
if prompt_tokens is None:
prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content)
completion_tokens = usage and usage.get("completion_tokens")
if completion_tokens is None:
completion_tokens = self._num_tokens_from_string(model, full_assistant_content)
# transform usage
usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
return LLMResultChunk(
id=id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage),
)
# delimiter for stream response, need unicode_escape
import codecs
delimiter = credentials.get("stream_mode_delimiter", "\n\n")
delimiter = codecs.decode(delimiter, "unicode_escape")
tools_calls: list[AssistantPromptMessage.ToolCall] = []
def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
def get_tool_call(tool_call_id: str):
if not tool_call_id:
return tools_calls[-1]
tool_call = next((tool_call for tool_call in tools_calls if tool_call.id == tool_call_id), None)
if tool_call is None:
tool_call = AssistantPromptMessage.ToolCall(
id=tool_call_id,
type="function",
function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="", arguments=""),
)
tools_calls.append(tool_call)
return tool_call
for new_tool_call in new_tool_calls:
# get tool call
tool_call = get_tool_call(new_tool_call.function.name)
# update tool call
if new_tool_call.id:
tool_call.id = new_tool_call.id
if new_tool_call.type:
tool_call.type = new_tool_call.type
if new_tool_call.function.name:
tool_call.function.name = new_tool_call.function.name
if new_tool_call.function.arguments:
tool_call.function.arguments += new_tool_call.function.arguments
finish_reason = None # The default value of finish_reason is None
message_id, usage = None, None
for chunk in response.iter_lines(decode_unicode=True, delimiter=delimiter):
chunk = chunk.strip()
if chunk:
# ignore sse comments
if chunk.startswith(":"):
continue
decoded_chunk = chunk.strip().removeprefix("data:").lstrip()
if decoded_chunk == "[DONE]": # Some provider returns "data: [DONE]"
continue
try:
chunk_json: dict = json.loads(decoded_chunk)
# stream ended
except json.JSONDecodeError as e:
yield create_final_llm_result_chunk(
id=message_id,
index=chunk_index + 1,
message=AssistantPromptMessage(content=""),
finish_reason="Non-JSON encountered.",
usage=usage,
)
break
# handle the error here. for issue #11629
if chunk_json.get("error") and chunk_json.get("choices") is None:
raise ValueError(chunk_json.get("error"))
if chunk_json:
if u := chunk_json.get("usage"):
usage = u
if not chunk_json or len(chunk_json["choices"]) == 0:
continue
choice = chunk_json["choices"][0]
finish_reason = chunk_json["choices"][0].get("finish_reason")
message_id = chunk_json.get("id")
chunk_index += 1
if "delta" in choice:
delta = choice["delta"]
is_reasoning = delta.get("reasoning_content")
delta_content = delta.get("content") or delta.get("reasoning_content")
assistant_message_tool_calls = None
if "tool_calls" in delta and credentials.get("function_calling_type", "no_call") == "tool_call":
assistant_message_tool_calls = delta.get("tool_calls", None)
elif (
"function_call" in delta
and credentials.get("function_calling_type", "no_call") == "function_call"
):
assistant_message_tool_calls = [
{"id": "tool_call_id", "type": "function", "function": delta.get("function_call", {})}
]
# assistant_message_function_call = delta.delta.function_call
# extract tool calls from response
if assistant_message_tool_calls:
tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
increase_tool_call(tool_calls)
if delta_content is None or delta_content == "":
continue
# Add markdown quote markers for reasoning content
if is_reasoning:
if not is_reasoning_started:
delta_content = "> 💭 " + delta_content
is_reasoning_started = True
elif "\n\n" in delta_content:
delta_content = delta_content.replace("\n\n", "\n> ")
elif "\n" in delta_content:
delta_content = delta_content.replace("\n", "\n> ")
elif is_reasoning_started:
# If we were in reasoning mode but now getting regular content,
# add \n\n to close the reasoning block
delta_content = "\n\n" + delta_content
is_reasoning_started = False
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
content=delta_content,
)
# reset tool calls
tool_calls = []
full_assistant_content += delta_content
elif "text" in choice:
choice_text = choice.get("text", "")
if choice_text == "":
continue
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(content=choice_text)
full_assistant_content += choice_text
else:
continue
yield LLMResultChunk(
id=message_id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=chunk_index,
message=assistant_prompt_message,
),
)
chunk_index += 1
if tools_calls:
yield LLMResultChunk(
id=message_id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=chunk_index,
message=AssistantPromptMessage(tool_calls=tools_calls, content=""),
),
)
yield create_final_llm_result_chunk(
id=message_id,
index=chunk_index,
message=AssistantPromptMessage(content=""),
finish_reason=finish_reason,
usage=usage,
)
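
Note: the reasoning branch added above prefixes streamed `reasoning_content` deltas with markdown block quotes ("> 💭 ") instead of the removed HTML `<details>` wrapper. A self-contained re-implementation of just that quoting step, driven by a plain list of delta dicts instead of an SSE stream:

```python
def quote_reasoning(deltas: list[dict]) -> str:
    """Apply the markdown-quote logic from the hunk above to a list of streamed deltas."""
    out = []
    is_reasoning_started = False
    for delta in deltas:
        is_reasoning = bool(delta.get("reasoning_content"))
        content = delta.get("content") or delta.get("reasoning_content") or ""
        if not content:
            continue
        if is_reasoning:
            if not is_reasoning_started:
                content = "> 💭 " + content  # open the quote block on the first reasoning delta
                is_reasoning_started = True
            elif "\n\n" in content:
                content = content.replace("\n\n", "\n> ")
            elif "\n" in content:
                content = content.replace("\n", "\n> ")
        elif is_reasoning_started:
            content = "\n\n" + content  # close the quote block when normal content resumes
            is_reasoning_started = False
        out.append(content)
    return "".join(out)


print(quote_reasoning([
    {"reasoning_content": "Let me compare the options.\n"},
    {"reasoning_content": "Option B looks cheaper."},
    {"content": "Go with option B."},
]))
```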

View File

@ -1,6 +1,4 @@
- gemini-2.0-flash-001
- gemini-2.0-flash-exp
- gemini-2.0-pro-exp-02-05
- gemini-2.0-flash-thinking-exp-1219
- gemini-2.0-flash-thinking-exp-01-21
- gemini-1.5-pro

View File

@ -1,41 +0,0 @@
model: gemini-2.0-flash-001
label:
en_US: Gemini 2.0 Flash 001
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 1048576
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,41 +0,0 @@
model: gemini-2.0-pro-exp-02-05
label:
en_US: Gemini 2.0 pro exp 02-05
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 1048576
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,4 +1,3 @@
- deepseek-r1-distill-llama-70b
- llama-3.1-405b-reasoning
- llama-3.3-70b-versatile
- llama-3.1-70b-versatile

View File

@ -1,36 +0,0 @@
model: deepseek-r1-distill-llama-70b
label:
en_US: DeepSeek R1 Distill Llama 70b
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 8192
- name: response_format
label:
zh_Hans: 回复格式
en_US: Response Format
type: string
help:
zh_Hans: 指定模型必须输出的格式
en_US: specifying the format that the model must output
required: false
options:
- text
- json_object
pricing:
input: '3.00'
output: '3.00'
unit: '0.000001'
currency: USD

View File

@ -1,4 +1,3 @@
- deepseek-ai/deepseek-r1
- google/gemma-7b
- google/codegemma-7b
- google/recurrentgemma-2b

View File

@ -1,35 +0,0 @@
model: deepseek-ai/deepseek-r1
label:
en_US: deepseek-ai/deepseek-r1
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
min: 0
max: 1
default: 0.5
- name: top_p
use_template: top_p
min: 0
max: 1
default: 1
- name: max_tokens
use_template: max_tokens
min: 1
max: 1024
default: 1024
- name: frequency_penalty
use_template: frequency_penalty
min: -2
max: 2
default: 0
- name: presence_penalty
use_template: presence_penalty
min: -2
max: 2
default: 0

View File

@ -83,7 +83,7 @@ class NVIDIALargeLanguageModel(OAIAPICompatLargeLanguageModel):
def _add_custom_parameters(self, credentials: dict, model: str) -> None:
credentials["mode"] = "chat"
if self.MODEL_SUFFIX_MAP.get(model):
if self.MODEL_SUFFIX_MAP[model]:
credentials["server_url"] = f"https://ai.api.nvidia.com/v1/{self.MODEL_SUFFIX_MAP[model]}"
credentials.pop("endpoint_url")
else:

View File

@ -1,52 +0,0 @@
model: cohere.command-r-08-2024
label:
en_US: cohere.command-r-08-2024 v1.7
model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
default: 1
max: 1.0
- name: topP
use_template: top_p
default: 0.75
min: 0
max: 1
- name: topK
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
default: 0
min: 0
max: 500
- name: presencePenalty
use_template: presence_penalty
min: 0
max: 1
default: 0
- name: frequencyPenalty
use_template: frequency_penalty
min: 0
max: 1
default: 0
- name: maxTokens
use_template: max_tokens
default: 600
max: 4000
pricing:
input: '0.0009'
output: '0.0009'
unit: '0.0001'
currency: USD

View File

@ -50,4 +50,3 @@ pricing:
output: '0.004'
unit: '0.0001'
currency: USD
deprecated: true

View File

@ -1,52 +0,0 @@
model: cohere.command-r-plus-08-2024
label:
en_US: cohere.command-r-plus-08-2024 v1.6
model_type: llm
features:
- multi-tool-call
- agent-thought
- stream-tool-call
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
default: 1
max: 1.0
- name: topP
use_template: top_p
default: 0.75
min: 0
max: 1
- name: topK
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
default: 0
min: 0
max: 500
- name: presencePenalty
use_template: presence_penalty
min: 0
max: 1
default: 0
- name: frequencyPenalty
use_template: frequency_penalty
min: 0
max: 1
default: 0
- name: maxTokens
use_template: max_tokens
default: 600
max: 4000
pricing:
input: '0.0156'
output: '0.0156'
unit: '0.0001'
currency: USD

View File

@ -50,4 +50,3 @@ pricing:
output: '0.0219'
unit: '0.0001'
currency: USD
deprecated: true

View File

@ -33,7 +33,7 @@ logger = logging.getLogger(__name__)
request_template = {
"compartmentId": "",
"servingMode": {"modelId": "cohere.command-r-plus-08-2024", "servingType": "ON_DEMAND"},
"servingMode": {"modelId": "cohere.command-r-plus", "servingType": "ON_DEMAND"},
"chatRequest": {
"apiFormat": "COHERE",
# "preambleOverride": "You are a helpful assistant.",
@ -60,19 +60,19 @@ oci_config_template = {
class OCILargeLanguageModel(LargeLanguageModel):
# https://docs.oracle.com/en-us/iaas/Content/generative-ai/pretrained-models.htm
_supported_models = {
"meta.llama-3.1-70b-instruct": {
"meta.llama-3-70b-instruct": {
"system": True,
"multimodal": False,
"tool_call": False,
"stream_tool_call": False,
},
"cohere.command-r-08-2024": {
"cohere.command-r-16k": {
"system": True,
"multimodal": False,
"tool_call": True,
"stream_tool_call": False,
},
"cohere.command-r-plus-08-2024": {
"cohere.command-r-plus": {
"system": True,
"multimodal": False,
"tool_call": True,

View File

@ -49,4 +49,3 @@ pricing:
output: '0.015'
unit: '0.0001'
currency: USD
deprecated: true

View File

@ -1,51 +0,0 @@
model: meta.llama-3.1-70b-instruct
label:
zh_Hans: meta.llama-3.1-70b-instruct
en_US: meta.llama-3.1-70b-instruct
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
default: 1
max: 2.0
- name: topP
use_template: top_p
default: 0.75
min: 0
max: 1
- name: topK
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
default: 0
min: 0
max: 500
- name: presencePenalty
use_template: presence_penalty
min: -2
max: 2
default: 0
- name: frequencyPenalty
use_template: frequency_penalty
min: -2
max: 2
default: 0
- name: maxTokens
use_template: max_tokens
default: 600
max: 4000
pricing:
input: '0.0075'
output: '0.0075'
unit: '0.0001'
currency: USD

View File

@ -19,8 +19,8 @@ class OCIGENAIProvider(ModelProvider):
try:
model_instance = self.get_model_instance(ModelType.LLM)
# Use `cohere.command-r-plus-08-2024` model for validate,
model_instance.validate_credentials(model="cohere.command-r-plus-08-2024", credentials=credentials)
# Use `cohere.command-r-plus` model for validate,
model_instance.validate_credentials(model="cohere.command-r-plus", credentials=credentials)
except CredentialsValidateFailedError as ex:
raise ex
except Exception as ex:

View File

@ -367,7 +367,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
# transform assistant message to prompt message
text = chunk_json["response"]
text = self._wrap_thinking_by_tag(text)
assistant_prompt_message = AssistantPromptMessage(content=text)

View File

@ -2,8 +2,6 @@
- o1-2024-12-17
- o1-mini
- o1-mini-2024-09-12
- o3-mini
- o3-mini-2025-01-31
- gpt-4
- gpt-4o
- gpt-4o-2024-05-13

View File

@ -619,9 +619,9 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
# clear illegal prompt messages
prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)
# o1, o3 compatibility
# o1 compatibility
block_as_stream = False
if model.startswith(("o1", "o3")):
if model.startswith("o1"):
if "max_tokens" in model_parameters:
model_parameters["max_completion_tokens"] = model_parameters["max_tokens"]
del model_parameters["max_tokens"]
@ -941,7 +941,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
]
)
if model.startswith(("o1", "o3")):
if model.startswith("o1"):
system_message_count = len([m for m in prompt_messages if isinstance(m, SystemPromptMessage)])
if system_message_count > 0:
new_prompt_messages = []
@ -1053,7 +1053,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
model = model.split(":")[1]
# Currently, we can use gpt4o to calculate chatgpt-4o-latest's token.
if model == "chatgpt-4o-latest" or model.startswith(("o1", "o3")):
if model == "chatgpt-4o-latest" or model.startswith("o1"):
model = "gpt-4o"
try:
@ -1068,7 +1068,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
tokens_per_message = 4
# if there's a name, the role is omitted
tokens_per_name = -1
elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith(("o1", "o3")):
elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4") or model.startswith("o1"):
tokens_per_message = 3
tokens_per_name = 1
else:
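
Note: the hunks above toggle the prefix check between `("o1", "o3")` and `"o1"`; in either form, the guarded rewrite is the same — reasoning models take `max_completion_tokens` instead of `max_tokens`. A small sketch of that rename, independent of Dify's classes and matching the `"o1"`-only side of the diff:

```python
def adapt_reasoning_params(model: str, model_parameters: dict) -> dict:
    """Rename max_tokens -> max_completion_tokens for reasoning-model prefixes (here: o1 only)."""
    params = dict(model_parameters)
    if model.startswith("o1") and "max_tokens" in params:
        params["max_completion_tokens"] = params.pop("max_tokens")
    return params


assert adapt_reasoning_params("o1-mini", {"max_tokens": 1024}) == {"max_completion_tokens": 1024}
assert adapt_reasoning_params("gpt-4o", {"max_tokens": 1024}) == {"max_tokens": 1024}
```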

View File

@ -16,19 +16,6 @@ parameter_rules:
default: 50000
min: 1
max: 50000
- name: reasoning_effort
label:
zh_Hans: 推理工作
en_US: reasoning_effort
type: string
help:
zh_Hans: 限制推理模型的推理工作
en_US: constrains effort on reasoning for reasoning models
required: false
options:
- low
- medium
- high
- name: response_format
label:
zh_Hans: 回复格式

View File

@ -17,19 +17,6 @@ parameter_rules:
default: 50000
min: 1
max: 50000
- name: reasoning_effort
label:
zh_Hans: 推理工作
en_US: reasoning_effort
type: string
help:
zh_Hans: 限制推理模型的推理工作
en_US: constrains effort on reasoning for reasoning models
required: false
options:
- low
- medium
- high
- name: response_format
label:
zh_Hans: 回复格式

View File

@ -1,46 +0,0 @@
model: o3-mini-2025-01-31
label:
zh_Hans: o3-mini-2025-01-31
en_US: o3-mini-2025-01-31
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: max_tokens
use_template: max_tokens
default: 100000
min: 1
max: 100000
- name: reasoning_effort
label:
zh_Hans: 推理工作
en_US: reasoning_effort
type: string
help:
zh_Hans: 限制推理模型的推理工作
en_US: constrains effort on reasoning for reasoning models
required: false
options:
- low
- medium
- high
- name: response_format
label:
zh_Hans: 回复格式
en_US: response_format
type: string
help:
zh_Hans: 指定模型必须输出的格式
en_US: specifying the format that the model must output
required: false
options:
- text
- json_object
pricing:
input: '1.10'
output: '4.40'
unit: '0.000001'
currency: USD

View File

@ -1,46 +0,0 @@
model: o3-mini
label:
zh_Hans: o3-mini
en_US: o3-mini
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: max_tokens
use_template: max_tokens
default: 100000
min: 1
max: 100000
- name: reasoning_effort
label:
zh_Hans: 推理工作
en_US: reasoning_effort
type: string
help:
zh_Hans: 限制推理模型的推理工作
en_US: constrains effort on reasoning for reasoning models
required: false
options:
- low
- medium
- high
- name: response_format
label:
zh_Hans: 回复格式
en_US: response_format
type: string
help:
zh_Hans: 指定模型必须输出的格式
en_US: specifying the format that the model must output
required: false
options:
- text
- json_object
pricing:
input: '1.10'
output: '4.40'
unit: '0.000001'
currency: USD

View File

@ -1,5 +1,5 @@
import codecs
import json
import logging
from collections.abc import Generator
from decimal import Decimal
from typing import Optional, Union, cast
@ -38,6 +38,8 @@ from core.model_runtime.model_providers.__base.large_language_model import Large
from core.model_runtime.model_providers.openai_api_compatible._common import _CommonOaiApiCompat
from core.model_runtime.utils import helper
logger = logging.getLogger(__name__)
class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
"""
@ -97,7 +99,7 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
:param tools: tools for tool calling
:return:
"""
return self._num_tokens_from_messages(prompt_messages, tools, credentials)
return self._num_tokens_from_messages(model, prompt_messages, tools, credentials)
def validate_credentials(self, model: str, credentials: dict) -> None:
"""
@ -396,73 +398,6 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
return self._handle_generate_response(model, credentials, response, prompt_messages)
def _create_final_llm_result_chunk(
self,
index: int,
message: AssistantPromptMessage,
finish_reason: str,
usage: dict,
model: str,
prompt_messages: list[PromptMessage],
credentials: dict,
full_content: str,
) -> LLMResultChunk:
# calculate num tokens
prompt_tokens = usage and usage.get("prompt_tokens")
if prompt_tokens is None:
prompt_tokens = self._num_tokens_from_string(text=prompt_messages[0].content)
completion_tokens = usage and usage.get("completion_tokens")
if completion_tokens is None:
completion_tokens = self._num_tokens_from_string(text=full_content)
# transform usage
usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
return LLMResultChunk(
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage),
)
def _get_tool_call(self, tool_call_id: str, tools_calls: list[AssistantPromptMessage.ToolCall]):
"""
Get or create a tool call by ID
:param tool_call_id: tool call ID
:param tools_calls: list of existing tool calls
:return: existing or new tool call, updated tools_calls
"""
if not tool_call_id:
return tools_calls[-1], tools_calls
tool_call = next((tool_call for tool_call in tools_calls if tool_call.id == tool_call_id), None)
if tool_call is None:
tool_call = AssistantPromptMessage.ToolCall(
id=tool_call_id,
type="function",
function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="", arguments=""),
)
tools_calls.append(tool_call)
return tool_call, tools_calls
def _increase_tool_call(
self, new_tool_calls: list[AssistantPromptMessage.ToolCall], tools_calls: list[AssistantPromptMessage.ToolCall]
) -> list[AssistantPromptMessage.ToolCall]:
for new_tool_call in new_tool_calls:
# get tool call
tool_call, tools_calls = self._get_tool_call(new_tool_call.function.name, tools_calls)
# update tool call
if new_tool_call.id:
tool_call.id = new_tool_call.id
if new_tool_call.type:
tool_call.type = new_tool_call.type
if new_tool_call.function.name:
tool_call.function.name = new_tool_call.function.name
if new_tool_call.function.arguments:
tool_call.function.arguments += new_tool_call.function.arguments
return tools_calls
def _handle_generate_stream_response(
self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage]
) -> Generator:
@ -475,15 +410,69 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
:param prompt_messages: prompt messages
:return: llm response chunk generator
"""
chunk_index = 0
full_assistant_content = ""
tools_calls: list[AssistantPromptMessage.ToolCall] = []
finish_reason = None
usage = None
is_reasoning_started = False
chunk_index = 0
def create_final_llm_result_chunk(
id: Optional[str], index: int, message: AssistantPromptMessage, finish_reason: str, usage: dict
) -> LLMResultChunk:
# calculate num tokens
prompt_tokens = usage and usage.get("prompt_tokens")
if prompt_tokens is None:
prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content)
completion_tokens = usage and usage.get("completion_tokens")
if completion_tokens is None:
completion_tokens = self._num_tokens_from_string(model, full_assistant_content)
# transform usage
usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
return LLMResultChunk(
id=id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage),
)
# delimiter for stream response, need unicode_escape
import codecs
delimiter = credentials.get("stream_mode_delimiter", "\n\n")
delimiter = codecs.decode(delimiter, "unicode_escape")
tools_calls: list[AssistantPromptMessage.ToolCall] = []
def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
def get_tool_call(tool_call_id: str):
if not tool_call_id:
return tools_calls[-1]
tool_call = next((tool_call for tool_call in tools_calls if tool_call.id == tool_call_id), None)
if tool_call is None:
tool_call = AssistantPromptMessage.ToolCall(
id=tool_call_id,
type="function",
function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="", arguments=""),
)
tools_calls.append(tool_call)
return tool_call
for new_tool_call in new_tool_calls:
# get tool call
tool_call = get_tool_call(new_tool_call.function.name)
# update tool call
if new_tool_call.id:
tool_call.id = new_tool_call.id
if new_tool_call.type:
tool_call.type = new_tool_call.type
if new_tool_call.function.name:
tool_call.function.name = new_tool_call.function.name
if new_tool_call.function.arguments:
tool_call.function.arguments += new_tool_call.function.arguments
finish_reason = None # The default value of finish_reason is None
message_id, usage = None, None
for chunk in response.iter_lines(decode_unicode=True, delimiter=delimiter):
chunk = chunk.strip()
if chunk:
@ -498,15 +487,12 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
chunk_json: dict = json.loads(decoded_chunk)
# stream ended
except json.JSONDecodeError as e:
yield self._create_final_llm_result_chunk(
yield create_final_llm_result_chunk(
id=message_id,
index=chunk_index + 1,
message=AssistantPromptMessage(content=""),
finish_reason="Non-JSON encountered.",
usage=usage,
model=model,
credentials=credentials,
prompt_messages=prompt_messages,
full_content=full_assistant_content,
)
break
# handle the error here. for issue #11629
@ -521,14 +507,12 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
choice = chunk_json["choices"][0]
finish_reason = chunk_json["choices"][0].get("finish_reason")
message_id = chunk_json.get("id")
chunk_index += 1
if "delta" in choice:
delta = choice["delta"]
delta_content, is_reasoning_started = self._wrap_thinking_by_reasoning_content(
delta, is_reasoning_started
)
delta_content = self._wrap_thinking_by_tag(delta_content)
delta_content = delta.get("content")
assistant_message_tool_calls = None
@ -542,10 +526,12 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
{"id": "tool_call_id", "type": "function", "function": delta.get("function_call", {})}
]
# assistant_message_function_call = delta.delta.function_call
# extract tool calls from response
if assistant_message_tool_calls:
tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
tools_calls = self._increase_tool_call(tool_calls, tools_calls)
increase_tool_call(tool_calls)
if delta_content is None or delta_content == "":
continue
@ -570,6 +556,7 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
continue
yield LLMResultChunk(
id=message_id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
@ -582,6 +569,7 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
if tools_calls:
yield LLMResultChunk(
id=message_id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
@ -590,15 +578,12 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
),
)
yield self._create_final_llm_result_chunk(
yield create_final_llm_result_chunk(
id=message_id,
index=chunk_index,
message=AssistantPromptMessage(content=""),
finish_reason=finish_reason,
usage=usage,
model=model,
credentials=credentials,
prompt_messages=prompt_messages,
full_content=full_assistant_content,
)
def _handle_generate_response(
@ -712,11 +697,12 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
return message_dict
def _num_tokens_from_string(
self, text: Union[str, list[PromptMessageContent]], tools: Optional[list[PromptMessageTool]] = None
self, model: str, text: Union[str, list[PromptMessageContent]], tools: Optional[list[PromptMessageTool]] = None
) -> int:
"""
Approximate num tokens for model with gpt2 tokenizer.
:param model: model name
:param text: prompt text
:param tools: tools for tool calling
:return: number of tokens
@ -739,6 +725,7 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
def _num_tokens_from_messages(
self,
model: str,
messages: list[PromptMessage],
tools: Optional[list[PromptMessageTool]] = None,
credentials: Optional[dict] = None,
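
Note: several hunks above move the tool-call accumulation between private methods and nested closures, but in either form the algorithm is the same — partial tool-call fragments are merged into complete calls, with the `arguments` string growing by concatenation as pieces stream in. A dict-based sketch of that merge step, keeping the same name-keyed lookup as the code above:

```python
def merge_tool_call_deltas(tool_calls: list[dict], new_deltas: list[dict]) -> None:
    """Merge streamed tool-call fragments, mirroring the get_tool_call()/increase_tool_call() helpers above."""
    def get_tool_call(key: str) -> dict:
        if not key:
            return tool_calls[-1]  # fragments without a key attach to the most recent call
        found = next((tc for tc in tool_calls if tc["id"] == key), None)
        if found is None:
            found = {"id": key, "type": "function", "function": {"name": "", "arguments": ""}}
            tool_calls.append(found)
        return found

    for delta in new_deltas:
        tc = get_tool_call(delta["function"]["name"])
        if delta.get("id"):
            tc["id"] = delta["id"]
        if delta.get("type"):
            tc["type"] = delta["type"]
        if delta["function"].get("name"):
            tc["function"]["name"] = delta["function"]["name"]
        if delta["function"].get("arguments"):
            tc["function"]["arguments"] += delta["function"]["arguments"]


calls: list[dict] = []
merge_tool_call_deltas(calls, [{"id": "call_1", "type": "function",
                                "function": {"name": "get_weather", "arguments": '{"city": '}}])
merge_tool_call_deltas(calls, [{"function": {"name": "", "arguments": '"Paris"}'}}])
assert calls[0]["function"]["arguments"] == '{"city": "Paris"}'
```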

View File

@ -1,7 +1,5 @@
- openai/o1-preview
- openai/o1-mini
- openai/o3-mini
- openai/o3-mini-2025-01-31
- openai/gpt-4o
- openai/gpt-4o-mini
- openai/gpt-4
@ -30,6 +28,5 @@
- mistralai/mistral-7b-instruct
- qwen/qwen-2.5-72b-instruct
- qwen/qwen-2-72b-instruct
- deepseek/deepseek-r1
- deepseek/deepseek-chat
- deepseek/deepseek-coder

View File

@ -53,7 +53,7 @@ parameter_rules:
zh_Hans: 介于 -2.0 和 2.0 之间的数字。如果该值为正,那么新 token 会根据其在已有文本中的出现频率受到相应的惩罚,降低模型重复相同内容的可能性。
en_US: A number between -2.0 and 2.0. If the value is positive, new tokens are penalized based on their frequency of occurrence in existing text, reducing the likelihood that the model will repeat the same content.
pricing:
input: "0.49"
output: "0.89"
input: "0.14"
output: "0.28"
unit: "0.000001"
currency: USD

View File

@ -1,59 +0,0 @@
model: deepseek/deepseek-r1
label:
en_US: deepseek-r1
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 163840
parameter_rules:
- name: temperature
use_template: temperature
type: float
default: 1
min: 0.0
max: 2.0
help:
zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。
en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is.
- name: max_tokens
use_template: max_tokens
type: int
default: 4096
min: 1
max: 4096
help:
zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
- name: top_p
use_template: top_p
type: float
default: 1
min: 0.01
max: 1.00
help:
zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。
en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature.
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: frequency_penalty
use_template: frequency_penalty
default: 0
min: -2.0
max: 2.0
help:
zh_Hans: 介于 -2.0 和 2.0 之间的数字。如果该值为正,那么新 token 会根据其在已有文本中的出现频率受到相应的惩罚,降低模型重复相同内容的可能性。
en_US: A number between -2.0 and 2.0. If the value is positive, new tokens are penalized based on their frequency of occurrence in existing text, reducing the likelihood that the model will repeat the same content.
pricing:
input: "3"
output: "8"
unit: "0.000001"
currency: USD

View File

@ -1,49 +0,0 @@
model: openai/o3-mini-2025-01-31
label:
en_US: o3-mini-2025-01-31
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: presence_penalty
use_template: presence_penalty
- name: frequency_penalty
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 100000
- name: response_format
label:
zh_Hans: 回复格式
en_US: response_format
type: string
help:
zh_Hans: 指定模型必须输出的格式
en_US: specifying the format that the model must output
required: false
options:
- text
- json_object
pricing:
input: "1.10"
output: "4.40"
unit: "0.000001"
currency: USD

View File

@ -1,49 +0,0 @@
model: openai/o3-mini
label:
en_US: o3-mini
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: presence_penalty
use_template: presence_penalty
- name: frequency_penalty
use_template: frequency_penalty
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 100000
- name: response_format
label:
zh_Hans: 回复格式
en_US: response_format
type: string
help:
zh_Hans: 指定模型必须输出的格式
en_US: specifying the format that the model must output
required: false
options:
- text
- json_object
pricing:
input: "1.10"
output: "4.40"
unit: "0.000001"
currency: USD

View File

@ -12,11 +12,7 @@
- Pro/Qwen/Qwen2-VL-7B-Instruct
- OpenGVLab/InternVL2-26B
- Pro/OpenGVLab/InternVL2-8B
- deepseek-ai/DeepSeek-R1
- deepseek-ai/DeepSeek-V2-Chat
- deepseek-ai/DeepSeek-V2.5
- deepseek-ai/DeepSeek-V3
- deepseek-ai/DeepSeek-Coder-V2-Instruct
- THUDM/glm-4-9b-chat
- 01-ai/Yi-1.5-34B-Chat-16K
- 01-ai/Yi-1.5-9B-Chat-16K
@ -29,4 +25,3 @@
- meta-llama/Meta-Llama-3.1-8B-Instruct
- google/gemma-2-27b-it
- google/gemma-2-9b-it
- Tencent/Hunyuan-A52B-Instruct

View File

@ -1,21 +0,0 @@
model: deepseek-ai/DeepSeek-R1
label:
zh_Hans: deepseek-ai/DeepSeek-R1
en_US: deepseek-ai/DeepSeek-R1
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 64000
parameter_rules:
- name: max_tokens
use_template: max_tokens
min: 1
max: 8192
default: 4096
pricing:
input: "4"
output: "16"
unit: "0.000001"
currency: RMB

View File

@ -1,53 +0,0 @@
model: deepseek-ai/DeepSeek-V3
label:
en_US: deepseek-ai/DeepSeek-V3
model_type: llm
features:
- agent-thought
- tool-call
- stream-tool-call
model_properties:
mode: chat
context_size: 64000
parameter_rules:
- name: temperature
use_template: temperature
- name: max_tokens
use_template: max_tokens
type: int
default: 512
min: 1
max: 4096
help:
zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: frequency_penalty
use_template: frequency_penalty
- name: response_format
label:
zh_Hans: 回复格式
en_US: Response Format
type: string
help:
zh_Hans: 指定模型必须输出的格式
en_US: specifying the format that the model must output
required: false
options:
- text
- json_object
pricing:
input: "1"
output: "2"
unit: "0.000001"
currency: RMB

View File

@ -1,9 +1,13 @@
import json
from collections.abc import Generator
from typing import Optional, Union
import requests
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
PromptMessage,
PromptMessageTool,
)
@ -92,3 +96,208 @@ class SiliconflowLargeLanguageModel(OAIAPICompatLargeLanguageModel):
),
],
)
def _handle_generate_stream_response(
self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage]
) -> Generator:
"""
Handle llm stream response
:param model: model name
:param credentials: model credentials
:param response: streamed response
:param prompt_messages: prompt messages
:return: llm response chunk generator
"""
full_assistant_content = ""
chunk_index = 0
is_reasoning_started = False # Add flag to track reasoning state
def create_final_llm_result_chunk(
id: Optional[str], index: int, message: AssistantPromptMessage, finish_reason: str, usage: dict
) -> LLMResultChunk:
# calculate num tokens
prompt_tokens = usage and usage.get("prompt_tokens")
if prompt_tokens is None:
prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content)
completion_tokens = usage and usage.get("completion_tokens")
if completion_tokens is None:
completion_tokens = self._num_tokens_from_string(model, full_assistant_content)
# transform usage
usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
return LLMResultChunk(
id=id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage),
)
# delimiter for stream response, need unicode_escape
import codecs
delimiter = credentials.get("stream_mode_delimiter", "\n\n")
delimiter = codecs.decode(delimiter, "unicode_escape")
tools_calls: list[AssistantPromptMessage.ToolCall] = []
def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
def get_tool_call(tool_call_id: str):
if not tool_call_id:
return tools_calls[-1]
tool_call = next((tool_call for tool_call in tools_calls if tool_call.id == tool_call_id), None)
if tool_call is None:
tool_call = AssistantPromptMessage.ToolCall(
id=tool_call_id,
type="function",
function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="", arguments=""),
)
tools_calls.append(tool_call)
return tool_call
for new_tool_call in new_tool_calls:
# get tool call
tool_call = get_tool_call(new_tool_call.function.name)
# update tool call
if new_tool_call.id:
tool_call.id = new_tool_call.id
if new_tool_call.type:
tool_call.type = new_tool_call.type
if new_tool_call.function.name:
tool_call.function.name = new_tool_call.function.name
if new_tool_call.function.arguments:
tool_call.function.arguments += new_tool_call.function.arguments
finish_reason = None # The default value of finish_reason is None
message_id, usage = None, None
for chunk in response.iter_lines(decode_unicode=True, delimiter=delimiter):
chunk = chunk.strip()
if chunk:
# ignore sse comments
if chunk.startswith(":"):
continue
decoded_chunk = chunk.strip().removeprefix("data:").lstrip()
if decoded_chunk == "[DONE]": # Some provider returns "data: [DONE]"
continue
try:
chunk_json: dict = json.loads(decoded_chunk)
# stream ended
except json.JSONDecodeError as e:
yield create_final_llm_result_chunk(
id=message_id,
index=chunk_index + 1,
message=AssistantPromptMessage(content=""),
finish_reason="Non-JSON encountered.",
usage=usage,
)
break
# handle the error here. for issue #11629
if chunk_json.get("error") and chunk_json.get("choices") is None:
raise ValueError(chunk_json.get("error"))
if chunk_json:
if u := chunk_json.get("usage"):
usage = u
if not chunk_json or len(chunk_json["choices"]) == 0:
continue
choice = chunk_json["choices"][0]
finish_reason = chunk_json["choices"][0].get("finish_reason")
message_id = chunk_json.get("id")
chunk_index += 1
if "delta" in choice:
delta = choice["delta"]
delta_content = delta.get("content")
assistant_message_tool_calls = None
if "tool_calls" in delta and credentials.get("function_calling_type", "no_call") == "tool_call":
assistant_message_tool_calls = delta.get("tool_calls", None)
elif (
"function_call" in delta
and credentials.get("function_calling_type", "no_call") == "function_call"
):
assistant_message_tool_calls = [
{"id": "tool_call_id", "type": "function", "function": delta.get("function_call", {})}
]
# assistant_message_function_call = delta.delta.function_call
# extract tool calls from response
if assistant_message_tool_calls:
tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
increase_tool_call(tool_calls)
if delta_content is None or delta_content == "":
continue
# Check for think tags
if "<think>" in delta_content:
is_reasoning_started = True
# Remove <think> tag and add markdown quote
delta_content = "> 💭 " + delta_content.replace("<think>", "")
elif "</think>" in delta_content:
# Remove </think> tag and add newlines to end quote block
delta_content = delta_content.replace("</think>", "") + "\n\n"
is_reasoning_started = False
elif is_reasoning_started:
# Add quote markers for content within thinking block
if "\n\n" in delta_content:
delta_content = delta_content.replace("\n\n", "\n> ")
elif "\n" in delta_content:
delta_content = delta_content.replace("\n", "\n> ")
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
content=delta_content,
)
# reset tool calls
tool_calls = []
full_assistant_content += delta_content
elif "text" in choice:
choice_text = choice.get("text", "")
if choice_text == "":
continue
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(content=choice_text)
full_assistant_content += choice_text
else:
continue
yield LLMResultChunk(
id=message_id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=chunk_index,
message=assistant_prompt_message,
),
)
chunk_index += 1
if tools_calls:
yield LLMResultChunk(
id=message_id,
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=chunk_index,
message=AssistantPromptMessage(tool_calls=tools_calls, content=""),
),
)
yield create_final_llm_result_chunk(
id=message_id,
index=chunk_index,
message=AssistantPromptMessage(content=""),
finish_reason=finish_reason,
usage=usage,
)
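Side note on the hunk above: the new handler folds DeepSeek-style <think>…</think> segments into a Markdown quote while streaming. Below is a minimal sketch of just that transformation; the helper name and example chunks are illustrative and not part of the change itself.
def wrap_reasoning_chunk(delta_content: str, is_reasoning_started: bool) -> tuple[str, bool]:
    # Mirrors the branch logic above: open the quote on <think>, close it on </think>,
    # and keep prefixing quoted lines while inside the reasoning block.
    if "<think>" in delta_content:
        return "> 💭 " + delta_content.replace("<think>", ""), True
    if "</think>" in delta_content:
        return delta_content.replace("</think>", "") + "\n\n", False
    if is_reasoning_started:
        if "\n\n" in delta_content:
            return delta_content.replace("\n\n", "\n> "), True
        if "\n" in delta_content:
            return delta_content.replace("\n", "\n> "), True
    return delta_content, is_reasoning_started
# Example: three streamed chunks become a quoted "thinking" block followed by the answer.
state = False
parts = []
for piece in ["<think>draft the plan", "</think>", "final answer"]:
    text, state = wrap_reasoning_chunk(piece, state)
    parts.append(text)
print("".join(parts))  # "> 💭 draft the plan\n\nfinal answer"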

View File

@ -1,41 +0,0 @@
model: gemini-2.0-flash-001
label:
en_US: Gemini 2.0 Flash 001
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 1048576
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,41 +0,0 @@
model: gemini-2.0-flash-lite-preview-02-05
label:
en_US: Gemini 2.0 Flash Lite Preview 0205
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 1048576
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,39 +0,0 @@
model: gemini-2.0-flash-thinking-exp-01-21
label:
en_US: Gemini 2.0 Flash Thinking Exp 0121
model_type: llm
features:
- agent-thought
- vision
- document
- video
- audio
model_properties:
mode: chat
context_size: 32767
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,39 +0,0 @@
model: gemini-2.0-flash-thinking-exp-1219
label:
en_US: Gemini 2.0 Flash Thinking Exp 1219
model_type: llm
features:
- agent-thought
- vision
- document
- video
- audio
model_properties:
mode: chat
context_size: 32767
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,37 +0,0 @@
model: gemini-2.0-pro-exp-02-05
label:
en_US: Gemini 2.0 Pro Exp 0205
model_type: llm
features:
- agent-thought
- document
model_properties:
mode: chat
context_size: 2000000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
en_US: Top k
type: int
help:
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: presence_penalty
use_template: presence_penalty
- name: frequency_penalty
use_template: frequency_penalty
- name: max_output_tokens
use_template: max_tokens
required: true
default: 8192
min: 1
max: 8192
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,41 +0,0 @@
model: gemini-exp-1114
label:
en_US: Gemini exp 1114
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 32767
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,41 +0,0 @@
model: gemini-exp-1121
label:
en_US: Gemini exp 1121
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 32767
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,41 +0,0 @@
model: gemini-exp-1206
label:
en_US: Gemini exp 1206
model_type: llm
features:
- agent-thought
- vision
- tool-call
- stream-tool-call
- document
- video
- audio
model_properties:
mode: chat
context_size: 2097152
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_output_tokens
use_template: max_tokens
default: 8192
min: 1
max: 8192
- name: json_schema
use_template: json_schema
pricing:
input: '0.00'
output: '0.00'
unit: '0.000001'
currency: USD

View File

@ -1,5 +1,4 @@
import logging
import re
from collections.abc import Generator
from typing import Optional
@ -248,34 +247,15 @@ class VolcengineMaaSLargeLanguageModel(LargeLanguageModel):
req_params["tools"] = tools
def _handle_stream_chat_response(chunks: Generator[ChatCompletionChunk]) -> Generator:
is_reasoning_started = False
for chunk in chunks:
content = ""
if chunk.choices:
delta = chunk.choices[0].delta
if is_reasoning_started and not hasattr(delta, "reasoning_content") and not delta.content:
content = ""
elif hasattr(delta, "reasoning_content"):
if not is_reasoning_started:
is_reasoning_started = True
content = "> 💭 " + delta.reasoning_content
else:
content = delta.reasoning_content
if "\n" in content:
content = re.sub(r"\n(?!(>|\n))", "\n> ", content)
elif is_reasoning_started:
content = "\n\n" + delta.content
is_reasoning_started = False
else:
content = delta.content
yield LLMResultChunk(
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=0,
message=AssistantPromptMessage(content=content, tool_calls=[]),
message=AssistantPromptMessage(
content=chunk.choices[0].delta.content if chunk.choices else "", tool_calls=[]
),
usage=self._calc_response_usage(
model=model,
credentials=credentials,

View File

@ -18,22 +18,6 @@ class ModelConfig(BaseModel):
configs: dict[str, ModelConfig] = {
"DeepSeek-R1-Distill-Qwen-32B": ModelConfig(
properties=ModelProperties(context_size=64000, max_tokens=8192, mode=LLMMode.CHAT),
features=[ModelFeature.AGENT_THOUGHT],
),
"DeepSeek-R1-Distill-Qwen-7B": ModelConfig(
properties=ModelProperties(context_size=64000, max_tokens=8192, mode=LLMMode.CHAT),
features=[ModelFeature.AGENT_THOUGHT],
),
"DeepSeek-R1": ModelConfig(
properties=ModelProperties(context_size=64000, max_tokens=8192, mode=LLMMode.CHAT),
features=[ModelFeature.AGENT_THOUGHT],
),
"DeepSeek-V3": ModelConfig(
properties=ModelProperties(context_size=64000, max_tokens=8192, mode=LLMMode.CHAT),
features=[ModelFeature.AGENT_THOUGHT, ModelFeature.TOOL_CALL, ModelFeature.STREAM_TOOL_CALL],
),
"Doubao-1.5-vision-pro-32k": ModelConfig(
properties=ModelProperties(context_size=32768, max_tokens=12288, mode=LLMMode.CHAT),
features=[ModelFeature.AGENT_THOUGHT, ModelFeature.VISION],

View File

@ -118,30 +118,6 @@ model_credential_schema:
type: select
required: true
options:
- label:
en_US: DeepSeek-R1-Distill-Qwen-32B
value: DeepSeek-R1-Distill-Qwen-32B
show_on:
- variable: __model_type
value: llm
- label:
en_US: DeepSeek-R1-Distill-Qwen-7B
value: DeepSeek-R1-Distill-Qwen-7B
show_on:
- variable: __model_type
value: llm
- label:
en_US: DeepSeek-R1
value: DeepSeek-R1
show_on:
- variable: __model_type
value: llm
- label:
en_US: DeepSeek-V3
value: DeepSeek-V3
show_on:
- variable: __model_type
value: llm
- label:
en_US: Doubao-1.5-vision-pro-32k
value: Doubao-1.5-vision-pro-32k

View File

@ -635,13 +635,16 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
handle stream chat generate response
"""
full_response = ""
for chunk in resp:
if len(chunk.choices) == 0:
continue
delta = chunk.choices[0]
if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ""):
continue
delta_content = delta.delta.content or ""
# check if there is a tool call in the response
function_call = None
tool_calls = []
@ -654,10 +657,9 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
if function_call:
assistant_message_tool_calls += [self._extract_response_function_call(function_call)]
delta_content = self._wrap_thinking_by_tag(delta_content)
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
content=delta_content or "", tool_calls=assistant_message_tool_calls
content=delta.delta.content or "", tool_calls=assistant_message_tool_calls
)
if delta.finish_reason is not None:
@ -695,7 +697,7 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
),
)
full_response += delta_content
full_response += delta.delta.content
def _handle_completion_generate_response(
self,

View File

@ -0,0 +1,114 @@
"""
Configuration classes for AWS Bedrock retrieve and generate API
"""
from dataclasses import dataclass
from typing import Any, Literal, Optional
@dataclass
class TextInferenceConfig:
"""Text inference configuration"""
maxTokens: Optional[int] = None
stopSequences: Optional[list[str]] = None
temperature: Optional[float] = None
topP: Optional[float] = None
@dataclass
class PerformanceConfig:
"""Performance configuration"""
latency: Literal["standard", "optimized"]
@dataclass
class PromptTemplate:
"""Prompt template configuration"""
textPromptTemplate: str
@dataclass
class GuardrailConfig:
"""Guardrail configuration"""
guardrailId: str
guardrailVersion: str
@dataclass
class GenerationConfig:
"""Generation configuration"""
additionalModelRequestFields: Optional[dict[str, Any]] = None
guardrailConfiguration: Optional[GuardrailConfig] = None
inferenceConfig: Optional[dict[str, TextInferenceConfig]] = None
performanceConfig: Optional[PerformanceConfig] = None
promptTemplate: Optional[PromptTemplate] = None
@dataclass
class VectorSearchConfig:
"""Vector search configuration"""
filter: Optional[dict[str, Any]] = None
numberOfResults: Optional[int] = None
overrideSearchType: Optional[Literal["HYBRID", "SEMANTIC"]] = None
@dataclass
class RetrievalConfig:
"""Retrieval configuration"""
vectorSearchConfiguration: VectorSearchConfig
@dataclass
class OrchestrationConfig:
"""Orchestration configuration"""
additionalModelRequestFields: Optional[dict[str, Any]] = None
inferenceConfig: Optional[dict[str, TextInferenceConfig]] = None
performanceConfig: Optional[PerformanceConfig] = None
promptTemplate: Optional[PromptTemplate] = None
@dataclass
class KnowledgeBaseConfig:
"""Knowledge base configuration"""
generationConfiguration: GenerationConfig
knowledgeBaseId: str
modelArn: str
orchestrationConfiguration: Optional[OrchestrationConfig] = None
retrievalConfiguration: Optional[RetrievalConfig] = None
@dataclass
class SessionConfig:
"""Session configuration"""
kmsKeyArn: Optional[str] = None
sessionId: Optional[str] = None
@dataclass
class RetrieveAndGenerateConfiguration:
"""Retrieve and generate configuration
The use of knowledgeBaseConfiguration or externalSourcesConfiguration depends on the type value
"""
type: str = "KNOWLEDGE_BASE"
knowledgeBaseConfiguration: Optional[KnowledgeBaseConfig] = None
@dataclass
class RetrieveAndGenerateConfig:
"""Retrieve and generate main configuration"""
input: dict[str, str]
retrieveAndGenerateConfiguration: RetrieveAndGenerateConfiguration
sessionConfiguration: Optional[SessionConfig] = None
sessionId: Optional[str] = None
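These dataclasses describe the nested shape of a Bedrock retrieveAndGenerate request; the tool code later in this diff still assembles plain dicts, so they appear to serve as a typed reference. A hedged sketch of composing them follows; the query, knowledge base ID and model ARN are placeholders.
from dataclasses import asdict
config = RetrieveAndGenerateConfig(
    input={"text": "What is our refund policy?"},
    retrieveAndGenerateConfiguration=RetrieveAndGenerateConfiguration(
        type="KNOWLEDGE_BASE",
        knowledgeBaseConfiguration=KnowledgeBaseConfig(
            generationConfiguration=GenerationConfig(
                inferenceConfig={"textInferenceConfig": TextInferenceConfig(maxTokens=2048, temperature=0.7, topP=0.95)},
                performanceConfig=PerformanceConfig(latency="standard"),
            ),
            knowledgeBaseId="kb-placeholder-id",
            modelArn="arn:aws:bedrock:us-east-1::foundation-model/placeholder-model",
            retrievalConfiguration=RetrievalConfig(
                vectorSearchConfiguration=VectorSearchConfig(numberOfResults=5, overrideSearchType="SEMANTIC")
            ),
        ),
    ),
)
request = asdict(config)  # nested dicts roughly matching the API payload; None-valued
# optional fields would still need pruning before handing this to boto3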

View File

@ -77,27 +77,15 @@ class BedrockRetrieveTool(BuiltinTool):
"""
invoke tools
"""
line = 0
try:
line = 0
# Initialize Bedrock client if not already initialized
if not self.bedrock_client:
aws_region = tool_parameters.get("aws_region")
aws_access_key_id = tool_parameters.get("aws_access_key_id")
aws_secret_access_key = tool_parameters.get("aws_secret_access_key")
if aws_region:
self.bedrock_client = boto3.client("bedrock-agent-runtime", region_name=aws_region)
else:
self.bedrock_client = boto3.client("bedrock-agent-runtime")
client_kwargs = {"service_name": "bedrock-agent-runtime", "region_name": aws_region or None}
# Only add credentials if both access key and secret key are provided
if aws_access_key_id and aws_secret_access_key:
client_kwargs.update(
{"aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key}
)
self.bedrock_client = boto3.client(**client_kwargs)
except Exception as e:
return self.create_text_message(f"Failed to initialize Bedrock client: {str(e)}")
try:
line = 1
if not self.knowledge_base_id:
self.knowledge_base_id = tool_parameters.get("knowledge_base_id")
@ -135,14 +123,7 @@ class BedrockRetrieveTool(BuiltinTool):
sorted_docs = sorted(retrieved_docs, key=operator.itemgetter("score"), reverse=True)
line = 6
result_type = tool_parameters.get("result_type")
if result_type == "json":
return [self.create_json_message(res) for res in sorted_docs]
else:
text = ""
for i, res in enumerate(sorted_docs):
text += f"{i + 1}: {res['content']}\n"
return self.create_text_message(text)
return [self.create_json_message(res) for res in sorted_docs]
except Exception as e:
return self.create_text_message(f"Exception {str(e)}, line : {line}")
@ -157,6 +138,7 @@ class BedrockRetrieveTool(BuiltinTool):
if not parameters.get("query"):
raise ValueError("query is required")
# Optional: validate that the metadata filter, if provided, is a valid JSON string
metadata_filter_str = parameters.get("metadata_filter")
if metadata_filter_str and not isinstance(json.loads(metadata_filter_str), dict):
raise ValueError("metadata_filter must be a valid JSON object")

View File

@ -15,60 +15,6 @@ description:
llm: A tool for retrieving relevant information from Amazon Bedrock Knowledge Base. You can find deploy instructions on Github Repo - https://github.com/aws-samples/dify-aws-tool
parameters:
- name: aws_region
type: string
required: false
label:
en_US: AWS Region
zh_Hans: AWS区域
human_description:
en_US: AWS region for the Bedrock service
zh_Hans: Bedrock服务的AWS区域
form: form
- name: aws_access_key_id
type: string
required: false
label:
en_US: AWS Access Key ID
zh_Hans: AWS访问密钥ID
human_description:
en_US: AWS access key ID for authentication (optional)
zh_Hans: 用于身份验证的AWS访问密钥ID(可选)
form: form
- name: aws_secret_access_key
type: string
required: false
label:
en_US: AWS Secret Access Key
zh_Hans: AWS秘密访问密钥
human_description:
en_US: AWS secret access key for authentication (optional)
zh_Hans: 用于身份验证的AWS秘密访问密钥(可选)
form: form
- name: result_type
type: select
required: true
label:
en_US: result type
zh_Hans: 结果类型
human_description:
en_US: return a list of json or texts
zh_Hans: 返回一个列表,内容是json还是纯文本
default: text
options:
- value: json
label:
en_US: JSON
zh_Hans: JSON
- value: text
label:
en_US: Text
zh_Hans: 文本
form: form
- name: knowledge_base_id
type: string
required: true
@ -149,7 +95,6 @@ parameters:
zh_Hans: 重排模型ID
pt_BR: rerank model id
llm_description: rerank model id
default: default
options:
- value: default
label:
@ -165,6 +110,20 @@ parameters:
zh_Hans: amazon.rerank-v1:0
form: form
- name: aws_region
type: string
required: false
label:
en_US: AWS Region
zh_Hans: AWS 区域
pt_BR: AWS Region
human_description:
en_US: AWS region where the Bedrock Knowledge Base is located
zh_Hans: Bedrock知识库所在的AWS区域
pt_BR: AWS region where the Bedrock Knowledge Base is located
llm_description: AWS region where the Bedrock Knowledge Base is located
form: form
- name: metadata_filter # Additional parameter for metadata filtering
type: string # String type, expects JSON-formatted filter conditions
required: false # Optional field - can be omitted

View File

@ -1,5 +1,5 @@
import json
from typing import Any
from typing import Any, Optional
import boto3
@ -10,63 +10,193 @@ from core.tools.tool.builtin_tool import BuiltinTool
class BedrockRetrieveAndGenerateTool(BuiltinTool):
bedrock_client: Any = None
def _invoke(
def _create_text_inference_config(
self,
user_id: str,
tool_parameters: dict[str, Any],
) -> ToolInvokeMessage:
max_tokens: Optional[int] = None,
stop_sequences: Optional[str] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
) -> Optional[dict]:
"""Create text inference configuration"""
if any([max_tokens, stop_sequences, temperature, top_p]):
config = {}
if max_tokens is not None:
config["maxTokens"] = max_tokens
if stop_sequences:
try:
config["stopSequences"] = json.loads(stop_sequences)
except json.JSONDecodeError:
config["stopSequences"] = []
if temperature is not None:
config["temperature"] = temperature
if top_p is not None:
config["topP"] = top_p
return config
return None
def _create_guardrail_config(
self,
guardrail_id: Optional[str] = None,
guardrail_version: Optional[str] = None,
) -> Optional[dict]:
"""Create guardrail configuration"""
if guardrail_id and guardrail_version:
return {"guardrailId": guardrail_id, "guardrailVersion": guardrail_version}
return None
def _create_generation_config(
self,
additional_model_fields: Optional[str] = None,
guardrail_config: Optional[dict] = None,
text_inference_config: Optional[dict] = None,
performance_mode: Optional[str] = None,
prompt_template: Optional[str] = None,
) -> dict:
"""Create generation configuration"""
config = {}
if additional_model_fields:
try:
config["additionalModelRequestFields"] = json.loads(additional_model_fields)
except json.JSONDecodeError:
pass
if guardrail_config:
config["guardrailConfiguration"] = guardrail_config
if text_inference_config:
config["inferenceConfig"] = {"textInferenceConfig": text_inference_config}
if performance_mode:
config["performanceConfig"] = {"latency": performance_mode}
if prompt_template:
config["promptTemplate"] = {"textPromptTemplate": prompt_template}
return config
def _create_orchestration_config(
self,
orchestration_additional_model_fields: Optional[str] = None,
orchestration_text_inference_config: Optional[dict] = None,
orchestration_performance_mode: Optional[str] = None,
orchestration_prompt_template: Optional[str] = None,
) -> dict:
"""Create orchestration configuration"""
config = {}
if orchestration_additional_model_fields:
try:
config["additionalModelRequestFields"] = json.loads(orchestration_additional_model_fields)
except json.JSONDecodeError:
pass
if orchestration_text_inference_config:
config["inferenceConfig"] = {"textInferenceConfig": orchestration_text_inference_config}
if orchestration_performance_mode:
config["performanceConfig"] = {"latency": orchestration_performance_mode}
if orchestration_prompt_template:
config["promptTemplate"] = {"textPromptTemplate": orchestration_prompt_template}
return config
def _create_vector_search_config(
self,
number_of_results: int = 5,
search_type: str = "SEMANTIC",
metadata_filter: Optional[dict] = None,
) -> dict:
"""Create vector search configuration"""
config = {
"numberOfResults": number_of_results,
"overrideSearchType": search_type,
}
# Only add filter if metadata_filter is not empty
if metadata_filter:
config["filter"] = metadata_filter
return config
def _bedrock_retrieve_and_generate(
self,
query: str,
knowledge_base_id: str,
model_arn: str,
# Generation Configuration
additional_model_fields: Optional[str] = None,
guardrail_id: Optional[str] = None,
guardrail_version: Optional[str] = None,
max_tokens: Optional[int] = None,
stop_sequences: Optional[str] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
performance_mode: str = "standard",
prompt_template: Optional[str] = None,
# Orchestration Configuration
orchestration_additional_model_fields: Optional[str] = None,
orchestration_max_tokens: Optional[int] = None,
orchestration_stop_sequences: Optional[str] = None,
orchestration_temperature: Optional[float] = None,
orchestration_top_p: Optional[float] = None,
orchestration_performance_mode: Optional[str] = None,
orchestration_prompt_template: Optional[str] = None,
# Retrieval Configuration
number_of_results: int = 5,
search_type: str = "SEMANTIC",
metadata_filter: Optional[dict] = None,
# Additional Configuration
session_id: Optional[str] = None,
) -> dict[str, Any]:
try:
# Initialize Bedrock client if not already initialized
if not self.bedrock_client:
aws_region = tool_parameters.get("aws_region")
aws_access_key_id = tool_parameters.get("aws_access_key_id")
aws_secret_access_key = tool_parameters.get("aws_secret_access_key")
# Create text inference configurations
text_inference_config = self._create_text_inference_config(max_tokens, stop_sequences, temperature, top_p)
orchestration_text_inference_config = self._create_text_inference_config(
orchestration_max_tokens, orchestration_stop_sequences, orchestration_temperature, orchestration_top_p
)
client_kwargs = {"service_name": "bedrock-agent-runtime", "region_name": aws_region or None}
# Create guardrail configuration
guardrail_config = self._create_guardrail_config(guardrail_id, guardrail_version)
# Only add credentials if both access key and secret key are provided
if aws_access_key_id and aws_secret_access_key:
client_kwargs.update(
{"aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key}
)
# Create vector search configuration
vector_search_config = self._create_vector_search_config(number_of_results, search_type, metadata_filter)
self.bedrock_client = boto3.client(**client_kwargs)
except Exception as e:
return self.create_text_message(f"Failed to initialize Bedrock client: {str(e)}")
# Create generation configuration
generation_config = self._create_generation_config(
additional_model_fields, guardrail_config, text_inference_config, performance_mode, prompt_template
)
try:
request_config = {}
# Create orchestration configuration
orchestration_config = self._create_orchestration_config(
orchestration_additional_model_fields,
orchestration_text_inference_config,
orchestration_performance_mode,
orchestration_prompt_template,
)
# Set input configuration
input_text = tool_parameters.get("input")
if input_text:
request_config["input"] = {"text": input_text}
# Create knowledge base configuration
knowledge_base_config = {
"knowledgeBaseId": knowledge_base_id,
"modelArn": model_arn,
"generationConfiguration": generation_config,
"orchestrationConfiguration": orchestration_config,
"retrievalConfiguration": {"vectorSearchConfiguration": vector_search_config},
}
# Build retrieve and generate configuration
config_type = tool_parameters.get("type")
retrieve_generate_config = {"type": config_type}
# Create request configuration
request_config = {
"input": {"text": query},
"retrieveAndGenerateConfiguration": {
"type": "KNOWLEDGE_BASE",
"knowledgeBaseConfiguration": knowledge_base_config,
},
}
# Add configuration based on type
if config_type == "KNOWLEDGE_BASE":
kb_config_str = tool_parameters.get("knowledge_base_configuration")
kb_config = json.loads(kb_config_str) if kb_config_str else None
retrieve_generate_config["knowledgeBaseConfiguration"] = kb_config
else: # EXTERNAL_SOURCES
es_config_str = tool_parameters.get("external_sources_configuration")
es_config = json.loads(es_config_str) if es_config_str else None
retrieve_generate_config["externalSourcesConfiguration"] = es_config
request_config["retrieveAndGenerateConfiguration"] = retrieve_generate_config
# Parse session configuration
session_config_str = tool_parameters.get("session_configuration")
session_config = json.loads(session_config_str) if session_config_str else None
if session_config:
request_config["sessionConfiguration"] = session_config
# Add session ID if provided
session_id = tool_parameters.get("session_id")
if session_id:
# Add session configuration if provided
if session_id and len(session_id) >= 2:
request_config["sessionConfiguration"] = {"sessionId": session_id}
request_config["sessionId"] = session_id
# Send request
@ -96,42 +226,99 @@ class BedrockRetrieveAndGenerateTool(BuiltinTool):
citation_info["references"].append(reference)
result["citations"].append(citation_info)
result_type = tool_parameters.get("result_type")
if result_type == "json":
return self.create_json_message(result)
elif result_type == "text-with-citations":
return self.create_text_message(result)
else:
return self.create_text_message(result.get("output"))
except json.JSONDecodeError as e:
return self.create_text_message(f"Invalid JSON format: {str(e)}")
return result
except Exception as e:
return self.create_text_message(f"Tool invocation error: {str(e)}")
raise Exception(f"Error calling Bedrock service: {str(e)}")
def _invoke(
self,
user_id: str,
tool_parameters: dict[str, Any],
) -> ToolInvokeMessage:
try:
# Initialize Bedrock client if not already initialized
if not self.bedrock_client:
aws_region = tool_parameters.get("aws_region")
aws_access_key_id = tool_parameters.get("aws_access_key_id")
aws_secret_access_key = tool_parameters.get("aws_secret_access_key")
client_kwargs = {
"service_name": "bedrock-agent-runtime",
}
if aws_region:
client_kwargs["region_name"] = aws_region
# Only add credentials if both access key and secret key are provided
if aws_access_key_id and aws_secret_access_key:
client_kwargs.update(
{"aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key}
)
try:
self.bedrock_client = boto3.client(**client_kwargs)
except Exception as e:
return self.create_text_message(f"Failed to initialize Bedrock client: {str(e)}")
# Parse metadata filter if provided
metadata_filter = None
if metadata_filter_str := tool_parameters.get("metadata_filter"):
try:
parsed_filter = json.loads(metadata_filter_str)
if parsed_filter: # Only set if not empty
metadata_filter = parsed_filter
except json.JSONDecodeError:
return self.create_text_message("metadata_filter must be a valid JSON string")
try:
response = self._bedrock_retrieve_and_generate(
query=tool_parameters["query"],
knowledge_base_id=tool_parameters["knowledge_base_id"],
model_arn=tool_parameters["model_arn"],
# Generation Configuration
additional_model_fields=tool_parameters.get("additional_model_fields"),
guardrail_id=tool_parameters.get("guardrail_id"),
guardrail_version=tool_parameters.get("guardrail_version"),
max_tokens=tool_parameters.get("max_tokens"),
stop_sequences=tool_parameters.get("stop_sequences"),
temperature=tool_parameters.get("temperature"),
top_p=tool_parameters.get("top_p"),
performance_mode=tool_parameters.get("performance_mode", "standard"),
prompt_template=tool_parameters.get("prompt_template"),
# Orchestration Configuration
orchestration_additional_model_fields=tool_parameters.get("orchestration_additional_model_fields"),
orchestration_max_tokens=tool_parameters.get("orchestration_max_tokens"),
orchestration_stop_sequences=tool_parameters.get("orchestration_stop_sequences"),
orchestration_temperature=tool_parameters.get("orchestration_temperature"),
orchestration_top_p=tool_parameters.get("orchestration_top_p"),
orchestration_performance_mode=tool_parameters.get("orchestration_performance_mode"),
orchestration_prompt_template=tool_parameters.get("orchestration_prompt_template"),
# Retrieval Configuration
number_of_results=tool_parameters.get("number_of_results", 5),
search_type=tool_parameters.get("search_type", "SEMANTIC"),
metadata_filter=metadata_filter,
# Additional Configuration
session_id=tool_parameters.get("session_id"),
)
return self.create_json_message(response)
except Exception as e:
return self.create_text_message(f"Tool invocation error: {str(e)}")
except Exception as e:
return self.create_text_message(f"Tool execution error: {str(e)}")
def validate_parameters(self, parameters: dict[str, Any]) -> None:
"""Validate the parameters"""
# Validate required parameters
if not parameters.get("input"):
raise ValueError("input is required")
if not parameters.get("type"):
raise ValueError("type is required")
required_params = ["query", "model_arn", "knowledge_base_id"]
for param in required_params:
if not parameters.get(param):
raise ValueError(f"{param} is required")
# Validate JSON configurations
json_configs = ["knowledge_base_configuration", "external_sources_configuration", "session_configuration"]
for config in json_configs:
if config_value := parameters.get(config):
try:
json.loads(config_value)
except json.JSONDecodeError:
raise ValueError(f"{config} must be a valid JSON string")
# Validate configuration type
config_type = parameters.get("type")
if config_type not in ["KNOWLEDGE_BASE", "EXTERNAL_SOURCES"]:
raise ValueError("type must be either KNOWLEDGE_BASE or EXTERNAL_SOURCES")
# Validate type-specific configuration
if config_type == "KNOWLEDGE_BASE" and not parameters.get("knowledge_base_configuration"):
raise ValueError("knowledge_base_configuration is required when type is KNOWLEDGE_BASE")
elif config_type == "EXTERNAL_SOURCES" and not parameters.get("external_sources_configuration"):
raise ValueError("external_sources_configuration is required when type is EXTERNAL_SOURCES")
# Validate metadata filter if provided
if metadata_filter_str := parameters.get("metadata_filter"):
try:
if not isinstance(json.loads(metadata_filter_str), dict):
raise ValueError("metadata_filter must be a valid JSON object")
except json.JSONDecodeError:
raise ValueError("metadata_filter must be a valid JSON string")

View File

@ -8,11 +8,24 @@ identity:
description:
human:
en_US: "This is an advanced usage of Bedrock Retrieve. Please refer to the API documentation for detailed parameters and paste them into the corresponding Knowledge Base Configuration or External Sources Configuration"
zh_Hans: "这个工具为Bedrock Retrieve的高级用法请参考API设置详细的参数并粘贴到对应的知识库配置或者外部源配置"
en_US: A tool for retrieving and generating information using Amazon Bedrock Knowledge Base
zh_Hans: 使用Amazon Bedrock知识库进行信息检索和生成的工具
llm: A tool for retrieving and generating information using Amazon Bedrock Knowledge Base
parameters:
# Additional Configuration
- name: session_id
type: string
required: false
label:
en_US: Session ID
zh_Hans: 会话ID
human_description:
en_US: Optional session ID for continuous conversations
zh_Hans: 用于连续对话的可选会话ID
form: form
# AWS Configuration
- name: aws_region
type: string
required: false
@ -46,103 +59,300 @@ parameters:
zh_Hans: 用于身份验证的AWS秘密访问密钥(可选)
form: form
- name: result_type
type: select
required: true
label:
en_US: result type
zh_Hans: 结果类型
human_description:
en_US: return a list of json or texts
zh_Hans: 返回一个列表,内容是json还是纯文本
default: text
options:
- value: json
label:
en_US: JSON
zh_Hans: JSON
- value: text
label:
en_US: Text
zh_Hans: 文本
- value: text-with-citations
label:
en_US: Text With Citations
zh_Hans: 文本(包含引用)
form: form
- name: input
# Knowledge Base Configuration
- name: knowledge_base_id
type: string
required: true
label:
en_US: Input Text
zh_Hans: 输入文本
en_US: Knowledge Base ID
zh_Hans: 知识库ID
human_description:
en_US: The text query to retrieve information
zh_Hans: 用于检索信息的文本查询
en_US: ID of the Bedrock Knowledge Base
zh_Hans: Bedrock知识库的ID
form: form
- name: model_arn
type: string
required: true
label:
en_US: Model ARN
zh_Hans: 模型ARN
human_description:
en_US: The ARN of the model to use
zh_Hans: 要使用的模型ARN
form: form
# Retrieval Configuration
- name: query
type: string
required: true
label:
en_US: Query
zh_Hans: 查询
human_description:
en_US: The search query to retrieve information
zh_Hans: 用于检索信息的查询语句
form: llm
- name: type
- name: number_of_results
type: number
required: false
label:
en_US: Number of Results
zh_Hans: 结果数量
human_description:
en_US: Number of results to retrieve (1-10)
zh_Hans: 要检索的结果数量(1-10)
default: 5
min: 1
max: 10
form: form
- name: search_type
type: select
required: true
required: false
label:
en_US: Configuration Type
zh_Hans: 配置类型
en_US: Search Type
zh_Hans: 搜索类型
human_description:
en_US: Type of retrieve and generate configuration
zh_Hans: 检索和生成配置的类型
en_US: Type of search to perform
zh_Hans: 要执行的搜索类型
default: SEMANTIC
options:
- value: KNOWLEDGE_BASE
- value: SEMANTIC
label:
en_US: Knowledge Base
zh_Hans: 知识库
- value: EXTERNAL_SOURCES
en_US: Semantic Search
zh_Hans: 语义搜索
- value: HYBRID
label:
en_US: External Sources
zh_Hans: 外部源
en_US: Hybrid Search
zh_Hans: 混合搜索
form: form
- name: knowledge_base_configuration
- name: metadata_filter
type: string
required: false
label:
en_US: Knowledge Base Configuration
zh_Hans: 知识库配置
en_US: Metadata Filter
zh_Hans: 元数据过滤器
human_description:
en_US: Please refer to @https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-agent-runtime/client/retrieve_and_generate.html#retrieve-and-generate for complete parameters and paste them here
zh_Hans: 请参考 https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-agent-runtime/client/retrieve_and_generate.html#retrieve-and-generate 配置完整的参数并粘贴到这里
en_US: JSON formatted filter conditions for metadata, supporting operations like equals, greaterThan, lessThan, etc.
zh_Hans: 元数据的JSON格式过滤条件,支持等于、大于、小于等操作
default: "{}"
form: form
- name: external_sources_configuration
# Generation Configuration
- name: guardrail_id
type: string
required: false
label:
en_US: External Sources Configuration
zh_Hans: 外部源配置
en_US: Guardrail ID
zh_Hans: 防护栏ID
human_description:
en_US: Please refer to https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-agent-runtime/client/retrieve_and_generate.html#retrieve-and-generate for complete parameters and paste them here
zh_Hans: 请参考 https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-agent-runtime/client/retrieve_and_generate.html#retrieve-and-generate 配置完整的参数并粘贴到这里
en_US: ID of the guardrail to apply
zh_Hans: 要应用的防护栏ID
form: form
- name: session_configuration
- name: guardrail_version
type: string
required: false
label:
en_US: Session Configuration
zh_Hans: 会话配置
en_US: Guardrail Version
zh_Hans: 防护栏版本
human_description:
en_US: JSON formatted session configuration
zh_Hans: JSON格式的会话配置
default: ""
en_US: Version of the guardrail to apply
zh_Hans: 要应用的防护栏版本
form: form
- name: session_id
- name: max_tokens
type: number
required: false
label:
en_US: Maximum Tokens
zh_Hans: 最大令牌数
human_description:
en_US: Maximum number of tokens to generate
zh_Hans: 生成的最大令牌数
default: 2048
form: form
- name: stop_sequences
type: string
required: false
label:
en_US: Session ID
zh_Hans: 会话ID
en_US: Stop Sequences
zh_Hans: 停止序列
human_description:
en_US: Session ID for continuous conversations
zh_Hans: 用于连续对话的会话ID
en_US: JSON array of strings that will stop generation when encountered
zh_Hans: JSON数组格式的字符串,遇到这些序列时将停止生成
default: "[]"
form: form
- name: temperature
type: number
required: false
label:
en_US: Temperature
zh_Hans: 温度
human_description:
en_US: Controls randomness in the output (0-1)
zh_Hans: 控制输出的随机性(0-1)
default: 0.7
min: 0
max: 1
form: form
- name: top_p
type: number
required: false
label:
en_US: Top P
zh_Hans: Top P值
human_description:
en_US: Controls diversity via nucleus sampling (0-1)
zh_Hans: 通过核采样控制多样性(0-1)
default: 0.95
min: 0
max: 1
form: form
- name: performance_mode
type: select
required: false
label:
en_US: Performance Mode
zh_Hans: 性能模式
human_description:
en_US: Select performance optimization mode(performanceConfig.latency)
zh_Hans: 选择性能优化模式(performanceConfig.latency)
default: standard
options:
- value: standard
label:
en_US: Standard
zh_Hans: 标准
- value: optimized
label:
en_US: Optimized
zh_Hans: 优化
form: form
- name: prompt_template
type: string
required: false
label:
en_US: Prompt Template
zh_Hans: 提示模板
human_description:
en_US: Custom prompt template for generation
zh_Hans: 用于生成的自定义提示模板
form: form
- name: additional_model_fields
type: string
required: false
label:
en_US: Additional Model Fields
zh_Hans: 额外模型字段
human_description:
en_US: JSON formatted additional fields for model configuration
zh_Hans: JSON格式的额外模型配置字段
default: "{}"
form: form
# Orchestration Configuration
- name: orchestration_max_tokens
type: number
required: false
label:
en_US: Orchestration Maximum Tokens
zh_Hans: 编排最大令牌数
human_description:
en_US: Maximum number of tokens for orchestration
zh_Hans: 编排过程的最大令牌数
default: 2048
form: form
- name: orchestration_stop_sequences
type: string
required: false
label:
en_US: Orchestration Stop Sequences
zh_Hans: 编排停止序列
human_description:
en_US: JSON array of strings that will stop orchestration when encountered
zh_Hans: JSON数组格式的字符串,遇到这些序列时将停止编排
default: "[]"
form: form
- name: orchestration_temperature
type: number
required: false
label:
en_US: Orchestration Temperature
zh_Hans: 编排温度
human_description:
en_US: Controls randomness in the orchestration output (0-1)
zh_Hans: 控制编排输出的随机性(0-1)
default: 0.7
min: 0
max: 1
form: form
- name: orchestration_top_p
type: number
required: false
label:
en_US: Orchestration Top P
zh_Hans: 编排Top P值
human_description:
en_US: Controls diversity via nucleus sampling in orchestration (0-1)
zh_Hans: 通过核采样控制编排的多样性(0-1)
default: 0.95
min: 0
max: 1
form: form
- name: orchestration_performance_mode
type: select
required: false
label:
en_US: Orchestration Performance Mode
zh_Hans: 编排性能模式
human_description:
en_US: Select performance optimization mode for orchestration
zh_Hans: 选择编排的性能优化模式
default: standard
options:
- value: standard
label:
en_US: Standard
zh_Hans: 标准
- value: optimized
label:
en_US: Optimized
zh_Hans: 优化
form: form
- name: orchestration_prompt_template
type: string
required: false
label:
en_US: Orchestration Prompt Template
zh_Hans: 编排提示模板
human_description:
en_US: Custom prompt template for orchestration
zh_Hans: 用于编排的自定义提示模板
form: form
- name: orchestration_additional_model_fields
type: string
required: false
label:
en_US: Orchestration Additional Model Fields
zh_Hans: 编排额外模型字段
human_description:
en_US: JSON formatted additional fields for orchestration model configuration
zh_Hans: JSON格式的编排模型额外配置字段
default: "{}"
form: form

View File

@ -590,8 +590,6 @@ class Graph(BaseModel):
start_node_id=node_id,
routes_node_ids=routes_node_ids,
)
# Exclude conditional branch nodes
and all(edge.run_condition is None for edge in reverse_edge_mapping.get(node_id, []))
):
if node_id not in merge_branch_node_ids:
merge_branch_node_ids[node_id] = []

View File

@ -195,7 +195,7 @@ class CodeNode(BaseNode[CodeNodeData]):
if output_config.type == "object":
# check if output is object
if not isinstance(result.get(output_name), dict):
if result.get(output_name) is None:
if isinstance(result.get(output_name), type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(
@ -223,7 +223,7 @@ class CodeNode(BaseNode[CodeNodeData]):
elif output_config.type == "array[number]":
# check if array of number available
if not isinstance(result[output_name], list):
if result[output_name] is None:
if isinstance(result[output_name], type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(
@ -244,7 +244,7 @@ class CodeNode(BaseNode[CodeNodeData]):
elif output_config.type == "array[string]":
# check if array of string available
if not isinstance(result[output_name], list):
if result[output_name] is None:
if isinstance(result[output_name], type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(
@ -265,7 +265,7 @@ class CodeNode(BaseNode[CodeNodeData]):
elif output_config.type == "array[object]":
# check if array of object available
if not isinstance(result[output_name], list):
if result[output_name] is None:
if isinstance(result[output_name], type(None)):
transformed_result[output_name] = None
else:
raise OutputValidationError(

View File

@ -3,7 +3,7 @@ from typing import Any, Optional
from pydantic import BaseModel, Field, field_validator
from core.model_runtime.entities import ImagePromptMessageContent, LLMMode
from core.model_runtime.entities import ImagePromptMessageContent
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
from core.workflow.entities.variable_entities import VariableSelector
from core.workflow.nodes.base import BaseNodeData
@ -12,7 +12,7 @@ from core.workflow.nodes.base import BaseNodeData
class ModelConfig(BaseModel):
provider: str
name: str
mode: LLMMode
mode: str
completion_params: dict[str, Any] = {}

View File

@ -3,7 +3,6 @@ import logging
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Optional, cast
from configs import dify_config
from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.model_entities import ModelStatus
from core.entities.provider_entities import QuotaUnit
@ -733,7 +732,10 @@ class LLMNode(BaseNode[LLMNodeData]):
if quota_unit == QuotaUnit.TOKENS:
used_quota = usage.total_tokens
elif quota_unit == QuotaUnit.CREDITS:
used_quota = dify_config.get_model_credits(model_instance.model)
used_quota = 1
if "gpt-4" in model_instance.model:
used_quota = 20
else:
used_quota = 1

View File

@ -1,4 +1,3 @@
from configs import dify_config
from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity, ChatAppGenerateEntity
from core.entities.provider_entities import QuotaUnit
from events.message_event import message_was_created
@ -38,7 +37,10 @@ def handle(sender, **kwargs):
if quota_unit == QuotaUnit.TOKENS:
used_quota = message.message_tokens + message.answer_tokens
elif quota_unit == QuotaUnit.CREDITS:
used_quota = dify_config.get_model_credits(model_config.model)
used_quota = 1
if "gpt-4" in model_config.model:
used_quota = 20
else:
used_quota = 1

View File

@ -1,8 +1,6 @@
from collections.abc import Generator
from datetime import UTC, datetime, timedelta
from typing import Optional
from azure.identity import ChainedTokenCredential, DefaultAzureCredential
from azure.storage.blob import AccountSasPermissions, BlobServiceClient, ResourceTypes, generate_account_sas
from configs import dify_config
@ -20,12 +18,6 @@ class AzureBlobStorage(BaseStorage):
self.account_name = dify_config.AZURE_BLOB_ACCOUNT_NAME
self.account_key = dify_config.AZURE_BLOB_ACCOUNT_KEY
self.credential: Optional[ChainedTokenCredential] = None
if self.account_key == "managedidentity":
self.credential = DefaultAzureCredential()
else:
self.credential = None
def save(self, filename, data):
client = self._sync_client()
blob_container = client.get_container_client(container=self.bucket_name)
@ -65,9 +57,6 @@ class AzureBlobStorage(BaseStorage):
blob_container.delete_blob(filename)
def _sync_client(self):
if self.account_key == "managedidentity":
return BlobServiceClient(account_url=self.account_url, credential=self.credential) # type: ignore
cache_key = "azure_blob_sas_token_{}_{}".format(self.account_name, self.account_key)
cache_result = redis_client.get(cache_key)
if cache_result is not None:

api/poetry.lock (generated)
View File

@ -627,15 +627,15 @@ cryptography = "*"
[[package]]
name = "azure-ai-inference"
version = "1.0.0b8"
version = "1.0.0b6"
description = "Microsoft Azure AI Inference Client Library for Python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
{file = "azure_ai_inference-1.0.0b8-py3-none-any.whl", hash = "sha256:9bfcfe6ef5b1699fed6c70058027c253bcbc88f4730e7409fbfc675636ec05e4"},
{file = "azure_ai_inference-1.0.0b8.tar.gz", hash = "sha256:b7bcaaac5f53f2be06804ac6c755be9583ac6ba99df533a3970da081838b4cc1"},
{file = "azure_ai_inference-1.0.0b6-py3-none-any.whl", hash = "sha256:5699ad78d70ec2d227a5eff2c1bafc845018f6624edc5b03589dfff861c54958"},
{file = "azure_ai_inference-1.0.0b6.tar.gz", hash = "sha256:b8ac941de1e69151bad464191e18856d4e74f962ae03235da137a9a326143676"},
]
[package.dependencies]
@ -931,19 +931,19 @@ files = [
[[package]]
name = "boto3"
version = "1.36.12"
version = "1.36.4"
description = "The AWS SDK for Python"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
{file = "boto3-1.36.12-py3-none-any.whl", hash = "sha256:32cdf0967287f3ec25a9dc09df0d29cb86b8900c3e0546a63d672775d8127abf"},
{file = "boto3-1.36.12.tar.gz", hash = "sha256:287d84f49bba3255a17b374578127d42b6251e72f55914a62e0ad9ca78c0954b"},
{file = "boto3-1.36.4-py3-none-any.whl", hash = "sha256:9f8f699e75ec63fcc98c4dd7290997c7c06c68d3ac8161ad4735fe71f5fe945c"},
{file = "boto3-1.36.4.tar.gz", hash = "sha256:eeceeb74ef8b65634d358c27aa074917f4449dc828f79301f1075232618eb502"},
]
[package.dependencies]
botocore = ">=1.36.12,<1.37.0"
botocore = ">=1.36.4,<1.37.0"
jmespath = ">=0.7.1,<2.0.0"
s3transfer = ">=0.11.0,<0.12.0"
@ -952,15 +952,15 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
version = "1.36.12"
version = "1.36.5"
description = "Low-level, data-driven core of boto 3."
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
{file = "botocore-1.36.12-py3-none-any.whl", hash = "sha256:5ae1ed362c8ed908a6ced8cdd12b21e2196c100bc79f9e95c9c1fc7f9ea74f5a"},
{file = "botocore-1.36.12.tar.gz", hash = "sha256:86ed88beb4f244c96529435c868d3940073c2774116f0023fb7691f6e7053bd9"},
{file = "botocore-1.36.5-py3-none-any.whl", hash = "sha256:6d9f70afa9bf9d21407089dc22b8cc8ec6fa44866d4660858c062c74fc8555eb"},
{file = "botocore-1.36.5.tar.gz", hash = "sha256:234ed3d29a8954c37a551c933453bf14c6ae44a69a4f273ffef377a2612ca6a6"},
]
[package.dependencies]
@ -969,7 +969,7 @@ python-dateutil = ">=2.1,<3.0.0"
urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""}
[package.extras]
crt = ["awscrt (==0.23.8)"]
crt = ["awscrt (==0.23.4)"]
[[package]]
name = "bottleneck"
@ -6522,15 +6522,15 @@ sympy = "*"
[[package]]
name = "openai"
version = "1.61.0"
version = "1.52.2"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
python-versions = ">=3.7.1"
groups = ["main"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
{file = "openai-1.61.0-py3-none-any.whl", hash = "sha256:e8c512c0743accbdbe77f3429a1490d862f8352045de8dc81969301eb4a4f666"},
{file = "openai-1.61.0.tar.gz", hash = "sha256:216f325a24ed8578e929b0f1b3fb2052165f3b04b0461818adaa51aa29c71f8a"},
{file = "openai-1.52.2-py3-none-any.whl", hash = "sha256:57e9e37bc407f39bb6ec3a27d7e8fb9728b2779936daa1fcf95df17d3edfaccc"},
{file = "openai-1.52.2.tar.gz", hash = "sha256:87b7d0f69d85f5641678d414b7ee3082363647a5c66a462ed7f3ccb59582da0d"},
]
[package.dependencies]
@ -6545,7 +6545,6 @@ typing-extensions = ">=4.11,<5"
[package.extras]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<15)"]
[[package]]
name = "opencensus"
@ -12389,4 +12388,4 @@ cffi = ["cffi (>=1.11)"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "d197cdff507a70323c1d6aca11609188f54970f67715af744fe6def15b7776fd"
content-hash = "6243573a26b9aa03558eb2c176d2477a08b1033a17065e870e4be83af0af644d"

View File

@ -17,11 +17,11 @@ package-mode = false
[tool.poetry.dependencies]
anthropic = "~0.23.1"
authlib = "1.3.1"
azure-ai-inference = "~1.0.0b8"
azure-ai-inference = "~1.0.0b3"
azure-ai-ml = "~1.20.0"
azure-identity = "1.16.1"
beautifulsoup4 = "4.12.2"
boto3 = "1.36.12"
boto3 = "1.36.4"
bs4 = "~0.0.1"
cachetools = "~5.3.0"
celery = "~5.4.0"
@ -58,7 +58,7 @@ nomic = "~3.1.2"
novita-client = "~0.5.7"
numpy = "~1.26.4"
oci = "~2.135.1"
openai = "~1.61.0"
openai = "~1.52.0"
openpyxl = "~3.1.5"
opik = "~1.3.4"
pandas = { version = "~2.2.2", extras = ["performance", "excel"] }

View File

@ -2,7 +2,7 @@ version: '3'
services:
# API service
api:
image: langgenius/dify-api:0.15.3
image: langgenius/dify-api:0.15.2
restart: always
environment:
# Startup mode, 'api' starts the API server.
@ -227,7 +227,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.15.3
image: langgenius/dify-api:0.15.2
restart: always
environment:
CONSOLE_WEB_URL: ''
@ -397,7 +397,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.15.3
image: langgenius/dify-web:0.15.2
restart: always
environment:
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is

View File

@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:0.15.3
image: langgenius/dify-api:0.15.2
restart: always
environment:
# Use the shared environment variables.
@ -25,7 +25,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.15.3
image: langgenius/dify-api:0.15.2
restart: always
environment:
# Use the shared environment variables.
@ -47,7 +47,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.15.3
image: langgenius/dify-web:0.15.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@ -393,7 +393,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:0.15.3
image: langgenius/dify-api:0.15.2
restart: always
environment:
# Use the shared environment variables.
@ -416,7 +416,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.15.3
image: langgenius/dify-api:0.15.2
restart: always
environment:
# Use the shared environment variables.
@ -438,7 +438,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.15.3
image: langgenius/dify-web:0.15.2
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@ -87,7 +87,7 @@ class TestKnowledgeBaseClient(unittest.TestCase):
def _test_005_batch_indexing_status(self):
client = self._get_dataset_kb_client()
response = client.batch_indexing_status(self.batch_id)
response.json()
data = response.json()
self.assertEqual(response.status_code, 200)
def _test_006_update_document_by_file(self):

View File

@ -1,7 +0,0 @@
/**/node_modules/*
node_modules/
dist/
build/
out/
.next/

View File

@ -1,31 +0,0 @@
{
"extends": [
"next",
"@antfu",
"plugin:storybook/recommended"
],
"rules": {
"@typescript-eslint/consistent-type-definitions": [
"error",
"type"
],
"@typescript-eslint/no-var-requires": "off",
"no-console": "off",
"indent": "off",
"@typescript-eslint/indent": [
"error",
2,
{
"SwitchCase": 1,
"flatTernaryExpressions": false,
"ignoredNodes": [
"PropertyDefinition[decorators]",
"TSUnionType",
"FunctionExpression[params]:has(Identifier[decorators])"
]
}
],
"react-hooks/exhaustive-deps": "warn",
"react/display-name": "warn"
}
}

View File

@ -63,6 +63,7 @@ RUN yarn global add pm2 \
&& chown -R 1001:0 /.pm2 /app/web \
&& chmod -R g=u /.pm2 /app/web
ARG COMMIT_SHA
ENV COMMIT_SHA=${COMMIT_SHA}

View File

@ -23,14 +23,12 @@ type ItemProps = {
onRemove: (id: string) => void
readonly?: boolean
onSave: (newDataset: DataSet) => void
editable?: boolean
}
const Item: FC<ItemProps> = ({
config,
onSave,
onRemove,
editable = true,
}) => {
const media = useBreakpoints()
const isMobile = media === MediaType.mobile
@ -70,21 +68,19 @@ const Item: FC<ItemProps> = ({
<div className='flex items-center h-[18px]'>
<div className='grow text-[13px] font-medium text-gray-800 truncate' title={config.name}>{config.name}</div>
{config.provider === 'external'
? <Badge text={t('dataset.externalTag') as string} />
? <Badge text={t('dataset.externalTag')}></Badge>
: <Badge
text={formatIndexingTechniqueAndMethod(config.indexing_technique, config.retrieval_model_dict?.search_method)}
/>}
</div>
</div>
<div className='hidden rounded-lg group-hover:flex items-center justify-end absolute right-0 top-0 bottom-0 pr-2 w-[124px] bg-gradient-to-r from-white/50 to-white to-50%'>
{
editable && <div
className='flex items-center justify-center mr-1 w-6 h-6 hover:bg-black/5 rounded-md cursor-pointer'
onClick={() => setShowSettingsModal(true)}
>
<RiEditLine className='w-4 h-4 text-gray-500' />
</div>
}
<div
className='flex items-center justify-center mr-1 w-6 h-6 hover:bg-black/5 rounded-md cursor-pointer'
onClick={() => setShowSettingsModal(true)}
>
<RiEditLine className='w-4 h-4 text-gray-500' />
</div>
<div
className='group/action flex items-center justify-center w-6 h-6 hover:bg-[#FEE4E2] rounded-md cursor-pointer'
onClick={() => onRemove(config.id)}

View File

@ -1,6 +1,6 @@
'use client'
import type { FC } from 'react'
import React, { useMemo } from 'react'
import React from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import produce from 'immer'
@ -19,8 +19,6 @@ import {
} from '@/app/components/workflow/nodes/knowledge-retrieval/utils'
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useSelector as useAppContextSelector } from '@/context/app-context'
import { hasEditPermissionForDataset } from '@/utils/permission'
const Icon = (
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
@ -31,7 +29,6 @@ const Icon = (
const DatasetConfig: FC = () => {
const { t } = useTranslation()
const userProfile = useAppContextSelector(s => s.userProfile)
const {
mode,
dataSets: dataSet,
@ -108,20 +105,6 @@ const DatasetConfig: FC = () => {
setModelConfig(newModelConfig)
}
const formattedDataset = useMemo(() => {
return dataSet.map((item) => {
const datasetConfig = {
createdBy: item.created_by,
partialMemberList: item.partial_member_list || [],
permission: item.permission,
}
return {
...item,
editable: hasEditPermissionForDataset(userProfile?.id || '', datasetConfig),
}
})
}, [dataSet, userProfile?.id])
return (
<FeaturePanel
className='mt-2'
@ -139,13 +122,12 @@ const DatasetConfig: FC = () => {
{hasData
? (
<div className='flex flex-wrap mt-1 px-3 pb-3 justify-between'>
{formattedDataset.map(item => (
{dataSet.map(item => (
<CardItem
key={item.id}
config={item}
onRemove={onRemove}
onSave={handleSave}
editable={item.editable}
/>
))}
</div>

View File

@ -12,7 +12,7 @@ import Divider from '@/app/components/base/divider'
import Button from '@/app/components/base/button'
import Input from '@/app/components/base/input'
import Textarea from '@/app/components/base/textarea'
import { type DataSet, DatasetPermission } from '@/models/datasets'
import { type DataSet } from '@/models/datasets'
import { useToastContext } from '@/app/components/base/toast'
import { updateDatasetSetting } from '@/service/datasets'
import { useAppContext } from '@/context/app-context'
@ -134,7 +134,7 @@ const SettingsModal: FC<SettingsModalProps> = ({
}),
},
} as any
if (permission === DatasetPermission.partialMembers) {
if (permission === 'partial_members') {
requestParams.body.partial_member_list = selectedMemberIDs.map((id) => {
return {
user_id: id,

View File

@ -491,7 +491,7 @@ const Configuration: FC = () => {
}, [formattingChangedDispatcher, setShowAppConfigureFeaturesModal])
const handleAddPromptVariable = useCallback((variable: PromptVariable[]) => {
const newModelConfig = produce(modelConfig, (draft: ModelConfig) => {
draft.configs.prompt_variables = [...draft.configs.prompt_variables, ...variable]
draft.configs.prompt_variables = variable
})
setModelConfig(newModelConfig)
}, [modelConfig])

View File

@ -211,7 +211,7 @@ export const useFile = (fileConfig: FileUpload) => {
type: '',
size: 0,
progress: 0,
transferMethod: TransferMethod.remote_url,
transferMethod: TransferMethod.local_file,
supportFileType: '',
url,
isRemote: true,

Some files were not shown because too many files have changed in this diff.