Compare commits
21 Commits
| SHA1 |
|---|
| a6af8e5d8f |
| 3e1d5ac51b |
| b0091452ca |
| eff115267f |
| 07cde4f8fe |
| 9f28a48a92 |
| 0d3cd3b16a |
| 3dc82fb044 |
| cb6e73347e |
| ecd6cbaee6 |
| d54e942264 |
| 28ba721455 |
| 784dd7848e |
| e2a5f8ba1a |
| 8e11200306 |
| 7599f79a17 |
| 510389909c |
| 2c6e00174b |
| 24f3456990 |
| 20514ff288 |
| 381d255290 |
README.md

@@ -17,9 +17,15 @@ A single API encompassing plugin capabilities, context enhancement, and more, sa
 Visual data analysis, log review, and annotation for applications
 
 Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs, currently supported:
 
-- GPT 3 (text-davinci-003)
-- GPT 3.5 Turbo (ChatGPT)
-- GPT-4
+* **OpenAI**: GPT4, GPT3.5-turbo, GPT3.5-turbo-16k, text-davinci-003
+
+* **Azure OpenAI**
+
+* **Anthropic**: Claude2, Claude-instant
+
+> We've got 1000 free trial credits available for all cloud service users to try out the Claude model. Visit [Dify.ai](https://dify.ai) and try it now.
+
+* **Hugging Face Hub**: Coming soon.
 
 ## Use Cloud Services
README_CN.md

@@ -17,11 +17,16 @@
 - A single API encompassing plugin capabilities, context enhancement, and more, saving you the work of writing backend code
 - Visual data analysis, log review, and annotation for applications
 
-Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs; currently supported:
+Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs; currently supported model providers:
 
-- GPT 3 (text-davinci-003)
-- GPT 3.5 Turbo (ChatGPT)
-- GPT-4
+* **OpenAI**: GPT4, GPT3.5-turbo, GPT3.5-turbo-16k, text-davinci-003
+
+* **Azure OpenAI Service**
+* **Anthropic**: Claude2, Claude-instant
+
+> We provide all registered cloud users with 1000 free Claude message calls; log in at [dify.ai](https://cloud.dify.ai) to use them.
+
+* **Hugging Face Hub** (coming soon)
 
 ## Use Cloud Services
@@ -27,4 +27,4 @@ RUN chmod +x /entrypoint.sh
 ARG COMMIT_SHA
 ENV COMMIT_SHA ${COMMIT_SHA}
 
-ENTRYPOINT ["/entrypoint.sh"]
+ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
@@ -2,6 +2,8 @@
 import os
 from datetime import datetime
 
+from werkzeug.exceptions import Forbidden
+
 if not os.environ.get("DEBUG") or os.environ.get("DEBUG").lower() != 'true':
     from gevent import monkey
     monkey.patch_all()

@@ -27,7 +29,7 @@ from events import event_handlers
 import core
 from config import Config, CloudEditionConfig
 from commands import register_commands
-from models.account import TenantAccountJoin
+from models.account import TenantAccountJoin, AccountStatus
 from models.model import Account, EndUser, App
 
 import warnings

@@ -101,6 +103,9 @@ def load_user(user_id):
         account = db.session.query(Account).filter(Account.id == account_id).first()
 
+        if account:
+            if account.status == AccountStatus.BANNED.value or account.status == AccountStatus.CLOSED.value:
+                raise Forbidden('Account is banned or closed.')
+
         workspace_id = session.get('workspace_id')
         if workspace_id:
             tenant_account_join = db.session.query(TenantAccountJoin).filter(
@@ -18,7 +18,8 @@ from models.model import Account
 import secrets
 import base64
 
-from models.provider import Provider
+from models.provider import Provider, ProviderName
+from services.provider_service import ProviderService
 
 
 @click.command('reset-password', help='Reset the account password.')

@@ -193,9 +194,40 @@ def recreate_all_dataset_indexes():
     click.echo(click.style('Congratulations! Recreate {} dataset indexes.'.format(recreate_count), fg='green'))
 
 
+@click.command('sync-anthropic-hosted-providers', help='Sync anthropic hosted providers.')
+def sync_anthropic_hosted_providers():
+    click.echo(click.style('Start sync anthropic hosted providers.', fg='green'))
+    count = 0
+
+    page = 1
+    while True:
+        try:
+            tenants = db.session.query(Tenant).order_by(Tenant.created_at.desc()).paginate(page=page, per_page=50)
+        except NotFound:
+            break
+
+        page += 1
+        for tenant in tenants:
+            try:
+                click.echo('Syncing tenant anthropic hosted provider: {}'.format(tenant.id))
+                ProviderService.create_system_provider(
+                    tenant,
+                    ProviderName.ANTHROPIC.value,
+                    current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'],
+                    True
+                )
+                count += 1
+            except Exception as e:
+                click.echo(click.style(
+                    'Sync tenant anthropic hosted provider error: {} {}'.format(e.__class__.__name__, str(e)),
+                    fg='red'))
+                continue
+
+    click.echo(click.style('Congratulations! Synced {} anthropic hosted providers.'.format(count), fg='green'))
+
+
 def register_commands(app):
     app.cli.add_command(reset_password)
     app.cli.add_command(reset_email)
     app.cli.add_command(generate_invitation_codes)
     app.cli.add_command(reset_encrypt_key_pair)
     app.cli.add_command(recreate_all_dataset_indexes)
+    app.cli.add_command(sync_anthropic_hosted_providers)
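Since `register_commands` attaches the new command to the Flask CLI, the sync would presumably be run from the API service as `flask sync-anthropic-hosted-providers`; that invocation is an assumption based on the registration above, not something the diff itself shows.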
@@ -50,7 +50,10 @@ DEFAULTS = {
     'PDF_PREVIEW': 'True',
     'LOG_LEVEL': 'INFO',
     'DISABLE_PROVIDER_CONFIG_VALIDATION': 'False',
-    'DEFAULT_LLM_PROVIDER': 'openai'
+    'DEFAULT_LLM_PROVIDER': 'openai',
+    'OPENAI_HOSTED_QUOTA_LIMIT': 200,
+    'ANTHROPIC_HOSTED_QUOTA_LIMIT': 1000,
+    'TENANT_DOCUMENT_COUNT': 100
 }

@@ -86,7 +89,7 @@ class Config:
         self.CONSOLE_URL = get_env('CONSOLE_URL')
         self.API_URL = get_env('API_URL')
         self.APP_URL = get_env('APP_URL')
-        self.CURRENT_VERSION = "0.3.8"
+        self.CURRENT_VERSION = "0.3.10"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')

@@ -191,6 +194,10 @@ class Config:
         # hosted provider credentials
         self.OPENAI_API_KEY = get_env('OPENAI_API_KEY')
+        self.ANTHROPIC_API_KEY = get_env('ANTHROPIC_API_KEY')
+
+        self.OPENAI_HOSTED_QUOTA_LIMIT = get_env('OPENAI_HOSTED_QUOTA_LIMIT')
+        self.ANTHROPIC_HOSTED_QUOTA_LIMIT = get_env('ANTHROPIC_HOSTED_QUOTA_LIMIT')
 
         # By default it is False
         # You could disable it for compatibility with certain OpenAPI providers

@@ -207,6 +214,8 @@ class Config:
         self.NOTION_INTERNAL_SECRET = get_env('NOTION_INTERNAL_SECRET')
         self.NOTION_INTEGRATION_TOKEN = get_env('NOTION_INTEGRATION_TOKEN')
 
+        self.TENANT_DOCUMENT_COUNT = get_env('TENANT_DOCUMENT_COUNT')
+
 
 class CloudEditionConfig(Config):
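As a reading aid, here is a minimal sketch of how the `DEFAULTS`/`get_env` pattern above presumably resolves the new quota settings; the `get_env` body below is an assumption modeled on the defaults table, not Dify's actual helper.

```python
import os

# Assumed fallback table mirroring the DEFAULTS entries added in this diff.
DEFAULTS = {
    'OPENAI_HOSTED_QUOTA_LIMIT': 200,
    'ANTHROPIC_HOSTED_QUOTA_LIMIT': 1000,
}

def get_env(key):
    # Environment variable wins; otherwise fall back to the default.
    return os.environ.get(key, DEFAULTS.get(key))

print(get_env('ANTHROPIC_HOSTED_QUOTA_LIMIT'))  # 1000 unless overridden in the environment
```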
@@ -50,8 +50,8 @@ class ChatMessageAudioApi(Resource):
             raise UnsupportedAudioTypeError()
         except ProviderNotSupportSpeechToTextServiceError:
             raise ProviderNotSupportSpeechToTextError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
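The same two-line substitution recurs in every controller below: the caught provider error's `description` is now forwarded into the HTTP-level error instead of being dropped. A minimal self-contained sketch of the pattern, with simplified stand-ins for the real Dify exception classes:

```python
class ProviderTokenNotInitError(Exception):
    description = "No valid OpenAI credentials found."  # illustrative message

class ProviderNotInitializeError(Exception):
    def __init__(self, description=None):
        # the description surfaces in the API error response
        super().__init__(description or "Provider not initialized.")

def handler():
    try:
        raise ProviderTokenNotInitError()
    except ProviderTokenNotInitError as ex:
        # old: raise ProviderNotInitializeError()          -- generic message
        raise ProviderNotInitializeError(ex.description)   # new: underlying cause propagates

try:
    handler()
except ProviderNotInitializeError as e:
    print(e)  # -> No valid OpenAI credentials found.
```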
@@ -63,8 +63,8 @@ class CompletionMessageApi(Resource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -133,8 +133,8 @@ class ChatMessageApi(Resource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -164,8 +164,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-        except ProviderTokenNotInitError:
-            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+        except ProviderTokenNotInitError as ex:
+            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
         except QuotaExceededError:
             yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
         except ModelCurrentlyNotSupportError:
@@ -16,7 +16,7 @@ class ProviderNotInitializeError(BaseHTTPException):
 
 class ProviderQuotaExceededError(BaseHTTPException):
     error_code = 'provider_quota_exceeded'
-    description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
+    description = "Your quota for Dify Hosted Model Provider has been exhausted. " \
                   "Please go to Settings -> Model Provider to complete your own provider credentials."
     code = 400
@@ -27,8 +27,8 @@ class IntroductionGenerateApi(Resource):
                 account.current_tenant_id,
                 args['prompt_template']
             )
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -58,8 +58,8 @@ class RuleGenerateApi(Resource):
                 args['audiences'],
                 args['hoping_to_solve']
             )
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -269,8 +269,8 @@ class MessageMoreLikeThisApi(Resource):
             raise NotFound("Message Not Exists.")
         except MoreLikeThisDisabledError:
             raise AppMoreLikeThisDisabledError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -297,8 +297,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
             yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
         except MoreLikeThisDisabledError:
             yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
-        except ProviderTokenNotInitError:
-            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+        except ProviderTokenNotInitError as ex:
+            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
         except QuotaExceededError:
             yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
         except ModelCurrentlyNotSupportError:

@@ -339,8 +339,8 @@ class MessageSuggestedQuestionApi(Resource):
             raise NotFound("Message not found")
         except ConversationNotExistsError:
             raise NotFound("Conversation not found")
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -279,8 +279,8 @@ class DatasetDocumentListApi(Resource):
 
         try:
             documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -324,8 +324,8 @@ class DatasetInitApi(Resource):
                 document_data=args,
                 account=current_user
             )
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -95,8 +95,8 @@ class HitTestingApi(Resource):
             return {"query": response['query'], 'records': marshal(response['records'], hit_testing_record_fields)}
         except services.errors.index.IndexNotInitializedError:
             raise DatasetNotInitializedError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -47,8 +47,8 @@ class ChatAudioApi(InstalledAppResource):
             raise UnsupportedAudioTypeError()
         except ProviderNotSupportSpeechToTextServiceError:
             raise ProviderNotSupportSpeechToTextError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -54,8 +54,8 @@ class CompletionApi(InstalledAppResource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -113,8 +113,8 @@ class ChatApi(InstalledAppResource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -155,8 +155,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-        except ProviderTokenNotInitError:
-            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+        except ProviderTokenNotInitError as ex:
+            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
         except QuotaExceededError:
             yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
         except ModelCurrentlyNotSupportError:
@@ -107,8 +107,8 @@ class MessageMoreLikeThisApi(InstalledAppResource):
             raise NotFound("Message Not Exists.")
         except MoreLikeThisDisabledError:
             raise AppMoreLikeThisDisabledError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -135,8 +135,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
             yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
         except MoreLikeThisDisabledError:
             yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
-        except ProviderTokenNotInitError:
-            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+        except ProviderTokenNotInitError as ex:
+            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
         except QuotaExceededError:
             yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
         except ModelCurrentlyNotSupportError:

@@ -174,8 +174,8 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
             raise NotFound("Conversation not found")
         except SuggestedQuestionsAfterAnswerDisabledError:
             raise AppSuggestedQuestionsAfterAnswerDisabledError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -3,6 +3,7 @@ import base64
 import json
 import logging
 
+from flask import current_app
 from flask_login import login_required, current_user
 from flask_restful import Resource, reqparse, abort
 from werkzeug.exceptions import Forbidden

@@ -34,7 +35,7 @@ class ProviderListApi(Resource):
         plaintext, the rest is replaced by * and the last two bits are displayed in plaintext
         """
 
-        ProviderService.init_supported_provider(current_user.current_tenant, "cloud")
+        ProviderService.init_supported_provider(current_user.current_tenant)
         providers = Provider.query.filter_by(tenant_id=tenant_id).all()
 
         provider_list = [

@@ -50,7 +51,8 @@ class ProviderListApi(Resource):
                     'quota_used': p.quota_used
                 } if p.provider_type == ProviderType.SYSTEM.value else {}),
                 'token': ProviderService.get_obfuscated_api_key(current_user.current_tenant,
-                                                                ProviderName(p.provider_name))
+                                                                ProviderName(p.provider_name), only_custom=True)
                 if p.provider_type == ProviderType.CUSTOM.value else None
             }
             for p in providers
         ]
@@ -121,9 +123,10 @@ class ProviderTokenApi(Resource):
                 is_valid=token_is_valid)
             db.session.add(provider_model)
 
-        if provider_model.is_valid:
+        if provider in [ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value] and provider_model.is_valid:
             other_providers = db.session.query(Provider).filter(
                 Provider.tenant_id == tenant.id,
                 Provider.provider_name.in_([ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value]),
                 Provider.provider_name != provider,
                 Provider.provider_type == ProviderType.CUSTOM.value
             ).all()

@@ -133,7 +136,7 @@ class ProviderTokenApi(Resource):
 
         db.session.commit()
 
-        if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
+        if provider in [ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
                         ProviderName.HUGGINGFACEHUB.value]:
             return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}, 201

@@ -157,7 +160,7 @@ class ProviderTokenValidateApi(Resource):
         args = parser.parse_args()
 
         # todo: remove this when the provider is supported
-        if provider in [ProviderName.ANTHROPIC.value, ProviderName.COHERE.value,
+        if provider in [ProviderName.COHERE.value,
                         ProviderName.HUGGINGFACEHUB.value]:
             return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}

@@ -203,7 +206,19 @@ class ProviderSystemApi(Resource):
             provider_model.is_valid = args['is_enabled']
             db.session.commit()
         elif not provider_model:
-            ProviderService.create_system_provider(tenant, provider, args['is_enabled'])
+            if provider == ProviderName.OPENAI.value:
+                quota_limit = current_app.config['OPENAI_HOSTED_QUOTA_LIMIT']
+            elif provider == ProviderName.ANTHROPIC.value:
+                quota_limit = current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT']
+            else:
+                quota_limit = 0
+
+            ProviderService.create_system_provider(
+                tenant,
+                provider,
+                quota_limit,
+                args['is_enabled']
+            )
         else:
             abort(403)
@@ -43,8 +43,8 @@ class AudioApi(AppApiResource):
             raise UnsupportedAudioTypeError()
         except ProviderNotSupportSpeechToTextServiceError:
             raise ProviderNotSupportSpeechToTextError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -54,8 +54,8 @@ class CompletionApi(AppApiResource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -115,8 +115,8 @@ class ChatApi(AppApiResource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -156,8 +156,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-        except ProviderTokenNotInitError:
-            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+        except ProviderTokenNotInitError as ex:
+            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
         except QuotaExceededError:
             yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
         except ModelCurrentlyNotSupportError:
@@ -85,8 +85,8 @@ class DocumentListApi(DatasetApiResource):
                 dataset_process_rule=dataset.latest_process_rule,
                 created_from='api'
             )
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
 
         document = documents[0]
         if doc_type and doc_metadata:
             metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]
@@ -45,8 +45,8 @@ class AudioApi(WebApiResource):
             raise UnsupportedAudioTypeError()
         except ProviderNotSupportSpeechToTextServiceError:
             raise ProviderNotSupportSpeechToTextError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -52,8 +52,8 @@ class CompletionApi(WebApiResource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -109,8 +109,8 @@ class ChatApi(WebApiResource):
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             raise AppUnavailableError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -150,8 +150,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
             yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-        except ProviderTokenNotInitError:
-            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+        except ProviderTokenNotInitError as ex:
+            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
         except QuotaExceededError:
             yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
         except ModelCurrentlyNotSupportError:
@@ -101,8 +101,8 @@ class MessageMoreLikeThisApi(WebApiResource):
             raise NotFound("Message Not Exists.")
         except MoreLikeThisDisabledError:
             raise AppMoreLikeThisDisabledError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:

@@ -129,8 +129,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
             yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
         except MoreLikeThisDisabledError:
             yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
-        except ProviderTokenNotInitError:
-            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+        except ProviderTokenNotInitError as ex:
+            yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
         except QuotaExceededError:
             yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
         except ModelCurrentlyNotSupportError:

@@ -167,8 +167,8 @@ class MessageSuggestedQuestionApi(WebApiResource):
             raise NotFound("Conversation not found")
         except SuggestedQuestionsAfterAnswerDisabledError:
             raise AppSuggestedQuestionsAfterAnswerDisabledError()
-        except ProviderTokenNotInitError:
-            raise ProviderNotInitializeError()
+        except ProviderTokenNotInitError as ex:
+            raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
@@ -13,8 +13,13 @@ class HostedOpenAICredential(BaseModel):
     api_key: str
 
 
+class HostedAnthropicCredential(BaseModel):
+    api_key: str
+
+
 class HostedLLMCredentials(BaseModel):
     openai: Optional[HostedOpenAICredential] = None
+    anthropic: Optional[HostedAnthropicCredential] = None
 
 
 hosted_llm_credentials = HostedLLMCredentials()

@@ -26,3 +31,6 @@ def init_app(app: Flask):
 
     if app.config.get("OPENAI_API_KEY"):
         hosted_llm_credentials.openai = HostedOpenAICredential(api_key=app.config.get("OPENAI_API_KEY"))
+
+    if app.config.get("ANTHROPIC_API_KEY"):
+        hosted_llm_credentials.anthropic = HostedAnthropicCredential(api_key=app.config.get("ANTHROPIC_API_KEY"))
@@ -48,7 +48,7 @@ class LLMCallbackHandler(BaseCallbackHandler):
         })
 
         self.llm_message.prompt = real_prompts
-        self.llm_message.prompt_tokens = self.llm.get_messages_tokens(messages[0])
+        self.llm_message.prompt_tokens = self.llm.get_num_tokens_from_messages(messages[0])
 
     def on_llm_start(
             self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any

@@ -69,9 +69,8 @@ class LLMCallbackHandler(BaseCallbackHandler):
         if not self.conversation_message_task.streaming:
             self.conversation_message_task.append_message_text(response.generations[0][0].text)
             self.llm_message.completion = response.generations[0][0].text
-            self.llm_message.completion_tokens = response.llm_output['token_usage']['completion_tokens']
-        else:
-            self.llm_message.completion_tokens = self.llm.get_num_tokens(self.llm_message.completion)
+
+        self.llm_message.completion_tokens = self.llm.get_num_tokens(self.llm_message.completion)
 
         self.conversation_message_task.save_message(self.llm_message)
@@ -118,6 +118,7 @@ class Completion:
         prompt, stop_words = cls.get_main_llm_prompt(
             mode=mode,
             llm=final_llm,
+            model=app_model_config.model_dict,
             pre_prompt=app_model_config.pre_prompt,
             query=query,
             inputs=inputs,

@@ -129,6 +130,7 @@ class Completion:
 
         cls.recale_llm_max_tokens(
             final_llm=final_llm,
+            model=app_model_config.model_dict,
             prompt=prompt,
             mode=mode
         )

@@ -138,7 +140,8 @@ class Completion:
         return response
 
     @classmethod
-    def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict,
+    def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, model: dict,
+                            pre_prompt: str, query: str, inputs: dict,
                             chain_output: Optional[str],
                             memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
             Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]:

@@ -151,10 +154,11 @@ class Completion:
 
         if mode == 'completion':
             prompt_template = JinjaPromptTemplate.from_template(
-                template=("""Use the following CONTEXT as your learned knowledge:
-[CONTEXT]
+                template=("""Use the following context as your learned knowledge, inside <context></context> XML tags.
+
+<context>
 {{context}}
-[END CONTEXT]
+</context>
 
 When answer to user:
 - If you don't know, just say that you don't know.

@@ -204,10 +208,11 @@ And answer according to the language of the user's question.
 
             if chain_output:
                 human_inputs['context'] = chain_output
-                human_message_prompt += """Use the following CONTEXT as your learned knowledge.
-[CONTEXT]
+                human_message_prompt += """Use the following context as your learned knowledge, inside <context></context> XML tags.
+
+<context>
 {{context}}
-[END CONTEXT]
+</context>
 
 When answer to user:
 - If you don't know, just say that you don't know.

@@ -219,7 +224,7 @@ And answer according to the language of the user's question.
             if pre_prompt:
                 human_message_prompt += pre_prompt
 
-            query_prompt = "\nHuman: {{query}}\nAI: "
+            query_prompt = "\n\nHuman: {{query}}\n\nAssistant: "
 
             if memory:
                 # append chat histories

@@ -228,9 +233,11 @@ And answer according to the language of the user's question.
                     inputs=human_inputs
                 )
 
-                curr_message_tokens = memory.llm.get_messages_tokens([tmp_human_message])
-                rest_tokens = llm_constant.max_context_token_length[memory.llm.model_name] \
-                    - memory.llm.max_tokens - curr_message_tokens
+                curr_message_tokens = memory.llm.get_num_tokens_from_messages([tmp_human_message])
+                model_name = model['name']
+                max_tokens = model.get("completion_params").get('max_tokens')
+                rest_tokens = llm_constant.max_context_token_length[model_name] \
+                    - max_tokens - curr_message_tokens
                 rest_tokens = max(rest_tokens, 0)
                 histories = cls.get_history_messages_from_memory(memory, rest_tokens)

@@ -241,7 +248,10 @@ And answer according to the language of the user's question.
                 # if histories_param not in human_inputs:
                 #     human_inputs[histories_param] = '{{' + histories_param + '}}'
 
-                human_message_prompt += "\n\n" + histories
+                human_message_prompt += "\n\n" if human_message_prompt else ""
+                human_message_prompt += "Here is the chat histories between human and assistant, " \
+                                        "inside <histories></histories> XML tags.\n\n<histories>"
+                human_message_prompt += histories + "</histories>"
 
             human_message_prompt += query_prompt

@@ -307,13 +317,15 @@ And answer according to the language of the user's question.
             model=app_model_config.model_dict
         )
 
-        model_limited_tokens = llm_constant.max_context_token_length[llm.model_name]
-        max_tokens = llm.max_tokens
+        model_name = app_model_config.model_dict.get("name")
+        model_limited_tokens = llm_constant.max_context_token_length[model_name]
+        max_tokens = app_model_config.model_dict.get("completion_params").get('max_tokens')
 
         # get prompt without memory and context
         prompt, _ = cls.get_main_llm_prompt(
             mode=mode,
             llm=llm,
+            model=app_model_config.model_dict,
             pre_prompt=app_model_config.pre_prompt,
             query=query,
             inputs=inputs,

@@ -332,16 +344,17 @@ And answer according to the language of the user's question.
         return rest_tokens
 
     @classmethod
-    def recale_llm_max_tokens(cls, final_llm: Union[StreamableOpenAI, StreamableChatOpenAI],
+    def recale_llm_max_tokens(cls, final_llm: BaseLanguageModel, model: dict,
                               prompt: Union[str, List[BaseMessage]], mode: str):
         # recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
-        model_limited_tokens = llm_constant.max_context_token_length[final_llm.model_name]
-        max_tokens = final_llm.max_tokens
+        model_name = model.get("name")
+        model_limited_tokens = llm_constant.max_context_token_length[model_name]
+        max_tokens = model.get("completion_params").get('max_tokens')
 
         if mode == 'completion' and isinstance(final_llm, BaseLLM):
             prompt_tokens = final_llm.get_num_tokens(prompt)
         else:
-            prompt_tokens = final_llm.get_messages_tokens(prompt)
+            prompt_tokens = final_llm.get_num_tokens_from_messages(prompt)
 
         if prompt_tokens + max_tokens > model_limited_tokens:
             max_tokens = max(model_limited_tokens - prompt_tokens, 16)

@@ -350,9 +363,10 @@ And answer according to the language of the user's question.
     @classmethod
     def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str,
                                 app_model_config: AppModelConfig, user: Account, streaming: bool):
-        llm: StreamableOpenAI = LLMBuilder.to_llm(
+
+        llm = LLMBuilder.to_llm_from_model(
             tenant_id=app.tenant_id,
-            model_name='gpt-3.5-turbo',
+            model=app_model_config.model_dict,
             streaming=streaming
         )

@@ -360,6 +374,7 @@ And answer according to the language of the user's question.
         original_prompt, _ = cls.get_main_llm_prompt(
             mode="completion",
             llm=llm,
+            model=app_model_config.model_dict,
             pre_prompt=pre_prompt,
             query=message.query,
             inputs=message.inputs,

@@ -390,6 +405,7 @@ And answer according to the language of the user's question.
 
         cls.recale_llm_max_tokens(
             final_llm=llm,
+            model=app_model_config.model_dict,
             prompt=prompt,
             mode='completion'
         )
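To make the template change above concrete, here is an illustrative rendering of the new XML-tagged context block together with the Anthropic-friendly `\n\nHuman:` / `\n\nAssistant:` turn markers; `str.format` stands in for the Jinja templating the code actually uses, and the sample strings are made up:

```python
template = (
    "Use the following context as your learned knowledge, "
    "inside <context></context> XML tags.\n\n"
    "<context>\n{context}\n</context>\n\n"
    "Human: {query}\n\nAssistant: "
)

# Hypothetical inputs, only to show the rendered shape of the prompt.
print(template.format(context="Dify now supports Claude 2.", query="Which models work?"))
```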
@@ -1,6 +1,8 @@
 from _decimal import Decimal
 
 models = {
+    'claude-instant-1': 'anthropic',  # 100,000 tokens
+    'claude-2': 'anthropic',  # 100,000 tokens
     'gpt-4': 'openai',  # 8,192 tokens
     'gpt-4-32k': 'openai',  # 32,768 tokens
     'gpt-3.5-turbo': 'openai',  # 4,096 tokens

@@ -10,10 +12,13 @@ models = {
     'text-curie-001': 'openai',  # 2,049 tokens
     'text-babbage-001': 'openai',  # 2,049 tokens
     'text-ada-001': 'openai',  # 2,049 tokens
-    'text-embedding-ada-002': 'openai'  # 8191 tokens, 1536 dimensions
+    'text-embedding-ada-002': 'openai',  # 8191 tokens, 1536 dimensions
+    'whisper-1': 'openai'
 }
 
 max_context_token_length = {
+    'claude-instant-1': 100000,
+    'claude-2': 100000,
     'gpt-4': 8192,
     'gpt-4-32k': 32768,
     'gpt-3.5-turbo': 4096,

@@ -23,17 +28,21 @@ max_context_token_length = {
     'text-curie-001': 2049,
     'text-babbage-001': 2049,
     'text-ada-001': 2049,
-    'text-embedding-ada-002': 8191
+    'text-embedding-ada-002': 8191,
 }
 
 models_by_mode = {
     'chat': [
+        'claude-instant-1',  # 100,000 tokens
+        'claude-2',  # 100,000 tokens
         'gpt-4',  # 8,192 tokens
         'gpt-4-32k',  # 32,768 tokens
         'gpt-3.5-turbo',  # 4,096 tokens
         'gpt-3.5-turbo-16k',  # 16,384 tokens
     ],
     'completion': [
+        'claude-instant-1',  # 100,000 tokens
+        'claude-2',  # 100,000 tokens
         'gpt-4',  # 8,192 tokens
         'gpt-4-32k',  # 32,768 tokens
         'gpt-3.5-turbo',  # 4,096 tokens

@@ -52,6 +61,14 @@ models_by_mode = {
 model_currency = 'USD'
 
 model_prices = {
+    'claude-instant-1': {
+        'prompt': Decimal('0.00163'),
+        'completion': Decimal('0.00551'),
+    },
+    'claude-2': {
+        'prompt': Decimal('0.01102'),
+        'completion': Decimal('0.03268'),
+    },
     'gpt-4': {
         'prompt': Decimal('0.03'),
         'completion': Decimal('0.06'),
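A hedged sketch of the token-budget arithmetic this changeset moves from LLM object attributes onto the model config dict: the context limit comes from the `max_context_token_length` table above, and the prompt size below is illustrative.

```python
max_context_token_length = {'claude-2': 100000, 'gpt-3.5-turbo': 4096}

model = {'name': 'gpt-3.5-turbo', 'completion_params': {'max_tokens': 512}}
curr_message_tokens = 1200  # assumed size of the current prompt

rest_tokens = (max_context_token_length[model['name']]
               - model['completion_params']['max_tokens']
               - curr_message_tokens)
rest_tokens = max(rest_tokens, 0)  # clamp at zero, as the new code does
print(rest_tokens)  # 2384 tokens left over for chat history
```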
@@ -56,7 +56,7 @@ class ConversationMessageTask:
         )
 
     def init(self):
-        provider_name = LLMBuilder.get_default_provider(self.app.tenant_id)
+        provider_name = LLMBuilder.get_default_provider(self.app.tenant_id, self.model_name)
         self.model_dict['provider'] = provider_name
 
         override_model_configs = None

@@ -89,7 +89,7 @@ class ConversationMessageTask:
             system_message = PromptBuilder.to_system_message(self.app_model_config.pre_prompt, self.inputs)
             system_instruction = system_message.content
             llm = LLMBuilder.to_llm(self.tenant_id, self.model_name)
-            system_instruction_tokens = llm.get_messages_tokens([system_message])
+            system_instruction_tokens = llm.get_num_tokens_from_messages([system_message])
 
         if not self.conversation:
             self.is_new_conversation = True

@@ -185,6 +185,7 @@ class ConversationMessageTask:
         if provider and provider.provider_type == ProviderType.SYSTEM.value:
             db.session.query(Provider).filter(
                 Provider.tenant_id == self.app.tenant_id,
+                Provider.provider_name == provider.provider_name,
                 Provider.quota_limit > Provider.quota_used
             ).update({'quota_used': Provider.quota_used + 1})
|
||||
from langchain.embeddings.base import Embeddings
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
|
||||
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
|
||||
from extensions.ext_database import db
|
||||
from libs import helper
|
||||
from models.dataset import Embedding
|
||||
@ -49,6 +50,7 @@ class CacheEmbedding(Embeddings):
|
||||
text_embeddings.extend(embedding_results)
|
||||
return text_embeddings
|
||||
|
||||
@handle_openai_exceptions
|
||||
def embed_query(self, text: str) -> List[float]:
|
||||
"""Embed query text."""
|
||||
# use doc embedding cache or store if not exists
|
||||
|
||||
@@ -23,6 +23,10 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_name(cls, tenant_id: str, query, answer):
         prompt = CONVERSATION_TITLE_PROMPT
+
+        if len(query) > 2000:
+            query = query[:300] + "...[TRUNCATED]..." + query[-300:]
+
         prompt = prompt.format(query=query)
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,

@@ -52,7 +56,17 @@ class LLMGenerator:
             if not message.answer:
                 continue
 
-            message_qa_text = "Human:" + message.query + "\nAI:" + message.answer + "\n"
+            if len(message.query) > 2000:
+                query = message.query[:300] + "...[TRUNCATED]..." + message.query[-300:]
+            else:
+                query = message.query
+
+            if len(message.answer) > 2000:
+                answer = message.answer[:300] + "...[TRUNCATED]..." + message.answer[-300:]
+            else:
+                answer = message.answer
+
+            message_qa_text = "\n\nHuman:" + query + "\n\nAssistant:" + answer
             if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
                 context += message_qa_text
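The truncation rule added above is worth isolating: texts over 2,000 characters keep only their first and last 300 characters around a marker, bounding the prompt the generator builds. A standalone restatement of the same rule (the function name is ours, not the diff's):

```python
def truncate(text: str, limit: int = 2000, keep: int = 300) -> str:
    # same rule as the diff: long inputs are clipped to head + tail
    if len(text) > limit:
        return text[:keep] + "...[TRUNCATED]..." + text[-keep:]
    return text

assert truncate("x" * 5000) == "x" * 300 + "...[TRUNCATED]..." + "x" * 300
```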
@@ -17,7 +17,7 @@ class IndexBuilder:
 
         model_credentials = LLMBuilder.get_model_credentials(
             tenant_id=dataset.tenant_id,
-            model_provider=LLMBuilder.get_default_provider(dataset.tenant_id),
+            model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'),
             model_name='text-embedding-ada-002'
         )
@@ -40,6 +40,9 @@ class ProviderTokenNotInitError(Exception):
     """
     description = "Provider Token Not Init"
 
+    def __init__(self, *args, **kwargs):
+        self.description = args[0] if args else self.description
+
 
 class QuotaExceededError(Exception):
     """
@@ -8,9 +8,10 @@ from core.llm.provider.base import BaseProvider
 from core.llm.provider.llm_provider_service import LLMProviderService
 from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI
 from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI
+from core.llm.streamable_chat_anthropic import StreamableChatAnthropic
 from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
 from core.llm.streamable_open_ai import StreamableOpenAI
-from models.provider import ProviderType
+from models.provider import ProviderType, ProviderName
 
 
 class LLMBuilder:

@@ -32,43 +33,43 @@ class LLMBuilder:
 
     @classmethod
     def to_llm(cls, tenant_id: str, model_name: str, **kwargs) -> Union[StreamableOpenAI, StreamableChatOpenAI]:
-        provider = cls.get_default_provider(tenant_id)
+        provider = cls.get_default_provider(tenant_id, model_name)
 
         model_credentials = cls.get_model_credentials(tenant_id, provider, model_name)
 
+        llm_cls = None
         mode = cls.get_mode_by_model(model_name)
         if mode == 'chat':
-            if provider == 'openai':
+            if provider == ProviderName.OPENAI.value:
                 llm_cls = StreamableChatOpenAI
-            else:
+            elif provider == ProviderName.AZURE_OPENAI.value:
                 llm_cls = StreamableAzureChatOpenAI
+            elif provider == ProviderName.ANTHROPIC.value:
+                llm_cls = StreamableChatAnthropic
         elif mode == 'completion':
-            if provider == 'openai':
+            if provider == ProviderName.OPENAI.value:
                 llm_cls = StreamableOpenAI
-            else:
+            elif provider == ProviderName.AZURE_OPENAI.value:
                 llm_cls = StreamableAzureOpenAI
-        else:
+
+        if not llm_cls:
             raise ValueError(f"model name {model_name} is not supported.")
 
+        model_kwargs = {
+            'model_name': model_name,
+            'temperature': kwargs.get('temperature', 0),
+            'max_tokens': kwargs.get('max_tokens', 256),
+            'top_p': kwargs.get('top_p', 1),
+            'frequency_penalty': kwargs.get('frequency_penalty', 0),
+            'presence_penalty': kwargs.get('presence_penalty', 0),
+            'callbacks': kwargs.get('callbacks', None),
+            'streaming': kwargs.get('streaming', False),
+        }
 
-        model_extras_kwargs = model_kwargs if mode == 'completion' else {'model_kwargs': model_kwargs}
+        model_kwargs.update(model_credentials)
+        model_kwargs = llm_cls.get_kwargs_from_model_params(model_kwargs)
 
-        return llm_cls(
-            model_name=model_name,
-            temperature=kwargs.get('temperature', 0),
-            max_tokens=kwargs.get('max_tokens', 256),
-            **model_extras_kwargs,
-            callbacks=kwargs.get('callbacks', None),
-            streaming=kwargs.get('streaming', False),
-            # request_timeout=None
-            **model_credentials
-        )
+        return llm_cls(**model_kwargs)
 
     @classmethod
     def to_llm_from_model(cls, tenant_id: str, model: dict, streaming: bool = False,

@@ -118,14 +119,30 @@ class LLMBuilder:
         return provider_service.get_credentials(model_name)
 
     @classmethod
-    def get_default_provider(cls, tenant_id: str) -> str:
-        provider = BaseProvider.get_valid_provider(tenant_id)
-        if not provider:
-            raise ProviderTokenNotInitError()
+    def get_default_provider(cls, tenant_id: str, model_name: str) -> str:
+        provider_name = llm_constant.models[model_name]
+
+        if provider_name == 'openai':
+            # get the default provider (openai / azure_openai) for the tenant
+            openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.OPENAI.value)
+            azure_openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.AZURE_OPENAI.value)
+
+            provider = None
+            if openai_provider and openai_provider.provider_type == ProviderType.CUSTOM.value:
+                provider = openai_provider
+            elif azure_openai_provider and azure_openai_provider.provider_type == ProviderType.CUSTOM.value:
+                provider = azure_openai_provider
+            elif openai_provider and openai_provider.provider_type == ProviderType.SYSTEM.value:
+                provider = openai_provider
+            elif azure_openai_provider and azure_openai_provider.provider_type == ProviderType.SYSTEM.value:
+                provider = azure_openai_provider
+
+            if not provider:
+                raise ProviderTokenNotInitError(
+                    f"No valid {provider_name} model provider credentials found. "
+                    f"Please go to Settings -> Model Provider to complete your provider credentials."
+                )
 
-        if provider.provider_type == ProviderType.SYSTEM.value:
-            provider_name = 'openai'
-        else:
-            provider_name = provider.provider_name
+            if provider.provider_type == ProviderType.SYSTEM.value:
+                provider_name = 'openai'
+            else:
+                provider_name = provider.provider_name
 
         return provider_name
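A condensed sketch of the new dispatch in `LLMBuilder.to_llm`: provider and mode jointly select the wrapper class, and Anthropic becomes a first-class chat provider. The table form below is a rephrasing for clarity, not the code's actual if/elif chain, and the class names are shown as strings:

```python
def pick_llm_cls(provider: str, mode: str) -> str:
    table = {
        ('openai', 'chat'): 'StreamableChatOpenAI',
        ('azure_openai', 'chat'): 'StreamableAzureChatOpenAI',
        ('anthropic', 'chat'): 'StreamableChatAnthropic',
        ('openai', 'completion'): 'StreamableOpenAI',
        ('azure_openai', 'completion'): 'StreamableAzureOpenAI',
        # no ('anthropic', 'completion') entry: Claude is wired up as chat-only here
    }
    cls = table.get((provider, mode))
    if cls is None:
        raise ValueError(f"provider {provider} with mode {mode} is not supported.")
    return cls

print(pick_llm_cls('anthropic', 'chat'))  # -> StreamableChatAnthropic
```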
@@ -1,23 +1,138 @@
-from typing import Optional
+import json
+import logging
+from typing import Optional, Union
 
+import anthropic
+from langchain.chat_models import ChatAnthropic
+from langchain.schema import HumanMessage
+
+from core import hosted_llm_credentials
+from core.llm.error import ProviderTokenNotInitError
 from core.llm.provider.base import BaseProvider
-from models.provider import ProviderName
+from core.llm.provider.errors import ValidateFailedError
+from models.provider import ProviderName, ProviderType
 
 
 class AnthropicProvider(BaseProvider):
     def get_models(self, model_id: Optional[str] = None) -> list[dict]:
-        credentials = self.get_credentials(model_id)
-        # todo
-        return []
+        return [
+            {
+                'id': 'claude-instant-1',
+                'name': 'claude-instant-1',
+            },
+            {
+                'id': 'claude-2',
+                'name': 'claude-2',
+            },
+        ]
 
     def get_credentials(self, model_id: Optional[str] = None) -> dict:
         """
         Returns the API credentials for Azure OpenAI as a dictionary, for the given tenant_id.
         The dictionary contains keys: azure_api_type, azure_api_version, azure_api_base, and azure_api_key.
         """
-        return {
-            'anthropic_api_key': self.get_provider_api_key(model_id=model_id)
-        }
+        return self.get_provider_api_key(model_id=model_id)
 
     def get_provider_name(self):
         return ProviderName.ANTHROPIC
+
+    def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
+        """
+        Returns the provider configs.
+        """
+        try:
+            config = self.get_provider_api_key(only_custom=only_custom)
+        except:
+            config = {
+                'anthropic_api_key': ''
+            }
+
+        if obfuscated:
+            if not config.get('anthropic_api_key'):
+                config = {
+                    'anthropic_api_key': ''
+                }
+
+            config['anthropic_api_key'] = self.obfuscated_token(config.get('anthropic_api_key'))
+            return config
+
+        return config
+
+    def get_encrypted_token(self, config: Union[dict | str]):
+        """
+        Returns the encrypted token.
+        """
+        return json.dumps({
+            'anthropic_api_key': self.encrypt_token(config['anthropic_api_key'])
+        })
+
+    def get_decrypted_token(self, token: str):
+        """
+        Returns the decrypted token.
+        """
+        config = json.loads(token)
+        config['anthropic_api_key'] = self.decrypt_token(config['anthropic_api_key'])
+        return config
+
+    def get_token_type(self):
+        return dict
+
+    def config_validate(self, config: Union[dict | str]):
+        """
+        Validates the given config.
+        """
+        # check OpenAI / Azure OpenAI credential is valid
+        openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.OPENAI.value)
+        azure_openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.AZURE_OPENAI.value)
+
+        provider = None
+        if openai_provider:
+            provider = openai_provider
+        elif azure_openai_provider:
+            provider = azure_openai_provider
+
+        if not provider:
+            raise ValidateFailedError(f"OpenAI or Azure OpenAI provider must be configured first.")
+
+        if provider.provider_type == ProviderType.SYSTEM.value:
+            quota_used = provider.quota_used if provider.quota_used is not None else 0
+            quota_limit = provider.quota_limit if provider.quota_limit is not None else 0
+            if quota_used >= quota_limit:
+                raise ValidateFailedError(f"Your quota for Dify Hosted OpenAI has been exhausted, "
+                                          f"please configure OpenAI or Azure OpenAI provider first.")
+
+        try:
+            if not isinstance(config, dict):
+                raise ValueError('Config must be a object.')
+
+            if 'anthropic_api_key' not in config:
+                raise ValueError('anthropic_api_key must be provided.')
+
+            chat_llm = ChatAnthropic(
+                model='claude-instant-1',
+                anthropic_api_key=config['anthropic_api_key'],
+                max_tokens_to_sample=10,
+                temperature=0,
+                default_request_timeout=60
+            )
+
+            messages = [
+                HumanMessage(
+                    content="ping"
+                )
+            ]
+
+            chat_llm(messages)
+        except anthropic.APIConnectionError as ex:
+            raise ValidateFailedError(f"Anthropic: Connection error, cause: {ex.__cause__}")
+        except (anthropic.APIStatusError, anthropic.RateLimitError) as ex:
+            raise ValidateFailedError(f"Anthropic: Error code: {ex.status_code} - "
+                                      f"{ex.body['error']['type']}: {ex.body['error']['message']}")
+        except Exception as ex:
+            logging.exception('Anthropic config validation failed')
+            raise ex
+
+    def get_hosted_credentials(self) -> Union[str | dict]:
+        if not hosted_llm_credentials.anthropic or not hosted_llm_credentials.anthropic.api_key:
+            raise ProviderTokenNotInitError(
+                f"No valid {self.get_provider_name().value} model provider credentials found. "
+                f"Please go to Settings -> Model Provider to complete your provider credentials."
+            )
+
+        return {'anthropic_api_key': hosted_llm_credentials.anthropic.api_key}
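To clarify the credential storage introduced above: the Anthropic config is persisted as JSON with only the API key encrypted. The sketch below uses base64 as a stand-in for the RSA helpers (`libs.rsa`) that the real `encrypt_token`/`decrypt_token` rely on, and the key value is fake:

```python
import base64
import json

def encrypt_token(token: str) -> str:   # stand-in for the RSA-based helper
    return base64.b64encode(token.encode()).decode()

def decrypt_token(token: str) -> str:   # stand-in for the RSA-based helper
    return base64.b64decode(token.encode()).decode()

# get_encrypted_token: dict -> JSON string with the key encrypted
stored = json.dumps({'anthropic_api_key': encrypt_token('sk-ant-not-a-real-key')})

# get_decrypted_token: JSON string -> dict with the key decrypted
config = json.loads(stored)
config['anthropic_api_key'] = decrypt_token(config['anthropic_api_key'])
assert config['anthropic_api_key'] == 'sk-ant-not-a-real-key'
```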
@@ -52,12 +52,12 @@ class AzureProvider(BaseProvider):
     def get_provider_name(self):
         return ProviderName.AZURE_OPENAI
 
-    def get_provider_configs(self, obfuscated: bool = False) -> Union[str | dict]:
+    def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
         """
         Returns the provider configs.
         """
         try:
-            config = self.get_provider_api_key()
+            config = self.get_provider_api_key(only_custom=only_custom)
         except:
             config = {
                 'openai_api_type': 'azure',

@@ -81,7 +81,6 @@ class AzureProvider(BaseProvider):
         return config
 
     def get_token_type(self):
-        # TODO: change to dict when implemented
         return dict
 
     def config_validate(self, config: Union[dict | str]):

@@ -98,12 +97,11 @@ class AzureProvider(BaseProvider):
             models = self.get_models(credentials=config)
 
             if not models:
-                raise ValidateFailedError("Please add deployments for 'text-davinci-003', "
+                raise ValidateFailedError("Please add deployments for "
                                           "'gpt-3.5-turbo', 'text-embedding-ada-002' (required) "
-                                          "and 'gpt-4', 'gpt-35-turbo-16k' (optional).")
+                                          "and 'gpt-4', 'gpt-35-turbo-16k', 'text-davinci-003' (optional).")
 
             fixed_model_ids = [
-                'text-davinci-003',
                 'gpt-35-turbo',
                 'text-embedding-ada-002'
             ]
@@ -2,7 +2,7 @@ import base64
 from abc import ABC, abstractmethod
 from typing import Optional, Union
 
-from core import hosted_llm_credentials
+from core.constant import llm_constant
 from core.llm.error import QuotaExceededError, ModelCurrentlyNotSupportError, ProviderTokenNotInitError
 from extensions.ext_database import db
 from libs import rsa

@@ -14,15 +14,18 @@ class BaseProvider(ABC):
     def __init__(self, tenant_id: str):
         self.tenant_id = tenant_id
 
-    def get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> Union[str | dict]:
+    def get_provider_api_key(self, model_id: Optional[str] = None, only_custom: bool = False) -> Union[str | dict]:
         """
         Returns the decrypted API key for the given tenant_id and provider_name.
         If the provider is of type SYSTEM and the quota is exceeded, raises a QuotaExceededError.
        If the provider is not found or not valid, raises a ProviderTokenNotInitError.
         """
-        provider = self.get_provider(prefer_custom)
+        provider = self.get_provider(only_custom)
         if not provider:
-            raise ProviderTokenNotInitError()
+            raise ProviderTokenNotInitError(
+                f"No valid {llm_constant.models[model_id]} model provider credentials found. "
+                f"Please go to Settings -> Model Provider to complete your provider credentials."
+            )
 
         if provider.provider_type == ProviderType.SYSTEM.value:
             quota_used = provider.quota_used if provider.quota_used is not None else 0

@@ -38,18 +41,19 @@ class BaseProvider(ABC):
         else:
             return self.get_decrypted_token(provider.encrypted_config)
 
-    def get_provider(self, prefer_custom: bool) -> Optional[Provider]:
+    def get_provider(self, only_custom: bool = False) -> Optional[Provider]:
         """
         Returns the Provider instance for the given tenant_id and provider_name.
-        If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag.
         """
-        return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, prefer_custom)
+        return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, only_custom)
 
     @classmethod
-    def get_valid_provider(cls, tenant_id: str, provider_name: str = None, prefer_custom: bool = False) -> Optional[Provider]:
+    def get_valid_provider(cls, tenant_id: str, provider_name: str = None, only_custom: bool = False) -> Optional[
+        Provider]:
         """
         Returns the Provider instance for the given tenant_id and provider_name.
-        If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag.
+        If both CUSTOM and System providers exist.
         """
         query = db.session.query(Provider).filter(
             Provider.tenant_id == tenant_id

@@ -58,39 +62,31 @@ class BaseProvider(ABC):
         if provider_name:
             query = query.filter(Provider.provider_name == provider_name)
 
-        providers = query.order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all()
+        if only_custom:
+            query = query.filter(Provider.provider_type == ProviderType.CUSTOM.value)
 
-        custom_provider = None
-        system_provider = None
+        providers = query.order_by(Provider.provider_type.asc()).all()
 
         for provider in providers:
             if provider.provider_type == ProviderType.CUSTOM.value and provider.is_valid and provider.encrypted_config:
-                custom_provider = provider
+                return provider
             elif provider.provider_type == ProviderType.SYSTEM.value and provider.is_valid:
-                system_provider = provider
+                return provider
 
-        if custom_provider:
-            return custom_provider
-        elif system_provider:
-            return system_provider
-        else:
-            return None
+        return None
 
-    def get_hosted_credentials(self) -> str:
-        if self.get_provider_name() != ProviderName.OPENAI:
-            raise ProviderTokenNotInitError()
+    def get_hosted_credentials(self) -> Union[str | dict]:
+        raise ProviderTokenNotInitError(
+            f"No valid {self.get_provider_name().value} model provider credentials found. "
+            f"Please go to Settings -> Model Provider to complete your provider credentials."
+        )
 
-        if not hosted_llm_credentials.openai or not hosted_llm_credentials.openai.api_key:
-            raise ProviderTokenNotInitError()
-
-        return hosted_llm_credentials.openai.api_key
-
-    def get_provider_configs(self, obfuscated: bool = False) -> Union[str | dict]:
+    def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
         """
         Returns the provider configs.
         """
         try:
-            config = self.get_provider_api_key()
+            config = self.get_provider_api_key(only_custom=only_custom)
         except:
             config = ''
@ -31,11 +31,11 @@ class LLMProviderService:

    def get_credentials(self, model_id: Optional[str] = None) -> dict:
        return self.provider.get_credentials(model_id)

    def get_provider_configs(self, obfuscated: bool = False) -> Union[str | dict]:
        return self.provider.get_provider_configs(obfuscated)
    def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
        return self.provider.get_provider_configs(obfuscated=obfuscated, only_custom=only_custom)

    def get_provider_db_record(self, prefer_custom: bool = False) -> Optional[Provider]:
        return self.provider.get_provider(prefer_custom)
    def get_provider_db_record(self) -> Optional[Provider]:
        return self.provider.get_provider()

    def config_validate(self, config: Union[dict | str]):
        """
@ -4,6 +4,8 @@ from typing import Optional, Union
import openai
from openai.error import AuthenticationError, OpenAIError

from core import hosted_llm_credentials
from core.llm.error import ProviderTokenNotInitError
from core.llm.moderation import Moderation
from core.llm.provider.base import BaseProvider
from core.llm.provider.errors import ValidateFailedError
@ -42,3 +44,12 @@ class OpenAIProvider(BaseProvider):
        except Exception as ex:
            logging.exception('OpenAI config validation failed')
            raise ex

    def get_hosted_credentials(self) -> Union[str | dict]:
        if not hosted_llm_credentials.openai or not hosted_llm_credentials.openai.api_key:
            raise ProviderTokenNotInitError(
                f"No valid {self.get_provider_name().value} model provider credentials found. "
                f"Please go to Settings -> Model Provider to complete your provider credentials."
            )

        return hosted_llm_credentials.openai.api_key
@ -1,11 +1,11 @@
from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, Callbacks
from langchain.schema import BaseMessage, ChatResult, LLMResult
from langchain.callbacks.manager import Callbacks
from langchain.schema import BaseMessage, LLMResult
from langchain.chat_models import AzureChatOpenAI
from typing import Optional, List, Dict, Any

from pydantic import root_validator

from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions


class StreamableAzureChatOpenAI(AzureChatOpenAI):
@ -46,30 +46,7 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI):
            "organization": self.openai_organization if self.openai_organization else None,
        }

    def get_messages_tokens(self, messages: List[BaseMessage]) -> int:
        """Get the number of tokens in a list of messages.

        Args:
            messages: The messages to count the tokens of.

        Returns:
            The number of tokens in the messages.
        """
        tokens_per_message = 5
        tokens_per_request = 3

        message_tokens = tokens_per_request
        message_strs = ''
        for message in messages:
            message_strs += message.content
            message_tokens += tokens_per_message

        # calc once
        message_tokens += self.get_num_tokens(message_strs)

        return message_tokens

    @handle_llm_exceptions
    @handle_openai_exceptions
    def generate(
        self,
        messages: List[List[BaseMessage]],
@ -79,12 +56,18 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI):
    ) -> LLMResult:
        return super().generate(messages, stop, callbacks, **kwargs)

    @handle_llm_exceptions_async
    async def agenerate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        return await super().agenerate(messages, stop, callbacks, **kwargs)
    @classmethod
    def get_kwargs_from_model_params(cls, params: dict):
        model_kwargs = {
            'top_p': params.get('top_p', 1),
            'frequency_penalty': params.get('frequency_penalty', 0),
            'presence_penalty': params.get('presence_penalty', 0),
        }

        del params['top_p']
        del params['frequency_penalty']
        del params['presence_penalty']

        params['model_kwargs'] = model_kwargs

        return params
@ -5,7 +5,7 @@ from typing import Optional, List, Dict, Mapping, Any

from pydantic import root_validator

from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions


class StreamableAzureOpenAI(AzureOpenAI):
@ -50,7 +50,7 @@ class StreamableAzureOpenAI(AzureOpenAI):
            "organization": self.openai_organization if self.openai_organization else None,
        }}

    @handle_llm_exceptions
    @handle_openai_exceptions
    def generate(
        self,
        prompts: List[str],
@ -60,12 +60,6 @@ class StreamableAzureOpenAI(AzureOpenAI):
    ) -> LLMResult:
        return super().generate(prompts, stop, callbacks, **kwargs)

    @handle_llm_exceptions_async
    async def agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        return await super().agenerate(prompts, stop, callbacks, **kwargs)
    @classmethod
    def get_kwargs_from_model_params(cls, params: dict):
        return params
39
api/core/llm/streamable_chat_anthropic.py
Normal file
@ -0,0 +1,39 @@
from typing import List, Optional, Any, Dict

from langchain.callbacks.manager import Callbacks
from langchain.chat_models import ChatAnthropic
from langchain.schema import BaseMessage, LLMResult

from core.llm.wrappers.anthropic_wrapper import handle_anthropic_exceptions


class StreamableChatAnthropic(ChatAnthropic):
    """
    Wrapper around Anthropic's large language model.
    """

    @handle_anthropic_exceptions
    def generate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        *,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> LLMResult:
        return super().generate(messages, stop, callbacks, tags=tags, metadata=metadata, **kwargs)

    @classmethod
    def get_kwargs_from_model_params(cls, params: dict):
        params['model'] = params.get('model_name')
        del params['model_name']

        params['max_tokens_to_sample'] = params.get('max_tokens')
        del params['max_tokens']

        del params['frequency_penalty']
        del params['presence_penalty']

        return params
@ -7,7 +7,7 @@ from typing import Optional, List, Dict, Any

from pydantic import root_validator

from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions


class StreamableChatOpenAI(ChatOpenAI):
@ -48,30 +48,7 @@ class StreamableChatOpenAI(ChatOpenAI):
            "organization": self.openai_organization if self.openai_organization else None,
        }

    def get_messages_tokens(self, messages: List[BaseMessage]) -> int:
        """Get the number of tokens in a list of messages.

        Args:
            messages: The messages to count the tokens of.

        Returns:
            The number of tokens in the messages.
        """
        tokens_per_message = 5
        tokens_per_request = 3

        message_tokens = tokens_per_request
        message_strs = ''
        for message in messages:
            message_strs += message.content
            message_tokens += tokens_per_message

        # calc once
        message_tokens += self.get_num_tokens(message_strs)

        return message_tokens

    @handle_llm_exceptions
    @handle_openai_exceptions
    def generate(
        self,
        messages: List[List[BaseMessage]],
@ -81,12 +58,18 @@ class StreamableChatOpenAI(ChatOpenAI):
    ) -> LLMResult:
        return super().generate(messages, stop, callbacks, **kwargs)

    @handle_llm_exceptions_async
    async def agenerate(
        self,
        messages: List[List[BaseMessage]],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        return await super().agenerate(messages, stop, callbacks, **kwargs)
    @classmethod
    def get_kwargs_from_model_params(cls, params: dict):
        model_kwargs = {
            'top_p': params.get('top_p', 1),
            'frequency_penalty': params.get('frequency_penalty', 0),
            'presence_penalty': params.get('presence_penalty', 0),
        }

        del params['top_p']
        del params['frequency_penalty']
        del params['presence_penalty']

        params['model_kwargs'] = model_kwargs

        return params
@ -6,7 +6,7 @@ from typing import Optional, List, Dict, Any, Mapping
from langchain import OpenAI
from pydantic import root_validator

from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions


class StreamableOpenAI(OpenAI):
@ -49,7 +49,7 @@ class StreamableOpenAI(OpenAI):
            "organization": self.openai_organization if self.openai_organization else None,
        }}

    @handle_llm_exceptions
    @handle_openai_exceptions
    def generate(
        self,
        prompts: List[str],
@ -59,12 +59,6 @@ class StreamableOpenAI(OpenAI):
    ) -> LLMResult:
        return super().generate(prompts, stop, callbacks, **kwargs)

    @handle_llm_exceptions_async
    async def agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> LLMResult:
        return await super().agenerate(prompts, stop, callbacks, **kwargs)
    @classmethod
    def get_kwargs_from_model_params(cls, params: dict):
        return params
@ -1,6 +1,7 @@
import openai

from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
from models.provider import ProviderName
from core.llm.error_handle_wraps import handle_llm_exceptions
from core.llm.provider.base import BaseProvider


@ -13,7 +14,7 @@ class Whisper:
        self.client = openai.Audio
        self.credentials = provider.get_credentials()

    @handle_llm_exceptions
    @handle_openai_exceptions
    def transcribe(self, file):
        return self.client.transcribe(
            model='whisper-1',
27
api/core/llm/wrappers/anthropic_wrapper.py
Normal file
@ -0,0 +1,27 @@
import logging
from functools import wraps

import anthropic

from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, \
    LLMBadRequestError


def handle_anthropic_exceptions(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except anthropic.APIConnectionError as e:
            logging.exception("Failed to connect to Anthropic API.")
            raise LLMAPIConnectionError(f"Anthropic: The server could not be reached, cause: {e.__cause__}")
        except anthropic.RateLimitError:
            raise LLMRateLimitError("Anthropic: A 429 status code was received; we should back off a bit.")
        except anthropic.AuthenticationError as e:
            raise LLMAuthorizationError(f"Anthropic: {e.message}")
        except anthropic.BadRequestError as e:
            raise LLMBadRequestError(f"Anthropic: {e.message}")
        except anthropic.APIStatusError as e:
            raise LLMAPIUnavailableError(f"Anthropic: code: {e.status_code}, cause: {e.message}")

    return wrapper
@ -7,7 +7,7 @@ from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRat
    LLMBadRequestError


def handle_llm_exceptions(func):
def handle_openai_exceptions(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
@ -29,27 +29,3 @@ def handle_llm_exceptions(func):
            raise LLMBadRequestError(e.__class__.__name__ + ":" + str(e))

    return wrapper


def handle_llm_exceptions_async(func):
    @wraps(func)
    async def wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except openai.error.InvalidRequestError as e:
            logging.exception("Invalid request to OpenAI API.")
            raise LLMBadRequestError(str(e))
        except openai.error.APIConnectionError as e:
            logging.exception("Failed to connect to OpenAI API.")
            raise LLMAPIConnectionError(e.__class__.__name__ + ":" + str(e))
        except (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout) as e:
            logging.exception("OpenAI service unavailable.")
            raise LLMAPIUnavailableError(e.__class__.__name__ + ":" + str(e))
        except openai.error.RateLimitError as e:
            raise LLMRateLimitError(str(e))
        except openai.error.AuthenticationError as e:
            raise LLMAuthorizationError(str(e))
        except openai.error.OpenAIError as e:
            raise LLMBadRequestError(e.__class__.__name__ + ":" + str(e))

    return wrapper
@ -1,7 +1,7 @@
from typing import Any, List, Dict, Union

from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import get_buffer_string, BaseMessage, HumanMessage, AIMessage
from langchain.schema import get_buffer_string, BaseMessage, HumanMessage, AIMessage, BaseLanguageModel

from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
@ -12,8 +12,8 @@ from models.model import Conversation, Message
class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory):
    conversation: Conversation
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    llm: Union[StreamableChatOpenAI | StreamableOpenAI]
    ai_prefix: str = "Assistant"
    llm: BaseLanguageModel
    memory_key: str = "chat_history"
    max_token_limit: int = 2000
    message_limit: int = 10
@ -38,12 +38,12 @@ class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory):
            return chat_messages

        # prune the chat message if it exceeds the max token limit
        curr_buffer_length = self.llm.get_messages_tokens(chat_messages)
        curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages)
        if curr_buffer_length > self.max_token_limit:
            pruned_memory = []
            while curr_buffer_length > self.max_token_limit and chat_messages:
                pruned_memory.append(chat_messages.pop(0))
                curr_buffer_length = self.llm.get_messages_tokens(chat_messages)
                curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages)

        return chat_messages
@ -30,7 +30,7 @@ class DatasetTool(BaseTool):
        else:
            model_credentials = LLMBuilder.get_model_credentials(
                tenant_id=self.dataset.tenant_id,
                model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id),
                model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'),
                model_name='text-embedding-ada-002'
            )

@ -60,7 +60,7 @@ class DatasetTool(BaseTool):
    async def _arun(self, tool_input: str) -> str:
        model_credentials = LLMBuilder.get_model_credentials(
            tenant_id=self.dataset.tenant_id,
            model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id),
            model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'),
            model_name='text-embedding-ada-002'
        )
@ -1,4 +1,7 @@
from flask import current_app

from events.tenant_event import tenant_was_updated
from models.provider import ProviderName
from services.provider_service import ProviderService


@ -6,4 +9,16 @@ from services.provider_service import ProviderService
def handle(sender, **kwargs):
    tenant = sender
    if tenant.status == 'normal':
        ProviderService.create_system_provider(tenant)
        ProviderService.create_system_provider(
            tenant,
            ProviderName.OPENAI.value,
            current_app.config['OPENAI_HOSTED_QUOTA_LIMIT'],
            True
        )

        ProviderService.create_system_provider(
            tenant,
            ProviderName.ANTHROPIC.value,
            current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'],
            True
        )
@ -1,4 +1,7 @@
from flask import current_app

from events.tenant_event import tenant_was_created
from models.provider import ProviderName
from services.provider_service import ProviderService


@ -6,4 +9,16 @@ from services.provider_service import ProviderService
def handle(sender, **kwargs):
    tenant = sender
    if tenant.status == 'normal':
        ProviderService.create_system_provider(tenant)
        ProviderService.create_system_provider(
            tenant,
            ProviderName.OPENAI.value,
            current_app.config['OPENAI_HOSTED_QUOTA_LIMIT'],
            True
        )

        ProviderService.create_system_provider(
            tenant,
            ProviderName.ANTHROPIC.value,
            current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'],
            True
        )
@ -10,7 +10,7 @@ flask-session2==1.3.1
flask-cors==3.0.10
gunicorn~=20.1.0
gevent~=22.10.2
langchain==0.0.209
langchain==0.0.230
openai~=0.27.5
psycopg2-binary~=2.9.6
pycryptodome==3.17
@ -35,3 +35,4 @@ docx2txt==0.8
pypdfium2==4.16.0
resend~=0.5.1
pyjwt~=2.6.0
anthropic~=0.3.4
@ -6,6 +6,30 @@ from models.account import Account
from services.dataset_service import DatasetService
from core.llm.llm_builder import LLMBuilder

MODEL_PROVIDERS = [
    'openai',
    'anthropic',
]

MODELS_BY_APP_MODE = {
    'chat': [
        'claude-instant-1',
        'claude-2',
        'gpt-4',
        'gpt-4-32k',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-16k',
    ],
    'completion': [
        'claude-instant-1',
        'claude-2',
        'gpt-4',
        'gpt-4-32k',
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-16k',
        'text-davinci-003',
    ]
}

class AppModelConfigService:
    @staticmethod
@ -125,7 +149,7 @@ class AppModelConfigService:
        if not isinstance(config["speech_to_text"]["enabled"], bool):
            raise ValueError("enabled in speech_to_text must be of boolean type")

        provider_name = LLMBuilder.get_default_provider(account.current_tenant_id)
        provider_name = LLMBuilder.get_default_provider(account.current_tenant_id, 'whisper-1')

        if config["speech_to_text"]["enabled"] and provider_name != 'openai':
            raise ValueError("provider not support speech to text")
@ -153,14 +177,14 @@ class AppModelConfigService:
            raise ValueError("model must be of object type")

        # model.provider
        if 'provider' not in config["model"] or config["model"]["provider"] != "openai":
            raise ValueError("model.provider must be 'openai'")
        if 'provider' not in config["model"] or config["model"]["provider"] not in MODEL_PROVIDERS:
            raise ValueError(f"model.provider is required and must be in {str(MODEL_PROVIDERS)}")

        # model.name
        if 'name' not in config["model"]:
            raise ValueError("model.name is required")

        if config["model"]["name"] not in llm_constant.models_by_mode[mode]:
        if config["model"]["name"] not in MODELS_BY_APP_MODE[mode]:
            raise ValueError("model.name must be in the specified model list")

        # model.completion_params
@ -27,7 +27,7 @@ class AudioService:
            message = f"Audio size larger than {FILE_SIZE} mb"
            raise AudioTooLargeServiceError(message)

        provider_name = LLMBuilder.get_default_provider(tenant_id)
        provider_name = LLMBuilder.get_default_provider(tenant_id, 'whisper-1')
        if provider_name != ProviderName.OPENAI.value:
            raise ProviderNotSupportSpeechToTextServiceError()

@ -37,8 +37,3 @@ class AudioService:
        buffer.name = 'temp.mp3'

        return Whisper(provider_service.provider).transcribe(buffer)
@ -4,6 +4,9 @@ import datetime
import time
import random
from typing import Optional, List

from flask import current_app

from extensions.ext_redis import redis_client
from flask_login import current_user

@ -374,6 +377,12 @@ class DocumentService:
    def save_document_with_dataset_id(dataset: Dataset, document_data: dict,
                                      account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None,
                                      created_from: str = 'web'):
        # check document limit
        if current_app.config['EDITION'] == 'CLOUD':
            documents_count = DocumentService.get_tenant_documents_count()
            tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
            if documents_count > tenant_document_count:
                raise ValueError(f"over document limit {tenant_document_count}.")
        # if dataset is empty, update dataset data_source_type
        if not dataset.data_source_type:
            dataset.data_source_type = document_data["data_source"]["type"]
@ -521,6 +530,14 @@ class DocumentService:
        )
        return document

    @staticmethod
    def get_tenant_documents_count():
        documents_count = Document.query.filter(Document.completed_at.isnot(None),
                                                Document.enabled == True,
                                                Document.archived == False,
                                                Document.tenant_id == current_user.current_tenant_id).count()
        return documents_count

    @staticmethod
    def update_document_with_dataset_id(dataset: Dataset, document_data: dict,
                                        account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None,
@ -616,6 +633,12 @@ class DocumentService:

    @staticmethod
    def save_document_without_dataset_id(tenant_id: str, document_data: dict, account: Account):
        # check document limit
        if current_app.config['EDITION'] == 'CLOUD':
            documents_count = DocumentService.get_tenant_documents_count()
            tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
            if documents_count > tenant_document_count:
                raise ValueError(f"over document limit {tenant_document_count}.")
        # save dataset
        dataset = Dataset(
            tenant_id=tenant_id,
@ -31,7 +31,7 @@ class HitTestingService:

        model_credentials = LLMBuilder.get_model_credentials(
            tenant_id=dataset.tenant_id,
            model_provider=LLMBuilder.get_default_provider(dataset.tenant_id),
            model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'),
            model_name='text-embedding-ada-002'
        )
@ -10,50 +10,40 @@ from models.provider import *
class ProviderService:

    @staticmethod
    def init_supported_provider(tenant, edition):
    def init_supported_provider(tenant):
        """Initialize the model provider, check whether the supported provider has a record"""

        providers = Provider.query.filter_by(tenant_id=tenant.id).all()
        need_init_provider_names = [ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value, ProviderName.ANTHROPIC.value]

        openai_provider_exists = False
        azure_openai_provider_exists = False

        # TODO: The cloud version needs to construct the data of the SYSTEM type
        providers = db.session.query(Provider).filter(
            Provider.tenant_id == tenant.id,
            Provider.provider_type == ProviderType.CUSTOM.value,
            Provider.provider_name.in_(need_init_provider_names)
        ).all()

        exists_provider_names = []
        for provider in providers:
            if provider.provider_name == ProviderName.OPENAI.value and provider.provider_type == ProviderType.CUSTOM.value:
                openai_provider_exists = True
            if provider.provider_name == ProviderName.AZURE_OPENAI.value and provider.provider_type == ProviderType.CUSTOM.value:
                azure_openai_provider_exists = True
            exists_provider_names.append(provider.provider_name)

        # Initialize the model provider, check whether the supported provider has a record
        not_exists_provider_names = list(set(need_init_provider_names) - set(exists_provider_names))

        # Create default providers if they don't exist
        if not openai_provider_exists:
            openai_provider = Provider(
                tenant_id=tenant.id,
                provider_name=ProviderName.OPENAI.value,
                provider_type=ProviderType.CUSTOM.value,
                is_valid=False
            )
            db.session.add(openai_provider)
        if not_exists_provider_names:
            # Initialize the model provider, check whether the supported provider has a record
            for provider_name in not_exists_provider_names:
                provider = Provider(
                    tenant_id=tenant.id,
                    provider_name=provider_name,
                    provider_type=ProviderType.CUSTOM.value,
                    is_valid=False
                )
                db.session.add(provider)

        if not azure_openai_provider_exists:
            azure_openai_provider = Provider(
                tenant_id=tenant.id,
                provider_name=ProviderName.AZURE_OPENAI.value,
                provider_type=ProviderType.CUSTOM.value,
                is_valid=False
            )
            db.session.add(azure_openai_provider)

        if not openai_provider_exists or not azure_openai_provider_exists:
            db.session.commit()

    @staticmethod
    def get_obfuscated_api_key(tenant, provider_name: ProviderName):
    def get_obfuscated_api_key(tenant, provider_name: ProviderName, only_custom: bool = False):
        llm_provider_service = LLMProviderService(tenant.id, provider_name.value)
        return llm_provider_service.get_provider_configs(obfuscated=True)
        return llm_provider_service.get_provider_configs(obfuscated=True, only_custom=only_custom)

    @staticmethod
    def get_token_type(tenant, provider_name: ProviderName):
@ -73,7 +63,7 @@ class ProviderService:
        return llm_provider_service.get_encrypted_token(configs)

    @staticmethod
    def create_system_provider(tenant: Tenant, provider_name: str = ProviderName.OPENAI.value,
    def create_system_provider(tenant: Tenant, provider_name: str = ProviderName.OPENAI.value, quota_limit: int = 200,
                               is_valid: bool = True):
        if current_app.config['EDITION'] != 'CLOUD':
            return
@ -90,7 +80,7 @@ class ProviderService:
            provider_name=provider_name,
            provider_type=ProviderType.SYSTEM.value,
            quota_type=ProviderQuotaType.TRIAL.value,
            quota_limit=200,
            quota_limit=quota_limit,
            encrypted_config='',
            is_valid=is_valid,
        )
@ -1,6 +1,6 @@
from extensions.ext_database import db
from models.account import Tenant
from models.provider import Provider, ProviderType
from models.provider import Provider, ProviderType, ProviderName


class WorkspaceService:
@ -33,7 +33,7 @@ class WorkspaceService:
                if provider.is_valid and provider.encrypted_config:
                    custom_provider = provider
            elif provider.provider_type == ProviderType.SYSTEM.value:
                if provider.is_valid:
                if provider.provider_name == ProviderName.OPENAI.value and provider.is_valid:
                    system_provider = provider

        if system_provider and not custom_provider:
@ -44,14 +44,13 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
        if dataset_documents:
            # save vector index
            index = IndexBuilder.get_index(dataset, 'high_quality', ignore_high_quality_check=True)
            documents = []
            for dataset_document in dataset_documents:
                # delete from vector index
                segments = db.session.query(DocumentSegment).filter(
                    DocumentSegment.document_id == dataset_document.id,
                    DocumentSegment.enabled == True
                ).order_by(DocumentSegment.position.asc()).all()

                documents = []
                for segment in segments:
                    document = Document(
                        page_content=segment.content,
@ -65,8 +64,8 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):

                    documents.append(document)

                # save vector index
                index.add_texts(documents)
            # save vector index
            index.add_texts(documents)

        end_at = time.perf_counter()
        logging.info(
@ -2,7 +2,7 @@ version: '3.1'
services:
  # API service
  api:
    image: langgenius/dify-api:0.3.8
    image: langgenius/dify-api:0.3.10
    restart: always
    environment:
      # Startup mode, 'api' starts the API server.
@ -124,7 +124,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:0.3.8
    image: langgenius/dify-api:0.3.10
    restart: always
    environment:
      # Startup mode, 'worker' starts the Celery worker for processing the queue.
@ -176,7 +176,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:0.3.8
    image: langgenius/dify-web:0.3.10
    restart: always
    environment:
      EDITION: SELF_HOSTED
@ -20,16 +20,20 @@ export const routes = {
  },
  getConversationMessages: {
    method: "GET",
    url: () => "/messages",
    url: () => `/messages`,
  },
  getConversations: {
    method: "GET",
    url: () => "/conversations",
    url: () => `/conversations`,
  },
  renameConversation: {
    method: "PATCH",
    url: (conversation_id) => `/conversations/${conversation_id}`,
  },
  deleteConversation: {
    method: "DELETE",
    url: (conversation_id) => `/conversations/${conversation_id}`,
  },
};

export class DifyClient {
@ -185,4 +189,13 @@ export class ChatClient extends DifyClient {
      data
    );
  }

  deleteConversation(conversation_id, user) {
    const data = { user };
    return this.sendRequest(
      routes.deleteConversation.method,
      routes.deleteConversation.url(conversation_id),
      data
    );
  }
}
18
sdks/ruby-client/.github/workflows/main.yml
vendored
Normal file
@ -0,0 +1,18 @@
name: Ruby

on: [push,pull_request]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Set up Ruby
      uses: ruby/setup-ruby@v1
      with:
        ruby-version: 3.0.0
    - name: Run the default task
      run: |
        gem install bundler -v 2.2.3
        bundle install
        bundle exec rake
8
sdks/ruby-client/.gitignore
vendored
Normal file
@ -0,0 +1,8 @@
/.bundle/
/.yardoc
/_yardoc/
/coverage/
/doc/
/pkg/
/spec/reports/
/tmp/
10
sdks/ruby-client/.rubocop.yml
Normal file
@ -0,0 +1,10 @@
Style/StringLiterals:
  Enabled: true
  EnforcedStyle: double_quotes

Style/StringLiteralsInInterpolation:
  Enabled: true
  EnforcedStyle: double_quotes

Layout/LineLength:
  Max: 120
84
sdks/ruby-client/CODE_OF_CONDUCT.md
Normal file
@ -0,0 +1,84 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at 427733928@qq.com. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0,
available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
14
sdks/ruby-client/Gemfile
Normal file
@ -0,0 +1,14 @@
# frozen_string_literal: true

source "https://rubygems.org"

# Specify your gem's dependencies in dify_client.gemspec
gemspec

gem "rake", "~> 13.0"

gem "minitest", "~> 5.0"

gem "rubocop", "~> 0.80"

gem 'webmock'
55
sdks/ruby-client/Gemfile.lock
Normal file
@ -0,0 +1,55 @@
PATH
  remote: .
  specs:
    dify_client (0.1.0)

GEM
  remote: https://rubygems.org/
  specs:
    addressable (2.8.4)
      public_suffix (>= 2.0.2, < 6.0)
    ast (2.4.2)
    crack (0.4.5)
      rexml
    hashdiff (1.0.1)
    minitest (5.18.1)
    parallel (1.23.0)
    parser (3.2.2.3)
      ast (~> 2.4.1)
      racc
    public_suffix (5.0.3)
    racc (1.7.1)
    rainbow (3.1.1)
    rake (13.0.6)
    regexp_parser (2.8.1)
    rexml (3.2.5)
    rubocop (0.93.1)
      parallel (~> 1.10)
      parser (>= 2.7.1.5)
      rainbow (>= 2.2.2, < 4.0)
      regexp_parser (>= 1.8)
      rexml
      rubocop-ast (>= 0.6.0)
      ruby-progressbar (~> 1.7)
      unicode-display_width (>= 1.4.0, < 2.0)
    rubocop-ast (1.29.0)
      parser (>= 3.2.1.0)
    ruby-progressbar (1.13.0)
    unicode-display_width (1.8.0)
    webmock (3.18.1)
      addressable (>= 2.8.0)
      crack (>= 0.3.2)
      hashdiff (>= 0.4.0, < 2.0.0)

PLATFORMS
  arm64-darwin-21

DEPENDENCIES
  dify_client!
  minitest (~> 5.0)
  rake (~> 13.0)
  rubocop (~> 0.80)
  webmock

BUNDLED WITH
   2.2.3
21
sdks/ruby-client/LICENSE.txt
Normal file
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2023 crazywoola

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
111
sdks/ruby-client/README.md
Normal file
@ -0,0 +1,111 @@
# DifyClient

Welcome to the DifyClient gem! This gem provides a Ruby client for interacting with the Dify.ai API. It allows you to perform various actions such as sending requests, providing feedback, creating completion messages, managing conversations, and more.

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'dify_client'
```

And then execute:

    $ bundle install

Or install it yourself as:

    $ gem install dify_client

## Usage

To use the DifyClient gem, follow these steps:

1. Require the gem:

```ruby
require 'dify_client'
```

2. Create a new client instance:

```ruby
api_key = 'YOUR_API_KEY'
client = DifyClient::Client.new(api_key)
```

3. Use the available methods to interact with the Dify.ai API. Here are the methods provided by the `DifyClient::Client` class:
### Update API Key

```ruby
client.update_api_key('NEW_API_KEY')
```

Updates the API key used by the client.

### Message Feedback

```ruby
client.message_feedback(message_id, rating, user)
```

Submits feedback for a specific message identified by `message_id`. The `rating` parameter should be the rating value, and `user` is the user identifier.
### Get Application Parameters

```ruby
client.get_application_parameters(user)
```

Retrieves the input parameters configured for the application, for the given `user`.

### Create Completion Message

```ruby
client.create_completion_message(inputs, query, user, stream = false)
```

Creates a completion message with the provided `inputs`, `query`, and `user`. The `stream` parameter is optional and set to `false` by default. Set it to `true` to enable streaming response mode.
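
A minimal sketch of a call, assuming your app defines a `name` input variable (the input key and user ID below are placeholders, not values the API prescribes):

```ruby
# Hypothetical example: 'name' is an input variable defined in your Dify app.
inputs = { 'name' => 'Alice' }
response = client.create_completion_message(inputs, 'Write a short greeting.', 'user-123')
puts response
```
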
### Create Chat Message

```ruby
client.create_chat_message(inputs, query, user, stream = false, conversation_id = nil)
```

Creates a chat message with the provided `inputs`, `query`, and `user`. The `stream` parameter is optional and set to `false` by default. Set it to `true` to enable streaming response mode. The `conversation_id` parameter is optional and can be used to specify the conversation ID.
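
To continue an existing conversation, pass the conversation ID returned by an earlier call; the IDs below are placeholders:

```ruby
# Hypothetical example: continue an existing conversation by its ID.
response = client.create_chat_message({}, 'And what about tomorrow?', 'user-123', false, 'conv-456')
puts response
```
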
### Get Conversations

```ruby
client.get_conversations(user, first_id = nil, limit = nil, pinned = nil)
```

Retrieves the conversations for a given `user`. You can provide `first_id`, `limit`, and `pinned` parameters to customize the retrieval.
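
For instance, a sketch that fetches up to 20 pinned conversations (the limit and user ID are illustrative):

```ruby
# Hypothetical example: list up to 20 pinned conversations for a user.
conversations = client.get_conversations('user-123', nil, 20, true)
puts conversations
```
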
### Rename Conversation

```ruby
client.rename_conversation(conversation_id, name, user)
```

Renames a conversation identified by `conversation_id` with the provided `name` for the given `user`.

### Delete Conversation

```ruby
client.delete_conversation(conversation_id, user)
```

Deletes a conversation identified by `conversation_id` for the given `user`.
## Development

After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.

To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the .gem file to rubygems.org.

## Contributing

Bug reports and pull requests are welcome on GitHub at [https://github.com/langgenius/dify/issues](https://github.com/langgenius/dify/issues).

## License

The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
16
sdks/ruby-client/Rakefile
Normal file
@ -0,0 +1,16 @@
# frozen_string_literal: true

require "bundler/gem_tasks"
require "rake/testtask"

Rake::TestTask.new(:test) do |t|
  t.libs << "test"
  t.libs << "lib"
  t.test_files = FileList["test/**/*_test.rb"]
end

require "rubocop/rake_task"

RuboCop::RakeTask.new

task default: %i[test rubocop]
15
sdks/ruby-client/bin/console
Executable file
@ -0,0 +1,15 @@
#!/usr/bin/env ruby
# frozen_string_literal: true

require "bundler/setup"
require "dify_client"

# You can add fixtures and/or initialization code here to make experimenting
# with your gem easier. You can also use a different console, if you like.

# (If you use this, don't forget to add pry to your Gemfile!)
# require "pry"
# Pry.start

require "irb"
IRB.start(__FILE__)
8
sdks/ruby-client/bin/setup
Executable file
@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
set -vx

bundle install

# Do any other automated setup that you need to do here
37
sdks/ruby-client/dify_client.gemspec
Normal file
@ -0,0 +1,37 @@
# frozen_string_literal: true

require_relative "lib/dify_client/version"

Gem::Specification.new do |spec|
  spec.name = "dify_client"
  spec.version = DifyClient::VERSION
  spec.authors = ["crazywoola"]
  spec.email = ["427733928@qq.com"]

  spec.summary = "Ruby client for Dify"
  spec.description = "Ruby client for Dify"
  spec.homepage = "https://dify.ai"
  spec.license = "MIT"
  spec.required_ruby_version = Gem::Requirement.new(">= 2.3.0")

  # spec.metadata["allowed_push_host"] = "TODO: Set to 'http://mygemserver.com'"

  spec.metadata["homepage_uri"] = spec.homepage
  spec.metadata["source_code_uri"] = "https://github.com/langgenius/dify/tree/main/sdks"
  spec.metadata["changelog_uri"] = "https://github.com/langgenius/dify/tree/main/sdks"

  # Specify which files should be added to the gem when it is released.
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
  spec.files = Dir.chdir(File.expand_path(__dir__)) do
    `git ls-files -z`.split("\x0").reject { |f| f.match(%r{\A(?:test|spec|features)/}) }
  end
  spec.bindir = "exe"
  spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

  # Uncomment to register a new dependency of your gem
  # spec.add_dependency "example-gem", "~> 1.0"

  # For more information and examples about making a new gem, checkout our
  # guide at: https://bundler.io/guides/creating_gem.html
end
42
sdks/ruby-client/test/dify_client_test.rb
Normal file
@ -0,0 +1,42 @@
require 'test_helper'
require 'webmock/minitest'
require 'json'
require 'dify_client'

class DifyClientTest < Minitest::Test
  def setup
    @api_key = 'YOUR_API_KEY'
    @client = DifyClient::Client.new(@api_key)
  end

  def test_update_api_key
    new_api_key = 'NEW_API_KEY'

    @client.update_api_key(new_api_key)

    assert_equal new_api_key, @client.instance_variable_get(:@api_key)
  end

  def test_get_application_parameters
    user = 'USER_ID'
    expected_response = {}

    stub_request(:get, "https://api.dify.ai/v1/parameters").
      with(
        body: {"user"=>"USER_ID"},
        headers: {
          'Accept'=>'*/*',
          'Accept-Encoding'=>'gzip;q=1.0,deflate;q=0.6,identity;q=0.3',
          'Authorization'=>'Bearer YOUR_API_KEY',
          'Content-Type'=>'application/x-www-form-urlencoded',
          'Responsetype'=>'json',
          'User-Agent'=>'Ruby'
        }).
      to_return(status: 200, body: expected_response.to_json, headers: {})

    response = @client.get_application_parameters(user)

    assert_equal expected_response, response
  end

end
6
sdks/ruby-client/test/test_helper.rb
Normal file
@ -0,0 +1,6 @@
# frozen_string_literal: true

$LOAD_PATH.unshift File.expand_path("../lib", __dir__)
require "dify_client"

require "minitest/autorun"
@ -26,4 +26,4 @@ RUN chmod +x /entrypoint.sh
ARG COMMIT_SHA
ENV COMMIT_SHA ${COMMIT_SHA}

ENTRYPOINT ["/entrypoint.sh"]
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
@ -3,6 +3,7 @@ import type { ReactNode } from 'react'
import SwrInitor from '@/app/components/swr-initor'
import { AppContextProvider } from '@/context/app-context'
import GA, { GaType } from '@/app/components/base/ga'
import HeaderWrapper from '@/app/components/header/HeaderWrapper'
import Header from '@/app/components/header'

const Layout = ({ children }: { children: ReactNode }) => {
@ -11,7 +12,9 @@ const Layout = ({ children }: { children: ReactNode }) => {
      <GA gaType={GaType.admin} />
      <SwrInitor>
        <AppContextProvider>
          <Header />
          <HeaderWrapper>
            <Header />
          </HeaderWrapper>
          {children}
        </AppContextProvider>
      </SwrInitor>
@ -65,6 +65,7 @@ export type IChatProps = {
  isShowSuggestion?: boolean
  suggestionList?: string[]
  isShowSpeechToText?: boolean
  answerIconClassName?: string
}

export type MessageMore = {
@ -174,10 +175,11 @@ type IAnswerProps = {
  onSubmitAnnotation?: SubmitAnnotationFunc
  displayScene: DisplayScene
  isResponsing?: boolean
  answerIconClassName?: string
}

// The component needs to maintain its own state to control whether to display input component
const Answer: FC<IAnswerProps> = ({ item, feedbackDisabled = false, isHideFeedbackEdit = false, onFeedback, onSubmitAnnotation, displayScene = 'web', isResponsing }) => {
const Answer: FC<IAnswerProps> = ({ item, feedbackDisabled = false, isHideFeedbackEdit = false, onFeedback, onSubmitAnnotation, displayScene = 'web', isResponsing, answerIconClassName }) => {
  const { id, content, more, feedback, adminFeedback, annotation: initAnnotation } = item
  const [showEdit, setShowEdit] = useState(false)
  const [loading, setLoading] = useState(false)
@ -292,7 +294,7 @@ const Answer: FC<IAnswerProps> = ({ item, feedbackDisabled = false, isHideFeedba
  return (
    <div key={id}>
      <div className='flex items-start'>
        <div className={`${s.answerIcon} w-10 h-10 shrink-0`}>
        <div className={`${s.answerIcon} ${answerIconClassName} w-10 h-10 shrink-0`}>
          {isResponsing
            && <div className={s.typeingIcon}>
              <LoadingAnim type='avatar' />
@ -428,6 +430,7 @@ const Chat: FC<IChatProps> = ({
  isShowSuggestion,
  suggestionList,
  isShowSpeechToText,
  answerIconClassName,
}) => {
  const { t } = useTranslation()
  const { notify } = useContext(ToastContext)
@ -520,6 +523,7 @@ const Chat: FC<IChatProps> = ({
          onSubmitAnnotation={onSubmitAnnotation}
          displayScene={displayScene ?? 'web'}
          isResponsing={isResponsing && isLast}
          answerIconClassName={answerIconClassName}
        />
      }
      return <Question key={item.id} id={item.id} content={item.content} more={item.more} useCurrentUserAvatar={useCurrentUserAvatar} />
@ -372,7 +372,7 @@ const Debug: FC<IDebug> = ({
      {/* Chat */}
      {mode === AppType.chat && (
        <div className="mt-[34px] h-full flex flex-col">
          <div className={cn(doShowSuggestion ? 'pb-[140px]' : (isResponsing ? 'pb-[113px]' : 'pb-[66px]'), 'relative mt-1.5 grow h-[200px] overflow-hidden')}>
          <div className={cn(doShowSuggestion ? 'pb-[140px]' : (isResponsing ? 'pb-[113px]' : 'pb-[76px]'), 'relative mt-1.5 grow h-[200px] overflow-hidden')}>
            <div className="h-full overflow-y-auto overflow-x-hidden" ref={chatListDomRef}>
              <Chat
                chatList={chatList}
@ -16,6 +16,7 @@ import ConfigModel from '@/app/components/app/configuration/config-model'
|
||||
import Config from '@/app/components/app/configuration/config'
|
||||
import Debug from '@/app/components/app/configuration/debug'
|
||||
import Confirm from '@/app/components/base/confirm'
|
||||
import { ProviderType } from '@/types/app'
|
||||
import type { AppDetailResponse } from '@/models/app'
|
||||
import { ToastContext } from '@/app/components/base/toast'
|
||||
import { fetchTenantInfo } from '@/service/common'
|
||||
@ -67,7 +68,7 @@ const Configuration: FC = () => {
|
||||
frequency_penalty: 1, // -2-2
|
||||
})
|
||||
const [modelConfig, doSetModelConfig] = useState<ModelConfig>({
|
||||
provider: 'openai',
|
||||
provider: ProviderType.openai,
|
||||
model_id: 'gpt-3.5-turbo',
|
||||
configs: {
|
||||
prompt_template: '',
|
||||
@ -84,8 +85,9 @@ const Configuration: FC = () => {
|
||||
doSetModelConfig(newModelConfig)
|
||||
}
|
||||
|
||||
const setModelId = (modelId: string) => {
|
||||
const setModelId = (modelId: string, provider: ProviderType) => {
|
||||
const newModelConfig = produce(modelConfig, (draft: any) => {
|
||||
draft.provider = provider
|
||||
draft.model_id = modelId
|
||||
})
|
||||
setModelConfig(newModelConfig)
|
||||
|
||||
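With the extra parameter, a caller now picks the provider and the model together, so draft.provider cannot go stale against draft.model_id. Illustrative calls (the argument values are examples, not an exhaustive list):

// Both fields of the model config are updated in a single produce() pass.
setModelId('gpt-3.5-turbo', ProviderType.openai)
setModelId('gpt-4', ProviderType.openai)
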
@ -184,7 +184,11 @@ const GenerationItem: FC<IGenerationItemProps> = ({
{taskId}
</div>)
}
<Markdown content={content} />
<div className='flex'>
<div className='grow w-0'>
<Markdown content={content} />
</div>
</div>
{messageId && (
<div className='flex items-center justify-between mt-3'>
<div className='flex items-center'>

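Wrapping Markdown in a grow w-0 flex child is a common Tailwind idiom: w-0 zeroes the child's intrinsic width so the flex algorithm alone decides its size, which stops wide content (long code lines, tables) from stretching the row. A standalone sketch of the idiom, separate from this component:

// The w-0 child cannot push the row wider than its container;
// anything oversized wraps or scrolls inside the cell instead.
const Row = ({ text }: { text: string }) => (
  <div className='flex'>
    <div className='grow w-0'>
      <pre className='overflow-x-auto'>{text}</pre>
    </div>
  </div>
)
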
@ -19,6 +19,7 @@ const AutoHeightTextarea = forwardRef(
{ value, onChange, placeholder, className, minHeight = 36, maxHeight = 96, autoFocus, controlFocus, onKeyDown, onKeyUp }: IProps,
outerRef: any,
) => {
// eslint-disable-next-line react-hooks/rules-of-hooks
const ref = outerRef || useRef<HTMLTextAreaElement>(null)

const doFocus = () => {
@ -54,13 +55,20 @@ const AutoHeightTextarea = forwardRef(

return (
<div className='relative'>
<div className={cn(className, 'invisible whitespace-pre-wrap break-all overflow-y-auto')} style={{ minHeight, maxHeight }}>
<div className={cn(className, 'invisible whitespace-pre-wrap break-all overflow-y-auto')} style={{
minHeight,
maxHeight,
paddingRight: (value && value.trim().length > 10000) ? 140 : 130,
}}>
{!value ? placeholder : value.replace(/\n$/, '\n ')}
</div>
<textarea
ref={ref}
autoFocus={autoFocus}
className={cn(className, 'absolute inset-0 resize-none overflow-hidden')}
className={cn(className, 'absolute inset-0 resize-none overflow-auto')}
style={{
paddingRight: (value && value.trim().length > 10000) ? 140 : 130,
}}
placeholder={placeholder}
onChange={onChange}
onKeyDown={onKeyDown}

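The component auto-sizes by rendering an invisible mirror div with the same classes and text as the textarea, so the two must share every metric that affects layout; this hunk applies the same conditional paddingRight to both. A hypothetical helper that would keep them in sync (name and factoring are assumptions, not part of the diff):

// Shared style fragment for the mirror div and the real textarea.
const mirroredPadding = (value?: string) => ({
  // Wider right padding once the text is very long, presumably to clear overlaid controls.
  paddingRight: (value && value.trim().length > 10000) ? 140 : 130,
})

Both elements would then spread mirroredPadding(value) into their style objects instead of duplicating the ternary.
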
@ -1,39 +1,41 @@
'use client'
import cn from 'classnames'

interface IAvatarProps {
type AvatarProps = {
name: string
avatar?: string
size?: number
className?: string
textClassName?: string
}
const Avatar = ({
name,
avatar,
size = 30,
className
}: IAvatarProps) => {
const avatarClassName = `shrink-0 flex items-center rounded-full bg-primary-600`
const style = { width: `${size}px`, height:`${size}px`, fontSize: `${size}px`, lineHeight: `${size}px` }
className,
textClassName,
}: AvatarProps) => {
const avatarClassName = 'shrink-0 flex items-center rounded-full bg-primary-600'
const style = { width: `${size}px`, height: `${size}px`, fontSize: `${size}px`, lineHeight: `${size}px` }

if (avatar) {
return (
<img
className={cn(avatarClassName, className)}
<img
className={cn(avatarClassName, className)}
style={style}
alt={name}
alt={name}
src={avatar}
/>
)
}

return (
<div
className={cn(avatarClassName, className)}
<div
className={cn(avatarClassName, className)}
style={style}
>
<div
className={`text-center text-white scale-[0.4]`}
<div
className={cn(textClassName, 'text-center text-white scale-[0.4]')}
style={style}
>
{name[0].toLocaleUpperCase()}
@ -42,4 +44,4 @@ const Avatar = ({
)
}

export default Avatar
export default Avatar

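The rewrite also adds textClassName so the fallback initial can be styled independently of the container. A hypothetical usage (class values illustrative):

// textClassName only affects the fallback letter; image avatars ignore it.
<Avatar name='Dify' size={40} className='ring-1 ring-gray-200' textClassName='font-medium' />
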
@ -0,0 +1,8 @@
<svg width="50" height="26" viewBox="0 0 50 26" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="Dify">
<path d="M6.61784 2.064C8.37784 2.064 9.92184 2.408 11.2498 3.096C12.5938 3.784 13.6258 4.768 14.3458 6.048C15.0818 7.312 15.4498 8.784 15.4498 10.464C15.4498 12.144 15.0818 13.616 14.3458 14.88C13.6258 16.128 12.5938 17.096 11.2498 17.784C9.92184 18.472 8.37784 18.816 6.61784 18.816H0.761841V2.064H6.61784ZM6.49784 15.96C8.25784 15.96 9.61784 15.48 10.5778 14.52C11.5378 13.56 12.0178 12.208 12.0178 10.464C12.0178 8.72 11.5378 7.36 10.5778 6.384C9.61784 5.392 8.25784 4.896 6.49784 4.896H4.12184V15.96H6.49784Z" fill="#1D2939"/>
<path d="M20.869 3.936C20.277 3.936 19.781 3.752 19.381 3.384C18.997 3 18.805 2.528 18.805 1.968C18.805 1.408 18.997 0.944 19.381 0.576C19.781 0.192 20.277 0 20.869 0C21.461 0 21.949 0.192 22.333 0.576C22.733 0.944 22.933 1.408 22.933 1.968C22.933 2.528 22.733 3 22.333 3.384C21.949 3.752 21.461 3.936 20.869 3.936ZM22.525 5.52V18.816H19.165V5.52H22.525Z" fill="#1D2939"/>
<path d="M33.1407 8.28H30.8127V18.816H27.4047V8.28H25.8927V5.52H27.4047V4.848C27.4047 3.216 27.8687 2.016 28.7967 1.248C29.7247 0.48 31.1247 0.12 32.9967 0.168001V3C32.1807 2.984 31.6127 3.12 31.2927 3.408C30.9727 3.696 30.8127 4.216 30.8127 4.968V5.52H33.1407V8.28Z" fill="#1D2939"/>
<path d="M49.2381 5.52L41.0061 25.104H37.4301L40.3101 18.48L34.9821 5.52H38.7501L42.1821 14.808L45.6621 5.52H49.2381Z" fill="#1D2939"/>
</g>
</svg>
After Width: | Height: | Size: 1.4 KiB

@ -0,0 +1,5 @@
<svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="github">
<path id="Vector" d="M9 1.125C4.64906 1.125 1.125 4.64906 1.125 9C1.125 12.4847 3.37922 15.428 6.50953 16.4714C6.90328 16.5403 7.05094 16.3041 7.05094 16.0973C7.05094 15.9103 7.04109 15.2902 7.04109 14.6306C5.0625 14.9948 4.55062 14.1483 4.39312 13.7053C4.30453 13.4789 3.92063 12.78 3.58594 12.593C3.31031 12.4453 2.91656 12.0811 3.57609 12.0712C4.19625 12.0614 4.63922 12.6422 4.78688 12.8784C5.49563 14.0695 6.62766 13.7348 7.08047 13.5281C7.14938 13.0163 7.35609 12.6717 7.5825 12.4748C5.83031 12.278 3.99938 11.5987 3.99938 8.58656C3.99938 7.73016 4.30453 7.02141 4.80656 6.47016C4.72781 6.27328 4.45219 5.46609 4.88531 4.38328C4.88531 4.38328 5.54484 4.17656 7.05094 5.19047C7.68094 5.01328 8.35031 4.92469 9.01969 4.92469C9.68906 4.92469 10.3584 5.01328 10.9884 5.19047C12.4945 4.16672 13.1541 4.38328 13.1541 4.38328C13.5872 5.46609 13.3116 6.27328 13.2328 6.47016C13.7348 7.02141 14.04 7.72031 14.04 8.58656C14.04 11.6086 12.1992 12.278 10.447 12.4748C10.7325 12.7209 10.9786 13.1934 10.9786 13.9317C10.9786 14.985 10.9688 15.8316 10.9688 16.0973C10.9688 16.3041 11.1164 16.5502 11.5102 16.4714C13.0735 15.9436 14.432 14.9389 15.3943 13.5986C16.3567 12.2583 16.8746 10.65 16.875 9C16.875 4.64906 13.3509 1.125 9 1.125Z" fill="#24292F"/>
</g>
</svg>
After Width: | Height: | Size: 1.3 KiB

@ -0,0 +1,5 @@
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="arrow-up-right">
<path id="Icon" d="M4.08325 9.91665L9.91659 4.08331M9.91659 4.08331H4.08325M9.91659 4.08331V9.91665" stroke="#667085" stroke-width="1.25" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
After Width: | Height: | Size: 319 B

@ -0,0 +1,5 @@
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="chevron-down">
<path id="Icon" d="M3 4.5L6 7.5L9 4.5" stroke="#344054" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
After Width: | Height: | Size: 254 B

@ -0,0 +1,5 @@
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="chevron-right">
<path id="Icon" d="M5.25 10.5L8.75 7L5.25 3.5" stroke="#667085" stroke-width="1.25" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
After Width: | Height: | Size: 264 B

@ -0,0 +1,5 @@
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="check">
<path id="Icon" d="M13.3334 4L6.00008 11.3333L2.66675 8" stroke="#155EEF" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
After Width: | Height: | Size: 265 B

@ -0,0 +1,5 @@
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="log-out-01">
<path id="Icon" d="M9.33333 9.91667L12.25 7M12.25 7L9.33333 4.08333M12.25 7H5.25M5.25 1.75H4.55C3.56991 1.75 3.07986 1.75 2.70552 1.94074C2.37623 2.10852 2.10852 2.37623 1.94074 2.70552C1.75 3.07986 1.75 3.56991 1.75 4.55V9.45C1.75 10.4301 1.75 10.9201 1.94074 11.2945C2.10852 11.6238 2.37623 11.8915 2.70552 12.0593C3.07986 12.25 3.56991 12.25 4.55 12.25H5.25" stroke="#667085" stroke-width="1.25" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
After Width: | Height: | Size: 576 B

@ -0,0 +1,5 @@
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="terminal-square">
<path id="Solid" fill-rule="evenodd" clip-rule="evenodd" d="M8.91927 1H3.08073C2.81716 0.999992 2.58977 0.999984 2.40249 1.01529C2.20481 1.03144 2.00821 1.06709 1.81902 1.16349C1.53677 1.3073 1.3073 1.53677 1.16349 1.81902C1.06709 2.00821 1.03144 2.20481 1.01529 2.40249C0.999984 2.58977 0.999992 2.81714 1 3.08071V8.91927C0.999992 9.18284 0.999984 9.41023 1.01529 9.59752C1.03144 9.79519 1.06709 9.9918 1.16349 10.181C1.3073 10.4632 1.53677 10.6927 1.81902 10.8365C2.00821 10.9329 2.20481 10.9686 2.40249 10.9847C2.58977 11 2.81715 11 3.08072 11H8.91928C9.18285 11 9.41023 11 9.59752 10.9847C9.79519 10.9686 9.9918 10.9329 10.181 10.8365C10.4632 10.6927 10.6927 10.4632 10.8365 10.181C10.9329 9.9918 10.9686 9.79519 10.9847 9.59752C11 9.41023 11 9.18285 11 8.91928V3.08072C11 2.81715 11 2.58977 10.9847 2.40249C10.9686 2.20481 10.9329 2.00821 10.8365 1.81902C10.6927 1.53677 10.4632 1.3073 10.181 1.16349C9.9918 1.06709 9.79519 1.03144 9.59752 1.01529C9.41023 0.999984 9.18284 0.999992 8.91927 1ZM3.85355 4.14645C3.65829 3.95118 3.34171 3.95118 3.14645 4.14645C2.95118 4.34171 2.95118 4.65829 3.14645 4.85355L4.29289 6L3.14645 7.14645C2.95118 7.34171 2.95118 7.65829 3.14645 7.85355C3.34171 8.04882 3.65829 8.04882 3.85355 7.85355L5.35355 6.35355C5.54882 6.15829 5.54882 5.84171 5.35355 5.64645L3.85355 4.14645ZM6.5 7C6.22386 7 6 7.22386 6 7.5C6 7.77614 6.22386 8 6.5 8H8.5C8.77614 8 9 7.77614 9 7.5C9 7.22386 8.77614 7 8.5 7H6.5Z" fill="#B54708"/>
</g>
</svg>
After Width: | Height: | Size: 1.5 KiB

@ -0,0 +1,5 @@
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="beaker-02">
<path id="Solid" fill-rule="evenodd" clip-rule="evenodd" d="M4.13856 0.500003H7.8617C7.92126 0.49998 7.99238 0.499953 8.05504 0.505073C8.12765 0.511005 8.23165 0.526227 8.34062 0.581751C8.48174 0.653656 8.59648 0.768392 8.66838 0.909513C8.72391 1.01849 8.73913 1.12248 8.74506 1.19509C8.75018 1.25775 8.75015 1.32888 8.75013 1.38844V2.61157C8.75015 2.67113 8.75018 2.74226 8.74506 2.80492C8.73913 2.87753 8.72391 2.98153 8.66838 3.0905C8.59648 3.23162 8.48174 3.34636 8.34062 3.41826C8.23165 3.47379 8.12765 3.48901 8.05504 3.49494C8.03725 3.49639 8.01877 3.49743 8.00006 3.49817V5.2506C8.00006 5.55312 8.00408 5.61265 8.01723 5.66153C8.03245 5.71807 8.05747 5.7715 8.09117 5.81939C8.1203 5.86078 8.16346 5.90197 8.39586 6.09564L10.2807 7.66627C10.4566 7.81255 10.6116 7.94145 10.7267 8.10509C10.8278 8.24875 10.9029 8.40904 10.9486 8.57867C11.0005 8.7719 11.0003 8.97351 11.0001 9.2023C11.0001 9.39886 11.0002 9.59542 11.0002 9.79198C11.0003 9.98232 11.0005 10.1463 10.9713 10.2927C10.853 10.8877 10.3878 11.3529 9.7928 11.4712C9.64637 11.5003 9.48246 11.5002 9.29211 11.5001H2.70822C2.51787 11.5002 2.35396 11.5003 2.20753 11.4712C1.98473 11.4269 1.78014 11.334 1.60515 11.2038C1.42854 11.0725 1.28221 10.9034 1.17753 10.7077C1.10892 10.5796 1.05831 10.4401 1.02899 10.2927C0.999862 10.1463 0.999992 9.98233 1.00014 9.79199C1.00014 9.59542 1.00006 9.39886 1.00003 9.20229C0.999794 8.97351 0.999584 8.7719 1.05157 8.57867C1.09721 8.40904 1.17229 8.24875 1.27338 8.10509C1.38855 7.94145 1.54356 7.81255 1.71947 7.66627L3.60427 6.09564C3.83667 5.90197 3.87983 5.86078 3.90896 5.81939C3.94266 5.7715 3.96768 5.71807 3.9829 5.66153C3.99605 5.61265 4.00006 5.55312 4.00006 5.2506V3.49817C3.9814 3.49743 3.96297 3.49639 3.94521 3.49494C3.8726 3.48901 3.76861 3.47379 3.65964 3.41826C3.51851 3.34636 3.40378 3.23162 3.33187 3.0905C3.27635 2.98153 3.26113 2.87753 3.25519 2.80492C3.25008 2.74226 3.2501 2.67113 3.25013 2.61158V1.38844C3.2501 1.32888 3.25008 1.25775 3.25519 1.19509C3.26113 1.12248 3.27635 1.01849 3.33187 0.909513C3.40378 0.768392 3.51851 0.653656 3.65964 0.581751C3.76861 0.526227 3.8726 0.511005 3.94521 0.505073C4.00787 0.499953 4.079 0.49998 4.13856 0.500003ZM9.11909 8.00004H2.88104L4.28066 6.83373C4.45657 6.68745 4.61158 6.55855 4.72675 6.39491C4.82784 6.25125 4.90292 6.09096 4.94856 5.92133C5.00054 5.7281 5.00033 5.52649 5.0001 5.29771L5.00006 3.50001H7.00006L7.00003 5.29771C6.99979 5.52649 6.99958 5.7281 7.05157 5.92133C7.09721 6.09096 7.17229 6.25125 7.27338 6.39491C7.38855 6.55855 7.54356 6.68745 7.71947 6.83373L9.11909 8.00004Z" fill="#0E7090"/>
</g>
</svg>
After Width: | Height: | Size: 2.6 KiB

62
web/app/components/base/icons/src/public/common/Dify.json
Normal file
@ -0,0 +1,62 @@
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "50",
"height": "26",
"viewBox": "0 0 50 26",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "Dify"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"d": "M6.61784 2.064C8.37784 2.064 9.92184 2.408 11.2498 3.096C12.5938 3.784 13.6258 4.768 14.3458 6.048C15.0818 7.312 15.4498 8.784 15.4498 10.464C15.4498 12.144 15.0818 13.616 14.3458 14.88C13.6258 16.128 12.5938 17.096 11.2498 17.784C9.92184 18.472 8.37784 18.816 6.61784 18.816H0.761841V2.064H6.61784ZM6.49784 15.96C8.25784 15.96 9.61784 15.48 10.5778 14.52C11.5378 13.56 12.0178 12.208 12.0178 10.464C12.0178 8.72 11.5378 7.36 10.5778 6.384C9.61784 5.392 8.25784 4.896 6.49784 4.896H4.12184V15.96H6.49784Z",
"fill": "#1D2939"
},
"children": []
},
{
"type": "element",
"name": "path",
"attributes": {
"d": "M20.869 3.936C20.277 3.936 19.781 3.752 19.381 3.384C18.997 3 18.805 2.528 18.805 1.968C18.805 1.408 18.997 0.944 19.381 0.576C19.781 0.192 20.277 0 20.869 0C21.461 0 21.949 0.192 22.333 0.576C22.733 0.944 22.933 1.408 22.933 1.968C22.933 2.528 22.733 3 22.333 3.384C21.949 3.752 21.461 3.936 20.869 3.936ZM22.525 5.52V18.816H19.165V5.52H22.525Z",
"fill": "#1D2939"
},
"children": []
},
{
"type": "element",
"name": "path",
"attributes": {
"d": "M33.1407 8.28H30.8127V18.816H27.4047V8.28H25.8927V5.52H27.4047V4.848C27.4047 3.216 27.8687 2.016 28.7967 1.248C29.7247 0.48 31.1247 0.12 32.9967 0.168001V3C32.1807 2.984 31.6127 3.12 31.2927 3.408C30.9727 3.696 30.8127 4.216 30.8127 4.968V5.52H33.1407V8.28Z",
"fill": "#1D2939"
},
"children": []
},
{
"type": "element",
"name": "path",
"attributes": {
"d": "M49.2381 5.52L41.0061 25.104H37.4301L40.3101 18.48L34.9821 5.52H38.7501L42.1821 14.808L45.6621 5.52H49.2381Z",
"fill": "#1D2939"
},
"children": []
}
]
}
]
},
"name": "Dify"
}

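The JSON mirrors the SVG node tree one-to-one, which is presumably what IconBase walks to rebuild the icon as React elements. A rough sketch of the shape this file implies (inferred from the data above, not copied from IconBase's actual definitions):

// Inferred node shape: an assumption based on this JSON, not the real IconBase types.
type IconNode = {
  type: 'element'
  isRootNode?: boolean
  name: string // 'svg', 'g', 'path', ...
  attributes: Record<string, string>
  children: IconNode[]
}

type IconData = {
  icon: IconNode
  name: string
}
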
14
web/app/components/base/icons/src/public/common/Dify.tsx
Normal file
@ -0,0 +1,14 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './Dify.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

export default Icon

36
web/app/components/base/icons/src/public/common/Github.json
Normal file
@ -0,0 +1,36 @@
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "18",
"height": "18",
"viewBox": "0 0 18 18",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "github"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Vector",
"d": "M9 1.125C4.64906 1.125 1.125 4.64906 1.125 9C1.125 12.4847 3.37922 15.428 6.50953 16.4714C6.90328 16.5403 7.05094 16.3041 7.05094 16.0973C7.05094 15.9103 7.04109 15.2902 7.04109 14.6306C5.0625 14.9948 4.55062 14.1483 4.39312 13.7053C4.30453 13.4789 3.92063 12.78 3.58594 12.593C3.31031 12.4453 2.91656 12.0811 3.57609 12.0712C4.19625 12.0614 4.63922 12.6422 4.78688 12.8784C5.49563 14.0695 6.62766 13.7348 7.08047 13.5281C7.14938 13.0163 7.35609 12.6717 7.5825 12.4748C5.83031 12.278 3.99938 11.5987 3.99938 8.58656C3.99938 7.73016 4.30453 7.02141 4.80656 6.47016C4.72781 6.27328 4.45219 5.46609 4.88531 4.38328C4.88531 4.38328 5.54484 4.17656 7.05094 5.19047C7.68094 5.01328 8.35031 4.92469 9.01969 4.92469C9.68906 4.92469 10.3584 5.01328 10.9884 5.19047C12.4945 4.16672 13.1541 4.38328 13.1541 4.38328C13.5872 5.46609 13.3116 6.27328 13.2328 6.47016C13.7348 7.02141 14.04 7.72031 14.04 8.58656C14.04 11.6086 12.1992 12.278 10.447 12.4748C10.7325 12.7209 10.9786 13.1934 10.9786 13.9317C10.9786 14.985 10.9688 15.8316 10.9688 16.0973C10.9688 16.3041 11.1164 16.5502 11.5102 16.4714C13.0735 15.9436 14.432 14.9389 15.3943 13.5986C16.3567 12.2583 16.8746 10.65 16.875 9C16.875 4.64906 13.3509 1.125 9 1.125Z",
"fill": "#24292F"
},
"children": []
}
]
}
]
},
"name": "Github"
}

14
web/app/components/base/icons/src/public/common/Github.tsx
Normal file
@ -0,0 +1,14 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './Github.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

export default Icon

2
web/app/components/base/icons/src/public/common/index.ts
Normal file
@ -0,0 +1,2 @@
export { default as Dify } from './Dify'
export { default as Github } from './Github'

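With the barrel file in place, consumers can import the generated components in one line. A hypothetical usage (assuming IconBase forwards className to the rendered svg, which this diff does not show):

import { Dify, Github } from '@/app/components/base/icons/src/public/common'

const Footer = () => (
  <div className='flex items-center gap-2'>
    <Dify />
    <Github className='w-4 h-4' />
  </div>
)
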
@ -0,0 +1,39 @@
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "14",
"height": "14",
"viewBox": "0 0 14 14",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "arrow-up-right"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Icon",
"d": "M4.08325 9.91665L9.91659 4.08331M9.91659 4.08331H4.08325M9.91659 4.08331V9.91665",
"stroke": "currentColor",
"stroke-width": "1.25",
"stroke-linecap": "round",
"stroke-linejoin": "round"
},
"children": []
}
]
}
]
},
"name": "ArrowUpRight"
}

@ -0,0 +1,14 @@
// GENERATED BY script
// DO NOT EDIT IT MANUALLY

import * as React from 'react'
import data from './ArrowUpRight.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

export default Icon

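Note that where the raw arrow-up-right SVG earlier in this diff hard-codes stroke="#667085", the generated JSON uses currentColor, so the icon inherits the surrounding CSS text color. A hypothetical usage (wrapper class illustrative):

// currentColor makes the stroke follow the parent's text color utility.
<span className='text-gray-500'>
  <ArrowUpRight />
</span>
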
@ -0,0 +1,39 @@
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "12",
"height": "12",
"viewBox": "0 0 12 12",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "chevron-down"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Icon",
"d": "M3 4.5L6 7.5L9 4.5",
"stroke": "currentColor",
"stroke-width": "1.5",
"stroke-linecap": "round",
"stroke-linejoin": "round"
},
"children": []
}
]
}
]
},
"name": "ChevronDown"
}