mirror of
https://github.com/langgenius/dify.git
synced 2026-04-28 06:28:05 +08:00
Merging main into fix/chore-fix
@@ -0,0 +1,44 @@
from datetime import datetime
from typing import Any, Union

import pytz

from core.tools.builtin_tool.tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolInvokeError


class LocaltimeToTimestampTool(BuiltinTool):
    def _invoke(
        self,
        user_id: str,
        tool_parameters: dict[str, Any],
    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        Convert localtime to timestamp
        """
        localtime = tool_parameters.get("localtime")
        timezone = tool_parameters.get("timezone", "Asia/Shanghai")
        if not timezone:
            timezone = None
        time_format = "%Y-%m-%d %H:%M:%S"

        timestamp = self.localtime_to_timestamp(localtime, time_format, timezone)
        if not timestamp:
            return self.create_text_message(f"Invalid localtime: {localtime}")

        return self.create_text_message(f"{timestamp}")

    @staticmethod
    def localtime_to_timestamp(localtime: str, time_format: str, local_tz=None) -> int | None:
        try:
            if local_tz is None:
                local_tz = datetime.now().astimezone().tzinfo
            if isinstance(local_tz, str):
                local_tz = pytz.timezone(local_tz)
            local_time = datetime.strptime(localtime, time_format)
            localtime = local_tz.localize(local_time)
            timestamp = int(localtime.timestamp())
            return timestamp
        except Exception as e:
            raise ToolInvokeError(str(e))
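
Reviewer note: a minimal standalone sketch of the same conversion (pytz assumed installed; the function name is illustrative, not part of this commit):

import pytz
from datetime import datetime

def localtime_to_timestamp(localtime: str, tz_name: str = "Asia/Shanghai") -> int:
    # localize() attaches the zone with correct DST handling, which is why
    # the tool uses it rather than constructing the datetime with a pytz
    # tzinfo directly.
    tz = pytz.timezone(tz_name)
    naive = datetime.strptime(localtime, "%Y-%m-%d %H:%M:%S")
    return int(tz.localize(naive).timestamp())

print(localtime_to_timestamp("2024-01-01 00:00:00"))  # 1704038400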
@@ -0,0 +1,33 @@
identity:
  name: localtime_to_timestamp
  author: zhuhao
  label:
    en_US: Localtime to timestamp
    zh_Hans: 获取时间戳
description:
  human:
    en_US: A tool to convert localtime to a timestamp
    zh_Hans: 获取时间戳
  llm: A tool to convert localtime to a timestamp
parameters:
  - name: localtime
    type: string
    required: true
    form: llm
    label:
      en_US: localtime
      zh_Hans: 本地时间
    human_description:
      en_US: Localtime in "%Y-%m-%d %H:%M:%S" format, such as 2024-01-01 00:00:00
      zh_Hans: 本地时间, 比如2024-01-01 00:00:00
  - name: timezone
    type: string
    required: false
    form: llm
    label:
      en_US: Timezone
      zh_Hans: 时区
    human_description:
      en_US: Timezone, such as Asia/Shanghai
      zh_Hans: 时区, 比如Asia/Shanghai
    default: Asia/Shanghai
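
Reviewer note: at runtime the two parameters above surface in _invoke as keys of tool_parameters, roughly as follows (values invented for illustration):

tool_parameters = {
    "localtime": "2024-01-01 00:00:00",  # required, filled by the LLM (form: llm)
    "timezone": "Asia/Shanghai",         # optional; the YAML default applies if omitted
}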
@@ -0,0 +1,44 @@
from datetime import datetime
from typing import Any, Union

import pytz

from core.tools.builtin_tool.tool import BuiltinTool
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolInvokeError


class TimestampToLocaltimeTool(BuiltinTool):
    def _invoke(
        self,
        user_id: str,
        tool_parameters: dict[str, Any],
    ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        Convert timestamp to localtime
        """
        timestamp = tool_parameters.get("timestamp")
        timezone = tool_parameters.get("timezone", "Asia/Shanghai")
        if not timezone:
            timezone = None
        time_format = "%Y-%m-%d %H:%M:%S"

        localtime = self.timestamp_to_localtime(timestamp, timezone)
        if not localtime:
            return self.create_text_message(f"Invalid timestamp: {timestamp}")

        localtime_format = localtime.strftime(time_format)

        return self.create_text_message(f"{localtime_format}")

    @staticmethod
    def timestamp_to_localtime(timestamp: int, local_tz=None) -> datetime | None:
        try:
            if local_tz is None:
                local_tz = datetime.now().astimezone().tzinfo
            if isinstance(local_tz, str):
                local_tz = pytz.timezone(local_tz)
            local_time = datetime.fromtimestamp(timestamp, local_tz)
            return local_time
        except Exception as e:
            raise ToolInvokeError(str(e))
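
Reviewer note: the reverse conversion as a standalone sketch (illustrative name, pytz assumed); together with the sketch above it round-trips:

import pytz
from datetime import datetime

def timestamp_to_localtime(ts: int, tz_name: str = "Asia/Shanghai") -> str:
    # Passing an explicit tzinfo to fromtimestamp avoids depending on the
    # server's local zone configuration.
    return datetime.fromtimestamp(ts, pytz.timezone(tz_name)).strftime("%Y-%m-%d %H:%M:%S")

print(timestamp_to_localtime(1704038400))  # 2024-01-01 00:00:00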
@@ -0,0 +1,33 @@
identity:
  name: timestamp_to_localtime
  author: zhuhao
  label:
    en_US: Timestamp to localtime
    zh_Hans: 时间戳转换
description:
  human:
    en_US: A tool to convert a timestamp to localtime
    zh_Hans: 时间戳转换
  llm: A tool to convert a timestamp to localtime
parameters:
  - name: timestamp
    type: number
    required: true
    form: llm
    label:
      en_US: Timestamp
      zh_Hans: 时间戳
    human_description:
      en_US: Unix timestamp in seconds, such as 1704038400
      zh_Hans: 时间戳
  - name: timezone
    type: string
    required: false
    form: llm
    label:
      en_US: Timezone
      zh_Hans: 时区
    human_description:
      en_US: Timezone, such as Asia/Shanghai
      zh_Hans: 时区, 比如Asia/Shanghai
    default: Asia/Shanghai
@@ -1,3 +1,5 @@
from typing import Optional

from core.model_runtime.entities.llm_entities import LLMResult
from core.model_runtime.entities.message_entities import PromptMessage, SystemPromptMessage, UserPromptMessage
from core.tools.__base.tool import Tool
@@ -1,10 +1,12 @@
from pydantic import BaseModel, Field

from core.rag.datasource.retrieval_service import RetrievalService
from core.rag.models.document import Document as RetrievalDocument
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from core.tools.tool.dataset_retriever.dataset_retriever_base_tool import DatasetRetrieverBaseTool
from extensions.ext_database import db
from models.dataset import Dataset, Document, DocumentSegment
from services.external_knowledge_service import ExternalDatasetService

default_retrieval_model = {
    "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
@@ -53,97 +55,137 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool):

        for hit_callback in self.hit_callbacks:
            hit_callback.on_query(query, dataset.id)

        if dataset.provider == "external":
            results = []
            external_documents = ExternalDatasetService.fetch_external_knowledge_retrieval(
                tenant_id=dataset.tenant_id,
                dataset_id=dataset.id,
                query=query,
                external_retrieval_parameters=dataset.retrieval_model,
            )
            for external_document in external_documents:
                document = RetrievalDocument(
                    page_content=external_document.get("content"),
                    metadata=external_document.get("metadata"),
                    provider="external",
                )
                document.metadata["score"] = external_document.get("score")
                document.metadata["title"] = external_document.get("title")
                document.metadata["dataset_id"] = dataset.id
                document.metadata["dataset_name"] = dataset.name
                results.append(document)
            # deal with external documents
            context_list = []
            for position, item in enumerate(results, start=1):
                source = {
                    "position": position,
                    "dataset_id": item.metadata.get("dataset_id"),
                    "dataset_name": item.metadata.get("dataset_name"),
                    "document_name": item.metadata.get("title"),
                    "data_source_type": "external",
                    "retriever_from": self.retriever_from,
                    "score": item.metadata.get("score"),
                    "title": item.metadata.get("title"),
                    "content": item.page_content,
                }
                context_list.append(source)
            for hit_callback in self.hit_callbacks:
                hit_callback.return_retriever_resource_info(context_list)

            return str("\n".join([item.page_content for item in results]))
        else:
            # get retrieval model; if it is not set, use the default
            retrieval_model = dataset.retrieval_model or default_retrieval_model
            if dataset.indexing_technique == "economy":
                # use keyword table query
                documents = RetrievalService.retrieve(
                    retrieval_method="keyword_search", dataset_id=dataset.id, query=query, top_k=self.top_k
                )
                return str("\n".join([document.page_content for document in documents]))
            else:
                if self.top_k > 0:
                    # retrieval source
                    documents = RetrievalService.retrieve(
                        retrieval_method=retrieval_model.get("search_method", "semantic_search"),
                        dataset_id=dataset.id,
                        query=query,
                        top_k=self.top_k,
                        score_threshold=retrieval_model.get("score_threshold", 0.0)
                        if retrieval_model["score_threshold_enabled"]
                        else 0.0,
                        reranking_model=retrieval_model.get("reranking_model", None)
                        if retrieval_model["reranking_enable"]
                        else None,
                        reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model",
                        weights=retrieval_model.get("weights", None),
                    )
                else:
                    documents = []

                for hit_callback in self.hit_callbacks:
                    hit_callback.on_tool_end(documents)
                document_score_list = {}
                if dataset.indexing_technique != "economy":
                    for item in documents:
                        if item.metadata.get("score"):
                            document_score_list[item.metadata["doc_id"]] = item.metadata["score"]
                document_context_list = []
                index_node_ids = [document.metadata["doc_id"] for document in documents]
                segments = DocumentSegment.query.filter(
                    DocumentSegment.dataset_id == self.dataset_id,
                    DocumentSegment.completed_at.isnot(None),
                    DocumentSegment.status == "completed",
                    DocumentSegment.enabled == True,
                    DocumentSegment.index_node_id.in_(index_node_ids),
                ).all()

                if segments:
                    index_node_id_to_position = {id: position for position, id in enumerate(index_node_ids)}
                    sorted_segments = sorted(
                        segments, key=lambda segment: index_node_id_to_position.get(segment.index_node_id, float("inf"))
                    )
                    for segment in sorted_segments:
                        if segment.answer:
                            document_context_list.append(
                                f"question:{segment.get_sign_content()} answer:{segment.answer}"
                            )
                        else:
                            document_context_list.append(segment.get_sign_content())
                    if self.return_resource:
                        context_list = []
                        resource_number = 1
                        for segment in sorted_segments:
                            context = {}
                            document = Document.query.filter(
                                Document.id == segment.document_id,
                                Document.enabled == True,
                                Document.archived == False,
                            ).first()
                            if dataset and document:
                                source = {
                                    "position": resource_number,
                                    "dataset_id": dataset.id,
                                    "dataset_name": dataset.name,
                                    "document_id": document.id,
                                    "document_name": document.name,
                                    "data_source_type": document.data_source_type,
                                    "segment_id": segment.id,
                                    "retriever_from": self.retriever_from,
                                    "score": document_score_list.get(segment.index_node_id, None),
                                }
                                if self.retriever_from == "dev":
                                    source["hit_count"] = segment.hit_count
                                    source["word_count"] = segment.word_count
                                    source["segment_position"] = segment.position
                                    source["index_node_hash"] = segment.index_node_hash
                                if segment.answer:
                                    source["content"] = f"question:{segment.content} \nanswer:{segment.answer}"
                                else:
                                    source["content"] = segment.content
                                context_list.append(source)
                                resource_number += 1

                        for hit_callback in self.hit_callbacks:
                            hit_callback.return_retriever_resource_info(context_list)

                return str("\n".join(document_context_list))
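
Reviewer note: the external branch keeps the internal path's contract: it returns the joined page_content strings and reports per-document source metadata through the same return_retriever_resource_info callback, so callers need no changes when a dataset's provider is "external".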
@@ -1,3 +1,5 @@
from typing import Optional

import httpx

from core.tools.errors import ToolProviderCredentialValidationError
@@ -32,7 +34,12 @@ class FeishuRequest:
        return res.get("tenant_access_token")

    def _send_request(
        self,
        url: str,
        method: str = "post",
        require_token: bool = True,
        payload: Optional[dict] = None,
        params: Optional[dict] = None,
    ):
        headers = {
            "Content-Type": "application/json",
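
Reviewer note: besides reflowing the signature, this corrects the annotations, since None is not a valid dict; a minimal sketch of the Optional-default pattern (hypothetical function, not from this commit):

from typing import Optional

def send(payload: Optional[dict] = None) -> dict:
    # Normalize the None default inside the body; a mutable {} default
    # would be shared across calls.
    payload = payload or {}
    return payload

print(send())            # {}
print(send({"k": "v"}))  # {'k': 'v'}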
@@ -3,6 +3,7 @@ import uuid
from json import dumps as json_dumps
from json import loads as json_loads
from json.decoder import JSONDecodeError
from typing import Optional

from requests import get
from yaml import YAMLError, safe_load
@@ -92,10 +92,13 @@ class WorkflowTool(Tool):
        if data.get("error"):
            raise Exception(data.get("error"))

        outputs = data.get("outputs")
        if outputs is None:
            outputs = {}
        else:
            outputs, files = self._extract_files(outputs)
            for file in files:
                yield self.create_file_var_message(file)

        yield self.create_text_message(json.dumps(outputs, ensure_ascii=False))
        yield self.create_json_message(outputs)
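
Reviewer note: the change guards against a workflow whose outputs field is null; a condensed sketch of the new behavior (hypothetical data, not from this commit):

import json

def emit(data: dict):
    outputs = data.get("outputs")
    # A workflow can return {"outputs": null}; treating it as {} avoids
    # calling _extract_files(None).
    if outputs is None:
        outputs = {}
    print(json.dumps(outputs, ensure_ascii=False))

emit({"outputs": None})      # {}
emit({"outputs": {"a": 1}})  # {"a": 1}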