fix: file logs not displayed in dataset ingestion page (#14479)
### What problem does this PR solve?
## Summary
Fixed a bug where the **File Logs** tab in the dataset ingestion page
always showed "No logs" even after files were parsed successfully.
## Root Cause
Both the **File Logs** and **Dataset Logs** tabs on the frontend called
the same backend endpoint `/datasets/{dataset_id}/ingestions`. However,
the backend only queried `get_dataset_logs_by_kb_id`, which
hard-filtered records by `document_id == GRAPH_RAPTOR_FAKE_DOC_ID`
(dataset-level logs). As a result, real file-level logs were never
returned, causing the table to appear empty.
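To illustrate, here is a minimal, self-contained sketch of the pre-fix query path (peewee-style, with a simplified stand-in model and sentinel value; the real `PipelineOperationLog` model and method signature carry more columns and filters):

```python
# Sketch of the pre-fix behavior -- simplified stand-ins, not the exact code.
from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

GRAPH_RAPTOR_FAKE_DOC_ID = "FAKE_DOC_ID"  # stand-in for the real sentinel


class PipelineOperationLog(Model):
    kb_id = CharField()
    document_id = CharField()
    document_name = CharField()

    class Meta:
        database = db


def get_dataset_logs_by_kb_id(kb_id: str, page: int, page_size: int):
    query = PipelineOperationLog.select().where(
        (PipelineOperationLog.kb_id == kb_id)
        # Hard filter: only the sentinel doc id used for dataset-level
        # (GraphRAG / RAPTOR) logs matches, so real file-level logs never
        # pass this predicate -- hence the always-empty File Logs tab.
        & (PipelineOperationLog.document_id == GRAPH_RAPTOR_FAKE_DOC_ID)
    )
    return list(query.paginate(page, page_size).dicts()), query.count()
```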
## Changes
### Backend
- **`api/apps/restful_apis/dataset_api.py`**
- Added two new query parameters to `list_ingestion_logs`:
- `log_type` — `"file"` or `"dataset"` (default: `"dataset"`)
- `keywords` — search keyword for filtering by document / task name
- **`api/apps/services/dataset_api_service.py`**
- Updated `list_ingestion_logs` signature to accept `log_type` and
`keywords`.
- Added conditional routing (see the sketch after this list):
- When `log_type == "file"`, call
`PipelineOperationLogService.get_file_logs_by_kb_id`
- Otherwise, call
`PipelineOperationLogService.get_dataset_logs_by_kb_id`
- **`api/db/services/pipeline_operation_log_service.py`**
- Extended `get_dataset_logs_by_kb_id` with an optional `keywords`
parameter so dataset logs can also be searched.
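Condensed from the service diff below (argument lists are kept but wrapped here for readability; the diff shows the exact one-line calls), the new routing looks like:

```python
# Core of the new routing in dataset_api_service.list_ingestion_logs
# (condensed from the diff below; not a standalone module).
allowed_log_types = {"dataset", "file"}
if log_type not in allowed_log_types:
    return False, 'Invalid "log_type", expected "dataset" or "file"'

if log_type == "file":
    # Real per-document logs; `keywords` matches the file name.
    logs, total = PipelineOperationLogService.get_file_logs_by_kb_id(
        dataset_id, page, page_size, orderby, desc, keywords,
        operation_status or [], None, None, create_date_from, create_date_to,
    )
else:
    # Dataset-level (GraphRAG / RAPTOR / MindMap) logs; `keywords` is now
    # forwarded so these can be searched too.
    logs, total = PipelineOperationLogService.get_dataset_logs_by_kb_id(
        dataset_id, page, page_size, orderby, desc, operation_status or [],
        create_date_from, create_date_to, keywords,
    )
return True, {"total": total, "logs": logs}
```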
### Frontend
- **`web/src/pages/dataset/dataset-overview/hook.ts`**
- Removed the separate API function switching (`listPipelineDatasetLogs`
vs `listDataPipelineLogDocument`).
- Unified both tabs to call `listDataPipelineLogDocument` with the new
`log_type` query parameter (`"file"` or `"dataset"`).
- Ensured `keywords` and filter values are passed through correctly.
## Behavior After Fix
| Tab | `log_type` | Returned Records | Searchable Field |
|---|---|---|---|
| File Logs | `file` | Real document-level logs | `document_name` (file name) |
| Dataset Logs | `dataset` | GraphRAG / RAPTOR / MindMap logs | `document_name` (task type) |
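For example, the two tabs now differ only in the query string. A hypothetical client call (host, API prefix, and auth header are placeholders, not the documented SDK):

```python
import requests

BASE = "http://localhost:9380/v1"  # placeholder host and prefix
HEADERS = {"Authorization": "Bearer <token>"}  # placeholder auth
dataset_id = "<dataset_id>"

# File Logs tab: real per-document logs, searchable by file name.
file_logs = requests.get(
    f"{BASE}/datasets/{dataset_id}/ingestions",
    params={"log_type": "file", "keywords": "report.pdf", "page": 1, "page_size": 20},
    headers=HEADERS,
).json()

# Dataset Logs tab: GraphRAG / RAPTOR / MindMap logs ("dataset" is the default).
dataset_logs = requests.get(
    f"{BASE}/datasets/{dataset_id}/ingestions",
    params={"log_type": "dataset"},
    headers=HEADERS,
).json()
```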
### Type of change
- [x] Bug Fix (non-breaking change which fixes an issue)
---------
Signed-off-by: noob <yixiao121314@outlook.com>
Co-authored-by: Wang Qi <wangq8@outlook.com>
Co-authored-by: Yingfeng Zhang <yingfeng.zhang@gmail.com>
@@ -79,7 +79,7 @@ def get_flattened_metadata(tenant_id):
 @manager.route("/datasets", methods=["POST"]) # noqa: F821
 @login_required
 @add_tenant_id_to_kwargs
-async def create(tenant_id: str=None):
+async def create(tenant_id: str = None):
     """
     Create a new dataset.
     ---
@@ -477,7 +477,7 @@ async def rename_tag(tenant_id, dataset_id):
         return get_error_data_result(message="Internal server error")


-@manager.route('/datasets/<dataset_id>/search', methods=['POST']) # noqa: F821
+@manager.route("/datasets/<dataset_id>/search", methods=["POST"]) # noqa: F821
 @login_required
 @add_tenant_id_to_kwargs
 async def search(tenant_id, dataset_id):
@@ -506,7 +506,7 @@ async def search(tenant_id, dataset_id):
         return get_error_data_result(message="Internal server error")


-@manager.route('/datasets/<dataset_id>/graph/search', methods=['GET']) # noqa: F821
+@manager.route("/datasets/<dataset_id>/graph/search", methods=["GET"]) # noqa: F821
 @login_required
 @add_tenant_id_to_kwargs
 async def knowledge_graph(tenant_id, dataset_id):
@@ -515,17 +515,13 @@ async def knowledge_graph(tenant_id, dataset_id):
         if success:
             return get_result(data=result)
         else:
-            return get_result(
-                data=False,
-                message=result,
-                code=RetCode.AUTHENTICATION_ERROR
-            )
+            return get_result(data=False, message=result, code=RetCode.AUTHENTICATION_ERROR)
     except Exception as e:
         logging.exception(e)
         return get_error_data_result(message="Internal server error")


-@manager.route('/datasets/<dataset_id>/graph', methods=['GET']) # noqa: F821
+@manager.route("/datasets/<dataset_id>/graph", methods=["GET"]) # noqa: F821
 @login_required
 @add_tenant_id_to_kwargs
 async def get_knowledge_graph(tenant_id, dataset_id):
@@ -541,17 +537,13 @@ async def get_knowledge_graph(tenant_id, dataset_id):
         if success:
             return get_result(data=result)
         else:
-            return get_result(
-                data=False,
-                message=result,
-                code=RetCode.AUTHENTICATION_ERROR
-            )
+            return get_result(data=False, message=result, code=RetCode.AUTHENTICATION_ERROR)
     except Exception as e:
         logging.exception(e)
         return get_error_data_result(message="Internal server error")


-@manager.route('/datasets/<dataset_id>/graph', methods=['DELETE']) # noqa: F821
+@manager.route("/datasets/<dataset_id>/graph", methods=["DELETE"]) # noqa: F821
 @login_required
 @add_tenant_id_to_kwargs
 def delete_knowledge_graph(tenant_id, dataset_id):
@@ -560,11 +552,7 @@ def delete_knowledge_graph(tenant_id, dataset_id):
         if success:
             return get_result(data=result)
         else:
-            return get_result(
-                data=False,
-                message=result,
-                code=RetCode.AUTHENTICATION_ERROR
-            )
+            return get_result(data=False, message=result, code=RetCode.AUTHENTICATION_ERROR)
     except Exception as e:
         logging.exception(e)
         return get_error_data_result(message="Internal server error")
@@ -655,9 +643,9 @@ def list_ingestion_logs(tenant_id, dataset_id):
     operation_status = request.args.getlist("operation_status")
     create_date_from = request.args.get("create_date_from", None)
     create_date_to = request.args.get("create_date_to", None)
-    success, result = dataset_api_service.list_ingestion_logs(
-        dataset_id, tenant_id, page, page_size, orderby, desc, operation_status, create_date_from, create_date_to
-    )
+    log_type = request.args.get("log_type", "dataset")
+    keywords = request.args.get("keywords", None)
+    success, result = dataset_api_service.list_ingestion_logs(dataset_id, tenant_id, page, page_size, orderby, desc, operation_status, create_date_from, create_date_to, log_type, keywords)
     if success:
         return get_result(data=result)
     else:
@@ -762,6 +750,7 @@ async def update_auto_metadata(tenant_id, dataset_id):
           type: object
     """
     from api.utils.validation_utils import AutoMetadataConfig
+
     cfg, err = await validate_and_parse_json_request(request, AutoMetadataConfig)
     if err is not None:
         return get_error_argument_result(err)
@@ -122,6 +122,8 @@ async def retrieval(tenant_id):
     retrieval_setting = req.get("retrieval_setting", {})
     similarity_threshold = float(retrieval_setting.get("score_threshold", 0.0))
     top = int(retrieval_setting.get("top_k", 1024))
+    if top <= 0:
+        return build_error_result(message="`top_k` must be greater than 0", code=RetCode.DATA_ERROR)
     metadata_condition = req.get("metadata_condition", {}) or {}
     metas = DocMetadataService.get_flatted_meta_by_kbs([kb_id])
@@ -436,6 +436,8 @@ async def retrieval_test(tenant_id):
     similarity_threshold = float(req.get("similarity_threshold", 0.2))
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     top = int(req.get("top_k", 1024))
+    if top <= 0:
+        return get_error_data_result("`top_k` must be greater than 0")
     highlight_val = req.get("highlight", None)
     if highlight_val is None:
         highlight = False
@@ -319,6 +319,8 @@ async def retrieval_test_embedded():
     vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
     use_kg = req.get("use_kg", False)
     top = int(req.get("top_k", 1024))
+    if top <= 0:
+        return get_error_data_result("`top_k` must be greater than 0")
     langs = req.get("cross_languages", [])
     rerank_id = req.get("rerank_id", "")
     tenant_rerank_id = req.get("tenant_rerank_id", "")
@@ -81,12 +81,7 @@ async def create_dataset(tenant_id: str, req: dict):
     req["parser_config"] = parser_cfg
     req.update(ext_fields)

-    e, create_dict = KnowledgebaseService.create_with_name(
-        name=req.pop("name", None),
-        tenant_id=tenant_id,
-        parser_id=req.pop("parser_id", None),
-        **req
-    )
+    e, create_dict = KnowledgebaseService.create_with_name(name=req.pop("name", None), tenant_id=tenant_id, parser_id=req.pop("parser_id", None), **req)

     if not e:
         return False, create_dict
@@ -152,12 +147,12 @@ async def delete_datasets(tenant_id: str, ids: list = None, delete_all: bool = F
                 ]
             )
             File2DocumentService.delete_by_document_id(doc.id)
-        FileService.filter_delete(
-            [File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kb.name])
+        FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kb.name])

         # Drop index for this dataset
         try:
             from rag.nlp import search

             idxnm = search.index_name(kb.tenant_id)
             settings.docStoreConn.delete_idx(idxnm, kb_id)
         except Exception as e:
@@ -266,7 +261,7 @@ async def update_dataset(tenant_id: str, dataset_id: str, req: dict):
             parser_cfg["metadata"] = fields
             parser_cfg["enable_metadata"] = auto_meta.get("enabled", True)
             req["parser_config"] = parser_cfg


     # Merge ext fields with req
     req.update(ext_fields)

@@ -303,8 +298,7 @@ async def update_dataset(tenant_id: str, dataset_id: str, req: dict):
         req["pipeline_id"] = ""

     if "name" in req and req["name"].lower() != kb.name.lower():
-        exists = KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id,
-                                                  status=StatusEnum.VALID.value)
+        exists = KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)
         if exists:
             return False, f"Dataset name '{req['name']}' already exists"

@@ -323,13 +317,13 @@ async def update_dataset(tenant_id: str, dataset_id: str, req: dict):

         if req["pagerank"] > 0:
             from rag.nlp import search
-            settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]},
-                                         search.index_name(kb.tenant_id), kb.id)
+
+            settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]}, search.index_name(kb.tenant_id), kb.id)
         else:
             # Elasticsearch requires PAGERANK_FLD be non-zero!
             from rag.nlp import search
-            settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD},
-                                         search.index_name(kb.tenant_id), kb.id)
+
+            settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD}, search.index_name(kb.tenant_id), kb.id)
     if "parse_type" in req:
         del req["parse_type"]
@@ -388,27 +382,13 @@ def list_datasets(tenant_id: str, args: dict):
     else:
         tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
         tenant_ids = [m["tenant_id"] for m in tenants]
-    kbs, total = KnowledgebaseService.get_list(
-        tenant_ids,
-        tenant_id,
-        page,
-        page_size,
-        orderby,
-        desc,
-        kb_id,
-        name,
-        keywords,
-        parser_id
-    )
+    kbs, total = KnowledgebaseService.get_list(tenant_ids, tenant_id, page, page_size, orderby, desc, kb_id, name, keywords, parser_id)
     users = UserService.get_by_ids([m["tenant_id"] for m in kbs])
     user_map = {m.id: m.to_dict() for m in users}
     response_data_list = []
     for kb in kbs:
         user_dict = user_map.get(kb["tenant_id"], {})
-        kb.update({
-            "nickname": user_dict.get("nickname", ""),
-            "tenant_avatar": user_dict.get("avatar", "")
-        })
+        kb.update({"nickname": user_dict.get("nickname", ""), "tenant_avatar": user_dict.get("avatar", "")})
         response_data_list.append(remap_dictionary_keys(kb))
     return True, {"data": response_data_list, "total": total}
@@ -425,13 +405,11 @@ async def get_knowledge_graph(dataset_id: str, tenant_id: str):
         return False, "No authorization."
     _, kb = KnowledgebaseService.get_by_id(dataset_id)

-    req = {
-        "kb_id": [dataset_id],
-        "knowledge_graph_kwd": ["graph"]
-    }
+    req = {"kb_id": [dataset_id], "knowledge_graph_kwd": ["graph"]}

     obj = {"graph": {}, "mind_map": {}}
     from rag.nlp import search

     if not settings.docStoreConn.index_exist(search.index_name(kb.tenant_id), dataset_id):
         return True, obj
     sres = await settings.retriever.search(req, search.index_name(kb.tenant_id), [dataset_id])
@@ -451,8 +429,7 @@ async def get_knowledge_graph(dataset_id: str, tenant_id: str):
         obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
         if "edges" in obj["graph"]:
             node_id_set = {o["id"] for o in obj["graph"]["nodes"]}
-            filtered_edges = [o for o in obj["graph"]["edges"] if
-                              o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
+            filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
             obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
     return True, obj

@@ -469,8 +446,8 @@ def delete_knowledge_graph(dataset_id: str, tenant_id: str):
         return False, "No authorization."
     _, kb = KnowledgebaseService.get_by_id(dataset_id)
     from rag.nlp import search
-    settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]},
-                                 search.index_name(kb.tenant_id), dataset_id)
+
+    settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), dataset_id)

     return True, True

@@ -636,6 +613,7 @@ def get_flattened_metadata(dataset_ids: list[str], tenant_id: str):
             return False, f"No authorization for dataset '{dataset_id}'"

     from api.db.services.doc_metadata_service import DocMetadataService
+
     return True, DocMetadataService.get_flatted_meta_by_kbs(dataset_ids)

@@ -697,15 +675,26 @@ def delete_tags(dataset_id: str, tenant_id: str, tags: list[str]):
         return False, "Invalid Dataset ID"

     from rag.nlp import search

     for t in tags:
-        settings.docStoreConn.update({"tag_kwd": t, "kb_id": [dataset_id]},
-                                     {"remove": {"tag_kwd": t}},
-                                     search.index_name(kb.tenant_id),
-                                     dataset_id)
+        settings.docStoreConn.update({"tag_kwd": t, "kb_id": [dataset_id]}, {"remove": {"tag_kwd": t}}, search.index_name(kb.tenant_id), dataset_id)

     return True, {}


-def list_ingestion_logs(dataset_id: str, tenant_id: str, page: int, page_size: int, orderby: str, desc: bool, operation_status: list = None, create_date_from: str = None, create_date_to: str = None):
+def list_ingestion_logs(
+    dataset_id: str,
+    tenant_id: str,
+    page: int,
+    page_size: int,
+    orderby: str,
+    desc: bool,
+    operation_status: list = None,
+    create_date_from: str = None,
+    create_date_to: str = None,
+    log_type: str = "dataset",
+    keywords: str = None,
+):
     """
     List ingestion logs for a dataset.

@@ -718,6 +707,8 @@ def list_ingestion_logs(dataset_id: str, tenant_id: str, page: int, page_size: i
     :param operation_status: filter by operation status
     :param create_date_from: filter start date
     :param create_date_to: filter end date
+    :param log_type: "dataset" or "file"
+    :param keywords: search keywords for file logs
     :return: (success, result) or (success, error_message)
     """
     if not dataset_id:
@@ -727,9 +718,30 @@ def list_ingestion_logs(dataset_id: str, tenant_id: str, page: int, page_size: i
         return False, "No authorization."

     from api.db.services.pipeline_operation_log_service import PipelineOperationLogService
-    logs, total = PipelineOperationLogService.get_dataset_logs_by_kb_id(
-        dataset_id, page, page_size, orderby, desc, operation_status or [], create_date_from, create_date_to
-    )
+
+    allowed_log_types = {"dataset", "file"}
+    if log_type not in allowed_log_types:
+        logging.warning(
+            "list_ingestion_logs invalid log_type: dataset_id=%s tenant_id=%s log_type=%s",
+            dataset_id,
+            tenant_id,
+            log_type,
+        )
+        return False, 'Invalid "log_type", expected "dataset" or "file"'
+
+    logging.info(
+        "list_ingestion_logs: dataset_id=%s tenant_id=%s log_type=%s page=%s page_size=%s",
+        dataset_id,
+        tenant_id,
+        log_type,
+        page,
+        page_size,
+    )
+
+    if log_type == "file":
+        logs, total = PipelineOperationLogService.get_file_logs_by_kb_id(dataset_id, page, page_size, orderby, desc, keywords, operation_status or [], None, None, create_date_from, create_date_to)
+    else:
+        logs, total = PipelineOperationLogService.get_dataset_logs_by_kb_id(dataset_id, page, page_size, orderby, desc, operation_status or [], create_date_from, create_date_to, keywords)
     return True, {"total": total, "logs": logs}

@@ -749,10 +761,9 @@ def get_ingestion_log(dataset_id: str, tenant_id: str, log_id: str):
         return False, "No authorization."

     from api.db.services.pipeline_operation_log_service import PipelineOperationLogService

     fields = PipelineOperationLogService.get_dataset_logs_fields()
-    log = PipelineOperationLogService.model.select(*fields).where(
-        (PipelineOperationLogService.model.id == log_id) & (PipelineOperationLogService.model.kb_id == dataset_id)
-    ).first()
+    log = PipelineOperationLogService.model.select(*fields).where((PipelineOperationLogService.model.id == log_id) & (PipelineOperationLogService.model.kb_id == dataset_id)).first()
     if not log:
         return False, "Log not found"

@@ -787,6 +798,7 @@ def delete_index(dataset_id: str, tenant_id: str, index_type: str):

     if task_id:
         from rag.utils.redis_conn import REDIS_CONN
+
         try:
             REDIS_CONN.set(f"{task_id}-cancel", "x")
         except Exception as e:
@@ -795,12 +807,12 @@ def delete_index(dataset_id: str, tenant_id: str, index_type: str):

     if index_type == "graph":
         from rag.nlp import search
-        settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]},
-                                     search.index_name(kb.tenant_id), dataset_id)
+
+        settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), dataset_id)
     elif index_type == "raptor":
         from rag.nlp import search
-        settings.docStoreConn.delete({"raptor_kwd": ["raptor"]},
-                                     search.index_name(kb.tenant_id), dataset_id)
+
+        settings.docStoreConn.delete({"raptor_kwd": ["raptor"]}, search.index_name(kb.tenant_id), dataset_id)

     KnowledgebaseService.update_by_id(kb.id, {task_id_field: "", task_finish_at_field: None})
     return True, {}
@@ -867,10 +879,8 @@ def rename_tag(dataset_id: str, tenant_id: str, from_tag: str, to_tag: str):
         return False, "Invalid Dataset ID"

     from rag.nlp import search
-    settings.docStoreConn.update({"tag_kwd": from_tag, "kb_id": [dataset_id]},
-                                 {"remove": {"tag_kwd": from_tag.strip()}, "add": {"tag_kwd": to_tag}},
-                                 search.index_name(kb.tenant_id),
-                                 dataset_id)
+
+    settings.docStoreConn.update({"tag_kwd": from_tag, "kb_id": [dataset_id]}, {"remove": {"tag_kwd": from_tag.strip()}, "add": {"tag_kwd": to_tag}}, search.index_name(kb.tenant_id), dataset_id)

     return True, {"from": from_tag, "to": to_tag}

@@ -987,36 +997,30 @@ async def search(dataset_id: str, tenant_id: str, req: dict):

         labels = label_question(_question, [kb])
         ranks = await settings.retriever.retrieval(
-            _question,
-            embd_mdl,
-            tenant_ids,
-            [dataset_id],
-            page,
-            size,
-            float(req.get("similarity_threshold", 0.0)),
-            float(req.get("vector_similarity_weight", 0.3)),
-            doc_ids=local_doc_ids,
-            top=top,
-            rerank_mdl=rerank_mdl,
-            rank_feature=labels
-        )
+            _question,
+            embd_mdl,
+            tenant_ids,
+            [dataset_id],
+            page,
+            size,
+            float(req.get("similarity_threshold", 0.0)),
+            float(req.get("vector_similarity_weight", 0.3)),
+            doc_ids=local_doc_ids,
+            top=top,
+            rerank_mdl=rerank_mdl,
+            rank_feature=labels,
+        )

         if use_kg:
             try:
                 default_chat_model_config = get_tenant_default_model_by_type(tenant_id, LLMType.CHAT)
-                ck = await settings.kg_retriever.retrieval(_question,
-                                                           tenant_ids,
-                                                           [dataset_id],
-                                                           embd_mdl,
-                                                           LLMBundle(kb.tenant_id, default_chat_model_config))
+                ck = await settings.kg_retriever.retrieval(_question, tenant_ids, [dataset_id], embd_mdl, LLMBundle(kb.tenant_id, default_chat_model_config))
                 if ck["content_with_weight"]:
                     ranks["chunks"].insert(0, ck)
             except Exception:
                 logging.warning("search KG retrieval failed: dataset=%s tenant=%s", dataset_id, tenant_id, exc_info=True)
         total = ranks.get("total", 0)
-        ranks["chunks"] = settings.retriever.retrieval_by_children(
-            ranks["chunks"], tenant_ids
-        )
+        ranks["chunks"] = settings.retriever.retrieval_by_children(ranks["chunks"], tenant_ids)
         ranks["total"] = total

         for c in ranks["chunks"]: