Mirror of https://github.com/infiniflow/ragflow.git
Fix: empty chunk issue. (#12638)
#12570

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
```diff
@@ -373,13 +373,14 @@ def my_llms():
 
 @manager.route("/list", methods=["GET"])  # noqa: F821
 @login_required
-def list_app():
+async def list_app():
     self_deployed = ["FastEmbed", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
     weighted = []
     model_type = request.args.get("model_type")
+    tenant_id = current_user.id
     try:
-        TenantLLMService.ensure_mineru_from_env(current_user.id)
-        objs = TenantLLMService.query(tenant_id=current_user.id)
+        TenantLLMService.ensure_mineru_from_env(tenant_id)
+        objs = TenantLLMService.query(tenant_id=tenant_id)
         facts = set([o.to_dict()["llm_factory"] for o in objs if o.api_key and o.status == StatusEnum.VALID.value])
         status = {(o.llm_name + "@" + o.llm_factory) for o in objs if o.status == StatusEnum.VALID.value}
         llms = LLMService.get_all()
```
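Two things change in this hunk: the handler becomes a coroutine, and the repeated `current_user.id` lookup is hoisted into a local `tenant_id` so it is read once. A minimal sketch of the same shape, assuming a Flask-style framework with async view support (all names below are hypothetical, not ragflow code):

```python
# Minimal sketch (hypothetical, not ragflow code). Flask 2.x can run
# `async def` views when installed as flask[async]; converting a plain
# view to a coroutine is otherwise a drop-in change.
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/list", methods=["GET"])
async def list_app():
    model_type = request.args.get("model_type")  # optional query filter
    tenant_id = "tenant-123"  # stands in for current_user.id, read once
    return jsonify({"tenant_id": tenant_id, "model_type": model_type})
```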
```diff
@@ -93,6 +93,8 @@ class Splitter(ProcessBase):
                 split_sec = re.split(r"(%s)" % custom_pattern, c, flags=re.DOTALL)
                 if split_sec:
                     for j in range(0, len(split_sec), 2):
+                        if not split_sec[j].strip():
+                            continue
                         docs.append({
                             "text": split_sec[j],
                             "mom": c
```
```diff
@@ -156,6 +158,8 @@ class Splitter(ProcessBase):
                 if split_sec:
                     c["mom"] = c["text"]
                     for j in range(0, len(split_sec), 2):
+                        if not split_sec[j].strip():
+                            continue
                         cc = deepcopy(c)
                         cc["text"] = split_sec[j]
                         docs.append(cc)
```
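Both hunks guard against the same failure mode: `re.split` with a capturing group keeps the delimiters at the odd indices and emits empty strings wherever a delimiter sits at the start or end of the input, or two delimiters are adjacent. Stepping through the even indices then picks up those empty segments and turns them into empty chunks. A minimal sketch of the behavior and the fix (the delimiter string here is hypothetical):

```python
import re

custom_pattern = re.escape("==SPLIT==")  # hypothetical delimiter
text = "==SPLIT==first chunk==SPLIT====SPLIT==second chunk"

# Capturing group keeps delimiters; empty strings appear at the leading
# delimiter and between the two adjacent ones:
split_sec = re.split(r"(%s)" % custom_pattern, text, flags=re.DOTALL)
print(split_sec)
# ['', '==SPLIT==', 'first chunk', '==SPLIT==', '', '==SPLIT==', 'second chunk']

docs = []
for j in range(0, len(split_sec), 2):  # even indices are the text segments
    if not split_sec[j].strip():       # the fix: skip empty/whitespace-only
        continue
    docs.append({"text": split_sec[j]})
print(docs)  # [{'text': 'first chunk'}, {'text': 'second chunk'}]
```

Skipping whitespace-only segments at the split site keeps downstream stages from ever seeing zero-length chunks, which is the empty-chunk issue the commit title refers to.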