Mirror of https://github.com/langgenius/dify.git (synced 2026-01-21 20:45:22 +08:00)

Compare commits: deploy/dev...copilot/su (22 commits)
| SHA1 |
|---|
| dd446a730e |
| a31315d1a6 |
| 46a8b2a6ce |
| 5a563a180f |
| 6a34c02463 |
| a99df44053 |
| 427eeefbdd |
| 75647df8cf |
| cf734a682c |
| 4d0c7ccb99 |
| 8f05df9d02 |
| bb677b3900 |
| 65e04f7342 |
| ee2f5d3ad9 |
| 58d9d2f38c |
| d540b14959 |
| 31d27639ef |
| 2d1de24baf |
| d9a2ba2617 |
| 10f10f4785 |
| 6890f7d09c |
| 46f0e3c07e |
@@ -965,16 +965,6 @@ class MailConfig(BaseSettings):
        default=None,
    )

    ENABLE_TRIAL_APP: bool = Field(
        description="Enable trial app",
        default=False,
    )

    ENABLE_EXPLORE_BANNER: bool = Field(
        description="Enable explore banner",
        default=False,
    )


class RagEtlConfig(BaseSettings):
    """
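Both flags are plain pydantic-settings booleans; assuming Dify's usual environment-based configuration loading (an assumption, not shown in this diff), a deployment would toggle them through same-named environment variables — a minimal sketch:

```python
# Sketch only: assumes pydantic-settings' default env-var mapping for these fields.
import os

os.environ["ENABLE_TRIAL_APP"] = "true"
os.environ["ENABLE_EXPLORE_BANNER"] = "true"
```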
@@ -107,12 +107,10 @@ from .datasets.rag_pipeline import (

# Import explore controllers
from .explore import (
    banner,
    installed_app,
    parameter,
    recommended_app,
    saved_message,
    trial,
)

# Import tag controllers
@@ -147,7 +145,6 @@ __all__ = [
    "apikey",
    "app",
    "audio",
    "banner",
    "billing",
    "bp",
    "completion",
@@ -201,7 +198,6 @@ __all__ = [
    "statistic",
    "tags",
    "tool_providers",
    "trial",
    "trigger_providers",
    "version",
    "website",
@@ -15,7 +15,7 @@ from controllers.console.wraps import only_edition_cloud
from core.db.session_factory import session_factory
from extensions.ext_database import db
from libs.token import extract_access_token
from models.model import App, ExporleBanner, InstalledApp, RecommendedApp, TrialApp
from models.model import App, InstalledApp, RecommendedApp

P = ParamSpec("P")
R = TypeVar("R")
@@ -32,8 +32,6 @@ class InsertExploreAppPayload(BaseModel):
    language: str = Field(...)
    category: str = Field(...)
    position: int = Field(...)
    can_trial: bool = Field(default=False)
    trial_limit: int = Field(default=0)

    @field_validator("language")
    @classmethod
@@ -41,33 +39,11 @@ class InsertExploreAppPayload(BaseModel):
        return supported_language(value)


class InsertExploreBannerPayload(BaseModel):
    category: str = Field(...)
    title: str = Field(...)
    description: str = Field(...)
    img_src: str = Field(..., alias="img-src")
    language: str = Field(default="en-US")
    link: str = Field(...)
    sort: int = Field(...)

    @field_validator("language")
    @classmethod
    def validate_language(cls, value: str) -> str:
        return supported_language(value)

    model_config = {"populate_by_name": True}


console_ns.schema_model(
    InsertExploreAppPayload.__name__,
    InsertExploreAppPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
)

console_ns.schema_model(
    InsertExploreBannerPayload.__name__,
    InsertExploreBannerPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
)


def admin_required(view: Callable[P, R]):
    @wraps(view)
@@ -133,20 +109,6 @@ class InsertExploreAppListApi(Resource):
        )

        db.session.add(recommended_app)
        if payload.can_trial:
            trial_app = db.session.execute(
                select(TrialApp).where(TrialApp.app_id == payload.app_id)
            ).scalar_one_or_none()
            if not trial_app:
                db.session.add(
                    TrialApp(
                        app_id=payload.app_id,
                        tenant_id=app.tenant_id,
                        trial_limit=payload.trial_limit,
                    )
                )
            else:
                trial_app.trial_limit = payload.trial_limit

        app.is_public = True
        db.session.commit()
@@ -161,20 +123,6 @@ class InsertExploreAppListApi(Resource):
        recommended_app.category = payload.category
        recommended_app.position = payload.position

        if payload.can_trial:
            trial_app = db.session.execute(
                select(TrialApp).where(TrialApp.app_id == payload.app_id)
            ).scalar_one_or_none()
            if not trial_app:
                db.session.add(
                    TrialApp(
                        app_id=payload.app_id,
                        tenant_id=app.tenant_id,
                        trial_limit=payload.trial_limit,
                    )
                )
            else:
                trial_app.trial_limit = payload.trial_limit
        app.is_public = True

        db.session.commit()
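Both the insert and update branches above repeat the same TrialApp get-or-create logic; a condensed sketch of that shared pattern (hypothetical helper name, assuming a SQLAlchemy session and the `TrialApp` model from this diff):

```python
from sqlalchemy import select


def upsert_trial_app(session, app_id: str, tenant_id: str, trial_limit: int) -> None:
    trial_app = session.execute(select(TrialApp).where(TrialApp.app_id == app_id)).scalar_one_or_none()
    if trial_app is None:
        # First time this app is marked trialable: create the row.
        session.add(TrialApp(app_id=app_id, tenant_id=tenant_id, trial_limit=trial_limit))
    else:
        # Already trialable: only refresh the usage limit.
        trial_app.trial_limit = trial_limit
```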
@@ -220,62 +168,7 @@ class InsertExploreAppApi(Resource):
        for installed_app in installed_apps:
            session.delete(installed_app)

        trial_app = session.execute(
            select(TrialApp).where(TrialApp.app_id == recommended_app.app_id)
        ).scalar_one_or_none()
        if trial_app:
            session.delete(trial_app)

        db.session.delete(recommended_app)
        db.session.commit()

        return {"result": "success"}, 204


@console_ns.route("/admin/insert-explore-banner")
class InsertExploreBannerApi(Resource):
    @console_ns.doc("insert_explore_banner")
    @console_ns.doc(description="Insert an explore banner")
    @console_ns.expect(console_ns.models[InsertExploreBannerPayload.__name__])
    @console_ns.response(201, "Banner inserted successfully")
    @only_edition_cloud
    @admin_required
    def post(self):
        payload = InsertExploreBannerPayload.model_validate(console_ns.payload)

        content = {
            "category": payload.category,
            "title": payload.title,
            "description": payload.description,
            "img-src": payload.img_src,
        }

        banner = ExporleBanner(
            content=content,
            link=payload.link,
            sort=payload.sort,
            language=payload.language,
        )
        db.session.add(banner)
        db.session.commit()

        return {"result": "success"}, 201


@console_ns.route("/admin/delete-explore-banner/<uuid:banner_id>")
class DeleteExploreBannerApi(Resource):
    @console_ns.doc("delete_explore_banner")
    @console_ns.doc(description="Delete an explore banner")
    @console_ns.doc(params={"banner_id": "Banner ID to delete"})
    @console_ns.response(204, "Banner deleted successfully")
    @only_edition_cloud
    @admin_required
    def delete(self, banner_id):
        banner = db.session.execute(select(ExporleBanner).where(ExporleBanner.id == banner_id)).scalar_one_or_none()
        if not banner:
            raise NotFound(f"Banner '{banner_id}' not found")

        db.session.delete(banner)
        db.session.commit()

        return {"result": "success"}, 204
@@ -115,9 +115,3 @@ class InvokeRateLimitError(BaseHTTPException):
    error_code = "rate_limit_error"
    description = "Rate Limit Error"
    code = 429


class NeedAddIdsError(BaseHTTPException):
    error_code = "need_add_ids"
    description = "Need to add ids."
    code = 400
@@ -23,11 +23,6 @@ def _load_app_model(app_id: str) -> App | None:
    return app_model


def _load_app_model_with_trial(app_id: str) -> App | None:
    app_model = db.session.query(App).where(App.id == app_id, App.status == "normal").first()
    return app_model


def get_app_model(view: Callable[P, R] | None = None, *, mode: Union[AppMode, list[AppMode], None] = None):
    def decorator(view_func: Callable[P1, R1]):
        @wraps(view_func)
@@ -67,44 +62,3 @@ def get_app_model(view: Callable[P, R] | None = None, *, mode: Union[AppMode, list[AppMode], None] = None):
        return decorator
    else:
        return decorator(view)


def get_app_model_with_trial(view: Callable[P, R] | None = None, *, mode: Union[AppMode, list[AppMode], None] = None):
    def decorator(view_func: Callable[P, R]):
        @wraps(view_func)
        def decorated_view(*args: P.args, **kwargs: P.kwargs):
            if not kwargs.get("app_id"):
                raise ValueError("missing app_id in path parameters")

            app_id = kwargs.get("app_id")
            app_id = str(app_id)

            del kwargs["app_id"]

            app_model = _load_app_model_with_trial(app_id)

            if not app_model:
                raise AppNotFoundError()

            app_mode = AppMode.value_of(app_model.mode)

            if mode is not None:
                if isinstance(mode, list):
                    modes = mode
                else:
                    modes = [mode]

                if app_mode not in modes:
                    mode_values = {m.value for m in modes}
                    raise AppNotFoundError(f"App mode is not in the supported list: {mode_values}")

            kwargs["app_model"] = app_model

            return view_func(*args, **kwargs)

        return decorated_view

    if view is None:
        return decorator
    else:
        return decorator(view)
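`get_app_model_with_trial` follows the optional-argument decorator idiom: the trailing `view is None` check lets it be applied either bare or with keyword arguments. The idiom in isolation, as a minimal generic sketch (names are illustrative, not from the diff):

```python
from functools import wraps


def flexible_decorator(view=None, *, option=None):
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # `option` is available here when the decorator was parameterized.
            return fn(*args, **kwargs)

        return wrapper

    # Bare usage (@flexible_decorator) passes the function directly;
    # parameterized usage (@flexible_decorator(option=...)) passes view=None.
    return decorator if view is None else decorator(view)
```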
@@ -146,7 +146,6 @@ class DatasetUpdatePayload(BaseModel):
    embedding_model: str | None = None
    embedding_model_provider: str | None = None
    retrieval_model: dict[str, Any] | None = None
    summary_index_setting: dict[str, Any] | None = None
    partial_member_list: list[dict[str, str]] | None = None
    external_retrieval_model: dict[str, Any] | None = None
    external_knowledge_id: str | None = None
@@ -41,11 +41,10 @@ from fields.document_fields import (
from libs.datetime_utils import naive_utc_now
from libs.login import current_account_with_tenant, login_required
from models import DatasetProcessRule, Document, DocumentSegment, UploadFile
from models.dataset import DocumentPipelineExecutionLog, DocumentSegmentSummary
from models.dataset import DocumentPipelineExecutionLog
from services.dataset_service import DatasetService, DocumentService
from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig, ProcessRule, RetrievalModel
from services.file_service import FileService
from tasks.generate_summary_index_task import generate_summary_index_task

from ..app.error import (
    ProviderModelCurrentlyNotSupportError,
@@ -111,10 +110,6 @@ class DocumentRenamePayload(BaseModel):
    name: str


class GenerateSummaryPayload(BaseModel):
    document_list: list[str]


class DocumentBatchDownloadZipPayload(BaseModel):
    """Request payload for bulk downloading documents as a zip archive."""
@@ -137,7 +132,6 @@ register_schema_models(
    RetrievalModel,
    DocumentRetryPayload,
    DocumentRenamePayload,
    GenerateSummaryPayload,
    DocumentBatchDownloadZipPayload,
)
@@ -325,89 +319,6 @@ class DatasetDocumentListApi(Resource):

        paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False)
        documents = paginated_documents.items

        # Check if dataset has summary index enabled
        has_summary_index = dataset.summary_index_setting and dataset.summary_index_setting.get("enable") is True

        # Filter documents that need summary calculation
        documents_need_summary = [doc for doc in documents if doc.need_summary is True]
        document_ids_need_summary = [str(doc.id) for doc in documents_need_summary]

        # Calculate summary_index_status for documents that need summary (only if dataset summary index is enabled)
        summary_status_map = {}
        if has_summary_index and document_ids_need_summary:
            # Get all segments for these documents (excluding qa_model and re_segment)
            segments = (
                db.session.query(DocumentSegment.id, DocumentSegment.document_id)
                .where(
                    DocumentSegment.document_id.in_(document_ids_need_summary),
                    DocumentSegment.status != "re_segment",
                    DocumentSegment.tenant_id == current_tenant_id,
                )
                .all()
            )

            # Group segments by document_id
            document_segments_map = {}
            for segment in segments:
                doc_id = str(segment.document_id)
                if doc_id not in document_segments_map:
                    document_segments_map[doc_id] = []
                document_segments_map[doc_id].append(segment.id)

            # Get all summary records for these segments
            all_segment_ids = [seg.id for seg in segments]
            summaries = {}
            if all_segment_ids:
                summary_records = (
                    db.session.query(DocumentSegmentSummary)
                    .where(
                        DocumentSegmentSummary.chunk_id.in_(all_segment_ids),
                        DocumentSegmentSummary.dataset_id == dataset_id,
                        DocumentSegmentSummary.enabled == True,  # Only count enabled summaries
                    )
                    .all()
                )
                summaries = {summary.chunk_id: summary.status for summary in summary_records}

            # Calculate summary_index_status for each document
            for doc_id in document_ids_need_summary:
                segment_ids = document_segments_map.get(doc_id, [])
                if not segment_ids:
                    # No segments, status is None (not started)
                    summary_status_map[doc_id] = None
                    continue

                # Count summary statuses for this document's segments
                status_counts = {"completed": 0, "generating": 0, "error": 0, "not_started": 0}
                for segment_id in segment_ids:
                    status = summaries.get(segment_id, "not_started")
                    if status in status_counts:
                        status_counts[status] += 1
                    else:
                        status_counts["not_started"] += 1

                generating_count = status_counts["generating"]

                # Determine overall status:
                # - "SUMMARIZING" only when task is queued and at least one summary is generating
                # - None (empty) for all other cases (not queued, all completed/error)
                if generating_count > 0:
                    # Task is queued and at least one summary is still generating
                    summary_status_map[doc_id] = "SUMMARIZING"
                else:
                    # Task not queued yet, or all summaries are completed/error (task finished)
                    summary_status_map[doc_id] = None

        # Add summary_index_status to each document
        for document in documents:
            if has_summary_index and document.need_summary is True:
                # Get status from map, default to None (not queued yet)
                document.summary_index_status = summary_status_map.get(str(document.id))
            else:
                # Return null if summary index is not enabled or document doesn't need summary
                document.summary_index_status = None

        if fetch:
            for document in documents:
                completed_segments = (
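The loop above reduces per-segment summary statuses to a single document-level flag; the decision rule, isolated as a standalone sketch (hypothetical helper, not part of the diff):

```python
def document_summary_status(segment_statuses: list[str]) -> str | None:
    # "SUMMARIZING" while at least one segment summary is still generating;
    # None when nothing is queued or everything is completed/errored.
    return "SUMMARIZING" if "generating" in segment_statuses else None


assert document_summary_status(["completed", "generating"]) == "SUMMARIZING"
assert document_summary_status(["completed", "error"]) is None
```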
@@ -893,7 +804,6 @@ class DocumentApi(DocumentResource):
                "display_status": document.display_status,
                "doc_form": document.doc_form,
                "doc_language": document.doc_language,
                "need_summary": document.need_summary if document.need_summary is not None else False,
            }
        else:
            dataset_process_rules = DatasetService.get_process_rules(dataset_id)
@@ -929,7 +839,6 @@
            "display_status": document.display_status,
            "doc_form": document.doc_form,
            "doc_language": document.doc_language,
            "need_summary": document.need_summary if document.need_summary is not None else False,
        }

        return response, 200
@@ -1353,216 +1262,3 @@ class DocumentPipelineExecutionLogApi(DocumentResource):
            "input_data": log.input_data,
            "datasource_node_id": log.datasource_node_id,
        }, 200


@console_ns.route("/datasets/<uuid:dataset_id>/documents/generate-summary")
class DocumentGenerateSummaryApi(Resource):
    @console_ns.doc("generate_summary_for_documents")
    @console_ns.doc(description="Generate summary index for documents")
    @console_ns.doc(params={"dataset_id": "Dataset ID"})
    @console_ns.expect(console_ns.models[GenerateSummaryPayload.__name__])
    @console_ns.response(200, "Summary generation started successfully")
    @console_ns.response(400, "Invalid request or dataset configuration")
    @console_ns.response(403, "Permission denied")
    @console_ns.response(404, "Dataset not found")
    @setup_required
    @login_required
    @account_initialization_required
    @cloud_edition_billing_rate_limit_check("knowledge")
    def post(self, dataset_id):
        """
        Generate summary index for specified documents.

        This endpoint checks if the dataset configuration supports summary generation
        (indexing_technique must be 'high_quality' and summary_index_setting.enable must be true),
        then asynchronously generates summary indexes for the provided documents.
        """
        current_user, _ = current_account_with_tenant()
        dataset_id = str(dataset_id)

        # Get dataset
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")

        # Check permissions
        if not current_user.is_dataset_editor:
            raise Forbidden()

        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        # Validate request payload
        payload = GenerateSummaryPayload.model_validate(console_ns.payload or {})
        document_list = payload.document_list

        if not document_list:
            raise ValueError("document_list cannot be empty.")

        # Check if dataset configuration supports summary generation
        if dataset.indexing_technique != "high_quality":
            raise ValueError(
                f"Summary generation is only available for 'high_quality' indexing technique. "
                f"Current indexing technique: {dataset.indexing_technique}"
            )

        summary_index_setting = dataset.summary_index_setting
        if not summary_index_setting or not summary_index_setting.get("enable"):
            raise ValueError("Summary index is not enabled for this dataset. Please enable it in the dataset settings.")

        # Verify all documents exist and belong to the dataset
        documents = (
            db.session.query(Document)
            .filter(
                Document.id.in_(document_list),
                Document.dataset_id == dataset_id,
            )
            .all()
        )

        if len(documents) != len(document_list):
            found_ids = {doc.id for doc in documents}
            missing_ids = set(document_list) - found_ids
            raise NotFound(f"Some documents not found: {list(missing_ids)}")

        # Dispatch async tasks for each document
        for document in documents:
            # Skip qa_model documents as they don't generate summaries
            if document.doc_form == "qa_model":
                logger.info("Skipping summary generation for qa_model document %s", document.id)
                continue

            # Dispatch async task
            generate_summary_index_task(dataset_id, document.id)
            logger.info(
                "Dispatched summary generation task for document %s in dataset %s",
                document.id,
                dataset_id,
            )

        return {"result": "success"}, 200


@console_ns.route("/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/summary-status")
class DocumentSummaryStatusApi(DocumentResource):
    @console_ns.doc("get_document_summary_status")
    @console_ns.doc(description="Get summary index generation status for a document")
    @console_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"})
    @console_ns.response(200, "Summary status retrieved successfully")
    @console_ns.response(404, "Document not found")
    @setup_required
    @login_required
    @account_initialization_required
    def get(self, dataset_id, document_id):
        """
        Get summary index generation status for a document.

        Returns:
        - total_segments: Total number of segments in the document
        - summary_status: Dictionary with status counts
          - completed: Number of summaries completed
          - generating: Number of summaries being generated
          - error: Number of summaries with errors
          - not_started: Number of segments without summary records
        - summaries: List of summary records with status and content preview
        """
        current_user, _ = current_account_with_tenant()
        dataset_id = str(dataset_id)
        document_id = str(document_id)

        # Get document
        document = self.get_document(dataset_id, document_id)

        # Get dataset
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found.")

        # Check permissions
        try:
            DatasetService.check_dataset_permission(dataset, current_user)
        except services.errors.account.NoPermissionError as e:
            raise Forbidden(str(e))

        # Get all segments for this document
        segments = (
            db.session.query(DocumentSegment)
            .filter(
                DocumentSegment.document_id == document_id,
                DocumentSegment.dataset_id == dataset_id,
                DocumentSegment.status == "completed",
                DocumentSegment.enabled == True,
            )
            .all()
        )

        total_segments = len(segments)

        # Get all summary records for these segments
        segment_ids = [segment.id for segment in segments]
        summaries = []
        if segment_ids:
            summaries = (
                db.session.query(DocumentSegmentSummary)
                .filter(
                    DocumentSegmentSummary.document_id == document_id,
                    DocumentSegmentSummary.dataset_id == dataset_id,
                    DocumentSegmentSummary.chunk_id.in_(segment_ids),
                    DocumentSegmentSummary.enabled == True,  # Only return enabled summaries
                )
                .all()
            )

        # Create a mapping of chunk_id to summary
        summary_map = {summary.chunk_id: summary for summary in summaries}

        # Count statuses
        status_counts = {
            "completed": 0,
            "generating": 0,
            "error": 0,
            "not_started": 0,
        }

        summary_list = []
        for segment in segments:
            summary = summary_map.get(segment.id)
            if summary:
                status = summary.status
                status_counts[status] = status_counts.get(status, 0) + 1
                summary_list.append(
                    {
                        "segment_id": segment.id,
                        "segment_position": segment.position,
                        "status": summary.status,
                        "summary_preview": (
                            summary.summary_content[:100] + "..."
                            if summary.summary_content and len(summary.summary_content) > 100
                            else summary.summary_content
                        ),
                        "error": summary.error,
                        "created_at": int(summary.created_at.timestamp()) if summary.created_at else None,
                        "updated_at": int(summary.updated_at.timestamp()) if summary.updated_at else None,
                    }
                )
            else:
                status_counts["not_started"] += 1
                summary_list.append(
                    {
                        "segment_id": segment.id,
                        "segment_position": segment.position,
                        "status": "not_started",
                        "summary_preview": None,
                        "error": None,
                        "created_at": None,
                        "updated_at": None,
                    }
                )

        return {
            "total_segments": total_segments,
            "summary_status": status_counts,
            "summaries": summary_list,
        }, 200
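For orientation, the summary-status handler above returns a payload shaped roughly like this (all values below are illustrative, not taken from the diff):

```python
example_response = {
    "total_segments": 2,
    "summary_status": {"completed": 1, "generating": 1, "error": 0, "not_started": 0},
    "summaries": [
        {
            "segment_id": "segment-uuid",  # hypothetical value
            "segment_position": 1,
            "status": "completed",
            "summary_preview": "First 100 characters of the summary...",
            "error": None,
            "created_at": 1700000000,  # unix timestamps
            "updated_at": 1700000000,
        },
    ],
}
```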
@@ -32,7 +32,7 @@ from extensions.ext_redis import redis_client
from fields.segment_fields import child_chunk_fields, segment_fields
from libs.helper import escape_like_pattern
from libs.login import current_account_with_tenant, login_required
from models.dataset import ChildChunk, DocumentSegment, DocumentSegmentSummary
from models.dataset import ChildChunk, DocumentSegment
from models.model import UploadFile
from services.dataset_service import DatasetService, DocumentService, SegmentService
from services.entities.knowledge_entities.knowledge_entities import ChildChunkUpdateArgs, SegmentUpdateArgs
@@ -41,23 +41,6 @@ from services.errors.chunk import ChildChunkIndexingError as ChildChunkIndexingS
from tasks.batch_create_segment_to_index_task import batch_create_segment_to_index_task


def _get_segment_with_summary(segment, dataset_id):
    """Helper function to marshal segment and add summary information."""
    segment_dict = marshal(segment, segment_fields)
    # Query summary for this segment (only enabled summaries)
    summary = (
        db.session.query(DocumentSegmentSummary)
        .where(
            DocumentSegmentSummary.chunk_id == segment.id,
            DocumentSegmentSummary.dataset_id == dataset_id,
            DocumentSegmentSummary.enabled == True,  # Only return enabled summaries
        )
        .first()
    )
    segment_dict["summary"] = summary.summary_content if summary else None
    return segment_dict


class SegmentListQuery(BaseModel):
    limit: int = Field(default=20, ge=1, le=100)
    status: list[str] = Field(default_factory=list)
@@ -80,7 +63,6 @@ class SegmentUpdatePayload(BaseModel):
    keywords: list[str] | None = None
    regenerate_child_chunks: bool = False
    attachment_ids: list[str] | None = None
    summary: str | None = None  # Summary content for summary index


class BatchImportPayload(BaseModel):
@@ -198,32 +180,8 @@ class DatasetDocumentSegmentListApi(Resource):

        segments = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False)

        # Query summaries for all segments in this page (batch query for efficiency)
        segment_ids = [segment.id for segment in segments.items]
        summaries = {}
        if segment_ids:
            summary_records = (
                db.session.query(DocumentSegmentSummary)
                .where(
                    DocumentSegmentSummary.chunk_id.in_(segment_ids),
                    DocumentSegmentSummary.dataset_id == dataset_id,
                )
                .all()
            )
            # Only include enabled summaries
            summaries = {
                summary.chunk_id: summary.summary_content for summary in summary_records if summary.enabled is True
            }

        # Add summary to each segment
        segments_with_summary = []
        for segment in segments.items:
            segment_dict = marshal(segment, segment_fields)
            segment_dict["summary"] = summaries.get(segment.id)
            segments_with_summary.append(segment_dict)

        response = {
            "data": segments_with_summary,
            "data": marshal(segments.items, segment_fields),
            "limit": limit,
            "total": segments.total,
            "total_pages": segments.pages,
@@ -369,7 +327,7 @@ class DatasetDocumentSegmentAddApi(Resource):
        payload_dict = payload.model_dump(exclude_none=True)
        SegmentService.segment_create_args_validate(payload_dict, document)
        segment = SegmentService.create_segment(payload_dict, document, dataset)
        return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200
        return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200


@console_ns.route("/datasets/<uuid:dataset_id>/documents/<uuid:document_id>/segments/<uuid:segment_id>")
@@ -431,12 +389,10 @@ class DatasetDocumentSegmentUpdateApi(Resource):
        payload = SegmentUpdatePayload.model_validate(console_ns.payload or {})
        payload_dict = payload.model_dump(exclude_none=True)
        SegmentService.segment_create_args_validate(payload_dict, document)

        # Update segment (summary update with change detection is handled in SegmentService.update_segment)
        segment = SegmentService.update_segment(
            SegmentUpdateArgs.model_validate(payload.model_dump(exclude_none=True)), segment, document, dataset
        )
        return {"data": _get_segment_with_summary(segment, dataset_id), "doc_form": document.doc_form}, 200
        return {"data": marshal(segment, segment_fields), "doc_form": document.doc_form}, 200

    @setup_required
    @login_required
@@ -1,13 +1,6 @@
from flask_restx import Resource, fields
from flask_restx import Resource

from controllers.common.schema import register_schema_model
from fields.hit_testing_fields import (
    child_chunk_fields,
    document_fields,
    files_fields,
    hit_testing_record_fields,
    segment_fields,
)
from libs.login import login_required

from .. import console_ns
@@ -21,45 +14,13 @@ from ..wraps import (
register_schema_model(console_ns, HitTestingPayload)


def _get_or_create_model(model_name: str, field_def):
    """Get or create a flask_restx model to avoid dict type issues in Swagger."""
    existing = console_ns.models.get(model_name)
    if existing is None:
        existing = console_ns.model(model_name, field_def)
    return existing


# Register models for flask_restx to avoid dict type issues in Swagger
document_model = _get_or_create_model("HitTestingDocument", document_fields)

segment_fields_copy = segment_fields.copy()
segment_fields_copy["document"] = fields.Nested(document_model)
segment_model = _get_or_create_model("HitTestingSegment", segment_fields_copy)

child_chunk_model = _get_or_create_model("HitTestingChildChunk", child_chunk_fields)
files_model = _get_or_create_model("HitTestingFile", files_fields)

hit_testing_record_fields_copy = hit_testing_record_fields.copy()
hit_testing_record_fields_copy["segment"] = fields.Nested(segment_model)
hit_testing_record_fields_copy["child_chunks"] = fields.List(fields.Nested(child_chunk_model))
hit_testing_record_fields_copy["files"] = fields.List(fields.Nested(files_model))
hit_testing_record_model = _get_or_create_model("HitTestingRecord", hit_testing_record_fields_copy)

# Response model for hit testing API
hit_testing_response_fields = {
    "query": fields.String,
    "records": fields.List(fields.Nested(hit_testing_record_model)),
}
hit_testing_response_model = _get_or_create_model("HitTestingResponse", hit_testing_response_fields)


@console_ns.route("/datasets/<uuid:dataset_id>/hit-testing")
class HitTestingApi(Resource, DatasetsHitTestingBase):
    @console_ns.doc("test_dataset_retrieval")
    @console_ns.doc(description="Test dataset knowledge retrieval")
    @console_ns.doc(params={"dataset_id": "Dataset ID"})
    @console_ns.expect(console_ns.models[HitTestingPayload.__name__])
    @console_ns.response(200, "Hit testing completed successfully", model=hit_testing_response_model)
    @console_ns.response(200, "Hit testing completed successfully")
    @console_ns.response(404, "Dataset not found")
    @console_ns.response(400, "Invalid parameters")
    @setup_required
@@ -1,43 +0,0 @@
from flask import request
from flask_restx import Resource

from controllers.console import api
from controllers.console.explore.wraps import explore_banner_enabled
from extensions.ext_database import db
from models.model import ExporleBanner


class BannerApi(Resource):
    """Resource for banner list."""

    @explore_banner_enabled
    def get(self):
        """Get banner list."""
        language = request.args.get("language", "en-US")

        # Build base query for enabled banners
        base_query = db.session.query(ExporleBanner).where(ExporleBanner.status == "enabled")

        # Try to get banners in the requested language
        banners = base_query.where(ExporleBanner.language == language).order_by(ExporleBanner.sort).all()

        # Fallback to en-US if no banners found and language is not en-US
        if not banners and language != "en-US":
            banners = base_query.where(ExporleBanner.language == "en-US").order_by(ExporleBanner.sort).all()
        # Convert banners to serializable format
        result = []
        for banner in banners:
            banner_data = {
                "id": banner.id,
                "content": banner.content,  # Already parsed as JSON by SQLAlchemy
                "link": banner.link,
                "sort": banner.sort,
                "status": banner.status,
                "created_at": banner.created_at.isoformat() if banner.created_at else None,
            }
            result.append(banner_data)

        return result


api.add_resource(BannerApi, "/explore/banners")
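The banner endpoint's language handling is a simple two-step fallback; isolated as a sketch (hypothetical helper, assuming a query function that returns a list):

```python
def banners_for_language(query_by_language, language: str):
    # Prefer banners in the requested language; fall back to en-US
    # only when nothing matched and en-US wasn't already requested.
    banners = query_by_language(language)
    if not banners and language != "en-US":
        banners = query_by_language("en-US")
    return banners
```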
@@ -29,25 +29,3 @@ class AppAccessDeniedError(BaseHTTPException):
    error_code = "access_denied"
    description = "App access denied."
    code = 403


class TrialAppNotAllowed(BaseHTTPException):
    """*403* `Trial App Not Allowed`

    Raise if the app is not open for trial.
    """

    error_code = "trial_app_not_allowed"
    code = 403
    description = "The app is not allowed to be trialed."


class TrialAppLimitExceeded(BaseHTTPException):
    """*403* `Trial App Limit Exceeded`

    Raise if the user has exceeded the trial app limit.
    """

    error_code = "trial_app_limit_exceeded"
    code = 403
    description = "The user has exceeded the trial app limit."
@@ -29,7 +29,6 @@ recommended_app_fields = {
    "category": fields.String,
    "position": fields.Integer,
    "is_listed": fields.Boolean,
    "can_trial": fields.Boolean,
}

recommended_app_list_fields = {
@@ -1,512 +0,0 @@
import logging
from typing import Any, cast

from flask import request
from flask_restx import Resource, marshal, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound

import services
from controllers.common.fields import Parameters as ParametersResponse
from controllers.common.fields import Site as SiteResponse
from controllers.console import api
from controllers.console.app.error import (
    AppUnavailableError,
    AudioTooLargeError,
    CompletionRequestError,
    ConversationCompletedError,
    NeedAddIdsError,
    NoAudioUploadedError,
    ProviderModelCurrentlyNotSupportError,
    ProviderNotInitializeError,
    ProviderNotSupportSpeechToTextError,
    ProviderQuotaExceededError,
    UnsupportedAudioTypeError,
)
from controllers.console.app.wraps import get_app_model_with_trial
from controllers.console.explore.error import (
    AppSuggestedQuestionsAfterAnswerDisabledError,
    NotChatAppError,
    NotCompletionAppError,
    NotWorkflowAppError,
)
from controllers.console.explore.wraps import TrialAppResource, trial_feature_enable
from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError
from core.app.app_config.common.parameters_mapping import get_parameters_from_feature_dict
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import (
    ModelCurrentlyNotSupportError,
    ProviderTokenNotInitError,
    QuotaExceededError,
)
from core.model_runtime.errors.invoke import InvokeError
from core.workflow.graph_engine.manager import GraphEngineManager
from extensions.ext_database import db
from fields.app_fields import app_detail_fields_with_site
from fields.dataset_fields import dataset_fields
from fields.workflow_fields import workflow_fields
from libs import helper
from libs.helper import uuid_value
from libs.login import current_user
from models import Account
from models.account import TenantStatus
from models.model import AppMode, Site
from models.workflow import Workflow
from services.app_generate_service import AppGenerateService
from services.app_service import AppService
from services.audio_service import AudioService
from services.dataset_service import DatasetService
from services.errors.audio import (
    AudioTooLargeServiceError,
    NoAudioUploadedServiceError,
    ProviderNotSupportSpeechToTextServiceError,
    UnsupportedAudioTypeServiceError,
)
from services.errors.conversation import ConversationNotExistsError
from services.errors.llm import InvokeRateLimitError
from services.errors.message import (
    MessageNotExistsError,
    SuggestedQuestionsAfterAnswerDisabledError,
)
from services.message_service import MessageService
from services.recommended_app_service import RecommendedAppService

logger = logging.getLogger(__name__)


class TrialAppWorkflowRunApi(TrialAppResource):
    def post(self, trial_app):
        """
        Run workflow
        """
        app_model = trial_app
        if not app_model:
            raise NotWorkflowAppError()
        app_mode = AppMode.value_of(app_model.mode)
        if app_mode != AppMode.WORKFLOW:
            raise NotWorkflowAppError()

        parser = reqparse.RequestParser()
        parser.add_argument("inputs", type=dict, required=True, nullable=False, location="json")
        parser.add_argument("files", type=list, required=False, location="json")
        args = parser.parse_args()
        assert current_user is not None
        try:
            app_id = app_model.id
            user_id = current_user.id
            response = AppGenerateService.generate(
                app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.EXPLORE, streaming=True
            )
            RecommendedAppService.add_trial_app_record(app_id, user_id)
            return helper.compact_generate_response(response)
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except InvokeRateLimitError as ex:
            raise InvokeRateLimitHttpError(ex.description)
        except ValueError as e:
            raise e
        except Exception:
            logger.exception("internal server error.")
            raise InternalServerError()


class TrialAppWorkflowTaskStopApi(TrialAppResource):
    def post(self, trial_app, task_id: str):
        """
        Stop workflow task
        """
        app_model = trial_app
        if not app_model:
            raise NotWorkflowAppError()
        app_mode = AppMode.value_of(app_model.mode)
        if app_mode != AppMode.WORKFLOW:
            raise NotWorkflowAppError()
        assert current_user is not None

        # Stop using both mechanisms for backward compatibility
        # Legacy stop flag mechanism (without user check)
        AppQueueManager.set_stop_flag_no_user_check(task_id)

        # New graph engine command channel mechanism
        GraphEngineManager.send_stop_command(task_id)

        return {"result": "success"}


class TrialChatApi(TrialAppResource):
    @trial_feature_enable
    def post(self, trial_app):
        app_model = trial_app
        app_mode = AppMode.value_of(app_model.mode)
        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
            raise NotChatAppError()

        parser = reqparse.RequestParser()
        parser.add_argument("inputs", type=dict, required=True, location="json")
        parser.add_argument("query", type=str, required=True, location="json")
        parser.add_argument("files", type=list, required=False, location="json")
        parser.add_argument("conversation_id", type=uuid_value, location="json")
        parser.add_argument("parent_message_id", type=uuid_value, required=False, location="json")
        parser.add_argument("retriever_from", type=str, required=False, default="explore_app", location="json")
        args = parser.parse_args()

        args["auto_generate_name"] = False

        try:
            if not isinstance(current_user, Account):
                raise ValueError("current_user must be an Account instance")

            # Get IDs before they might be detached from session
            app_id = app_model.id
            user_id = current_user.id

            response = AppGenerateService.generate(
                app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.EXPLORE, streaming=True
            )
            RecommendedAppService.add_trial_app_record(app_id, user_id)
            return helper.compact_generate_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logger.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except InvokeRateLimitError as ex:
            raise InvokeRateLimitHttpError(ex.description)
        except ValueError as e:
            raise e
        except Exception:
            logger.exception("internal server error.")
            raise InternalServerError()


class TrialMessageSuggestedQuestionApi(TrialAppResource):
    @trial_feature_enable
    def get(self, trial_app, message_id):
        app_model = trial_app
        app_mode = AppMode.value_of(app_model.mode)
        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
            raise NotChatAppError()

        message_id = str(message_id)

        try:
            if not isinstance(current_user, Account):
                raise ValueError("current_user must be an Account instance")
            questions = MessageService.get_suggested_questions_after_answer(
                app_model=app_model, user=current_user, message_id=message_id, invoke_from=InvokeFrom.EXPLORE
            )
        except MessageNotExistsError:
            raise NotFound("Message not found")
        except ConversationNotExistsError:
            raise NotFound("Conversation not found")
        except SuggestedQuestionsAfterAnswerDisabledError:
            raise AppSuggestedQuestionsAfterAnswerDisabledError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except Exception:
            logger.exception("internal server error.")
            raise InternalServerError()

        return {"data": questions}


class TrialChatAudioApi(TrialAppResource):
    @trial_feature_enable
    def post(self, trial_app):
        app_model = trial_app

        file = request.files["file"]

        try:
            if not isinstance(current_user, Account):
                raise ValueError("current_user must be an Account instance")

            # Get IDs before they might be detached from session
            app_id = app_model.id
            user_id = current_user.id

            response = AudioService.transcript_asr(app_model=app_model, file=file, end_user=None)
            RecommendedAppService.add_trial_app_record(app_id, user_id)
            return response
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logger.exception("App model config broken.")
            raise AppUnavailableError()
        except NoAudioUploadedServiceError:
            raise NoAudioUploadedError()
        except AudioTooLargeServiceError as e:
            raise AudioTooLargeError(str(e))
        except UnsupportedAudioTypeServiceError:
            raise UnsupportedAudioTypeError()
        except ProviderNotSupportSpeechToTextServiceError:
            raise ProviderNotSupportSpeechToTextError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError as e:
            raise e
        except Exception as e:
            logger.exception("internal server error.")
            raise InternalServerError()


class TrialChatTextApi(TrialAppResource):
    @trial_feature_enable
    def post(self, trial_app):
        app_model = trial_app
        try:
            parser = reqparse.RequestParser()
            parser.add_argument("message_id", type=str, required=False, location="json")
            parser.add_argument("voice", type=str, location="json")
            parser.add_argument("text", type=str, location="json")
            parser.add_argument("streaming", type=bool, location="json")
            args = parser.parse_args()

            message_id = args.get("message_id", None)
            text = args.get("text", None)
            voice = args.get("voice", None)
            if not isinstance(current_user, Account):
                raise ValueError("current_user must be an Account instance")

            # Get IDs before they might be detached from session
            app_id = app_model.id
            user_id = current_user.id

            response = AudioService.transcript_tts(app_model=app_model, text=text, voice=voice, message_id=message_id)
            RecommendedAppService.add_trial_app_record(app_id, user_id)
            return response
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logger.exception("App model config broken.")
            raise AppUnavailableError()
        except NoAudioUploadedServiceError:
            raise NoAudioUploadedError()
        except AudioTooLargeServiceError as e:
            raise AudioTooLargeError(str(e))
        except UnsupportedAudioTypeServiceError:
            raise UnsupportedAudioTypeError()
        except ProviderNotSupportSpeechToTextServiceError:
            raise ProviderNotSupportSpeechToTextError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError as e:
            raise e
        except Exception as e:
            logger.exception("internal server error.")
            raise InternalServerError()


class TrialCompletionApi(TrialAppResource):
    @trial_feature_enable
    def post(self, trial_app):
        app_model = trial_app
        if app_model.mode != "completion":
            raise NotCompletionAppError()

        parser = reqparse.RequestParser()
        parser.add_argument("inputs", type=dict, required=True, location="json")
        parser.add_argument("query", type=str, location="json", default="")
        parser.add_argument("files", type=list, required=False, location="json")
        parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json")
        parser.add_argument("retriever_from", type=str, required=False, default="explore_app", location="json")
        args = parser.parse_args()

        streaming = args["response_mode"] == "streaming"
        args["auto_generate_name"] = False

        try:
            if not isinstance(current_user, Account):
                raise ValueError("current_user must be an Account instance")

            # Get IDs before they might be detached from session
            app_id = app_model.id
            user_id = current_user.id

            response = AppGenerateService.generate(
                app_model=app_model, user=current_user, args=args, invoke_from=InvokeFrom.EXPLORE, streaming=streaming
            )

            RecommendedAppService.add_trial_app_record(app_id, user_id)
            return helper.compact_generate_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logger.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError as e:
            raise e
        except Exception:
            logger.exception("internal server error.")
            raise InternalServerError()


class TrialSitApi(Resource):
    """Resource for trial app sites."""

    @trial_feature_enable
    @get_app_model_with_trial
    def get(self, app_model):
        """Retrieve app site info.

        Returns the site configuration for the application including theme, icons, and text.
        """
        site = db.session.query(Site).where(Site.app_id == app_model.id).first()

        if not site:
            raise Forbidden()

        assert app_model.tenant
        if app_model.tenant.status == TenantStatus.ARCHIVE:
            raise Forbidden()

        return SiteResponse.model_validate(site).model_dump(mode="json")


class TrialAppParameterApi(Resource):
    """Resource for app variables."""

    @trial_feature_enable
    @get_app_model_with_trial
    def get(self, app_model):
        """Retrieve app parameters."""

        if app_model is None:
            raise AppUnavailableError()

        if app_model.mode in {AppMode.ADVANCED_CHAT, AppMode.WORKFLOW}:
            workflow = app_model.workflow
            if workflow is None:
                raise AppUnavailableError()

            features_dict = workflow.features_dict
            user_input_form = workflow.user_input_form(to_old_structure=True)
        else:
            app_model_config = app_model.app_model_config
            if app_model_config is None:
                raise AppUnavailableError()

            features_dict = app_model_config.to_dict()

            user_input_form = features_dict.get("user_input_form", [])

        parameters = get_parameters_from_feature_dict(features_dict=features_dict, user_input_form=user_input_form)
        return ParametersResponse.model_validate(parameters).model_dump(mode="json")


class AppApi(Resource):
    @trial_feature_enable
    @get_app_model_with_trial
    @marshal_with(app_detail_fields_with_site)
    def get(self, app_model):
        """Get app detail"""

        app_service = AppService()
        app_model = app_service.get_app(app_model)

        return app_model


class AppWorkflowApi(Resource):
    @trial_feature_enable
    @get_app_model_with_trial
    @marshal_with(workflow_fields)
    def get(self, app_model):
        """Get workflow detail"""
        if not app_model.workflow_id:
            raise AppUnavailableError()

        workflow = (
            db.session.query(Workflow)
            .where(
                Workflow.id == app_model.workflow_id,
            )
            .first()
        )
        return workflow


class DatasetListApi(Resource):
    @trial_feature_enable
    @get_app_model_with_trial
    def get(self, app_model):
        page = request.args.get("page", default=1, type=int)
        limit = request.args.get("limit", default=20, type=int)
        ids = request.args.getlist("ids")

        tenant_id = app_model.tenant_id
        if ids:
            datasets, total = DatasetService.get_datasets_by_ids(ids, tenant_id)
        else:
            raise NeedAddIdsError()

        data = cast(list[dict[str, Any]], marshal(datasets, dataset_fields))

        response = {"data": data, "has_more": len(datasets) == limit, "limit": limit, "total": total, "page": page}
        return response


api.add_resource(TrialChatApi, "/trial-apps/<uuid:app_id>/chat-messages", endpoint="trial_app_chat_completion")

api.add_resource(
    TrialMessageSuggestedQuestionApi,
    "/trial-apps/<uuid:app_id>/messages/<uuid:message_id>/suggested-questions",
    endpoint="trial_app_suggested_question",
)

api.add_resource(TrialChatAudioApi, "/trial-apps/<uuid:app_id>/audio-to-text", endpoint="trial_app_audio")
api.add_resource(TrialChatTextApi, "/trial-apps/<uuid:app_id>/text-to-audio", endpoint="trial_app_text")

api.add_resource(TrialCompletionApi, "/trial-apps/<uuid:app_id>/completion-messages", endpoint="trial_app_completion")

api.add_resource(TrialSitApi, "/trial-apps/<uuid:app_id>/site")

api.add_resource(TrialAppParameterApi, "/trial-apps/<uuid:app_id>/parameters", endpoint="trial_app_parameters")

api.add_resource(AppApi, "/trial-apps/<uuid:app_id>", endpoint="trial_app")

api.add_resource(TrialAppWorkflowRunApi, "/trial-apps/<uuid:app_id>/workflows/run", endpoint="trial_app_workflow_run")
api.add_resource(TrialAppWorkflowTaskStopApi, "/trial-apps/<uuid:app_id>/workflows/tasks/<string:task_id>/stop")

api.add_resource(AppWorkflowApi, "/trial-apps/<uuid:app_id>/workflows", endpoint="trial_app_workflow")
api.add_resource(DatasetListApi, "/trial-apps/<uuid:app_id>/datasets", endpoint="trial_app_datasets")
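For orientation, a client call against the chat route registered above might look like the following; the host, console API prefix, and app ID are assumptions for illustration, and the logged-in console session these endpoints require is omitted:

```python
import requests

# Hypothetical host and URL prefix; authentication is intentionally left out.
url = "https://dify.example.com/console/api/trial-apps/<app-id>/chat-messages"
payload = {"inputs": {}, "query": "Hello"}  # the fields TrialChatApi's parser accepts

resp = requests.post(url, json=payload)
```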
@@ -2,15 +2,14 @@ from collections.abc import Callable
from functools import wraps
from typing import Concatenate, ParamSpec, TypeVar

from flask import abort
from flask_restx import Resource
from werkzeug.exceptions import NotFound

from controllers.console.explore.error import AppAccessDeniedError, TrialAppLimitExceeded, TrialAppNotAllowed
from controllers.console.explore.error import AppAccessDeniedError
from controllers.console.wraps import account_initialization_required
from extensions.ext_database import db
from libs.login import current_account_with_tenant, login_required
from models import AccountTrialAppRecord, App, InstalledApp, TrialApp
from models import InstalledApp
from services.enterprise.enterprise_service import EnterpriseService
from services.feature_service import FeatureService
@@ -72,61 +71,6 @@ def user_allowed_to_access_app(view: Callable[Concatenate[InstalledApp, P], R] | None = None):
    return decorator


def trial_app_required(view: Callable[Concatenate[App, P], R] | None = None):
    def decorator(view: Callable[Concatenate[App, P], R]):
        @wraps(view)
        def decorated(app_id: str, *args: P.args, **kwargs: P.kwargs):
            current_user, _ = current_account_with_tenant()

            trial_app = db.session.query(TrialApp).where(TrialApp.app_id == str(app_id)).first()

            if trial_app is None:
                raise TrialAppNotAllowed()
            app = trial_app.app

            if app is None:
                raise TrialAppNotAllowed()

            account_trial_app_record = (
                db.session.query(AccountTrialAppRecord)
                .where(AccountTrialAppRecord.account_id == current_user.id, AccountTrialAppRecord.app_id == app_id)
                .first()
            )
            if account_trial_app_record:
                if account_trial_app_record.count >= trial_app.trial_limit:
                    raise TrialAppLimitExceeded()

            return view(app, *args, **kwargs)

        return decorated

    if view:
        return decorator(view)
    return decorator


def trial_feature_enable(view: Callable[..., R]) -> Callable[..., R]:
    @wraps(view)
    def decorated(*args, **kwargs):
        features = FeatureService.get_system_features()
        if not features.enable_trial_app:
            abort(403, "Trial app feature is not enabled.")
        return view(*args, **kwargs)

    return decorated


def explore_banner_enabled(view: Callable[..., R]) -> Callable[..., R]:
    @wraps(view)
    def decorated(*args, **kwargs):
        features = FeatureService.get_system_features()
        if not features.enable_explore_banner:
            abort(403, "Explore banner feature is not enabled.")
        return view(*args, **kwargs)

    return decorated


class InstalledAppResource(Resource):
    # must be reversed if there are multiple decorators
@ -136,13 +80,3 @@ class InstalledAppResource(Resource):
|
||||
account_initialization_required,
|
||||
login_required,
|
||||
]
|
||||
|
||||
|
||||
class TrialAppResource(Resource):
|
||||
# must be reversed if there are multiple decorators
|
||||
|
||||
method_decorators = [
|
||||
trial_app_required,
|
||||
account_initialization_required,
|
||||
login_required,
|
||||
]
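
The removed trial_app_required decorator shows a gate-then-count shape: resolve the trial record, reject when no record exists, and reject again once the per-account counter reaches trial_limit. A minimal framework-free sketch of the same shape, with an in-memory store standing in for the SQLAlchemy queries (all names here are illustrative, not Dify's API):

from functools import wraps

# Illustrative in-memory stores; the real code queries TrialApp and
# AccountTrialAppRecord through the database session instead.
TRIAL_LIMITS = {"app-1": 3}
USAGE_COUNTS: dict[tuple[str, str], int] = {}


def usage_limited(view):
    @wraps(view)
    def decorated(account_id: str, app_id: str, *args, **kwargs):
        limit = TRIAL_LIMITS.get(app_id)
        if limit is None:
            raise PermissionError("app is not open for trial")
        if USAGE_COUNTS.get((account_id, app_id), 0) >= limit:
            raise PermissionError("trial limit exceeded")
        return view(account_id, app_id, *args, **kwargs)

    return decorated


@usage_limited
def chat(account_id: str, app_id: str) -> str:
    # Record the use only after the gate passed.
    USAGE_COUNTS[(account_id, app_id)] = USAGE_COUNTS.get((account_id, app_id), 0) + 1
    return "ok"
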
@ -3,7 +3,6 @@ from pydantic import BaseModel, Field, field_validator


class PreviewDetail(BaseModel):
    content: str
    summary: str | None = None
    child_chunks: list[str] | None = None
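
PreviewDetail is a plain pydantic model, so the removed summary field was just an optional attribute that the summary generators filled in after the fact. A quick illustration of the shape as it stood before this change:

from pydantic import BaseModel


class PreviewDetail(BaseModel):
    content: str
    summary: str | None = None
    child_chunks: list[str] | None = None


preview = PreviewDetail(content="chunk text", child_chunks=["child a", "child b"])
preview.summary = "one-line summary"  # attached later by the summary generator
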
@ -311,18 +311,14 @@ class IndexingRunner:
        qa_preview_texts: list[QAPreviewDetail] = []

        total_segments = 0
        # doc_form represents the segmentation method (general, parent-child, QA)
        index_type = doc_form
        index_processor = IndexProcessorFactory(index_type).init_index_processor()
        # one extract_setting is one source document
        for extract_setting in extract_settings:
            # extract
            processing_rule = DatasetProcessRule(
                mode=tmp_processing_rule["mode"], rules=json.dumps(tmp_processing_rule["rules"])
            )
            # Extract document content
            text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"])
            # Cleaning and segmentation
            documents = index_processor.transform(
                text_docs,
                current_user=None,
@ -365,12 +361,6 @@

        if doc_form and doc_form == "qa_model":
            return IndexingEstimate(total_segments=total_segments * 20, qa_preview=qa_preview_texts, preview=[])

        # Generate summary preview
        summary_index_setting = tmp_processing_rule.get("summary_index_setting")
        if summary_index_setting and summary_index_setting.get("enable") and preview_texts:
            preview_texts = index_processor.generate_summary_preview(tenant_id, preview_texts, summary_index_setting)

        return IndexingEstimate(total_segments=total_segments, preview=preview_texts)

    def _extract(
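
The estimate path above is worth spelling out: QA mode short-circuits with a rough total_segments * 20 figure (presumably because QA generation fans one source chunk out into many question-answer pairs), while the removed branch optionally enriched the normal preview with summaries before returning. A condensed, runnable sketch of that control flow, with a simplified IndexingEstimate standing in for the real model and a placeholder for the LLM step:

from pydantic import BaseModel


class IndexingEstimate(BaseModel):
    total_segments: int
    preview: list[str] = []
    qa_preview: list[str] = []


def estimate(doc_form: str, total_segments: int, preview: list[str],
             qa_preview: list[str], summary_setting: dict | None) -> IndexingEstimate:
    if doc_form == "qa_model":
        # Rough guess: QA generation multiplies the segment count.
        return IndexingEstimate(total_segments=total_segments * 20, qa_preview=qa_preview)
    if summary_setting and summary_setting.get("enable") and preview:
        # Placeholder for generate_summary_preview's LLM summarization.
        preview = [f"{p} [+summary]" for p in preview]
    return IndexingEstimate(total_segments=total_segments, preview=preview)
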
@ -434,20 +434,3 @@ INSTRUCTION_GENERATE_TEMPLATE_PROMPT = """The output of this prompt is not as ex
You should edit the prompt according to the IDEAL OUTPUT."""

INSTRUCTION_GENERATE_TEMPLATE_CODE = """Please fix the errors in the {{#error_message#}}."""

DEFAULT_GENERATOR_SUMMARY_PROMPT = (
    """Summarize the following content. Extract only the key information and main points. """
    """Remove redundant details.

Requirements:
1. Write a concise summary in plain text
2. Use the same language as the input content
3. Focus on important facts, concepts, and details
4. If images are included, describe their key information
5. Do not use words like "好的", "ok", "I understand", "This text discusses", "The content mentions"
6. Write directly without extra words

Output only the summary text. Start summarizing now:

"""
)
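
Downstream, this constant is used as a plain prefix: the removed generate_summary falls back to it when the dataset has no custom prompt and concatenates it with the chunk text before invoking the LLM. Roughly:

DEFAULT_GENERATOR_SUMMARY_PROMPT = "Summarize the following content. ..."  # abridged


def build_summary_prompt(text: str, custom_prompt: str | None = None) -> str:
    # Mirrors the fallback in the removed generate_summary(): use the custom
    # prompt if configured, otherwise the default constant, then the chunk text.
    summary_prompt = custom_prompt or DEFAULT_GENERATOR_SUMMARY_PROMPT
    return f"{summary_prompt}\n{text}"
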
@ -389,14 +389,15 @@ class RetrievalService:
            .all()
        }

        records = []
        include_segment_ids = set()
        segment_child_map = {}

        valid_dataset_documents = {}
        image_doc_ids: list[Any] = []
        child_index_node_ids = []
        index_node_ids = []
        doc_to_document_map = {}
        summary_segment_ids = set()  # Track segments retrieved via summary

        # First pass: collect all document IDs and identify summary documents
        for document in documents:
            document_id = document.metadata.get("document_id")
            if document_id not in dataset_documents:
@ -407,24 +408,16 @@
                continue
            valid_dataset_documents[document_id] = dataset_document

            doc_id = document.metadata.get("doc_id") or ""
            doc_to_document_map[doc_id] = document

            # Check if this is a summary document
            is_summary = document.metadata.get("is_summary", False)
            if is_summary:
                # For summary documents, find the original chunk via original_chunk_id
                original_chunk_id = document.metadata.get("original_chunk_id")
                if original_chunk_id:
                    summary_segment_ids.add(original_chunk_id)
                continue  # Skip adding to other lists for summary documents

            if dataset_document.doc_form == IndexStructureType.PARENT_CHILD_INDEX:
                doc_id = document.metadata.get("doc_id") or ""
                doc_to_document_map[doc_id] = document
                if document.metadata.get("doc_type") == DocType.IMAGE:
                    image_doc_ids.append(doc_id)
                else:
                    child_index_node_ids.append(doc_id)
            else:
                doc_id = document.metadata.get("doc_id") or ""
                doc_to_document_map[doc_id] = document
                if document.metadata.get("doc_type") == DocType.IMAGE:
                    image_doc_ids.append(doc_id)
                else:
@ -440,7 +433,6 @@
        attachment_map: dict[str, list[dict[str, Any]]] = {}
        child_chunk_map: dict[str, list[ChildChunk]] = {}
        doc_segment_map: dict[str, list[str]] = {}
        segment_summary_map: dict[str, str] = {}  # Map segment_id to summary content

        with session_factory.create_session() as session:
            attachments = cls.get_segment_attachment_infos(image_doc_ids, session)
@ -455,7 +447,6 @@
                    doc_segment_map[attachment["segment_id"]].append(attachment["attachment_id"])
                else:
                    doc_segment_map[attachment["segment_id"]] = [attachment["attachment_id"]]

            child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id.in_(child_index_node_ids))
            child_index_nodes = session.execute(child_chunk_stmt).scalars().all()

@ -479,7 +470,6 @@
            index_node_segments = session.execute(document_segment_stmt).scalars().all()  # type: ignore
            for index_node_segment in index_node_segments:
                doc_segment_map[index_node_segment.id] = [index_node_segment.index_node_id]

            if segment_ids:
                document_segment_stmt = select(DocumentSegment).where(
                    DocumentSegment.enabled == True,
@ -491,42 +481,6 @@
                if index_node_segments:
                    segments.extend(index_node_segments)

            # Handle summary documents: query segments by original_chunk_id
            if summary_segment_ids:
                summary_segment_ids_list = list(summary_segment_ids)
                summary_segment_stmt = select(DocumentSegment).where(
                    DocumentSegment.enabled == True,
                    DocumentSegment.status == "completed",
                    DocumentSegment.id.in_(summary_segment_ids_list),
                )
                summary_segments = session.execute(summary_segment_stmt).scalars().all()  # type: ignore
                segments.extend(summary_segments)
                # Add summary segment IDs to segment_ids for summary query
                for seg in summary_segments:
                    if seg.id not in segment_ids:
                        segment_ids.append(seg.id)

            # Batch query summaries for segments retrieved via summary (only enabled summaries)
            if summary_segment_ids:
                from models.dataset import DocumentSegmentSummary

                summaries = (
                    session.query(DocumentSegmentSummary)
                    .filter(
                        DocumentSegmentSummary.chunk_id.in_(list(summary_segment_ids)),
                        DocumentSegmentSummary.status == "completed",
                        DocumentSegmentSummary.enabled == True,  # Only retrieve enabled summaries
                    )
                    .all()
                )
                for summary in summaries:
                    if summary.summary_content:
                        segment_summary_map[summary.chunk_id] = summary.summary_content

        include_segment_ids = set()
        segment_child_map: dict[str, dict[str, Any]] = {}
        records: list[dict[str, Any]] = []

        for segment in segments:
            child_chunks: list[ChildChunk] = child_chunk_map.get(segment.id, [])
            attachment_infos: list[dict[str, Any]] = attachment_map.get(segment.id, [])
@ -539,7 +493,7 @@
                    child_chunk_details = []
                    max_score = 0.0
                    for child_chunk in child_chunks:
                        document = doc_to_document_map.get(child_chunk.index_node_id)
                        document = doc_to_document_map[child_chunk.index_node_id]
                        child_chunk_detail = {
                            "id": child_chunk.id,
                            "content": child_chunk.content,
@ -549,7 +503,7 @@
                        child_chunk_details.append(child_chunk_detail)
                        max_score = max(max_score, document.metadata.get("score", 0.0) if document else 0.0)
                    for attachment_info in attachment_infos:
                        file_document = doc_to_document_map.get(attachment_info["id"])
                        file_document = doc_to_document_map[attachment_info["id"]]
                        max_score = max(
                            max_score, file_document.metadata.get("score", 0.0) if file_document else 0.0
                        )
@ -622,16 +576,9 @@
                    else None
                )

                # Extract summary if this segment was retrieved via summary
                summary_content = segment_summary_map.get(segment.id)

                # Create RetrievalSegments object
                retrieval_segment = RetrievalSegments(
                    segment=segment,
                    child_chunks=child_chunks_list,
                    score=score,
                    files=files,
                    summary=summary_content,
                retrieval_segment = RetrievalSegments(
                    segment=segment, child_chunks=child_chunks_list, score=score, files=files
                )
                result.append(retrieval_segment)
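
The removed logic hinges on two metadata keys: a hit whose is_summary flag is set is never returned directly; instead its original_chunk_id points at the real segment, which is fetched and annotated with the stored summary. A stripped-down, runnable sketch of that routing, using plain dicts in place of the vector-store documents:

def route_hits(hits: list[dict]) -> tuple[list[str], set[str]]:
    """Split retrieval hits into direct doc ids and summary-matched chunk ids."""
    direct_doc_ids: list[str] = []
    summary_segment_ids: set[str] = set()
    for hit in hits:
        meta = hit["metadata"]
        if meta.get("is_summary") and meta.get("original_chunk_id"):
            # The hit is a summary vector; surface the chunk it summarizes.
            summary_segment_ids.add(meta["original_chunk_id"])
        else:
            direct_doc_ids.append(meta.get("doc_id") or "")
    return direct_doc_ids, summary_segment_ids


hits = [
    {"metadata": {"doc_id": "n1"}},
    {"metadata": {"is_summary": True, "original_chunk_id": "seg-42"}},
]
print(route_hits(hits))  # (['n1'], {'seg-42'})
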
@ -20,4 +20,3 @@ class RetrievalSegments(BaseModel):
    child_chunks: list[RetrievalChildChunk] | None = None
    score: float | None = None
    files: list[dict[str, str | int]] | None = None
    summary: str | None = None  # Summary content if retrieved via summary index

@ -13,7 +13,6 @@ from urllib.parse import unquote, urlparse
import httpx

from configs import dify_config
from core.entities.knowledge_entities import PreviewDetail
from core.helper import ssrf_proxy
from core.rag.extractor.entity.extract_setting import ExtractSetting
from core.rag.index_processor.constant.doc_type import DocType
@ -46,17 +45,6 @@ class BaseIndexProcessor(ABC):
    def transform(self, documents: list[Document], current_user: Account | None = None, **kwargs) -> list[Document]:
        raise NotImplementedError

    @abstractmethod
    def generate_summary_preview(
        self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict
    ) -> list[PreviewDetail]:
        """
        For each segment in preview_texts, generate a summary using LLM and attach it to the segment.
        The summary can be stored in a new attribute, e.g., summary.
        This method should be implemented by subclasses.
        """
        raise NotImplementedError

    @abstractmethod
    def load(
        self,
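
The abstract method removed above is a straightforward template-method contract: every processor must accept the preview list and return it with summaries attached, even if that means returning it untouched. A minimal sketch of a conforming subclass, with a simplified base class and illustrative names:

from abc import ABC, abstractmethod


class BaseIndexProcessor(ABC):
    @abstractmethod
    def generate_summary_preview(self, tenant_id: str, preview_texts: list, summary_index_setting: dict) -> list:
        raise NotImplementedError


class NoOpProcessor(BaseIndexProcessor):
    def generate_summary_preview(self, tenant_id, preview_texts, summary_index_setting):
        # Mirrors the QA processor further below: nothing to summarize,
        # so the contract is satisfied by returning the list unchanged.
        return preview_texts
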
@ -1,25 +1,9 @@
"""Paragraph index processor."""

import logging
import re
import uuid
from collections.abc import Mapping
from typing import Any

logger = logging.getLogger(__name__)

from core.entities.knowledge_entities import PreviewDetail
from core.file import File, FileTransferMethod, FileType, file_manager
from core.llm_generator.prompts import DEFAULT_GENERATOR_SUMMARY_PROMPT
from core.model_manager import ModelInstance
from core.model_runtime.entities.message_entities import (
    ImagePromptMessageContent,
    PromptMessageContentUnionTypes,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ModelFeature, ModelType
from core.provider_manager import ProviderManager
from core.rag.cleaner.clean_processor import CleanProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.retrieval_service import RetrievalService
@ -33,16 +17,12 @@ from core.rag.index_processor.index_processor_base import BaseIndexProcessor
from core.rag.models.document import AttachmentDocument, Document, MultimodalGeneralStructureChunk
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from core.tools.utils.text_processing_utils import remove_leading_symbols
from extensions.ext_database import db
from factories.file_factory import build_from_mapping
from libs import helper
from models import UploadFile
from models.account import Account
from models.dataset import Dataset, DatasetProcessRule, DocumentSegment, SegmentAttachmentBinding
from models.dataset import Dataset, DatasetProcessRule
from models.dataset import Document as DatasetDocument
from services.account_service import AccountService
from services.entities.knowledge_entities.knowledge_entities import Rule
from services.summary_index_service import SummaryIndexService


class ParagraphIndexProcessor(BaseIndexProcessor):
@ -128,29 +108,6 @@ class ParagraphIndexProcessor(BaseIndexProcessor):
            keyword.add_texts(documents)

    def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs):
        # Note: Summary indexes are now disabled (not deleted) when segments are disabled.
        # This method is called for actual deletion scenarios (e.g., when segment is deleted).
        # For disable operations, disable_summaries_for_segments is called directly in the task.
        # Only delete summaries if explicitly requested (e.g., when segment is actually deleted)
        delete_summaries = kwargs.get("delete_summaries", False)
        if delete_summaries:
            if node_ids:
                # Find segments by index_node_id
                segments = (
                    db.session.query(DocumentSegment)
                    .filter(
                        DocumentSegment.dataset_id == dataset.id,
                        DocumentSegment.index_node_id.in_(node_ids),
                    )
                    .all()
                )
                segment_ids = [segment.id for segment in segments]
                if segment_ids:
                    SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids)
            else:
                # Delete all summaries for the dataset
                SummaryIndexService.delete_summaries_for_segments(dataset, None)

        if dataset.indexing_technique == "high_quality":
            vector = Vector(dataset)
            if node_ids:
@ -270,303 +227,3 @@ class ParagraphIndexProcessor(BaseIndexProcessor):
            }
        else:
            raise ValueError("Chunks is not a list")

    def generate_summary_preview(
        self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict
    ) -> list[PreviewDetail]:
        """
        For each segment, concurrently call generate_summary to generate a summary
        and write it to the summary attribute of PreviewDetail.
        In preview mode (indexing-estimate), if any summary generation fails, the method will raise an exception.
        """
        import concurrent.futures

        from flask import current_app

        # Capture Flask app context for worker threads
        flask_app = None
        try:
            flask_app = current_app._get_current_object()  # type: ignore
        except RuntimeError:
            logger.warning("No Flask application context available, summary generation may fail")

        def process(preview: PreviewDetail) -> None:
            """Generate summary for a single preview item."""
            if flask_app:
                # Ensure Flask app context in worker thread
                with flask_app.app_context():
                    summary = self.generate_summary(tenant_id, preview.content, summary_index_setting)
                    preview.summary = summary
            else:
                # Fallback: try without app context (may fail)
                summary = self.generate_summary(tenant_id, preview.content, summary_index_setting)
                preview.summary = summary

        # Generate summaries concurrently using ThreadPoolExecutor
        # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total)
        timeout_seconds = min(300, 60 * len(preview_texts))
        errors: list[Exception] = []

        with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_texts))) as executor:
            futures = [
                executor.submit(process, preview)
                for preview in preview_texts
            ]
            # Wait for all tasks to complete with timeout
            done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds)

            # Cancel tasks that didn't complete in time
            if not_done:
                timeout_error_msg = (
                    f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s"
                )
                logger.warning("%s. Cancelling remaining tasks...", timeout_error_msg)
                # In preview mode, timeout is also an error
                errors.append(TimeoutError(timeout_error_msg))
                for future in not_done:
                    future.cancel()
                # Wait a bit for cancellation to take effect
                concurrent.futures.wait(not_done, timeout=5)

            # Collect exceptions from completed futures
            for future in done:
                try:
                    future.result()  # This will raise any exception that occurred
                except Exception as e:
                    logger.exception("Error in summary generation future")
                    errors.append(e)

        # In preview mode (indexing-estimate), if there are any errors, fail the request
        if errors:
            error_messages = [str(e) for e in errors]
            error_summary = (
                f"Failed to generate summaries for {len(errors)} chunk(s). "
                f"Errors: {'; '.join(error_messages[:3])}"  # Show first 3 errors
            )
            if len(errors) > 3:
                error_summary += f" (and {len(errors) - 3} more)"
            logger.error("Summary generation failed in preview mode: %s", error_summary)
            raise ValueError(error_summary)

        return preview_texts

    @staticmethod
    def generate_summary(
        tenant_id: str,
        text: str,
        summary_index_setting: dict | None = None,
        segment_id: str | None = None,
    ) -> str:
        """
        Generate summary for the given text using ModelInstance.invoke_llm and the default or custom summary prompt,
        and supports vision models by including images from the segment attachments or text content.

        Args:
            tenant_id: Tenant ID
            text: Text content to summarize
            summary_index_setting: Summary index configuration
            segment_id: Optional segment ID to fetch attachments from SegmentAttachmentBinding table
        """
        if not summary_index_setting or not summary_index_setting.get("enable"):
            raise ValueError("summary_index_setting is required and must be enabled to generate summary.")

        model_name = summary_index_setting.get("model_name")
        model_provider_name = summary_index_setting.get("model_provider_name")
        summary_prompt = summary_index_setting.get("summary_prompt")

        # Import default summary prompt
        if not summary_prompt:
            summary_prompt = DEFAULT_GENERATOR_SUMMARY_PROMPT

        provider_manager = ProviderManager()
        provider_model_bundle = provider_manager.get_provider_model_bundle(
            tenant_id, model_provider_name, ModelType.LLM
        )
        model_instance = ModelInstance(provider_model_bundle, model_name)

        # Get model schema to check if vision is supported
        model_schema = model_instance.model_type_instance.get_model_schema(model_name, model_instance.credentials)
        supports_vision = model_schema and model_schema.features and ModelFeature.VISION in model_schema.features

        # Extract images if model supports vision
        image_files = []
        if supports_vision:
            # First, try to get images from SegmentAttachmentBinding (preferred method)
            if segment_id:
                image_files = ParagraphIndexProcessor._extract_images_from_segment_attachments(tenant_id, segment_id)

            # If no images from attachments, fall back to extracting from text
            if not image_files:
                image_files = ParagraphIndexProcessor._extract_images_from_text(tenant_id, text)

        # Build prompt messages
        prompt_messages = []

        if image_files:
            # If we have images, create a UserPromptMessage with both text and images
            prompt_message_contents: list[PromptMessageContentUnionTypes] = []

            # Add images first
            for file in image_files:
                try:
                    file_content = file_manager.to_prompt_message_content(
                        file, image_detail_config=ImagePromptMessageContent.DETAIL.LOW
                    )
                    prompt_message_contents.append(file_content)
                except Exception as e:
                    logger.warning("Failed to convert image file to prompt message content: %s", str(e))
                    continue

            # Add text content
            if prompt_message_contents:  # Only add text if we successfully added images
                prompt_message_contents.append(TextPromptMessageContent(data=f"{summary_prompt}\n{text}"))
                prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
            else:
                # If image conversion failed, fall back to text-only
                prompt = f"{summary_prompt}\n{text}"
                prompt_messages.append(UserPromptMessage(content=prompt))
        else:
            # No images, use simple text prompt
            prompt = f"{summary_prompt}\n{text}"
            prompt_messages.append(UserPromptMessage(content=prompt))

        result = model_instance.invoke_llm(prompt_messages=prompt_messages, model_parameters={}, stream=False)

        return getattr(result.message, "content", "")

    @staticmethod
    def _extract_images_from_text(tenant_id: str, text: str) -> list[File]:
        """
        Extract images from markdown text and convert them to File objects.

        Args:
            tenant_id: Tenant ID
            text: Text content that may contain markdown image links

        Returns:
            List of File objects representing images found in the text
        """
        # Extract markdown images using regex pattern
        pattern = r"!\[.*?\]\((.*?)\)"
        images = re.findall(pattern, text)

        if not images:
            return []

        upload_file_id_list = []

        for image in images:
            # For data before v0.10.0
            pattern = r"/files/([a-f0-9\-]+)/image-preview(?:\?.*?)?"
            match = re.search(pattern, image)
            if match:
                upload_file_id = match.group(1)
                upload_file_id_list.append(upload_file_id)
                continue

            # For data after v0.10.0
            pattern = r"/files/([a-f0-9\-]+)/file-preview(?:\?.*?)?"
            match = re.search(pattern, image)
            if match:
                upload_file_id = match.group(1)
                upload_file_id_list.append(upload_file_id)
                continue

            # For tools directory - direct file formats (e.g., .png, .jpg, etc.)
            pattern = r"/files/tools/([a-f0-9\-]+)\.([a-zA-Z0-9]+)(?:\?[^\s\)\"\']*)?"
            match = re.search(pattern, image)
            if match:
                # Tool files are handled differently, skip for now
                continue

        if not upload_file_id_list:
            return []

        # Get unique IDs for database query
        unique_upload_file_ids = list(set(upload_file_id_list))
        upload_files = (
            db.session.query(UploadFile)
            .where(UploadFile.id.in_(unique_upload_file_ids), UploadFile.tenant_id == tenant_id)
            .all()
        )

        # Create File objects from UploadFile records
        file_objects = []
        for upload_file in upload_files:
            # Only process image files
            if not upload_file.mime_type or "image" not in upload_file.mime_type:
                continue

            mapping = {
                "upload_file_id": upload_file.id,
                "transfer_method": FileTransferMethod.LOCAL_FILE.value,
                "type": FileType.IMAGE.value,
            }

            try:
                file_obj = build_from_mapping(
                    mapping=mapping,
                    tenant_id=tenant_id,
                )
                file_objects.append(file_obj)
            except Exception as e:
                logger.warning("Failed to create File object from UploadFile %s: %s", upload_file.id, str(e))
                continue

        return file_objects

    @staticmethod
    def _extract_images_from_segment_attachments(tenant_id: str, segment_id: str) -> list[File]:
        """
        Extract images from SegmentAttachmentBinding table (preferred method).
        This matches how DatasetRetrieval gets segment attachments.

        Args:
            tenant_id: Tenant ID
            segment_id: Segment ID to fetch attachments for

        Returns:
            List of File objects representing images found in segment attachments
        """
        from sqlalchemy import select

        # Query attachments from SegmentAttachmentBinding table
        attachments_with_bindings = db.session.execute(
            select(SegmentAttachmentBinding, UploadFile)
            .join(UploadFile, UploadFile.id == SegmentAttachmentBinding.attachment_id)
            .where(
                SegmentAttachmentBinding.segment_id == segment_id,
                SegmentAttachmentBinding.tenant_id == tenant_id,
            )
        ).all()

        if not attachments_with_bindings:
            return []

        file_objects = []
        for _, upload_file in attachments_with_bindings:
            # Only process image files
            if not upload_file.mime_type or "image" not in upload_file.mime_type:
                continue

            try:
                # Create File object directly (similar to DatasetRetrieval)
                file_obj = File(
                    id=upload_file.id,
                    filename=upload_file.name,
                    extension="." + upload_file.extension,
                    mime_type=upload_file.mime_type,
                    tenant_id=tenant_id,
                    type=FileType.IMAGE,
                    transfer_method=FileTransferMethod.LOCAL_FILE,
                    remote_url=upload_file.source_url,
                    related_id=upload_file.id,
                    size=upload_file.size,
                    storage_key=upload_file.key,
                )
                file_objects.append(file_obj)
            except Exception as e:
                logger.warning("Failed to create File object from UploadFile %s: %s", upload_file.id, str(e))
                continue

        return file_objects
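
The removed _extract_images_from_text walks markdown image links and recognizes three URL shapes: /files/<uuid>/image-preview (data before v0.10.0), /files/<uuid>/file-preview (data after v0.10.0), and /files/tools/... (skipped). A runnable sketch of just the ID extraction, using the same two capturing patterns:

import re

MARKDOWN_IMAGE = r"!\[.*?\]\((.*?)\)"
ID_PATTERNS = [
    r"/files/([a-f0-9\-]+)/image-preview(?:\?.*?)?",  # data before v0.10.0
    r"/files/([a-f0-9\-]+)/file-preview(?:\?.*?)?",   # data after v0.10.0
]


def extract_upload_file_ids(text: str) -> list[str]:
    ids: list[str] = []
    for url in re.findall(MARKDOWN_IMAGE, text):
        for pattern in ID_PATTERNS:
            match = re.search(pattern, url)
            if match:
                ids.append(match.group(1))
                break
    return ids


sample = "intro ![img](/files/123e4567-e89b-12d3-a456-426614174000/file-preview?t=1) outro"
print(extract_upload_file_ids(sample))  # ['123e4567-e89b-12d3-a456-426614174000']
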
@ -1,13 +1,11 @@
"""Paragraph index processor."""

import json
import logging
import uuid
from collections.abc import Mapping
from typing import Any

from configs import dify_config
from core.entities.knowledge_entities import PreviewDetail
from core.model_manager import ModelInstance
from core.rag.cleaner.clean_processor import CleanProcessor
from core.rag.datasource.retrieval_service import RetrievalService
@ -27,9 +25,6 @@ from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegm
from models.dataset import Document as DatasetDocument
from services.account_service import AccountService
from services.entities.knowledge_entities.knowledge_entities import ParentMode, Rule
from services.summary_index_service import SummaryIndexService

logger = logging.getLogger(__name__)


class ParentChildIndexProcessor(BaseIndexProcessor):
@ -140,29 +135,6 @@ class ParentChildIndexProcessor(BaseIndexProcessor):

    def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs):
        # node_ids is segment's node_ids
        # Note: Summary indexes are now disabled (not deleted) when segments are disabled.
        # This method is called for actual deletion scenarios (e.g., when segment is deleted).
        # For disable operations, disable_summaries_for_segments is called directly in the task.
        # Only delete summaries if explicitly requested (e.g., when segment is actually deleted)
        delete_summaries = kwargs.get("delete_summaries", False)
        if delete_summaries:
            if node_ids:
                # Find segments by index_node_id
                segments = (
                    db.session.query(DocumentSegment)
                    .filter(
                        DocumentSegment.dataset_id == dataset.id,
                        DocumentSegment.index_node_id.in_(node_ids),
                    )
                    .all()
                )
                segment_ids = [segment.id for segment in segments]
                if segment_ids:
                    SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids)
            else:
                # Delete all summaries for the dataset
                SummaryIndexService.delete_summaries_for_segments(dataset, None)

        if dataset.indexing_technique == "high_quality":
            delete_child_chunks = kwargs.get("delete_child_chunks") or False
            precomputed_child_node_ids = kwargs.get("precomputed_child_node_ids")
@ -354,93 +326,3 @@ class ParentChildIndexProcessor(BaseIndexProcessor):
            "preview": preview,
            "total_segments": len(parent_childs.parent_child_chunks),
        }

    def generate_summary_preview(
        self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict
    ) -> list[PreviewDetail]:
        """
        For each parent chunk in preview_texts, concurrently call generate_summary to generate a summary
        and write it to the summary attribute of PreviewDetail.
        In preview mode (indexing-estimate), if any summary generation fails, the method will raise an exception.

        Note: For parent-child structure, we only generate summaries for parent chunks.
        """
        import concurrent.futures

        from flask import current_app

        # Capture Flask app context for worker threads
        flask_app = None
        try:
            flask_app = current_app._get_current_object()  # type: ignore
        except RuntimeError:
            logger.warning("No Flask application context available, summary generation may fail")

        def process(preview: PreviewDetail) -> None:
            """Generate summary for a single preview item (parent chunk)."""
            from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor
            if flask_app:
                # Ensure Flask app context in worker thread
                with flask_app.app_context():
                    summary = ParagraphIndexProcessor.generate_summary(
                        tenant_id=tenant_id,
                        text=preview.content,
                        summary_index_setting=summary_index_setting,
                    )
                    preview.summary = summary
            else:
                # Fallback: try without app context (may fail)
                summary = ParagraphIndexProcessor.generate_summary(
                    tenant_id=tenant_id,
                    text=preview.content,
                    summary_index_setting=summary_index_setting,
                )
                preview.summary = summary

        # Generate summaries concurrently using ThreadPoolExecutor
        # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total)
        timeout_seconds = min(300, 60 * len(preview_texts))
        errors: list[Exception] = []

        with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_texts))) as executor:
            futures = [
                executor.submit(process, preview)
                for preview in preview_texts
            ]
            # Wait for all tasks to complete with timeout
            done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds)

            # Cancel tasks that didn't complete in time
            if not_done:
                timeout_error_msg = (
                    f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s"
                )
                logger.warning("%s. Cancelling remaining tasks...", timeout_error_msg)
                # In preview mode, timeout is also an error
                errors.append(TimeoutError(timeout_error_msg))
                for future in not_done:
                    future.cancel()
                # Wait a bit for cancellation to take effect
                concurrent.futures.wait(not_done, timeout=5)

            # Collect exceptions from completed futures
            for future in done:
                try:
                    future.result()  # This will raise any exception that occurred
                except Exception as e:
                    logger.exception("Error in summary generation future")
                    errors.append(e)

        # In preview mode (indexing-estimate), if there are any errors, fail the request
        if errors:
            error_messages = [str(e) for e in errors]
            error_summary = (
                f"Failed to generate summaries for {len(errors)} chunk(s). "
                f"Errors: {'; '.join(error_messages[:3])}"  # Show first 3 errors
            )
            if len(errors) > 3:
                error_summary += f" (and {len(errors) - 3} more)"
            logger.error("Summary generation failed in preview mode: %s", error_summary)
            raise ValueError(error_summary)

        return preview_texts
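
Both processors share the same concurrency skeleton: submit one task per chunk, wait with a global timeout, cancel stragglers, then surface the collected errors as a single failure. The skeleton in isolation, runnable as-is, with a sleep standing in for the LLM round-trip:

import concurrent.futures
import time


def summarize(chunk: str) -> str:
    time.sleep(0.1)  # placeholder for the LLM call
    return chunk[:20]


chunks = ["first chunk", "second chunk"]
timeout_seconds = min(300, 60 * len(chunks))
errors: list[Exception] = []

with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(chunks))) as executor:
    futures = [executor.submit(summarize, chunk) for chunk in chunks]
    done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds)
    if not_done:
        errors.append(TimeoutError(f"{len(not_done)} chunks timed out"))
        for future in not_done:
            future.cancel()
    for future in done:
        try:
            future.result()
        except Exception as e:  # collect rather than raise mid-loop
            errors.append(e)

if errors:
    raise ValueError(f"failed to summarize {len(errors)} chunk(s)")
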
@ -11,7 +11,6 @@ import pandas as pd
from flask import Flask, current_app
from werkzeug.datastructures import FileStorage

from core.entities.knowledge_entities import PreviewDetail
from core.llm_generator.llm_generator import LLMGenerator
from core.rag.cleaner.clean_processor import CleanProcessor
from core.rag.datasource.retrieval_service import RetrievalService
@ -26,10 +25,9 @@ from core.rag.retrieval.retrieval_methods import RetrievalMethod
from core.tools.utils.text_processing_utils import remove_leading_symbols
from libs import helper
from models.account import Account
from models.dataset import Dataset, DocumentSegment
from models.dataset import Dataset
from models.dataset import Document as DatasetDocument
from services.entities.knowledge_entities.knowledge_entities import Rule
from services.summary_index_service import SummaryIndexService

logger = logging.getLogger(__name__)

@ -146,30 +144,6 @@ class QAIndexProcessor(BaseIndexProcessor):
        vector.create_multimodal(multimodal_documents)

    def clean(self, dataset: Dataset, node_ids: list[str] | None, with_keywords: bool = True, **kwargs):
        # Note: Summary indexes are now disabled (not deleted) when segments are disabled.
        # This method is called for actual deletion scenarios (e.g., when segment is deleted).
        # For disable operations, disable_summaries_for_segments is called directly in the task.
        # Note: qa_model doesn't generate summaries, but we clean them for completeness
        # Only delete summaries if explicitly requested (e.g., when segment is actually deleted)
        delete_summaries = kwargs.get("delete_summaries", False)
        if delete_summaries:
            if node_ids:
                # Find segments by index_node_id
                segments = (
                    db.session.query(DocumentSegment)
                    .filter(
                        DocumentSegment.dataset_id == dataset.id,
                        DocumentSegment.index_node_id.in_(node_ids),
                    )
                    .all()
                )
                segment_ids = [segment.id for segment in segments]
                if segment_ids:
                    SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids)
            else:
                # Delete all summaries for the dataset
                SummaryIndexService.delete_summaries_for_segments(dataset, None)

        vector = Vector(dataset)
        if node_ids:
            vector.delete_by_ids(node_ids)
@ -238,17 +212,6 @@
            "total_segments": len(qa_chunks.qa_chunks),
        }

    def generate_summary_preview(
        self, tenant_id: str, preview_texts: list[PreviewDetail], summary_index_setting: dict
    ) -> list[PreviewDetail]:
        """
        QA model doesn't generate summaries, so this method returns preview_texts unchanged.

        Note: QA model uses question-answer pairs, which don't require summary generation.
        """
        # QA model doesn't generate summaries, return as-is
        return preview_texts

    def _format_qa_document(self, flask_app: Flask, tenant_id: str, document_node, all_qa_documents, document_language):
        format_documents = []
        if document_node.page_content is None or not document_node.page_content.strip():

@ -62,21 +62,6 @@ class DocumentExtractorNode(Node[DocumentExtractorNodeData]):
        inputs = {"variable_selector": variable_selector}
        process_data = {"documents": value if isinstance(value, list) else [value]}

        # Ensure storage_key is loaded for File objects
        files_to_check = value if isinstance(value, list) else [value]
        files_needing_storage_key = [
            f for f in files_to_check if isinstance(f, File) and not f.storage_key and f.related_id
        ]
        if files_needing_storage_key:
            from sqlalchemy.orm import Session

            from extensions.ext_database import db
            from factories.file_factory import StorageKeyLoader

            with Session(bind=db.engine) as session:
                storage_key_loader = StorageKeyLoader(session, tenant_id=self.tenant_id)
                storage_key_loader.load_storage_keys(files_needing_storage_key)

        try:
            if isinstance(value, list):
                extracted_text_list = list(map(_extract_text_from_file, value))
@ -430,16 +415,6 @@ def _download_file_content(file: File) -> bytes:
            response.raise_for_status()
            return response.content
        else:
            # Check if storage_key is set
            if not file.storage_key:
                raise FileDownloadError(f"File storage_key is missing for file: {file.filename}")

            # Check if file exists before downloading
            from extensions.ext_storage import storage

            if not storage.exists(file.storage_key):
                raise FileDownloadError(f"File not found in storage: {file.storage_key}")

            return file_manager.download(file)
    except Exception as e:
        raise FileDownloadError(f"Error downloading file: {str(e)}") from e
@ -158,5 +158,3 @@ class KnowledgeIndexNodeData(BaseNodeData):
    type: str = "knowledge-index"
    chunk_structure: str
    index_chunk_variable_selector: list[str]
    indexing_technique: str | None = None
    summary_index_setting: dict | None = None

@ -1,11 +1,9 @@
import concurrent.futures
import datetime
import logging
import time
from collections.abc import Mapping
from typing import Any

from flask import current_app
from sqlalchemy import func, select

from core.app.entities.app_invoke_entities import InvokeFrom
@ -18,9 +16,7 @@ from core.workflow.nodes.base.node import Node
from core.workflow.nodes.base.template import Template
from core.workflow.runtime import VariablePool
from extensions.ext_database import db
from models.dataset import Dataset, Document, DocumentSegment, DocumentSegmentSummary
from services.summary_index_service import SummaryIndexService
from tasks.generate_summary_index_task import generate_summary_index_task
from models.dataset import Dataset, Document, DocumentSegment

from .entities import KnowledgeIndexNodeData
from .exc import (
@ -71,20 +67,7 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]):
        # index knowledge
        try:
            if is_preview:
                # Preview mode: generate summaries for chunks directly without saving to database
                # Format preview and generate summaries on-the-fly
                # Get indexing_technique and summary_index_setting from node_data (workflow graph config)
                # or fallback to dataset if not available in node_data
                indexing_technique = node_data.indexing_technique or dataset.indexing_technique
                summary_index_setting = node_data.summary_index_setting or dataset.summary_index_setting

                outputs = self._get_preview_output_with_summaries(
                    node_data.chunk_structure,
                    chunks,
                    dataset=dataset,
                    indexing_technique=indexing_technique,
                    summary_index_setting=summary_index_setting,
                )
                outputs = self._get_preview_output(node_data.chunk_structure, chunks)
                return NodeRunResult(
                    status=WorkflowNodeExecutionStatus.SUCCEEDED,
                    inputs=variables,
@ -180,9 +163,6 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]):

        db.session.commit()

        # Generate summary index if enabled
        self._handle_summary_index_generation(dataset, document, variable_pool)

        return {
            "dataset_id": ds_id_value,
            "dataset_name": dataset_name_value,
@ -193,307 +173,9 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]):
            "display_status": "completed",
        }

    def _handle_summary_index_generation(
        self,
        dataset: Dataset,
        document: Document,
        variable_pool: VariablePool,
    ) -> None:
        """
        Handle summary index generation based on mode (debug/preview or production).

        Args:
            dataset: Dataset containing the document
            document: Document to generate summaries for
            variable_pool: Variable pool to check invoke_from
        """
        # Only generate summary index for high_quality indexing technique
        if dataset.indexing_technique != "high_quality":
            return

        # Check if summary index is enabled
        summary_index_setting = dataset.summary_index_setting
        if not summary_index_setting or not summary_index_setting.get("enable"):
            return

        # Skip qa_model documents
        if document.doc_form == "qa_model":
            return

        # Determine if in preview/debug mode
        invoke_from = variable_pool.get(["sys", SystemVariableKey.INVOKE_FROM])
        is_preview = invoke_from and invoke_from.value == InvokeFrom.DEBUGGER

        # Determine if only parent chunks should be processed
        only_parent_chunks = dataset.chunk_structure == "parent_child_index"

        if is_preview:
            try:
                # Query segments that need summary generation
                query = db.session.query(DocumentSegment).filter_by(
                    dataset_id=dataset.id,
                    document_id=document.id,
                    status="completed",
                    enabled=True,
                )
                segments = query.all()

                if not segments:
                    logger.info("No segments found for document %s", document.id)
                    return

                # Filter segments based on mode
                segments_to_process = []
                for segment in segments:
                    # Skip if summary already exists
                    existing_summary = (
                        db.session.query(DocumentSegmentSummary)
                        .filter_by(chunk_id=segment.id, dataset_id=dataset.id, status="completed")
                        .first()
                    )
                    if existing_summary:
                        continue

                    # For parent-child mode, all segments are parent chunks, so process all
                    segments_to_process.append(segment)

                if not segments_to_process:
                    logger.info("No segments need summary generation for document %s", document.id)
                    return

                # Use ThreadPoolExecutor for concurrent generation
                flask_app = current_app._get_current_object()  # type: ignore
                max_workers = min(10, len(segments_to_process))  # Limit to 10 workers

                def process_segment(segment: DocumentSegment) -> None:
                    """Process a single segment in a thread with Flask app context."""
                    with flask_app.app_context():
                        try:
                            SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting)
                        except Exception:
                            logger.exception(
                                "Failed to generate summary for segment %s",
                                segment.id,
                            )
                            # Continue processing other segments

                with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
                    futures = [executor.submit(process_segment, segment) for segment in segments_to_process]
                    # Wait for all tasks to complete
                    concurrent.futures.wait(futures)

                logger.info(
                    "Successfully generated summary index for %s segments in document %s",
                    len(segments_to_process),
                    document.id,
                )
            except Exception:
                logger.exception("Failed to generate summary index for document %s", document.id)
                # Don't fail the entire indexing process if summary generation fails
        else:
            # Production mode: asynchronous generation
            logger.info(
                "Queuing summary index generation task for document %s (production mode)",
                document.id,
            )
            try:
                generate_summary_index_task.delay(dataset.id, document.id, None)
                logger.info("Summary index generation task queued for document %s", document.id)
            except Exception:
                logger.exception(
                    "Failed to queue summary index generation task for document %s",
                    document.id,
                )
                # Don't fail the entire indexing process if task queuing fails

    def _get_preview_output_with_summaries(
        self,
        chunk_structure: str,
        chunks: Any,
        dataset: Dataset,
        indexing_technique: str | None = None,
        summary_index_setting: dict | None = None,
    ) -> Mapping[str, Any]:
        """
        Generate preview output with summaries for chunks in preview mode.
        This method generates summaries on-the-fly without saving to database.

        Args:
            chunk_structure: Chunk structure type
            chunks: Chunks to generate preview for
            dataset: Dataset object (for tenant_id)
            indexing_technique: Indexing technique from node config or dataset
            summary_index_setting: Summary index setting from node config or dataset
        """
    def _get_preview_output(self, chunk_structure: str, chunks: Any) -> Mapping[str, Any]:
        index_processor = IndexProcessorFactory(chunk_structure).init_index_processor()
        preview_output = index_processor.format_preview(chunks)

        # Check if summary index is enabled
        if indexing_technique != "high_quality":
            return preview_output

        if not summary_index_setting or not summary_index_setting.get("enable"):
            return preview_output

        # Generate summaries for chunks
        if "preview" in preview_output and isinstance(preview_output["preview"], list):
            chunk_count = len(preview_output["preview"])
            logger.info(
                "Generating summaries for %s chunks in preview mode (dataset: %s)",
                chunk_count,
                dataset.id,
            )
            # Use ParagraphIndexProcessor's generate_summary method
            from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor

            # Get Flask app for application context in worker threads
            flask_app = None
            try:
                flask_app = current_app._get_current_object()  # type: ignore
            except RuntimeError:
                logger.warning("No Flask application context available, summary generation may fail")

            def generate_summary_for_chunk(preview_item: dict) -> None:
                """Generate summary for a single chunk."""
                if "content" in preview_item:
                    # Set Flask application context in worker thread
                    if flask_app:
                        with flask_app.app_context():
                            summary = ParagraphIndexProcessor.generate_summary(
                                tenant_id=dataset.tenant_id,
                                text=preview_item["content"],
                                summary_index_setting=summary_index_setting,
                            )
                            if summary:
                                preview_item["summary"] = summary
                    else:
                        # Fallback: try without app context (may fail)
                        summary = ParagraphIndexProcessor.generate_summary(
                            tenant_id=dataset.tenant_id,
                            text=preview_item["content"],
                            summary_index_setting=summary_index_setting,
                        )
                        if summary:
                            preview_item["summary"] = summary

            # Generate summaries concurrently using ThreadPoolExecutor
            # Set a reasonable timeout to prevent hanging (60 seconds per chunk, max 5 minutes total)
            timeout_seconds = min(300, 60 * len(preview_output["preview"]))
            errors: list[Exception] = []

            with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(preview_output["preview"]))) as executor:
                futures = [
                    executor.submit(generate_summary_for_chunk, preview_item)
                    for preview_item in preview_output["preview"]
                ]
                # Wait for all tasks to complete with timeout
                done, not_done = concurrent.futures.wait(futures, timeout=timeout_seconds)

                # Cancel tasks that didn't complete in time
                if not_done:
                    timeout_error_msg = (
                        f"Summary generation timeout: {len(not_done)} chunks did not complete within {timeout_seconds}s"
                    )
                    logger.warning("%s. Cancelling remaining tasks...", timeout_error_msg)
                    # In preview mode, timeout is also an error
                    errors.append(TimeoutError(timeout_error_msg))
                    for future in not_done:
                        future.cancel()
                    # Wait a bit for cancellation to take effect
                    concurrent.futures.wait(not_done, timeout=5)

                # Collect exceptions from completed futures
                for future in done:
                    try:
                        future.result()  # This will raise any exception that occurred
                    except Exception as e:
                        logger.exception("Error in summary generation future")
                        errors.append(e)

            # In preview mode, if there are any errors, fail the request
            if errors:
                error_messages = [str(e) for e in errors]
                error_summary = (
                    f"Failed to generate summaries for {len(errors)} chunk(s). "
                    f"Errors: {'; '.join(error_messages[:3])}"  # Show first 3 errors
                )
                if len(errors) > 3:
                    error_summary += f" (and {len(errors) - 3} more)"
                logger.error("Summary generation failed in preview mode: %s", error_summary)
                raise KnowledgeIndexNodeError(error_summary)

            completed_count = sum(1 for item in preview_output["preview"] if item.get("summary") is not None)
            logger.info(
                "Completed summary generation for preview chunks: %s/%s succeeded",
                completed_count,
                len(preview_output["preview"]),
            )

        return preview_output

    def _get_preview_output(
        self,
        chunk_structure: str,
        chunks: Any,
        dataset: Dataset | None = None,
        variable_pool: VariablePool | None = None,
    ) -> Mapping[str, Any]:
        index_processor = IndexProcessorFactory(chunk_structure).init_index_processor()
        preview_output = index_processor.format_preview(chunks)

        # If dataset is provided, try to enrich preview with summaries
        if dataset and variable_pool:
            document_id = variable_pool.get(["sys", SystemVariableKey.DOCUMENT_ID])
            if document_id:
                document = db.session.query(Document).filter_by(id=document_id.value).first()
                if document:
                    # Query summaries for this document
                    summaries = (
                        db.session.query(DocumentSegmentSummary)
                        .filter_by(
                            dataset_id=dataset.id,
                            document_id=document.id,
                            status="completed",
                            enabled=True,
                        )
                        .all()
                    )

                    if summaries:
                        # Create a map of segment content to summary for matching
                        # Use content matching as chunks in preview might not be indexed yet
                        summary_by_content = {}
                        for summary in summaries:
                            segment = (
                                db.session.query(DocumentSegment)
                                .filter_by(id=summary.chunk_id, dataset_id=dataset.id)
                                .first()
                            )
                            if segment:
                                # Normalize content for matching (strip whitespace)
                                normalized_content = segment.content.strip()
                                summary_by_content[normalized_content] = summary.summary_content

                        # Enrich preview with summaries by content matching
                        if "preview" in preview_output and isinstance(preview_output["preview"], list):
                            matched_count = 0
                            for preview_item in preview_output["preview"]:
                                if "content" in preview_item:
                                    # Normalize content for matching
                                    normalized_chunk_content = preview_item["content"].strip()
                                    if normalized_chunk_content in summary_by_content:
                                        preview_item["summary"] = summary_by_content[normalized_chunk_content]
                                        matched_count += 1

                            if matched_count > 0:
                                logger.info(
                                    "Enriched preview with %s existing summaries (dataset: %s, document: %s)",
                                    matched_count,
                                    dataset.id,
                                    document.id,
                                )

        return preview_output
        return index_processor.format_preview(chunks)

    @classmethod
    def version(cls) -> str:
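
The removed _handle_summary_index_generation splits on invocation mode: a debugger run summarizes synchronously in a thread pool so the preview can show the results, while any other run enqueues a Celery task and returns immediately. The dispatch in miniature, runnable as-is — summarize_segment and enqueue_task are illustrative stand-ins for the service call and task.delay:

import concurrent.futures


def summarize_segment(segment_id: str) -> None:
    print(f"summarized {segment_id}")  # stand-in for the LLM call + vector write


def enqueue_task(document_id: str) -> None:
    print(f"queued background task for {document_id}")  # stand-in for task.delay(...)


def handle_summary_generation(document_id: str, segment_ids: list[str], is_preview: bool) -> None:
    if is_preview:
        # Debug runs block so the preview response can include the summaries.
        with concurrent.futures.ThreadPoolExecutor(max_workers=min(10, len(segment_ids))) as executor:
            futures = [executor.submit(summarize_segment, s) for s in segment_ids]
            concurrent.futures.wait(futures)
    else:
        # Production runs defer to Celery and never block indexing.
        enqueue_task(document_id)


handle_summary_generation("doc-1", ["seg-1", "seg-2"], is_preview=True)
handle_summary_generation("doc-1", ["seg-1", "seg-2"], is_preview=False)
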
@ -1,21 +0,0 @@
from enum import StrEnum


class HostedTrialProvider(StrEnum):
    """
    Enum representing hosted model provider names for trial access.
    """

    ANTHROPIC = "langgenius/anthropic/anthropic"
    OPENAI = "langgenius/openai/openai"
    GEMINI = "langgenius/gemini/google"
    X = "langgenius/x/x"
    DEEPSEEK = "langgenius/deepseek/deepseek"
    TONGYI = "langgenius/tongyi/tongyi"

    @property
    def config_key(self) -> str:
        """Return the config key used in dify_config (e.g., HOSTED_{config_key}_PAID_ENABLED)."""
        if self == HostedTrialProvider.X:
            return "XAI"
        return self.name
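
Per the docstring, config_key exists so each provider can be turned into a settings attribute name, with X mapped to the XAI prefix rather than its member name. A quick self-contained illustration of that lookup using a trimmed copy of the enum:

from enum import StrEnum


class HostedTrialProvider(StrEnum):
    OPENAI = "langgenius/openai/openai"
    X = "langgenius/x/x"

    @property
    def config_key(self) -> str:
        return "XAI" if self == HostedTrialProvider.X else self.name


for provider in HostedTrialProvider:
    # Prints HOSTED_OPENAI_PAID_ENABLED, then HOSTED_XAI_PAID_ENABLED.
    print(f"HOSTED_{provider.config_key}_PAID_ENABLED")
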
@ -102,8 +102,6 @@ def init_app(app: DifyApp) -> Celery:
    imports = [
        "tasks.async_workflow_tasks",  # trigger workers
        "tasks.trigger_processing_tasks",  # async trigger processing
        "tasks.generate_summary_index_task",  # summary index generation
        "tasks.regenerate_summary_index_task",  # summary index regeneration
    ]
    day = dify_config.CELERY_BEAT_SCHEDULER_TIME

@ -39,14 +39,6 @@ dataset_retrieval_model_fields = {
    "score_threshold_enabled": fields.Boolean,
    "score_threshold": fields.Float,
}

dataset_summary_index_fields = {
    "enable": fields.Boolean,
    "model_name": fields.String,
    "model_provider_name": fields.String,
    "summary_prompt": fields.String,
}

external_retrieval_model_fields = {
    "top_k": fields.Integer,
    "score_threshold": fields.Float,
@ -91,7 +83,6 @@ dataset_detail_fields = {
    "embedding_model_provider": fields.String,
    "embedding_available": fields.Boolean,
    "retrieval_model_dict": fields.Nested(dataset_retrieval_model_fields),
    "summary_index_setting": fields.Nested(dataset_summary_index_fields),
    "tags": fields.List(fields.Nested(tag_fields)),
    "doc_form": fields.String,
    "external_knowledge_info": fields.Nested(external_knowledge_info_fields),

@ -33,11 +33,6 @@ document_fields = {
    "hit_count": fields.Integer,
    "doc_form": fields.String,
    "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"),
    # Summary index generation status:
    # "SUMMARIZING" (when task is queued and generating)
    "summary_index_status": fields.String,
    # Whether this document needs summary index generation
    "need_summary": fields.Boolean,
}

document_with_segments_fields = {
@ -65,10 +60,6 @@ document_with_segments_fields = {
    "completed_segments": fields.Integer,
    "total_segments": fields.Integer,
    "doc_metadata": fields.List(fields.Nested(document_metadata_fields), attribute="doc_metadata_details"),
    # Summary index generation status:
    # "SUMMARIZING" (when task is queued and generating)
    "summary_index_status": fields.String,
    "need_summary": fields.Boolean,  # Whether this document needs summary index generation
}

dataset_and_document_fields = {

@ -58,5 +58,4 @@ hit_testing_record_fields = {
    "score": fields.Float,
    "tsne_position": fields.Raw,
    "files": fields.List(fields.Nested(files_fields)),
    "summary": fields.String,  # Summary content if retrieved via summary index
}

@ -49,5 +49,4 @@ segment_fields = {
|
||||
"stopped_at": TimestampField,
|
||||
"child_chunks": fields.List(fields.Nested(child_chunk_fields)),
|
||||
"attachments": fields.List(fields.Nested(attachment_fields)),
|
||||
"summary": fields.String, # Summary content for the segment
|
||||
}
|
||||
|
||||
@ -1,69 +0,0 @@
"""add SummaryIndex feature

Revision ID: 562dcce7d77c
Revises: 03ea244985ce
Create Date: 2026-01-12 13:58:40.584802

"""
from alembic import op
import models as models
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '562dcce7d77c'
down_revision = '03ea244985ce'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('document_segment_summary',
        sa.Column('id', models.types.StringUUID(), nullable=False),
        sa.Column('dataset_id', models.types.StringUUID(), nullable=False),
        sa.Column('document_id', models.types.StringUUID(), nullable=False),
        sa.Column('chunk_id', models.types.StringUUID(), nullable=False),
        sa.Column('summary_content', models.types.LongText(), nullable=True),
        sa.Column('summary_index_node_id', sa.String(length=255), nullable=True),
        sa.Column('summary_index_node_hash', sa.String(length=255), nullable=True),
        sa.Column('status', sa.String(length=32), server_default=sa.text("'generating'"), nullable=False),
        sa.Column('error', models.types.LongText(), nullable=True),
        sa.Column('enabled', sa.Boolean(), server_default=sa.text('true'), nullable=False),
        sa.Column('disabled_at', sa.DateTime(), nullable=True),
        sa.Column('disabled_by', models.types.StringUUID(), nullable=True),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='document_segment_summary_pkey')
    )
    with op.batch_alter_table('document_segment_summary', schema=None) as batch_op:
        batch_op.create_index('document_segment_summary_chunk_id_idx', ['chunk_id'], unique=False)
        batch_op.create_index('document_segment_summary_dataset_id_idx', ['dataset_id'], unique=False)
        batch_op.create_index('document_segment_summary_document_id_idx', ['document_id'], unique=False)
        batch_op.create_index('document_segment_summary_status_idx', ['status'], unique=False)

    with op.batch_alter_table('datasets', schema=None) as batch_op:
        batch_op.add_column(sa.Column('summary_index_setting', models.types.AdjustedJSON(), nullable=True))

    with op.batch_alter_table('documents', schema=None) as batch_op:
        batch_op.add_column(sa.Column('need_summary', sa.Boolean(), server_default=sa.text('false'), nullable=True))

    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('documents', schema=None) as batch_op:
        batch_op.drop_column('need_summary')

    with op.batch_alter_table('datasets', schema=None) as batch_op:
        batch_op.drop_column('summary_index_setting')

    with op.batch_alter_table('document_segment_summary', schema=None) as batch_op:
        batch_op.drop_index('document_segment_summary_status_idx')
        batch_op.drop_index('document_segment_summary_document_id_idx')
        batch_op.drop_index('document_segment_summary_dataset_id_idx')
        batch_op.drop_index('document_segment_summary_chunk_id_idx')

    op.drop_table('document_segment_summary')
    # ### end Alembic commands ###
@ -1,73 +0,0 @@
"""add table explore banner and trial

Revision ID: f9f6d18a37f9
Revises: 288345cd01d1
Create Date: 2026-01-17 11:10:18.079355

"""
from alembic import op
import models as models
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision = 'f9f6d18a37f9'
down_revision = '288345cd01d1'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('account_trial_app_records',
        sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('account_id', models.types.StringUUID(), nullable=False),
        sa.Column('app_id', models.types.StringUUID(), nullable=False),
        sa.Column('count', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='user_trial_app_pkey'),
        sa.UniqueConstraint('account_id', 'app_id', name='unique_account_trial_app_record')
    )
    with op.batch_alter_table('account_trial_app_records', schema=None) as batch_op:
        batch_op.create_index('account_trial_app_record_account_id_idx', ['account_id'], unique=False)
        batch_op.create_index('account_trial_app_record_app_id_idx', ['app_id'], unique=False)

    op.create_table('exporle_banners',
        sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('content', sa.JSON(), nullable=False),
        sa.Column('link', sa.String(length=255), nullable=False),
        sa.Column('sort', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(length=255), server_default=sa.text("'enabled'::character varying"), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('language', sa.String(length=255), server_default=sa.text("'en-US'::character varying"), nullable=False),
        sa.PrimaryKeyConstraint('id', name='exporler_banner_pkey')
    )
    op.create_table('trial_apps',
        sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('app_id', models.types.StringUUID(), nullable=False),
        sa.Column('tenant_id', models.types.StringUUID(), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('trial_limit', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('id', name='trial_app_pkey'),
        sa.UniqueConstraint('app_id', name='unique_trail_app_id')
    )
    with op.batch_alter_table('trial_apps', schema=None) as batch_op:
        batch_op.create_index('trial_app_app_id_idx', ['app_id'], unique=False)
        batch_op.create_index('trial_app_tenant_id_idx', ['tenant_id'], unique=False)
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('trial_apps', schema=None) as batch_op:
        batch_op.drop_index('trial_app_tenant_id_idx')
        batch_op.drop_index('trial_app_app_id_idx')

    op.drop_table('trial_apps')
    op.drop_table('exporle_banners')
    with op.batch_alter_table('account_trial_app_records', schema=None) as batch_op:
        batch_op.drop_index('account_trial_app_record_app_id_idx')
        batch_op.drop_index('account_trial_app_record_account_id_idx')

    op.drop_table('account_trial_app_records')
    # ### end Alembic commands ###
@ -35,7 +35,6 @@ from .enums import (
    WorkflowTriggerStatus,
)
from .model import (
    AccountTrialAppRecord,
    ApiRequest,
    ApiToken,
    App,
@ -48,7 +47,6 @@ from .model import (
    DatasetRetrieverResource,
    DifySetup,
    EndUser,
    ExporleBanner,
    IconType,
    InstalledApp,
    Message,
@ -64,7 +62,6 @@ from .model import (
    TagBinding,
    TenantCreditPool,
    TraceAppConfig,
    TrialApp,
    UploadFile,
)
from .oauth import DatasourceOauthParamConfig, DatasourceProvider
@ -117,7 +114,6 @@ __all__ = [
    "Account",
    "AccountIntegrate",
    "AccountStatus",
    "AccountTrialAppRecord",
    "ApiRequest",
    "ApiToken",
    "ApiToolProvider",
@ -154,7 +150,6 @@ __all__ = [
    "DocumentSegment",
    "Embedding",
    "EndUser",
    "ExporleBanner",
    "ExternalKnowledgeApis",
    "ExternalKnowledgeBindings",
    "IconType",
@ -193,7 +188,6 @@ __all__ = [
    "ToolLabelBinding",
    "ToolModelInvoke",
    "TraceAppConfig",
    "TrialApp",
    "TriggerOAuthSystemClient",
    "TriggerOAuthTenantClient",
    "TriggerSubscription",

@ -72,7 +72,6 @@ class Dataset(Base):
    keyword_number = mapped_column(sa.Integer, nullable=True, server_default=sa.text("10"))
    collection_binding_id = mapped_column(StringUUID, nullable=True)
    retrieval_model = mapped_column(AdjustedJSON, nullable=True)
    summary_index_setting = mapped_column(AdjustedJSON, nullable=True)
    built_in_field_enabled = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false"))
    icon_info = mapped_column(AdjustedJSON, nullable=True)
    runtime_mode = mapped_column(sa.String(255), nullable=True, server_default=sa.text("'general'"))
@ -420,7 +419,6 @@ class Document(Base):
    doc_metadata = mapped_column(AdjustedJSON, nullable=True)
    doc_form = mapped_column(String(255), nullable=False, server_default=sa.text("'text_model'"))
    doc_language = mapped_column(String(255), nullable=True)
    need_summary: Mapped[bool | None] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false"))

    DATA_SOURCES = ["upload_file", "notion_import", "website_crawl"]

@ -1577,35 +1575,3 @@ class SegmentAttachmentBinding(Base):
    segment_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
    attachment_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
    created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp())


class DocumentSegmentSummary(Base):
    __tablename__ = "document_segment_summary"
    __table_args__ = (
        sa.PrimaryKeyConstraint("id", name="document_segment_summary_pkey"),
        sa.Index("document_segment_summary_dataset_id_idx", "dataset_id"),
        sa.Index("document_segment_summary_document_id_idx", "document_id"),
        sa.Index("document_segment_summary_chunk_id_idx", "chunk_id"),
        sa.Index("document_segment_summary_status_idx", "status"),
    )

    id: Mapped[str] = mapped_column(StringUUID, nullable=False, default=lambda: str(uuid4()))
    dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
    document_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
    # corresponds to DocumentSegment.id or parent chunk id
    chunk_id: Mapped[str] = mapped_column(StringUUID, nullable=False)
    summary_content: Mapped[str] = mapped_column(LongText, nullable=True)
    summary_index_node_id: Mapped[str] = mapped_column(String(255), nullable=True)
    summary_index_node_hash: Mapped[str] = mapped_column(String(255), nullable=True)
    status: Mapped[str] = mapped_column(String(32), nullable=False, server_default=sa.text("'generating'"))
    error: Mapped[str] = mapped_column(LongText, nullable=True)
    enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true"))
    disabled_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True)
    disabled_by = mapped_column(StringUUID, nullable=True)
    created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp())
    updated_at: Mapped[datetime] = mapped_column(
        DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp()
    )

    def __repr__(self):
        return f"<DocumentSegmentSummary id={self.id} chunk_id={self.chunk_id} status={self.status}>"

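Because the model above keys summaries by chunk_id, looking up the summary for one
chunk is a single filtered query. A minimal sketch, assuming an active SQLAlchemy
session of the kind used throughout this diff and hypothetical `segment_id` /
`dataset_id` values:

    # Hypothetical lookup: fetch the enabled summary for a given segment (chunk).
    summary = (
        db.session.query(DocumentSegmentSummary)
        .filter_by(chunk_id=segment_id, dataset_id=dataset_id, enabled=True)
        .first()
    )
    if summary is not None and summary.status == "completed":
        print(summary.summary_content)
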
@ -603,64 +603,6 @@ class InstalledApp(TypeBase):
        return tenant


class TrialApp(Base):
    __tablename__ = "trial_apps"
    __table_args__ = (
        sa.PrimaryKeyConstraint("id", name="trial_app_pkey"),
        sa.Index("trial_app_app_id_idx", "app_id"),
        sa.Index("trial_app_tenant_id_idx", "tenant_id"),
        sa.UniqueConstraint("app_id", name="unique_trail_app_id"),
    )

    id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"))
    app_id = mapped_column(StringUUID, nullable=False)
    tenant_id = mapped_column(StringUUID, nullable=False)
    created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp())
    trial_limit = mapped_column(sa.Integer, nullable=False, default=3)

    @property
    def app(self) -> App | None:
        app = db.session.query(App).where(App.id == self.app_id).first()
        return app


class AccountTrialAppRecord(Base):
    __tablename__ = "account_trial_app_records"
    __table_args__ = (
        sa.PrimaryKeyConstraint("id", name="user_trial_app_pkey"),
        sa.Index("account_trial_app_record_account_id_idx", "account_id"),
        sa.Index("account_trial_app_record_app_id_idx", "app_id"),
        sa.UniqueConstraint("account_id", "app_id", name="unique_account_trial_app_record"),
    )
    id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"))
    account_id = mapped_column(StringUUID, nullable=False)
    app_id = mapped_column(StringUUID, nullable=False)
    count = mapped_column(sa.Integer, nullable=False, default=0)
    created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp())

    @property
    def app(self) -> App | None:
        app = db.session.query(App).where(App.id == self.app_id).first()
        return app

    @property
    def user(self) -> Account | None:
        user = db.session.query(Account).where(Account.id == self.account_id).first()
        return user


class ExporleBanner(Base):
    __tablename__ = "exporle_banners"
    __table_args__ = (sa.PrimaryKeyConstraint("id", name="exporler_banner_pkey"),)
    id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"))
    content = mapped_column(sa.JSON, nullable=False)
    link = mapped_column(String(255), nullable=False)
    sort = mapped_column(sa.Integer, nullable=False)
    status = mapped_column(sa.String(255), nullable=False, server_default=sa.text("'enabled'::character varying"))
    created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp())
    language = mapped_column(String(255), nullable=False, server_default=sa.text("'en-US'::character varying"))


class OAuthProviderApp(TypeBase):
    """
    Globally shared OAuth provider app information.

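Taken together, TrialApp.trial_limit caps how many trial runs an app allows and
AccountTrialAppRecord.count tracks how many a given account has used. A hedged sketch
of the remaining-quota check these two tables support (the helper name is illustrative
and not part of the diff):

    # Hypothetical helper: how many trial runs does this account have left?
    def remaining_trials(account_id: str, app_id: str) -> int:
        trial_app = db.session.query(TrialApp).where(TrialApp.app_id == app_id).first()
        if trial_app is None:
            return 0  # app is not enabled for trial
        record = (
            db.session.query(AccountTrialAppRecord)
            .where(
                AccountTrialAppRecord.app_id == app_id,
                AccountTrialAppRecord.account_id == account_id,
            )
            .first()
        )
        used = record.count if record else 0
        return max(trial_app.trial_limit - used, 0)
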
@ -89,7 +89,6 @@ from tasks.disable_segments_from_index_task import disable_segments_from_index_t
from tasks.document_indexing_update_task import document_indexing_update_task
from tasks.enable_segments_to_index_task import enable_segments_to_index_task
from tasks.recover_document_indexing_task import recover_document_indexing_task
from tasks.regenerate_summary_index_task import regenerate_summary_index_task
from tasks.remove_document_from_index_task import remove_document_from_index_task
from tasks.retry_document_indexing_task import retry_document_indexing_task
from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task

@ -477,11 +476,6 @@ class DatasetService:
        if external_retrieval_model:
            dataset.retrieval_model = external_retrieval_model

        # Update summary index setting if provided
        summary_index_setting = data.get("summary_index_setting", None)
        if summary_index_setting is not None:
            dataset.summary_index_setting = summary_index_setting

        # Update basic dataset properties
        dataset.name = data.get("name", dataset.name)
        dataset.description = data.get("description", dataset.description)

@ -564,18 +558,12 @@ class DatasetService:
        # Handle indexing technique changes and embedding model updates
        action = DatasetService._handle_indexing_technique_change(dataset, data, filtered_data)

        # Check if summary_index_setting model changed (before updating database)
        summary_model_changed = DatasetService._check_summary_index_setting_model_changed(dataset, data)

        # Add metadata fields
        filtered_data["updated_by"] = user.id
        filtered_data["updated_at"] = naive_utc_now()
        # update Retrieval model
        if data.get("retrieval_model"):
            filtered_data["retrieval_model"] = data["retrieval_model"]
        # update summary index setting
        if data.get("summary_index_setting"):
            filtered_data["summary_index_setting"] = data.get("summary_index_setting")
        # update icon info
        if data.get("icon_info"):
            filtered_data["icon_info"] = data.get("icon_info")

@ -584,30 +572,12 @@ class DatasetService:
        db.session.query(Dataset).filter_by(id=dataset.id).update(filtered_data)
        db.session.commit()

        # Reload dataset to get updated values
        db.session.refresh(dataset)

        # update pipeline knowledge base node data
        DatasetService._update_pipeline_knowledge_base_node_data(dataset, user.id)

        # Trigger vector index task if indexing technique changed
        if action:
            deal_dataset_vector_index_task.delay(dataset.id, action)
            # If embedding_model changed, also regenerate summary vectors
            if action == "update":
                regenerate_summary_index_task.delay(
                    dataset.id,
                    regenerate_reason="embedding_model_changed",
                    regenerate_vectors_only=True,
                )

        # Trigger summary index regeneration if summary model changed
        if summary_model_changed:
            regenerate_summary_index_task.delay(
                dataset.id,
                regenerate_reason="summary_model_changed",
                regenerate_vectors_only=False,
            )

        return dataset

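Note the asymmetry in the removed block above: an embedding-model change only re-embeds
the existing summary text (regenerate_vectors_only=True, since the summaries themselves
are still valid), while a summary-model change rewrites the summaries and then re-embeds
them (regenerate_vectors_only=False). A condensed recap of that dispatch, under the same
assumptions as the deleted code:

    # Illustrative recap of the two regeneration triggers.
    if action == "update":  # embedding model changed; summary text still valid
        regenerate_summary_index_task.delay(
            dataset.id, regenerate_reason="embedding_model_changed", regenerate_vectors_only=True
        )
    if summary_model_changed:  # summary LLM changed; summaries must be rewritten
        regenerate_summary_index_task.delay(
            dataset.id, regenerate_reason="summary_model_changed", regenerate_vectors_only=False
        )
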
@ -646,7 +616,6 @@ class DatasetService:
                knowledge_index_node_data["chunk_structure"] = dataset.chunk_structure
                knowledge_index_node_data["indexing_technique"] = dataset.indexing_technique  # pyright: ignore[reportAttributeAccessIssue]
                knowledge_index_node_data["keyword_number"] = dataset.keyword_number
                knowledge_index_node_data["summary_index_setting"] = dataset.summary_index_setting
                node["data"] = knowledge_index_node_data
                updated = True
            except Exception:

@ -885,53 +854,6 @@ class DatasetService:
            )
            filtered_data["collection_binding_id"] = dataset_collection_binding.id

    @staticmethod
    def _check_summary_index_setting_model_changed(dataset: Dataset, data: dict[str, Any]) -> bool:
        """
        Check if summary_index_setting model (model_name or model_provider_name) has changed.

        Args:
            dataset: Current dataset object
            data: Update data dictionary

        Returns:
            bool: True if summary model changed, False otherwise
        """
        # Check if summary_index_setting is being updated
        if "summary_index_setting" not in data or data.get("summary_index_setting") is None:
            return False

        new_summary_setting = data.get("summary_index_setting")
        old_summary_setting = dataset.summary_index_setting

        # If old setting doesn't exist or is disabled, no need to regenerate
        if not old_summary_setting or not old_summary_setting.get("enable"):
            return False

        # If new setting is disabled, no need to regenerate
        if not new_summary_setting or not new_summary_setting.get("enable"):
            return False

        # Compare model_name and model_provider_name
        old_model_name = old_summary_setting.get("model_name")
        old_model_provider = old_summary_setting.get("model_provider_name")
        new_model_name = new_summary_setting.get("model_name")
        new_model_provider = new_summary_setting.get("model_provider_name")

        # Check if model changed
        if old_model_name != new_model_name or old_model_provider != new_model_provider:
            logger.info(
                "Summary index setting model changed for dataset %s: old=%s/%s, new=%s/%s",
                dataset.id,
                old_model_provider,
                old_model_name,
                new_model_provider,
                new_model_name,
            )
            return True

        return False

    @staticmethod
    def update_rag_pipeline_dataset_settings(
        session: Session, dataset: Dataset, knowledge_configuration: KnowledgeConfiguration, has_published: bool = False

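Concretely, the check above only reports a change when both the old and the new setting
are enabled and the (provider, model) pair differs. A small worked example under those
rules (values are illustrative):

    old = {"enable": True, "model_name": "m1", "model_provider_name": "p"}
    new = {"enable": True, "model_name": "m2", "model_provider_name": "p"}
    # With dataset.summary_index_setting == old and data == {"summary_index_setting": new},
    # _check_summary_index_setting_model_changed(dataset, data) returns True: m1 != m2.
    # If either side has "enable": False, or the old setting is missing, it returns False,
    # because there is no indexed summary content that could go stale.
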
@ -967,9 +889,6 @@ class DatasetService:
            else:
                raise ValueError("Invalid index method")
            dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump()
            # Update summary_index_setting if provided
            if knowledge_configuration.summary_index_setting is not None:
                dataset.summary_index_setting = knowledge_configuration.summary_index_setting
            session.add(dataset)
        else:
            if dataset.chunk_structure and dataset.chunk_structure != knowledge_configuration.chunk_structure:

@ -1075,9 +994,6 @@ class DatasetService:
            if dataset.keyword_number != knowledge_configuration.keyword_number:
                dataset.keyword_number = knowledge_configuration.keyword_number
            dataset.retrieval_model = knowledge_configuration.retrieval_model.model_dump()
            # Update summary_index_setting if provided
            if knowledge_configuration.summary_index_setting is not None:
                dataset.summary_index_setting = knowledge_configuration.summary_index_setting
            session.add(dataset)
            session.commit()
            if action:

@ -2048,8 +1964,6 @@ class DocumentService:
                    DuplicateDocumentIndexingTaskProxy(
                        dataset.tenant_id, dataset.id, duplicate_document_ids
                    ).delay()
                # Note: Summary index generation is triggered in document_indexing_task after indexing completes
                # to ensure segments are available. See tasks/document_indexing_task.py
            except LockNotOwnedError:
                pass

@ -2354,11 +2268,6 @@ class DocumentService:
        name: str,
        batch: str,
    ):
        # Set need_summary based on dataset's summary_index_setting
        need_summary = False
        if dataset.summary_index_setting and dataset.summary_index_setting.get("enable") is True:
            need_summary = True

        document = Document(
            tenant_id=dataset.tenant_id,
            dataset_id=dataset.id,

@ -2372,7 +2281,6 @@ class DocumentService:
            created_by=account.id,
            doc_form=document_form,
            doc_language=document_language,
            need_summary=need_summary,
        )
        doc_metadata = {}
        if dataset.built_in_field_enabled:

@ -2597,7 +2505,6 @@ class DocumentService:
            embedding_model_provider=knowledge_config.embedding_model_provider,
            collection_binding_id=dataset_collection_binding_id,
            retrieval_model=retrieval_model.model_dump() if retrieval_model else None,
            summary_index_setting=knowledge_config.summary_index_setting,
            is_multimodal=knowledge_config.is_multimodal,
        )

@ -2779,14 +2686,6 @@ class DocumentService:
        if not isinstance(args["process_rule"]["rules"]["segmentation"]["max_tokens"], int):
            raise ValueError("Process rule segmentation max_tokens is invalid")

        # validate summary index setting
        summary_index_setting = args["process_rule"].get("summary_index_setting")
        if summary_index_setting and summary_index_setting.get("enable"):
            if "model_name" not in summary_index_setting or not summary_index_setting["model_name"]:
                raise ValueError("Summary index model name is required")
            if "model_provider_name" not in summary_index_setting or not summary_index_setting["model_provider_name"]:
                raise ValueError("Summary index model provider name is required")

    @staticmethod
    def batch_update_document_status(
        dataset: Dataset, document_ids: list[str], action: Literal["enable", "disable", "archive", "un_archive"], user

@ -3255,39 +3154,6 @@ class SegmentService:
                if args.enabled or keyword_changed:
                    # update segment vector index
                    VectorService.update_segment_vector(args.keywords, segment, dataset)
                # update summary index if summary is provided and has changed
                if args.summary is not None:
                    # Check if summary index is enabled
                    has_summary_index = (
                        dataset.indexing_technique == "high_quality"
                        and dataset.summary_index_setting
                        and dataset.summary_index_setting.get("enable") is True
                    )

                    if has_summary_index:
                        # Query existing summary from database
                        from models.dataset import DocumentSegmentSummary

                        existing_summary = (
                            db.session.query(DocumentSegmentSummary)
                            .where(
                                DocumentSegmentSummary.chunk_id == segment.id,
                                DocumentSegmentSummary.dataset_id == dataset.id,
                            )
                            .first()
                        )

                        # Check if summary has changed
                        existing_summary_content = existing_summary.summary_content if existing_summary else None
                        if existing_summary_content != args.summary:
                            # Summary has changed, update it
                            from services.summary_index_service import SummaryIndexService

                            try:
                                SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary)
                            except Exception:
                                logger.exception("Failed to update summary for segment %s", segment.id)
                                # Don't fail the entire update if summary update fails
            else:
                segment_hash = helper.generate_text_hash(content)
                tokens = 0

@ -3362,15 +3228,6 @@ class SegmentService:
                elif document.doc_form in (IndexStructureType.PARAGRAPH_INDEX, IndexStructureType.QA_INDEX):
                    # update segment vector index
                    VectorService.update_segment_vector(args.keywords, segment, dataset)
                    # update summary index if summary is provided
                    if args.summary is not None:
                        from services.summary_index_service import SummaryIndexService

                        try:
                            SummaryIndexService.update_summary_for_segment(segment, dataset, args.summary)
                        except Exception:
                            logger.exception("Failed to update summary for segment %s", segment.id)
                            # Don't fail the entire update if summary update fails
                    # update multimodel vector index
                    VectorService.update_multimodel_vector(segment, args.attachment_ids or [], dataset)
        except Exception as e:

@ -119,7 +119,6 @@ class KnowledgeConfig(BaseModel):
    data_source: DataSource | None = None
    process_rule: ProcessRule | None = None
    retrieval_model: RetrievalModel | None = None
    summary_index_setting: dict | None = None
    doc_form: str = "text_model"
    doc_language: str = "English"
    embedding_model: str | None = None

@ -142,7 +141,6 @@ class SegmentUpdateArgs(BaseModel):
    regenerate_child_chunks: bool = False
    enabled: bool | None = None
    attachment_ids: list[str] | None = None
    summary: str | None = None  # Summary content for summary index


class ChildChunkUpdateArgs(BaseModel):

@ -116,8 +116,6 @@ class KnowledgeConfiguration(BaseModel):
    embedding_model: str = ""
    keyword_number: int | None = 10
    retrieval_model: RetrievalSetting
    # add summary index setting
    summary_index_setting: dict | None = None

    @field_validator("embedding_model_provider", mode="before")
    @classmethod

@ -4,7 +4,6 @@ from pydantic import BaseModel, ConfigDict, Field

from configs import dify_config
from enums.cloud_plan import CloudPlan
from enums.hosted_provider import HostedTrialProvider
from services.billing_service import BillingService
from services.enterprise.enterprise_service import EnterpriseService

@ -171,9 +170,6 @@ class SystemFeatureModel(BaseModel):
    plugin_installation_permission: PluginInstallationPermissionModel = PluginInstallationPermissionModel()
    enable_change_email: bool = True
    plugin_manager: PluginManagerModel = PluginManagerModel()
    trial_models: list[str] = []
    enable_trial_app: bool = False
    enable_explore_banner: bool = False


class FeatureService:

@ -229,21 +225,7 @@ class FeatureService:
        system_features.is_allow_register = dify_config.ALLOW_REGISTER
        system_features.is_allow_create_workspace = dify_config.ALLOW_CREATE_WORKSPACE
        system_features.is_email_setup = dify_config.MAIL_TYPE is not None and dify_config.MAIL_TYPE != ""
        system_features.trial_models = cls._fulfill_trial_models_from_env()
        system_features.enable_trial_app = dify_config.ENABLE_TRIAL_APP
        system_features.enable_explore_banner = dify_config.ENABLE_EXPLORE_BANNER

    @classmethod
    def _fulfill_trial_models_from_env(cls) -> list[str]:
        return [
            provider.value
            for provider in HostedTrialProvider
            if (
                getattr(dify_config, f"HOSTED_{provider.config_key}_PAID_ENABLED", False)
                and getattr(dify_config, f"HOSTED_{provider.config_key}_TRIAL_ENABLED", False)
            )
        ]

    @classmethod
    def _fulfill_params_from_env(cls, features: FeatureModel):
        features.can_replace_logo = dify_config.CAN_REPLACE_LOGO

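So a provider only surfaces in trial_models when both of its flags are set. An
illustrative environment configuration (the values are hypothetical; the flag names
follow the HOSTED_{config_key}_* pattern used above):

    # .env-style illustration
    # HOSTED_ANTHROPIC_PAID_ENABLED=true
    # HOSTED_ANTHROPIC_TRIAL_ENABLED=true   -> "langgenius/anthropic/anthropic" is listed
    # HOSTED_XAI_PAID_ENABLED=true
    # HOSTED_XAI_TRIAL_ENABLED=false        -> "langgenius/x/x" is not listed (X maps to XAI)
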
@ -1,7 +1,4 @@
from configs import dify_config
from extensions.ext_database import db
from models.model import AccountTrialAppRecord, TrialApp
from services.feature_service import FeatureService
from services.recommend_app.recommend_app_factory import RecommendAppRetrievalFactory

@ -23,15 +20,6 @@ class RecommendedAppService:
                )
            )

        if FeatureService.get_system_features().enable_trial_app:
            apps = result["recommended_apps"]
            for app in apps:
                app_id = app["app_id"]
                trial_app_model = db.session.query(TrialApp).where(TrialApp.app_id == app_id).first()
                if trial_app_model:
                    app["can_trial"] = True
                else:
                    app["can_trial"] = False
        return result

    @classmethod

@ -44,30 +32,4 @@ class RecommendedAppService:
        mode = dify_config.HOSTED_FETCH_APP_TEMPLATES_MODE
        retrieval_instance = RecommendAppRetrievalFactory.get_recommend_app_factory(mode)()
        result: dict = retrieval_instance.get_recommend_app_detail(app_id)
        if FeatureService.get_system_features().enable_trial_app:
            app_id = result["id"]
            trial_app_model = db.session.query(TrialApp).where(TrialApp.app_id == app_id).first()
            if trial_app_model:
                result["can_trial"] = True
            else:
                result["can_trial"] = False
        return result

    @classmethod
    def add_trial_app_record(cls, app_id: str, account_id: str):
        """
        Add trial app record.
        :param app_id: app id
        :param account_id: account id
        :return:
        """
        account_trial_app_record = (
            db.session.query(AccountTrialAppRecord)
            .where(AccountTrialAppRecord.app_id == app_id, AccountTrialAppRecord.account_id == account_id)
            .first()
        )
        if account_trial_app_record:
            account_trial_app_record.count += 1
            db.session.commit()
        else:
            db.session.add(AccountTrialAppRecord(app_id=app_id, count=1, account_id=account_id))
            db.session.commit()

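add_trial_app_record is effectively an upsert on the (account_id, app_id) unique key:
the first use inserts a row with count=1, and later uses increment it. A hedged usage
sketch (the call site is hypothetical; a real caller would live in the trial
controller):

    # Hypothetical call site, e.g. after a trial app invocation succeeds:
    RecommendedAppService.add_trial_app_record(app_id=app.id, account_id=current_user.id)
    # Combined with TrialApp.trial_limit, a caller can refuse further runs once the
    # per-account count reaches the app's limit.
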
@ -1,626 +0,0 @@
"""Summary index service for generating and managing document segment summaries."""

import logging
import time
import uuid

from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.index_processor.constant.doc_type import DocType
from core.rag.models.document import Document
from extensions.ext_database import db
from libs import helper
from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary
from models.dataset import Document as DatasetDocument

logger = logging.getLogger(__name__)


class SummaryIndexService:
    """Service for generating and managing summary indexes."""

    @staticmethod
    def generate_summary_for_segment(
        segment: DocumentSegment,
        dataset: Dataset,
        summary_index_setting: dict,
    ) -> str:
        """
        Generate summary for a single segment.

        Args:
            segment: DocumentSegment to generate summary for
            dataset: Dataset containing the segment
            summary_index_setting: Summary index configuration

        Returns:
            Generated summary text

        Raises:
            ValueError: If summary_index_setting is invalid or generation fails
        """
        # Reuse the existing generate_summary method from ParagraphIndexProcessor
        # Use lazy import to avoid circular import
        from core.rag.index_processor.processor.paragraph_index_processor import ParagraphIndexProcessor

        summary_content = ParagraphIndexProcessor.generate_summary(
            tenant_id=dataset.tenant_id,
            text=segment.content,
            summary_index_setting=summary_index_setting,
            segment_id=segment.id,
        )

        if not summary_content:
            raise ValueError("Generated summary is empty")

        return summary_content

    @staticmethod
    def create_summary_record(
        segment: DocumentSegment,
        dataset: Dataset,
        summary_content: str,
        status: str = "generating",
    ) -> DocumentSegmentSummary:
        """
        Create or update a DocumentSegmentSummary record.
        If a summary record already exists for this segment, it will be updated instead of creating a new one.

        Args:
            segment: DocumentSegment to create summary for
            dataset: Dataset containing the segment
            summary_content: Generated summary content
            status: Summary status (default: "generating")

        Returns:
            Created or updated DocumentSegmentSummary instance
        """
        # Check if summary record already exists
        existing_summary = (
            db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first()
        )

        if existing_summary:
            # Update existing record
            existing_summary.summary_content = summary_content
            existing_summary.status = status
            existing_summary.error = None  # Clear any previous errors
            # Re-enable if it was disabled
            if not existing_summary.enabled:
                existing_summary.enabled = True
                existing_summary.disabled_at = None
                existing_summary.disabled_by = None
            db.session.add(existing_summary)
            db.session.flush()
            return existing_summary
        else:
            # Create new record (enabled by default)
            summary_record = DocumentSegmentSummary(
                dataset_id=dataset.id,
                document_id=segment.document_id,
                chunk_id=segment.id,
                summary_content=summary_content,
                status=status,
                enabled=True,  # Explicitly set enabled to True
            )
            db.session.add(summary_record)
            db.session.flush()
            return summary_record

    @staticmethod
    def vectorize_summary(
        summary_record: DocumentSegmentSummary,
        segment: DocumentSegment,
        dataset: Dataset,
    ) -> None:
        """
        Vectorize summary and store in vector database.

        Args:
            summary_record: DocumentSegmentSummary record
            segment: Original DocumentSegment
            dataset: Dataset containing the segment
        """
        if dataset.indexing_technique != "high_quality":
            logger.warning(
                "Summary vectorization skipped for dataset %s: indexing_technique is not high_quality",
                dataset.id,
            )
            return

        # Reuse existing index_node_id if available (like segment does), otherwise generate new one
        old_summary_node_id = summary_record.summary_index_node_id
        if old_summary_node_id:
            # Reuse existing index_node_id (like segment behavior)
            summary_index_node_id = old_summary_node_id
        else:
            # Generate new index node ID only for new summaries
            summary_index_node_id = str(uuid.uuid4())

        # Always regenerate hash (in case summary content changed)
        summary_hash = helper.generate_text_hash(summary_record.summary_content)

        # Delete old vector only if we're reusing the same index_node_id (to overwrite)
        # If index_node_id changed, the old vector should have been deleted elsewhere
        if old_summary_node_id and old_summary_node_id == summary_index_node_id:
            try:
                vector = Vector(dataset)
                vector.delete_by_ids([old_summary_node_id])
            except Exception as e:
                logger.warning(
                    "Failed to delete old summary vector for segment %s: %s. Continuing with new vectorization.",
                    segment.id,
                    str(e),
                )

        # Create document with summary content and metadata
        summary_document = Document(
            page_content=summary_record.summary_content,
            metadata={
                "doc_id": summary_index_node_id,
                "doc_hash": summary_hash,
                "dataset_id": dataset.id,
                "document_id": segment.document_id,
                "original_chunk_id": segment.id,  # Key: link to original chunk
                "doc_type": DocType.TEXT,
                "is_summary": True,  # Identifier for summary documents
            },
        )

        # Vectorize and store with retry mechanism for connection errors
        max_retries = 3
        retry_delay = 2.0

        for attempt in range(max_retries):
            try:
                vector = Vector(dataset)
                vector.add_texts([summary_document], duplicate_check=True)

                # Success - update summary record with index node info
                summary_record.summary_index_node_id = summary_index_node_id
                summary_record.summary_index_node_hash = summary_hash
                summary_record.status = "completed"
                db.session.add(summary_record)
                db.session.flush()
                return  # Success, exit function

            except Exception as e:  # ConnectionError is already covered by Exception
                error_str = str(e).lower()
                # Check if it's a connection-related error that might be transient
                is_connection_error = any(
                    keyword in error_str
                    for keyword in [
                        "connection",
                        "disconnected",
                        "timeout",
                        "network",
                        "could not connect",
                        "server disconnected",
                        "weaviate",
                    ]
                )

                if is_connection_error and attempt < max_retries - 1:
                    # Retry for connection errors
                    wait_time = retry_delay * (2**attempt)  # Exponential backoff
                    logger.warning(
                        "Vectorization attempt %s/%s failed for segment %s: %s. Retrying in %.1f seconds...",
                        attempt + 1,
                        max_retries,
                        segment.id,
                        str(e),
                        wait_time,
                    )
                    time.sleep(wait_time)
                    continue
                else:
                    # Final attempt failed or non-connection error - log and update status
                    logger.error(
                        "Failed to vectorize summary for segment %s after %s attempts: %s",
                        segment.id,
                        attempt + 1,
                        str(e),
                        exc_info=True,
                    )
                    summary_record.status = "error"
                    summary_record.error = f"Vectorization failed: {str(e)}"
                    db.session.add(summary_record)
                    db.session.flush()
                    raise

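With retry_delay = 2.0 and max_retries = 3, the backoff schedule above works out to
2.0 seconds after the first failed attempt and 4.0 seconds after the second
(retry_delay * 2**attempt); a third failure raises. A compact recap of that arithmetic:

    # attempt 0 fails -> sleep 2.0 * 2**0 = 2.0s, retry
    # attempt 1 fails -> sleep 2.0 * 2**1 = 4.0s, retry
    # attempt 2 fails -> no sleep; status is set to "error" and the exception propagates
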
    @staticmethod
    def generate_and_vectorize_summary(
        segment: DocumentSegment,
        dataset: Dataset,
        summary_index_setting: dict,
    ) -> DocumentSegmentSummary:
        """
        Generate summary for a segment and vectorize it.

        Args:
            segment: DocumentSegment to generate summary for
            dataset: Dataset containing the segment
            summary_index_setting: Summary index configuration

        Returns:
            Created DocumentSegmentSummary instance

        Raises:
            ValueError: If summary generation fails
        """
        try:
            # Generate summary
            summary_content = SummaryIndexService.generate_summary_for_segment(segment, dataset, summary_index_setting)

            # Create or update summary record (will handle overwrite internally)
            summary_record = SummaryIndexService.create_summary_record(
                segment, dataset, summary_content, status="generating"
            )

            # Vectorize summary (will delete old vector if exists before creating new one)
            SummaryIndexService.vectorize_summary(summary_record, segment, dataset)

            db.session.commit()
            logger.info("Successfully generated and vectorized summary for segment %s", segment.id)
            return summary_record

        except Exception as e:  # bind the exception so it can be recorded on the summary row below
            logger.exception("Failed to generate summary for segment %s", segment.id)
            # Update summary record with error status if it exists
            summary_record = (
                db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first()
            )
            if summary_record:
                summary_record.status = "error"
                summary_record.error = str(e)
                db.session.add(summary_record)
                db.session.commit()
            raise

    @staticmethod
    def generate_summaries_for_document(
        dataset: Dataset,
        document: DatasetDocument,
        summary_index_setting: dict,
        segment_ids: list[str] | None = None,
        only_parent_chunks: bool = False,
    ) -> list[DocumentSegmentSummary]:
        """
        Generate summaries for all segments in a document including vectorization.

        Args:
            dataset: Dataset containing the document
            document: DatasetDocument to generate summaries for
            summary_index_setting: Summary index configuration
            segment_ids: Optional list of specific segment IDs to process
            only_parent_chunks: If True, only process parent chunks (for parent-child mode)

        Returns:
            List of created DocumentSegmentSummary instances
        """
        # Only generate summary index for high_quality indexing technique
        if dataset.indexing_technique != "high_quality":
            logger.info(
                "Skipping summary generation for dataset %s: indexing_technique is %s, not 'high_quality'",
                dataset.id,
                dataset.indexing_technique,
            )
            return []

        if not summary_index_setting or not summary_index_setting.get("enable"):
            logger.info("Summary index is disabled for dataset %s", dataset.id)
            return []

        # Skip qa_model documents
        if document.doc_form == "qa_model":
            logger.info("Skipping summary generation for qa_model document %s", document.id)
            return []

        logger.info(
            "Starting summary generation for document %s in dataset %s, segment_ids: %s, only_parent_chunks: %s",
            document.id,
            dataset.id,
            len(segment_ids) if segment_ids else "all",
            only_parent_chunks,
        )

        # Query segments (only enabled segments)
        query = db.session.query(DocumentSegment).filter_by(
            dataset_id=dataset.id,
            document_id=document.id,
            status="completed",
            enabled=True,  # Only generate summaries for enabled segments
        )

        if segment_ids:
            query = query.filter(DocumentSegment.id.in_(segment_ids))

        segments = query.all()

        if not segments:
            logger.info("No segments found for document %s", document.id)
            return []

        summary_records = []

        for segment in segments:
            # For parent-child mode, only process parent chunks.
            # In parent-child mode, all DocumentSegments are parent chunks,
            # so we process all of them. Child chunks are stored in the ChildChunk table
            # and are not DocumentSegments, so they won't be in the segments list.
            # This check is mainly for clarity and future-proofing.
            if only_parent_chunks:
                # In parent-child mode, all segments in the query are parent chunks.
                # Child chunks are not DocumentSegments, so they won't appear here.
                # We can process all segments.
                pass

            try:
                summary_record = SummaryIndexService.generate_and_vectorize_summary(
                    segment, dataset, summary_index_setting
                )
                summary_records.append(summary_record)
            except Exception:
                logger.exception("Failed to generate summary for segment %s", segment.id)
                # Continue with other segments
                continue

        logger.info(
            "Completed summary generation for document %s: %s summaries generated and vectorized",
            document.id,
            len(summary_records),
        )
        return summary_records

    @staticmethod
    def disable_summaries_for_segments(
        dataset: Dataset,
        segment_ids: list[str] | None = None,
        disabled_by: str | None = None,
    ) -> None:
        """
        Disable summary records and remove vectors from vector database for segments.
        Unlike delete, this preserves the summary records but marks them as disabled.

        Args:
            dataset: Dataset containing the segments
            segment_ids: List of segment IDs to disable summaries for. If None, disable all.
            disabled_by: User ID who disabled the summaries
        """
        from libs.datetime_utils import naive_utc_now

        query = db.session.query(DocumentSegmentSummary).filter_by(
            dataset_id=dataset.id,
            enabled=True,  # Only disable enabled summaries
        )

        if segment_ids:
            query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids))

        summaries = query.all()

        if not summaries:
            return

        logger.info(
            "Disabling %s summary records for dataset %s, segment_ids: %s",
            len(summaries),
            dataset.id,
            len(segment_ids) if segment_ids else "all",
        )

        # Remove from vector database (but keep records)
        if dataset.indexing_technique == "high_quality":
            summary_node_ids = [s.summary_index_node_id for s in summaries if s.summary_index_node_id]
            if summary_node_ids:
                try:
                    vector = Vector(dataset)
                    vector.delete_by_ids(summary_node_ids)
                except Exception as e:
                    logger.warning("Failed to remove summary vectors: %s", str(e))

        # Disable summary records (don't delete)
        now = naive_utc_now()
        for summary in summaries:
            summary.enabled = False
            summary.disabled_at = now
            summary.disabled_by = disabled_by
            db.session.add(summary)

        db.session.commit()
        logger.info("Disabled %s summary records for dataset %s", len(summaries), dataset.id)

    @staticmethod
    def enable_summaries_for_segments(
        dataset: Dataset,
        segment_ids: list[str] | None = None,
    ) -> None:
        """
        Enable summary records and re-add vectors to vector database for segments.

        Note: This method enables summaries based on chunk status, not summary_index_setting.enable.
        The summary_index_setting.enable flag only controls automatic generation,
        not whether existing summaries can be used.
        Summary.enabled should always be kept in sync with chunk.enabled.

        Args:
            dataset: Dataset containing the segments
            segment_ids: List of segment IDs to enable summaries for. If None, enable all.
        """
        # Only enable summary index for high_quality indexing technique
        if dataset.indexing_technique != "high_quality":
            return

        query = db.session.query(DocumentSegmentSummary).filter_by(
            dataset_id=dataset.id,
            enabled=False,  # Only enable disabled summaries
        )

        if segment_ids:
            query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids))

        summaries = query.all()

        if not summaries:
            return

        logger.info(
            "Enabling %s summary records for dataset %s, segment_ids: %s",
            len(summaries),
            dataset.id,
            len(segment_ids) if segment_ids else "all",
        )

        # Re-vectorize and re-add to vector database
        enabled_count = 0
        for summary in summaries:
            # Get the original segment
            segment = (
                db.session.query(DocumentSegment)
                .filter_by(
                    id=summary.chunk_id,
                    dataset_id=dataset.id,
                )
                .first()
            )

            # Summary.enabled stays in sync with chunk.enabled; only enable a summary if the associated chunk is enabled.
            if not segment or not segment.enabled or segment.status != "completed":
                continue

            if not summary.summary_content:
                continue

            try:
                # Re-vectorize summary
                SummaryIndexService.vectorize_summary(summary, segment, dataset)

                # Enable summary record
                summary.enabled = True
                summary.disabled_at = None
                summary.disabled_by = None
                db.session.add(summary)
                enabled_count += 1
            except Exception:
                logger.exception("Failed to re-vectorize summary %s", summary.id)
                # Keep it disabled if vectorization fails
                continue

        db.session.commit()
        logger.info("Enabled %s summary records for dataset %s", enabled_count, dataset.id)

    @staticmethod
    def delete_summaries_for_segments(
        dataset: Dataset,
        segment_ids: list[str] | None = None,
    ) -> None:
        """
        Delete summary records and vectors for segments (used only for actual deletion scenarios).
        For disable/enable operations, use disable_summaries_for_segments/enable_summaries_for_segments.

        Args:
            dataset: Dataset containing the segments
            segment_ids: List of segment IDs to delete summaries for. If None, delete all.
        """
        query = db.session.query(DocumentSegmentSummary).filter_by(dataset_id=dataset.id)

        if segment_ids:
            query = query.filter(DocumentSegmentSummary.chunk_id.in_(segment_ids))

        summaries = query.all()

        if not summaries:
            return

        # Delete from vector database
        if dataset.indexing_technique == "high_quality":
            summary_node_ids = [s.summary_index_node_id for s in summaries if s.summary_index_node_id]
            if summary_node_ids:
                vector = Vector(dataset)
                vector.delete_by_ids(summary_node_ids)

        # Delete summary records
        for summary in summaries:
            db.session.delete(summary)

        db.session.commit()
        logger.info("Deleted %s summary records for dataset %s", len(summaries), dataset.id)

    @staticmethod
    def update_summary_for_segment(
        segment: DocumentSegment,
        dataset: Dataset,
        summary_content: str,
    ) -> DocumentSegmentSummary | None:
        """
        Update summary for a segment and re-vectorize it.

        Args:
            segment: DocumentSegment to update summary for
            dataset: Dataset containing the segment
            summary_content: New summary content

        Returns:
            Updated DocumentSegmentSummary instance, or None if summary index is not enabled
        """
        # Only update summary index for high_quality indexing technique
        if dataset.indexing_technique != "high_quality":
            return None

        # Check if summary index is enabled
        summary_index_setting = dataset.summary_index_setting
        if not summary_index_setting or not summary_index_setting.get("enable"):
            return None

        # Skip qa_model documents
        if segment.document and segment.document.doc_form == "qa_model":
            return None

        try:
            # Find existing summary record
            summary_record = (
                db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first()
            )

            if summary_record:
                # Update existing summary
                old_summary_node_id = summary_record.summary_index_node_id

                # Update summary content
                summary_record.summary_content = summary_content
                summary_record.status = "generating"
                db.session.add(summary_record)
                db.session.flush()

                # Delete old vector if exists
                if old_summary_node_id:
                    vector = Vector(dataset)
                    vector.delete_by_ids([old_summary_node_id])

                # Re-vectorize summary
                SummaryIndexService.vectorize_summary(summary_record, segment, dataset)

                db.session.commit()
                logger.info("Successfully updated and re-vectorized summary for segment %s", segment.id)
                return summary_record
            else:
                # Create new summary record if one doesn't exist
                summary_record = SummaryIndexService.create_summary_record(
                    segment, dataset, summary_content, status="generating"
                )
                SummaryIndexService.vectorize_summary(summary_record, segment, dataset)
                db.session.commit()
                logger.info("Successfully created and vectorized summary for segment %s", segment.id)
                return summary_record

        except Exception as e:  # bind the exception so it can be recorded on the summary row below
            logger.exception("Failed to update summary for segment %s", segment.id)
            # Update summary record with error status if it exists
            summary_record = (
                db.session.query(DocumentSegmentSummary).filter_by(chunk_id=segment.id, dataset_id=dataset.id).first()
            )
            if summary_record:
                summary_record.status = "error"
                summary_record.error = str(e)
                db.session.add(summary_record)
                db.session.commit()
            raise

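End to end, the deleted service composes as: generate summary text, upsert the record,
vectorize, commit. A minimal sketch of driving it for one document, under the same
assumptions as the code above (a high_quality dataset whose summary_index_setting is
enabled; this mirrors what generate_summary_index_task would do rather than adding any
new API):

    # Hypothetical driver for a single document.
    records = SummaryIndexService.generate_summaries_for_document(
        dataset=dataset,
        document=document,
        summary_index_setting=dataset.summary_index_setting,
    )
    print(f"{len(records)} summaries generated for document {document.id}")
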
@ -118,19 +118,6 @@ def add_document_to_index_task(dataset_document_id: str):
            )
            session.commit()

            # Enable summary indexes for all segments in this document
            from services.summary_index_service import SummaryIndexService

            segment_ids_list = [segment.id for segment in segments]
            if segment_ids_list:
                try:
                    SummaryIndexService.enable_summaries_for_segments(
                        dataset=dataset,
                        segment_ids=segment_ids_list,
                    )
                except Exception as e:
                    logger.warning("Failed to enable summaries for document %s: %s", dataset_document.id, str(e))

            end_at = time.perf_counter()
            logger.info(
                click.style(f"Document added to index: {dataset_document.id} latency: {end_at - start_at}", fg="green")

@ -47,7 +47,6 @@ def delete_segment_from_index_task(
        doc_form = dataset_document.doc_form

        # Proceed with index cleanup using the index_node_ids directly
        # For actual deletion, we should delete summaries (not just disable them)
        index_processor = IndexProcessorFactory(doc_form).init_index_processor()
        index_processor.clean(
            dataset,
@ -55,7 +54,6 @@ def delete_segment_from_index_task(
            with_keywords=True,
            delete_child_chunks=True,
            precomputed_child_node_ids=child_node_ids,
            delete_summaries=True,  # Actually delete summaries when segment is deleted
        )
        if dataset.is_multimodal:
            # delete segment attachment binding

@ -60,18 +60,6 @@ def disable_segment_from_index_task(segment_id: str):
            index_processor = IndexProcessorFactory(index_type).init_index_processor()
            index_processor.clean(dataset, [segment.index_node_id])

            # Disable summary index for this segment
            from services.summary_index_service import SummaryIndexService

            try:
                SummaryIndexService.disable_summaries_for_segments(
                    dataset=dataset,
                    segment_ids=[segment.id],
                    disabled_by=segment.disabled_by,
                )
            except Exception as e:
                logger.warning("Failed to disable summary for segment %s: %s", segment.id, str(e))

            end_at = time.perf_counter()
            logger.info(
                click.style(

@ -68,21 +68,6 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen
|
||||
index_node_ids.extend(attachment_ids)
|
||||
index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=False)
|
||||
|
||||
# Disable summary indexes for these segments
|
||||
from services.summary_index_service import SummaryIndexService
|
||||
|
||||
segment_ids_list = [segment.id for segment in segments]
|
||||
try:
|
||||
# Get disabled_by from first segment (they should all have the same disabled_by)
|
||||
disabled_by = segments[0].disabled_by if segments else None
|
||||
SummaryIndexService.disable_summaries_for_segments(
|
||||
dataset=dataset,
|
||||
segment_ids=segment_ids_list,
|
||||
disabled_by=disabled_by,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning("Failed to disable summaries for segments: %s", str(e))
|
||||
|
||||
end_at = time.perf_counter()
|
||||
logger.info(click.style(f"Segments removed from index latency: {end_at - start_at}", fg="green"))
|
||||
except Exception:
|
||||
|
||||
@ -14,7 +14,6 @@ from enums.cloud_plan import CloudPlan
|
||||
from libs.datetime_utils import naive_utc_now
|
||||
from models.dataset import Dataset, Document
|
||||
from services.feature_service import FeatureService
|
||||
from tasks.generate_summary_index_task import generate_summary_index_task
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -100,69 +99,6 @@ def _document_indexing(dataset_id: str, document_ids: Sequence[str]):
|
||||
indexing_runner.run(documents)
|
||||
end_at = time.perf_counter()
|
||||
logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
|
||||
|
||||
# Trigger summary index generation for completed documents if enabled
|
||||
# Only generate for high_quality indexing technique and when summary_index_setting is enabled
|
||||
# Re-query dataset to get latest summary_index_setting (in case it was updated)
|
||||
dataset = session.query(Dataset).where(Dataset.id == dataset_id).first()
|
||||
if not dataset:
|
||||
logger.warning("Dataset %s not found after indexing", dataset_id)
|
||||
return
|
||||
|
||||
if dataset.indexing_technique == "high_quality":
|
||||
summary_index_setting = dataset.summary_index_setting
|
||||
if summary_index_setting and summary_index_setting.get("enable"):
|
||||
# Check each document's indexing status and trigger summary generation if completed
|
||||
for document_id in document_ids:
|
||||
# Re-query document to get latest status (IndexingRunner may have updated it)
|
||||
document = (
|
||||
session.query(Document)
|
||||
.where(Document.id == document_id, Document.dataset_id == dataset_id)
|
||||
.first()
|
||||
)
|
||||
if document:
|
||||
logger.info(
|
||||
"Checking document %s for summary generation: status=%s, doc_form=%s",
|
||||
document_id,
|
||||
document.indexing_status,
|
||||
document.doc_form,
|
||||
)
|
||||
if document.indexing_status == "completed" and document.doc_form != "qa_model":
|
||||
try:
|
||||
generate_summary_index_task.delay(dataset.id, document_id, None)
|
||||
logger.info(
|
||||
"Queued summary index generation task for document %s in dataset %s "
|
||||
"after indexing completed",
|
||||
document_id,
|
||||
dataset.id,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Failed to queue summary index generation task for document %s",
|
||||
document_id,
|
||||
)
|
||||
# Don't fail the entire indexing process if summary task queuing fails
|
||||
else:
|
||||
logger.info(
|
||||
"Skipping summary generation for document %s: status=%s, doc_form=%s",
|
||||
document_id,
|
||||
document.indexing_status,
|
||||
document.doc_form,
|
||||
)
|
||||
else:
|
||||
logger.warning("Document %s not found after indexing", document_id)
|
||||
else:
|
||||
logger.info(
|
||||
"Summary index generation skipped for dataset %s: summary_index_setting.enable=%s",
|
||||
dataset.id,
|
||||
summary_index_setting.get("enable") if summary_index_setting else None,
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
"Summary index generation skipped for dataset %s: indexing_technique=%s (not 'high_quality')",
|
||||
dataset.id,
|
||||
dataset.indexing_technique,
|
||||
)
|
||||
except DocumentIsPausedError as ex:
|
||||
logger.info(click.style(str(ex), fg="yellow"))
|
||||
except Exception:
|
||||
|
||||
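
The block above spreads one gate across several nested branches: summaries are generated only for high_quality datasets with the summary index enabled, and only for completed, non-QA documents. A condensed sketch of the same gate as a single predicate; should_generate_summary is a hypothetical name, but the attributes are exactly the ones read in the diff:

def should_generate_summary(dataset, document) -> bool:
    # Hypothetical condensation of the checks performed above.
    setting = dataset.summary_index_setting
    return (
        dataset.indexing_technique == "high_quality"
        and bool(setting and setting.get("enable"))
        and document.indexing_status == "completed"
        and document.doc_form != "qa_model"
    )
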
@@ -106,17 +106,6 @@ def enable_segment_to_index_task(segment_id: str):
        # save vector index
        index_processor.load(dataset, [document], multimodal_documents=multimodel_documents)

        # Enable summary index for this segment
        from services.summary_index_service import SummaryIndexService

        try:
            SummaryIndexService.enable_summaries_for_segments(
                dataset=dataset,
                segment_ids=[segment.id],
            )
        except Exception as e:
            logger.warning("Failed to enable summary for segment %s: %s", segment.id, str(e))

        end_at = time.perf_counter()
        logger.info(click.style(f"Segment enabled to index: {segment.id} latency: {end_at - start_at}", fg="green"))
    except Exception as e:

@@ -106,18 +106,6 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i
        # save vector index
        index_processor.load(dataset, documents, multimodal_documents=multimodal_documents)

        # Enable summary indexes for these segments
        from services.summary_index_service import SummaryIndexService

        segment_ids_list = [segment.id for segment in segments]
        try:
            SummaryIndexService.enable_summaries_for_segments(
                dataset=dataset,
                segment_ids=segment_ids_list,
            )
        except Exception as e:
            logger.warning("Failed to enable summaries for segments: %s", str(e))

        end_at = time.perf_counter()
        logger.info(click.style(f"Segments enabled to index latency: {end_at - start_at}", fg="green"))
    except Exception as e:
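
The enable tasks above and the disable tasks before them repeat one pattern: summary maintenance is best-effort, wrapped in try/except and logged as a warning so the surrounding index task still succeeds. A sketch of that pattern factored into one place, assuming only the SummaryIndexService API shown in the diff; try_enable_summaries is a hypothetical name:

import logging

from services.summary_index_service import SummaryIndexService

logger = logging.getLogger(__name__)


def try_enable_summaries(dataset, segments) -> None:
    # Hypothetical wrapper: never let summary upkeep fail the caller.
    segment_ids = [segment.id for segment in segments]
    if not segment_ids:
        return
    try:
        SummaryIndexService.enable_summaries_for_segments(dataset=dataset, segment_ids=segment_ids)
    except Exception as e:
        logger.warning("Failed to enable summaries for segments: %s", str(e))
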
@@ -1,112 +0,0 @@
"""Async task for generating summary indexes."""

import logging
import time

import click
from celery import shared_task

from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument
from services.summary_index_service import SummaryIndexService

logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def generate_summary_index_task(dataset_id: str, document_id: str, segment_ids: list[str] | None = None):
    """
    Async generate summary index for document segments.

    Args:
        dataset_id: Dataset ID
        document_id: Document ID
        segment_ids: Optional list of specific segment IDs to process. If None, process all segments.

    Usage:
        generate_summary_index_task.delay(dataset_id, document_id)
        generate_summary_index_task.delay(dataset_id, document_id, segment_ids)
    """
    logger.info(
        click.style(
            f"Start generating summary index for document {document_id} in dataset {dataset_id}",
            fg="green",
        )
    )
    start_at = time.perf_counter()

    try:
        dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
        if not dataset:
            logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red"))
            db.session.close()
            return

        document = db.session.query(DatasetDocument).where(DatasetDocument.id == document_id).first()
        if not document:
            logger.error(click.style(f"Document not found: {document_id}", fg="red"))
            db.session.close()
            return

        # Only generate summary index for high_quality indexing technique
        if dataset.indexing_technique != "high_quality":
            logger.info(
                click.style(
                    f"Skipping summary generation for dataset {dataset_id}: "
                    f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'",
                    fg="cyan",
                )
            )
            db.session.close()
            return

        # Check if summary index is enabled
        summary_index_setting = dataset.summary_index_setting
        if not summary_index_setting or not summary_index_setting.get("enable"):
            logger.info(
                click.style(
                    f"Summary index is disabled for dataset {dataset_id}",
                    fg="cyan",
                )
            )
            db.session.close()
            return

        # Determine if only parent chunks should be processed
        only_parent_chunks = dataset.chunk_structure == "parent_child_index"

        # Generate summaries
        summary_records = SummaryIndexService.generate_summaries_for_document(
            dataset=dataset,
            document=document,
            summary_index_setting=summary_index_setting,
            segment_ids=segment_ids,
            only_parent_chunks=only_parent_chunks,
        )

        end_at = time.perf_counter()
        logger.info(
            click.style(
                f"Summary index generation completed for document {document_id}: "
                f"{len(summary_records)} summaries generated, latency: {end_at - start_at}",
                fg="green",
            )
        )

    except Exception as e:
        logger.exception("Failed to generate summary index for document %s", document_id)
        # Update document segments with error status if needed
        if segment_ids:
            db.session.query(DocumentSegment).filter(
                DocumentSegment.id.in_(segment_ids),
                DocumentSegment.dataset_id == dataset_id,
            ).update(
                {
                    DocumentSegment.error: f"Summary generation failed: {str(e)}",
                },
                synchronize_session=False,
            )
            db.session.commit()
    finally:
        db.session.close()
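
Per its own docstring, the removed task was enqueued with .delay on the "dataset" Celery queue, either for a whole document or for selected segments. A usage sketch with placeholder ids:

# Queue summary generation for every segment of a document.
generate_summary_index_task.delay(dataset_id, document_id)

# Or restrict generation to specific segments.
generate_summary_index_task.delay(dataset_id, document_id, ["segment-id-1", "segment-id-2"])
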
@@ -1,221 +0,0 @@
"""Task for regenerating summary indexes when dataset settings change."""

import logging
import time

import click
from celery import shared_task
from sqlalchemy import select

from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary
from models.dataset import Document as DatasetDocument
from services.summary_index_service import SummaryIndexService

logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def regenerate_summary_index_task(
    dataset_id: str,
    regenerate_reason: str = "summary_model_changed",
    regenerate_vectors_only: bool = False,
):
    """
    Regenerate summary indexes for all documents in a dataset.

    This task is triggered when:
    1. summary_index_setting model changes (regenerate_reason="summary_model_changed")
       - Regenerates summary content and vectors for all existing summaries
    2. embedding_model changes (regenerate_reason="embedding_model_changed")
       - Only regenerates vectors for existing summaries (keeps summary content)

    Args:
        dataset_id: Dataset ID
        regenerate_reason: Reason for regeneration ("summary_model_changed" or "embedding_model_changed")
        regenerate_vectors_only: If True, only regenerate vectors without regenerating summary content
    """
    logger.info(
        click.style(
            f"Start regenerate summary index for dataset {dataset_id}, reason: {regenerate_reason}",
            fg="green",
        )
    )
    start_at = time.perf_counter()

    try:
        dataset = db.session.query(Dataset).filter_by(id=dataset_id).first()
        if not dataset:
            logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red"))
            db.session.close()
            return

        # Only regenerate summary index for high_quality indexing technique
        if dataset.indexing_technique != "high_quality":
            logger.info(
                click.style(
                    f"Skipping summary regeneration for dataset {dataset_id}: "
                    f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'",
                    fg="cyan",
                )
            )
            db.session.close()
            return

        # Check if summary index is enabled
        summary_index_setting = dataset.summary_index_setting
        if not summary_index_setting or not summary_index_setting.get("enable"):
            logger.info(
                click.style(
                    f"Summary index is disabled for dataset {dataset_id}",
                    fg="cyan",
                )
            )
            db.session.close()
            return

        # Get all documents with completed indexing status
        dataset_documents = db.session.scalars(
            select(DatasetDocument).where(
                DatasetDocument.dataset_id == dataset_id,
                DatasetDocument.indexing_status == "completed",
                DatasetDocument.enabled == True,
                DatasetDocument.archived == False,
            )
        ).all()

        if not dataset_documents:
            logger.info(
                click.style(
                    f"No documents found for summary regeneration in dataset {dataset_id}",
                    fg="cyan",
                )
            )
            db.session.close()
            return

        logger.info(
            "Found %s documents for summary regeneration in dataset %s",
            len(dataset_documents),
            dataset_id,
        )

        total_segments_processed = 0
        total_segments_failed = 0

        for dataset_document in dataset_documents:
            # Skip qa_model documents
            if dataset_document.doc_form == "qa_model":
                continue

            try:
                # Get all segments with existing summaries
                segments = (
                    db.session.query(DocumentSegment)
                    .join(
                        DocumentSegmentSummary,
                        DocumentSegment.id == DocumentSegmentSummary.chunk_id,
                    )
                    .where(
                        DocumentSegment.document_id == dataset_document.id,
                        DocumentSegment.dataset_id == dataset_id,
                        DocumentSegment.status == "completed",
                        DocumentSegment.enabled == True,
                        DocumentSegmentSummary.dataset_id == dataset_id,
                    )
                    .order_by(DocumentSegment.position.asc())
                    .all()
                )

                if not segments:
                    continue

                logger.info(
                    "Regenerating summaries for %s segments in document %s",
                    len(segments),
                    dataset_document.id,
                )

                for segment in segments:
                    try:
                        # Get existing summary record
                        summary_record = (
                            db.session.query(DocumentSegmentSummary)
                            .filter_by(
                                chunk_id=segment.id,
                                dataset_id=dataset_id,
                            )
                            .first()
                        )

                        if not summary_record:
                            logger.warning("Summary record not found for segment %s, skipping", segment.id)
                            continue

                        if regenerate_vectors_only:
                            # Only regenerate vectors (for embedding_model change)
                            # Delete old vector
                            if summary_record.summary_index_node_id:
                                try:
                                    from core.rag.datasource.vdb.vector_factory import Vector

                                    vector = Vector(dataset)
                                    vector.delete_by_ids([summary_record.summary_index_node_id])
                                except Exception as e:
                                    logger.warning(
                                        "Failed to delete old summary vector for segment %s: %s",
                                        segment.id,
                                        str(e),
                                    )

                            # Re-vectorize with new embedding model
                            SummaryIndexService.vectorize_summary(summary_record, segment, dataset)
                            db.session.commit()
                        else:
                            # Regenerate both summary content and vectors (for summary_model change)
                            SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting)
                            db.session.commit()

                        total_segments_processed += 1

                    except Exception as e:
                        logger.error(
                            "Failed to regenerate summary for segment %s: %s",
                            segment.id,
                            str(e),
                            exc_info=True,
                        )
                        total_segments_failed += 1
                        # Update summary record with error status
                        if summary_record:
                            summary_record.status = "error"
                            summary_record.error = f"Regeneration failed: {str(e)}"
                            db.session.add(summary_record)
                            db.session.commit()
                        continue

            except Exception as e:
                logger.error(
                    "Failed to process document %s for summary regeneration: %s",
                    dataset_document.id,
                    str(e),
                    exc_info=True,
                )
                continue

        end_at = time.perf_counter()
        logger.info(
            click.style(
                f"Summary index regeneration completed for dataset {dataset_id}: "
                f"{total_segments_processed} segments processed successfully, "
                f"{total_segments_failed} segments failed, "
                f"total documents: {len(dataset_documents)}, "
                f"latency: {end_at - start_at:.2f}s",
                fg="green",
            )
        )

    except Exception:
        logger.exception("Regenerate summary index failed for dataset %s", dataset_id)
    finally:
        db.session.close()
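
The removed regeneration task selected its mode through the arguments described in its docstring: a summary-model change rebuilt both text and vectors, while an embedding-model change re-embedded the existing text only. A usage sketch with a placeholder dataset id:

# Summary model changed: regenerate summary content and vectors.
regenerate_summary_index_task.delay(dataset_id, regenerate_reason="summary_model_changed")

# Embedding model changed: keep the text, rebuild only the vectors.
regenerate_summary_index_task.delay(
    dataset_id,
    regenerate_reason="embedding_model_changed",
    regenerate_vectors_only=True,
)
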
@@ -46,21 +46,6 @@ def remove_document_from_index_task(document_id: str):
        index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()

        segments = session.scalars(select(DocumentSegment).where(DocumentSegment.document_id == document.id)).all()

        # Disable summary indexes for all segments in this document
        from services.summary_index_service import SummaryIndexService

        segment_ids_list = [segment.id for segment in segments]
        if segment_ids_list:
            try:
                SummaryIndexService.disable_summaries_for_segments(
                    dataset=dataset,
                    segment_ids=segment_ids_list,
                    disabled_by=document.disabled_by,
                )
            except Exception as e:
                logger.warning("Failed to disable summaries for document %s: %s", document.id, str(e))

        index_node_ids = [segment.index_node_id for segment in segments]
        if index_node_ids:
            try:
@@ -1,6 +1,12 @@
import { render, screen } from '@testing-library/react'
import * as React from 'react'
import HistoryPanel from './history-panel'

const mockDocLink = vi.fn(() => 'doc-link')
vi.mock('@/context/i18n', () => ({
  useDocLink: () => mockDocLink,
}))

vi.mock('@/app/components/app/configuration/base/operation-btn', () => ({
  default: ({ onClick }: { onClick: () => void }) => (
    <button type="button" data-testid="edit-button" onClick={onClick}>
@@ -18,10 +24,12 @@ describe('HistoryPanel', () => {
    vi.clearAllMocks()
  })

  it('should render warning content when showWarning is true', () => {
  it('should render warning content and link when showWarning is true', () => {
    render(<HistoryPanel showWarning onShowEditModal={vi.fn()} />)

    expect(screen.getByText('appDebug.feature.conversationHistory.tip')).toBeInTheDocument()
    const link = screen.getByText('appDebug.feature.conversationHistory.learnMore')
    expect(link).toHaveAttribute('href', 'doc-link')
  })

  it('should hide warning when showWarning is false', () => {

@@ -271,9 +271,9 @@ const ConfigVar: FC<IConfigVarProps> = ({ promptVariables, readonly, onPromptVar
        </div>
      )}
      {hasVar && (
        <div className={cn('mt-1 grid px-3 pb-3')}>
        <div className="mt-1 px-3 pb-3">
          <ReactSortable
            className={cn('grid-col-1 grid space-y-1', readonly && 'grid-cols-2 gap-1 space-y-0')}
            className="space-y-1"
            list={promptVariablesWithIds}
            setList={(list) => { onPromptVariablesChange?.(list.map(item => item.variable)) }}
            handle=".handle"

@@ -39,7 +39,7 @@ const VarItem: FC<ItemProps> = ({
  const [isDeleting, setIsDeleting] = useState(false)

  return (
    <div className={cn('group relative mb-1 flex h-[34px] w-full items-center rounded-lg border-[0.5px] border-components-panel-border-subtle bg-components-panel-on-panel-item-bg pl-2.5 pr-3 shadow-xs last-of-type:mb-0 hover:bg-components-panel-on-panel-item-bg-hover hover:shadow-sm', isDeleting && 'border-state-destructive-border hover:bg-state-destructive-hover', readonly && 'cursor-not-allowed', className)}>
    <div className={cn('group relative mb-1 flex h-[34px] w-full items-center rounded-lg border-[0.5px] border-components-panel-border-subtle bg-components-panel-on-panel-item-bg pl-2.5 pr-3 shadow-xs last-of-type:mb-0 hover:bg-components-panel-on-panel-item-bg-hover hover:shadow-sm', isDeleting && 'border-state-destructive-border hover:bg-state-destructive-hover', readonly && 'cursor-not-allowed opacity-30', className)}>
      <VarIcon className={cn('mr-1 h-4 w-4 shrink-0 text-text-accent', canDrag && 'group-hover:opacity-0')} />
      {canDrag && (
        <RiDraggable className="absolute left-3 top-3 hidden h-3 w-3 cursor-pointer text-text-tertiary group-hover:block" />
@@ -1,6 +1,5 @@
'use client'
import type { FC } from 'react'
import { noop } from 'es-toolkit/function'
import { produce } from 'immer'
import * as React from 'react'
import { useCallback } from 'react'
@@ -11,17 +10,14 @@ import { useFeatures, useFeaturesStore } from '@/app/components/base/features/ho
import { Vision } from '@/app/components/base/icons/src/vender/features'
import Switch from '@/app/components/base/switch'
import Tooltip from '@/app/components/base/tooltip'
import OptionCard from '@/app/components/workflow/nodes/_base/components/option-card'
import { SupportUploadFileTypes } from '@/app/components/workflow/types'
// import OptionCard from '@/app/components/workflow/nodes/_base/components/option-card'
import ConfigContext from '@/context/debug-configuration'
import { Resolution } from '@/types/app'
import { cn } from '@/utils/classnames'
import ParamConfig from './param-config'

const ConfigVision: FC = () => {
  const { t } = useTranslation()
  const { isShowVisionConfig, isAllowVideoUpload, readonly } = useContext(ConfigContext)
  const { isShowVisionConfig, isAllowVideoUpload } = useContext(ConfigContext)
  const file = useFeatures(s => s.features.file)
  const featuresStore = useFeaturesStore()

@@ -58,7 +54,7 @@ const ConfigVision: FC = () => {
    setFeatures(newFeatures)
  }, [featuresStore, isAllowVideoUpload])

  if (!isShowVisionConfig || (readonly && !isImageEnabled))
  if (!isShowVisionConfig)
    return null

  return (
@@ -79,55 +75,37 @@ const ConfigVision: FC = () => {
        />
      </div>
      <div className="flex shrink-0 items-center">
        {readonly
          ? (
            <>
              <div className="mr-2 flex items-center gap-0.5">
                <div className="system-xs-medium-uppercase text-text-tertiary">{t('vision.visionSettings.resolution', { ns: 'appDebug' })}</div>
                <Tooltip
                  popupContent={(
                    <div className="w-[180px]">
                      {t('vision.visionSettings.resolutionTooltip', { ns: 'appDebug' }).split('\n').map(item => (
                        <div key={item}>{item}</div>
                      ))}
                    </div>
                  )}
                />
              </div>
              <div className="flex items-center gap-1">
                <OptionCard
                  title={t('vision.visionSettings.high', { ns: 'appDebug' })}
                  selected={file?.image?.detail === Resolution.high}
                  onSelect={noop}
                  className={cn(
                    'cursor-not-allowed rounded-lg px-3 hover:shadow-none',
                    file?.image?.detail !== Resolution.high && 'hover:border-components-option-card-option-border',
                  )}
                />
                <OptionCard
                  title={t('vision.visionSettings.low', { ns: 'appDebug' })}
                  selected={file?.image?.detail === Resolution.low}
                  onSelect={noop}
                  className={cn(
                    'cursor-not-allowed rounded-lg px-3 hover:shadow-none',
                    file?.image?.detail !== Resolution.low && 'hover:border-components-option-card-option-border',
                  )}
                />
              </div>
            </>
          )
          : (
            <>
              <ParamConfig />
              <div className="ml-1 mr-3 h-3.5 w-[1px] bg-divider-regular"></div>
              <Switch
                defaultValue={isImageEnabled}
                onChange={handleChange}
                size="md"
              />
            </>
          )}

        {/* <div className='mr-2 flex items-center gap-0.5'>
          <div className='text-text-tertiary system-xs-medium-uppercase'>{t('appDebug.vision.visionSettings.resolution')}</div>
          <Tooltip
            popupContent={
              <div className='w-[180px]' >
                {t('appDebug.vision.visionSettings.resolutionTooltip').split('\n').map(item => (
                  <div key={item}>{item}</div>
                ))}
              </div>
            }
          />
        </div> */}
        {/* <div className='flex items-center gap-1'>
          <OptionCard
            title={t('appDebug.vision.visionSettings.high')}
            selected={file?.image?.detail === Resolution.high}
            onSelect={() => handleChange(Resolution.high)}
          />
          <OptionCard
            title={t('appDebug.vision.visionSettings.low')}
            selected={file?.image?.detail === Resolution.low}
            onSelect={() => handleChange(Resolution.low)}
          />
        </div> */}
        <ParamConfig />
        <div className="ml-1 mr-3 h-3.5 w-[1px] bg-divider-regular"></div>
        <Switch
          defaultValue={isImageEnabled}
          onChange={handleChange}
          size="md"
        />
      </div>
    </div>
  )
@@ -40,7 +40,7 @@ type AgentToolWithMoreInfo = AgentTool & { icon: any, collection?: Collection }
const AgentTools: FC = () => {
  const { t } = useTranslation()
  const [isShowChooseTool, setIsShowChooseTool] = useState(false)
  const { readonly, modelConfig, setModelConfig } = useContext(ConfigContext)
  const { modelConfig, setModelConfig } = useContext(ConfigContext)
  const { data: buildInTools } = useAllBuiltInTools()
  const { data: customTools } = useAllCustomTools()
  const { data: workflowTools } = useAllWorkflowTools()
@@ -168,10 +168,10 @@ const AgentTools: FC = () => {
          {tools.filter(item => !!item.enabled).length}
          /
          {tools.length}


          {t('agent.tools.enabled', { ns: 'appDebug' })}
        </div>
        {tools.length < MAX_TOOLS_NUM && !readonly && (
        {tools.length < MAX_TOOLS_NUM && (
          <>
            <div className="ml-3 mr-1 h-3.5 w-px bg-divider-regular"></div>
            <ToolPicker
@@ -189,7 +189,7 @@ const AgentTools: FC = () => {
        </div>
      )}
    >
      <div className={cn('grid grid-cols-1 items-center gap-1 2xl:grid-cols-2', readonly && 'cursor-not-allowed grid-cols-2')}>
      <div className="grid grid-cols-1 flex-wrap items-center justify-between gap-1 2xl:grid-cols-2">
        {tools.map((item: AgentTool & { icon: any, collection?: Collection }, index) => (
          <div
            key={index}
@@ -214,7 +214,7 @@ const AgentTools: FC = () => {
          >
            <span className="system-xs-medium pr-1.5 text-text-secondary">{getProviderShowName(item)}</span>
            <span className="text-text-tertiary">{item.tool_label}</span>
            {!item.isDeleted && !readonly && (
            {!item.isDeleted && (
              <Tooltip
                popupContent={(
                  <div className="w-[180px]">
@@ -259,7 +259,7 @@ const AgentTools: FC = () => {
                </div>
              </div>
            )}
            {!item.isDeleted && !readonly && (
            {!item.isDeleted && (
              <div className="mr-2 hidden items-center gap-1 group-hover:flex">
                {!item.notAuthor && (
                  <Tooltip
@@ -298,7 +298,7 @@ const AgentTools: FC = () => {
                {!item.notAuthor && (
                  <Switch
                    defaultValue={item.isDeleted ? false : item.enabled}
                    disabled={item.isDeleted || readonly}
                    disabled={item.isDeleted}
                    size="md"
                    onChange={(enabled) => {
                      const newModelConfig = produce(modelConfig, (draft) => {
@@ -312,7 +312,6 @@ const AgentTools: FC = () => {
                {item.notAuthor && (
                  <Button
                    variant="secondary"
                    disabled={readonly}
                    size="small"
                    onClick={() => {
                      setCurrentTool(item)
@@ -17,7 +17,7 @@ const ConfigAudio: FC = () => {
  const { t } = useTranslation()
  const file = useFeatures(s => s.features.file)
  const featuresStore = useFeaturesStore()
  const { isShowAudioConfig, readonly } = useContext(ConfigContext)
  const { isShowAudioConfig } = useContext(ConfigContext)

  const isAudioEnabled = file?.allowed_file_types?.includes(SupportUploadFileTypes.audio) ?? false

@@ -45,7 +45,7 @@ const ConfigAudio: FC = () => {
    setFeatures(newFeatures)
  }, [featuresStore])

  if (!isShowAudioConfig || (readonly && !isAudioEnabled))
  if (!isShowAudioConfig)
    return null

  return (
@@ -65,16 +65,14 @@ const ConfigAudio: FC = () => {
          )}
        />
      </div>
      {!readonly && (
        <div className="flex shrink-0 items-center">
          <div className="ml-1 mr-3 h-3.5 w-[1px] bg-divider-subtle"></div>
          <Switch
            defaultValue={isAudioEnabled}
            onChange={handleChange}
            size="md"
          />
        </div>
      )}
      <div className="flex shrink-0 items-center">
        <div className="ml-1 mr-3 h-3.5 w-[1px] bg-divider-subtle"></div>
        <Switch
          defaultValue={isAudioEnabled}
          onChange={handleChange}
          size="md"
        />
      </div>
    </div>
  )
}

@@ -17,7 +17,7 @@ const ConfigDocument: FC = () => {
  const { t } = useTranslation()
  const file = useFeatures(s => s.features.file)
  const featuresStore = useFeaturesStore()
  const { isShowDocumentConfig, readonly } = useContext(ConfigContext)
  const { isShowDocumentConfig } = useContext(ConfigContext)

  const isDocumentEnabled = file?.allowed_file_types?.includes(SupportUploadFileTypes.document) ?? false

@@ -45,7 +45,7 @@ const ConfigDocument: FC = () => {
    setFeatures(newFeatures)
  }, [featuresStore])

  if (!isShowDocumentConfig || (readonly && !isDocumentEnabled))
  if (!isShowDocumentConfig)
    return null

  return (
@@ -65,16 +65,14 @@ const ConfigDocument: FC = () => {
          )}
        />
      </div>
      {!readonly && (
        <div className="flex shrink-0 items-center">
          <div className="ml-1 mr-3 h-3.5 w-[1px] bg-divider-subtle"></div>
          <Switch
            defaultValue={isDocumentEnabled}
            onChange={handleChange}
            size="md"
          />
        </div>
      )}
      <div className="flex shrink-0 items-center">
        <div className="ml-1 mr-3 h-3.5 w-[1px] bg-divider-subtle"></div>
        <Switch
          defaultValue={isDocumentEnabled}
          onChange={handleChange}
          size="md"
        />
      </div>
    </div>
  )
}
@@ -18,7 +18,6 @@ import ConfigDocument from './config-document'

const Config: FC = () => {
  const {
    readonly,
    mode,
    isAdvancedMode,
    modelModeType,
@@ -28,7 +27,6 @@ const Config: FC = () => {
    modelConfig,
    setModelConfig,
    setPrevPromptConfig,
    dataSets,
  } = useContext(ConfigContext)
  const isChatApp = [AppModeEnum.ADVANCED_CHAT, AppModeEnum.AGENT_CHAT, AppModeEnum.CHAT].includes(mode)
  const formattingChangedDispatcher = useFormattingChangedDispatcher()
@@ -67,27 +65,19 @@ const Config: FC = () => {
        promptTemplate={promptTemplate}
        promptVariables={promptVariables}
        onChange={handlePromptChange}
        readonly={readonly}
      />

      {/* Variables */}
      {!(readonly && promptVariables.length === 0) && (
        <ConfigVar
          promptVariables={promptVariables}
          onPromptVariablesChange={handlePromptVariablesNameChange}
          readonly={readonly}
        />
      )}
      <ConfigVar
        promptVariables={promptVariables}
        onPromptVariablesChange={handlePromptVariablesNameChange}
      />

      {/* Dataset */}
      {!(readonly && dataSets.length === 0) && (
        <DatasetConfig
          readonly={readonly}
          hideMetadataFilter={readonly}
        />
      )}
      <DatasetConfig />

      {/* Tools */}
      {isAgent && !(readonly && modelConfig.agentConfig.tools.length === 0) && (
      {isAgent && (
        <AgentTools />
      )}

@@ -98,7 +88,7 @@ const Config: FC = () => {
      <ConfigAudio />

      {/* Chat History */}
      {!readonly && isAdvancedMode && isChatApp && modelModeType === ModelModeType.completion && (
      {isAdvancedMode && isChatApp && modelModeType === ModelModeType.completion && (
        <HistoryPanel
          showWarning={!hasSetBlockStatus.history}
          onShowEditModal={showHistoryModal}
@@ -30,7 +30,6 @@ const Item: FC<ItemProps> = ({
  config,
  onSave,
  onRemove,
  readonly = false,
  editable = true,
}) => {
  const media = useBreakpoints()
@@ -57,7 +56,6 @@ const Item: FC<ItemProps> = ({
    <div className={cn(
      'group relative mb-1 flex h-10 w-full cursor-pointer items-center justify-between rounded-lg border-[0.5px] border-components-panel-border-subtle bg-components-panel-on-panel-item-bg px-2 last-of-type:mb-0 hover:bg-components-panel-on-panel-item-bg-hover',
      isDeleting && 'border-state-destructive-border hover:bg-state-destructive-hover',
      readonly && 'cursor-not-allowed',
    )}
    >
      <div className="flex w-0 grow items-center space-x-1.5">
@@ -72,7 +70,7 @@ const Item: FC<ItemProps> = ({
      </div>
      <div className="ml-2 hidden shrink-0 items-center space-x-1 group-hover:flex">
        {
          editable && !readonly && (
          editable && (
            <ActionButton
              onClick={(e) => {
                e.stopPropagation()
@@ -83,18 +81,14 @@ const Item: FC<ItemProps> = ({
            </ActionButton>
          )
        }
        {
          !readonly && (
            <ActionButton
              onClick={() => onRemove(config.id)}
              state={isDeleting ? ActionButtonState.Destructive : ActionButtonState.Default}
              onMouseEnter={() => setIsDeleting(true)}
              onMouseLeave={() => setIsDeleting(false)}
            >
              <RiDeleteBinLine className={cn('h-4 w-4 shrink-0 text-text-tertiary', isDeleting && 'text-text-destructive')} />
            </ActionButton>
          )
        }
        <ActionButton
          onClick={() => onRemove(config.id)}
          state={isDeleting ? ActionButtonState.Destructive : ActionButtonState.Default}
          onMouseEnter={() => setIsDeleting(true)}
          onMouseLeave={() => setIsDeleting(false)}
        >
          <RiDeleteBinLine className={cn('h-4 w-4 shrink-0 text-text-tertiary', isDeleting && 'text-text-destructive')} />
        </ActionButton>
      </div>
      {
        !!config.indexing_technique && (
@@ -113,13 +107,11 @@ const Item: FC<ItemProps> = ({
        )
      }
      <Drawer isOpen={showSettingsModal} onClose={() => setShowSettingsModal(false)} footer={null} mask={isMobile} panelClassName="mt-16 mx-2 sm:mr-2 mb-3 !p-0 !max-w-[640px] rounded-xl">
        {showSettingsModal && (
          <SettingsModal
            currentDataset={config}
            onCancel={() => setShowSettingsModal(false)}
            onSave={handleSave}
          />
        )}
        <SettingsModal
          currentDataset={config}
          onCancel={() => setShowSettingsModal(false)}
          onSave={handleSave}
        />
      </Drawer>
    </div>
  )
@@ -30,7 +30,6 @@ import {
import { useSelector as useAppContextSelector } from '@/context/app-context'
import ConfigContext from '@/context/debug-configuration'
import { AppModeEnum } from '@/types/app'
import { cn } from '@/utils/classnames'
import { hasEditPermissionForDataset } from '@/utils/permission'
import FeaturePanel from '../base/feature-panel'
import OperationBtn from '../base/operation-btn'
@@ -39,11 +38,7 @@ import CardItem from './card-item'
import ContextVar from './context-var'
import ParamsConfig from './params-config'

type Props = {
  readonly?: boolean
  hideMetadataFilter?: boolean
}
const DatasetConfig: FC<Props> = ({ readonly, hideMetadataFilter }) => {
const DatasetConfig: FC = () => {
  const { t } = useTranslation()
  const userProfile = useAppContextSelector(s => s.userProfile)
  const {
@@ -264,19 +259,17 @@ const DatasetConfig: FC<Props> = ({ readonly, hideMetadataFilter }) => {
      className="mt-2"
      title={t('feature.dataSet.title', { ns: 'appDebug' })}
      headerRight={(
        !readonly && (
          <div className="flex items-center gap-1">
            {!isAgent && <ParamsConfig disabled={!hasData} selectedDatasets={dataSet} />}
            <OperationBtn type="add" onClick={showSelectDataSet} />
          </div>
        )
        <div className="flex items-center gap-1">
          {!isAgent && <ParamsConfig disabled={!hasData} selectedDatasets={dataSet} />}
          <OperationBtn type="add" onClick={showSelectDataSet} />
        </div>
      )}
      hasHeaderBottomBorder={!hasData}
      noBodySpacing
    >
      {hasData
        ? (
          <div className={cn('mt-1 grid grid-cols-1 px-3 pb-3', readonly && 'grid-cols-2 gap-1')}>
          <div className="mt-1 flex flex-wrap justify-between px-3 pb-3">
            {formattedDataset.map(item => (
              <CardItem
                key={item.id}
@@ -284,7 +277,6 @@ const DatasetConfig: FC<Props> = ({ readonly, hideMetadataFilter }) => {
                onRemove={onRemove}
                onSave={handleSave}
                editable={item.editable}
                readonly={readonly}
              />
            ))}
          </div>
@@ -295,29 +287,27 @@ const DatasetConfig: FC<Props> = ({ readonly, hideMetadataFilter }) => {
        </div>
      )}

      {!hideMetadataFilter && (
        <div className="border-t border-t-divider-subtle py-2">
          <MetadataFilter
            metadataList={metadataList}
            selectedDatasetsLoaded
            metadataFilterMode={datasetConfigs.metadata_filtering_mode}
            metadataFilteringConditions={datasetConfigs.metadata_filtering_conditions}
            handleAddCondition={handleAddCondition}
            handleMetadataFilterModeChange={handleMetadataFilterModeChange}
            handleRemoveCondition={handleRemoveCondition}
            handleToggleConditionLogicalOperator={handleToggleConditionLogicalOperator}
            handleUpdateCondition={handleUpdateCondition}
            metadataModelConfig={datasetConfigs.metadata_model_config}
            handleMetadataModelChange={handleMetadataModelChange}
            handleMetadataCompletionParamsChange={handleMetadataCompletionParamsChange}
            isCommonVariable
            availableCommonStringVars={promptVariablesToSelect.filter(item => item.type === MetadataFilteringVariableType.string || item.type === MetadataFilteringVariableType.select)}
            availableCommonNumberVars={promptVariablesToSelect.filter(item => item.type === MetadataFilteringVariableType.number)}
          />
        </div>
      )}
      <div className="border-t border-t-divider-subtle py-2">
        <MetadataFilter
          metadataList={metadataList}
          selectedDatasetsLoaded
          metadataFilterMode={datasetConfigs.metadata_filtering_mode}
          metadataFilteringConditions={datasetConfigs.metadata_filtering_conditions}
          handleAddCondition={handleAddCondition}
          handleMetadataFilterModeChange={handleMetadataFilterModeChange}
          handleRemoveCondition={handleRemoveCondition}
          handleToggleConditionLogicalOperator={handleToggleConditionLogicalOperator}
          handleUpdateCondition={handleUpdateCondition}
          metadataModelConfig={datasetConfigs.metadata_model_config}
          handleMetadataModelChange={handleMetadataModelChange}
          handleMetadataCompletionParamsChange={handleMetadataCompletionParamsChange}
          isCommonVariable
          availableCommonStringVars={promptVariablesToSelect.filter(item => item.type === MetadataFilteringVariableType.string || item.type === MetadataFilteringVariableType.select)}
          availableCommonNumberVars={promptVariablesToSelect.filter(item => item.type === MetadataFilteringVariableType.number)}
        />
      </div>

      {!readonly && mode === AppModeEnum.COMPLETION && dataSet.length > 0 && (
      {mode === AppModeEnum.COMPLETION && dataSet.length > 0 && (
        <ContextVar
          value={selectedContextVar?.key}
          options={promptVariablesToSelect}
@@ -245,8 +245,8 @@ describe('RetrievalSection', () => {
    // Assert
    expect(screen.getByText('dataset.retrieval.semantic_search.title')).toBeInTheDocument()
    const learnMoreLink = screen.getByRole('link', { name: 'datasetSettings.form.retrievalSetting.learnMore' })
    expect(learnMoreLink).toHaveAttribute('href', 'https://docs.example/use-dify/knowledge/create-knowledge/setting-indexing-methods')
    expect(docLink).toHaveBeenCalledWith('/use-dify/knowledge/create-knowledge/setting-indexing-methods')
    expect(learnMoreLink).toHaveAttribute('href', 'https://docs.example/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods#setting-the-retrieval-setting')
    expect(docLink).toHaveBeenCalledWith('/guides/knowledge-base/create-knowledge-and-upload-documents/setting-indexing-methods#setting-the-retrieval-setting')
  })

  it('propagates retrieval config changes for economical indexing', async () => {
@@ -18,7 +18,7 @@ const ChatUserInput = ({
  inputs,
}: Props) => {
  const { t } = useTranslation()
  const { modelConfig, setInputs, readonly } = useContext(ConfigContext)
  const { modelConfig, setInputs } = useContext(ConfigContext)

  const promptVariables = modelConfig.configs.prompt_variables.filter(({ key, name }) => {
    return key && key?.trim() && name && name?.trim()
@@ -96,7 +96,6 @@ const ChatUserInput = ({
            placeholder={name}
            value={inputs[key] ? `${inputs[key]}` : ''}
            onChange={(e) => { handleInputValueChange(key, e.target.value) }}
            readOnly={readonly}
          />
        )}
        {type === 'select' && (
@@ -106,7 +105,6 @@ const ChatUserInput = ({
            onSelect={(i) => { handleInputValueChange(key, i.value as string) }}
            items={(options || []).map(i => ({ name: i, value: i }))}
            allowSearch={false}
            disabled={readonly}
          />
        )}
        {type === 'number' && (
@@ -125,7 +123,6 @@ const ChatUserInput = ({
            value={!!inputs[key]}
            required={required}
            onChange={(value) => { handleInputValueChange(key, value) }}
            readonly={readonly}
          />
        )}
      </div>
@@ -15,7 +15,6 @@ import { DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/
import { useDebugConfigurationContext } from '@/context/debug-configuration'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { useProviderContext } from '@/context/provider-context'
import { AppSourceType } from '@/service/share'
import { promptVariablesToUserInputsForm } from '@/utils/model-config'
import { APP_CHAT_WITH_MULTIPLE_MODEL } from '../types'

@@ -131,11 +130,11 @@ const TextGenerationItem: FC<TextGenerationItemProps> = ({

  return (
    <TextGeneration
      appSourceType={AppSourceType.webApp}
      className="flex h-full flex-col overflow-y-auto border-none"
      content={completion}
      isLoading={!completion && isResponding}
      isResponding={isResponding}
      isInstalledApp={false}
      siteInfo={null}
      messageId={messageId}
      isError={false}

@@ -39,7 +39,6 @@ const DebugWithSingleModel = (
) => {
  const { userProfile } = useAppContext()
  const {
    readonly,
    modelConfig,
    appId,
    inputs,
@@ -151,7 +150,6 @@ const DebugWithSingleModel = (

  return (
    <Chat
      readonly={readonly}
      config={config}
      chatList={chatList}
      isResponding={isResponding}
@@ -38,7 +38,6 @@ import ConfigContext from '@/context/debug-configuration'
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { useProviderContext } from '@/context/provider-context'
import { sendCompletionMessage } from '@/service/debug'
import { AppSourceType } from '@/service/share'
import { AppModeEnum, ModelModeType, TransferMethod } from '@/types/app'
import { formatBooleanInputs, promptVariablesToUserInputsForm } from '@/utils/model-config'
import GroupName from '../base/group-name'
@@ -73,7 +72,6 @@ const Debug: FC<IDebug> = ({
}) => {
  const { t } = useTranslation()
  const {
    readonly,
    appId,
    mode,
    modelModeType,
@@ -418,33 +416,25 @@ const Debug: FC<IDebug> = ({
          }
          {mode !== AppModeEnum.COMPLETION && (
            <>
              {
                !readonly && (
                  <TooltipPlus
                    popupContent={t('operation.refresh', { ns: 'common' })}
                  >
                    <ActionButton onClick={clearConversation}>
                      <RefreshCcw01 className="h-4 w-4" />
                    </ActionButton>
                  </TooltipPlus>
              {varList.length > 0 && (
                <div className="relative ml-1 mr-2">
                  <TooltipPlus
                    popupContent={t('operation.refresh', { ns: 'common' })}
                    popupContent={t('panel.userInputField', { ns: 'workflow' })}
                  >
                    <ActionButton onClick={clearConversation}>
                      <RefreshCcw01 className="h-4 w-4" />
                    <ActionButton state={expanded ? ActionButtonState.Active : undefined} onClick={() => setExpanded(!expanded)}>
                      <RiEqualizer2Line className="h-4 w-4" />
                    </ActionButton>

                  </TooltipPlus>
                )
              }

              {
                varList.length > 0 && (
                  <div className="relative ml-1 mr-2">
                    <TooltipPlus
                      popupContent={t('panel.userInputField', { ns: 'workflow' })}
                    >
                      <ActionButton state={expanded ? ActionButtonState.Active : undefined} onClick={() => !readonly && setExpanded(!expanded)}>
                        <RiEqualizer2Line className="h-4 w-4" />
                      </ActionButton>
                    </TooltipPlus>
                    {expanded && <div className="absolute bottom-[-14px] right-[5px] z-10 h-3 w-3 rotate-45 border-l-[0.5px] border-t-[0.5px] border-components-panel-border-subtle bg-components-panel-on-panel-item-bg" />}
                  </div>
                )
              }
              {expanded && <div className="absolute bottom-[-14px] right-[5px] z-10 h-3 w-3 rotate-45 border-l-[0.5px] border-t-[0.5px] border-components-panel-border-subtle bg-components-panel-on-panel-item-bg" />}
                </div>
              )}
            </>
          )}
        </div>
@@ -454,21 +444,19 @@ const Debug: FC<IDebug> = ({
            <ChatUserInput inputs={inputs} />
          </div>
        )}
        {
          mode === AppModeEnum.COMPLETION && (
            <PromptValuePanel
              appType={mode as AppModeEnum}
              onSend={handleSendTextCompletion}
              inputs={inputs}
              visionConfig={{
                ...features.file! as VisionSettings,
                transfer_methods: features.file!.allowed_file_upload_methods || [],
                image_file_size_limit: features.file?.fileUploadConfig?.image_file_size_limit,
              }}
              onVisionFilesChange={setCompletionFiles}
            />
          )
        }
        {mode === AppModeEnum.COMPLETION && (
          <PromptValuePanel
            appType={mode as AppModeEnum}
            onSend={handleSendTextCompletion}
            inputs={inputs}
            visionConfig={{
              ...features.file! as VisionSettings,
              transfer_methods: features.file!.allowed_file_upload_methods || [],
              image_file_size_limit: features.file?.fileUploadConfig?.image_file_size_limit,
            }}
            onVisionFilesChange={setCompletionFiles}
          />
        )}
      </div>
      {
        debugWithMultipleModel && (
@@ -522,12 +510,12 @@ const Debug: FC<IDebug> = ({
          <div className="mx-4 mt-3"><GroupName name={t('result', { ns: 'appDebug' })} /></div>
          <div className="mx-3 mb-8">
            <TextGeneration
              appSourceType={AppSourceType.webApp}
              className="mt-2"
              content={completionRes}
              isLoading={!completionRes && isResponding}
              isShowTextToSpeech={textToSpeechConfig.enabled && !!text2speechDefaultModel}
              isResponding={isResponding}
              isInstalledApp={false}
              messageId={messageId}
              isError={false}
              onRetry={noop}
@@ -562,15 +550,13 @@ const Debug: FC<IDebug> = ({
          </div>
        )
      }
      {
        isShowFormattingChangeConfirm && (
          <FormattingChanged
            onConfirm={handleConfirm}
            onCancel={handleCancel}
          />
        )
      }
      {!isAPIKeySet && !readonly && (<HasNotSetAPIKEY isTrailFinished={!IS_CE_EDITION} onSetting={onSetting} />)}
      {isShowFormattingChangeConfirm && (
        <FormattingChanged
          onConfirm={handleConfirm}
          onCancel={handleCancel}
        />
      )}
      {!isAPIKeySet && (<HasNotSetAPIKEY isTrailFinished={!IS_CE_EDITION} onSetting={onSetting} />)}
    </>
  )
}
@@ -40,7 +40,7 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
  onVisionFilesChange,
}) => {
  const { t } = useTranslation()
  const { readonly, modelModeType, modelConfig, setInputs, mode, isAdvancedMode, completionPromptConfig, chatPromptConfig } = useContext(ConfigContext)
  const { modelModeType, modelConfig, setInputs, mode, isAdvancedMode, completionPromptConfig, chatPromptConfig } = useContext(ConfigContext)
  const [userInputFieldCollapse, setUserInputFieldCollapse] = useState(false)
  const promptVariables = modelConfig.configs.prompt_variables.filter(({ key, name }) => {
    return key && key?.trim() && name && name?.trim()
@@ -78,12 +78,12 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({

    if (isAdvancedMode) {
      if (modelModeType === ModelModeType.chat)
        return chatPromptConfig?.prompt.every(({ text }) => !text)
        return chatPromptConfig.prompt.every(({ text }) => !text)
      return !completionPromptConfig.prompt?.text
    }

    else { return !modelConfig.configs.prompt_template }
  }, [chatPromptConfig?.prompt, completionPromptConfig.prompt?.text, isAdvancedMode, mode, modelConfig.configs.prompt_template, modelModeType])
  }, [chatPromptConfig.prompt, completionPromptConfig.prompt?.text, isAdvancedMode, mode, modelConfig.configs.prompt_template, modelModeType])

  const handleInputValueChange = (key: string, value: string | boolean) => {
    if (!(key in promptVariableObj))
@@ -150,7 +150,6 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
            placeholder={name}
            value={inputs[key] ? `${inputs[key]}` : ''}
            onChange={(e) => { handleInputValueChange(key, e.target.value) }}
            readOnly={readonly}
          />
        )}
        {type === 'select' && (
@@ -161,7 +160,6 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
            items={(options || []).map(i => ({ name: i, value: i }))}
            allowSearch={false}
            bgClassName="bg-gray-50"
            disabled={readonly}
          />
        )}
        {type === 'number' && (
@@ -180,7 +178,6 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
            value={!!inputs[key]}
            required={required}
            onChange={(value) => { handleInputValueChange(key, value) }}
            readonly={readonly}
          />
        )}
      </div>
@@ -199,7 +196,6 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
              url: fileItem.url,
              upload_file_id: fileItem.fileId,
            })))}
            disabled={readonly}
          />
        </div>
      </div>
@@ -208,12 +204,12 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
      )}
      {!userInputFieldCollapse && (
        <div className="flex justify-between border-t border-divider-subtle p-4 pt-3">
          <Button className="w-[72px]" disabled={readonly} onClick={onClear}>{t('operation.clear', { ns: 'common' })}</Button>
          <Button className="w-[72px]" onClick={onClear}>{t('operation.clear', { ns: 'common' })}</Button>
          {canNotRun && (
            <Tooltip popupContent={t('otherError.promptNoBeEmpty', { ns: 'appDebug' })}>
              <Button
                variant="primary"
                disabled={canNotRun || readonly}
                disabled={canNotRun}
                onClick={() => onSend?.()}
                className="w-[96px]"
              >
@@ -225,7 +221,7 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
          {!canNotRun && (
            <Button
              variant="primary"
              disabled={canNotRun || readonly}
              disabled={canNotRun}
              onClick={() => onSend?.()}
              className="w-[96px]"
            >
@@ -241,8 +237,6 @@ const PromptValuePanel: FC<IPromptValuePanelProps> = ({
          showFileUpload={false}
          isChatMode={appType !== AppModeEnum.COMPLETION}
          onFeatureBarClick={setShowAppConfigureFeaturesModal}
          disabled={readonly}
          hideEditEntrance={readonly}
        />
      </div>
    </>
@@ -10,7 +10,6 @@ vi.mock('@heroicons/react/20/solid', () => ({
}))

const mockApp: App = {
  can_trial: true,
  app: {
    id: 'test-app-id',
    mode: AppModeEnum.CHAT,

@@ -1,14 +1,9 @@
'use client'
import type { App } from '@/models/explore'
import { PlusIcon } from '@heroicons/react/20/solid'
import { RiInformation2Line } from '@remixicon/react'
import { useCallback } from 'react'
import { useTranslation } from 'react-i18next'
import { useContextSelector } from 'use-context-selector'
import AppIcon from '@/app/components/base/app-icon'
import Button from '@/app/components/base/button'
import AppListContext from '@/context/app-list-context'
import { useGlobalPublicStore } from '@/context/global-public-context'
import { cn } from '@/utils/classnames'
import { AppTypeIcon, AppTypeLabel } from '../../type-selector'

@@ -25,14 +20,6 @@ const AppCard = ({
}: AppCardProps) => {
  const { t } = useTranslation()
  const { app: appBasicInfo } = app
  const { systemFeatures } = useGlobalPublicStore()
  const isTrialApp = app.can_trial && systemFeatures.enable_trial_app
  const setShowTryAppPanel = useContextSelector(AppListContext, ctx => ctx.setShowTryAppPanel)
  const showTryAPPPanel = useCallback((appId: string) => {
    return () => {
      setShowTryAppPanel?.(true, { appId, app })
    }
  }, [setShowTryAppPanel, app.category])
  return (
    <div className={cn('group relative flex h-[132px] cursor-pointer flex-col overflow-hidden rounded-xl border-[0.5px] border-components-panel-border bg-components-panel-on-panel-item-bg p-4 shadow-xs hover:shadow-lg')}>
      <div className="flex shrink-0 grow-0 items-center gap-3 pb-2">
@@ -64,17 +51,11 @@ const AppCard = ({
      </div>
      {canCreate && (
        <div className={cn('absolute bottom-0 left-0 right-0 hidden bg-gradient-to-t from-components-panel-gradient-2 from-[60.27%] to-transparent p-4 pt-8 group-hover:flex')}>
          <div className={cn('grid h-8 w-full grid-cols-1 items-center space-x-2', isTrialApp && 'grid-cols-2')}>
            <Button variant="primary" onClick={() => onCreate()}>
          <div className={cn('flex h-8 w-full items-center space-x-2')}>
            <Button variant="primary" className="grow" onClick={() => onCreate()}>
              <PlusIcon className="mr-1 h-4 w-4" />
              <span className="text-xs">{t('newApp.useTemplate', { ns: 'app' })}</span>
            </Button>
            {isTrialApp && (
              <Button onClick={showTryAPPPanel(app.app_id)}>
                <RiInformation2Line className="mr-1 size-4" />
                <span>{t('appCard.try', { ns: 'explore' })}</span>
              </Button>
            )}
          </div>
        </div>
      )}
@@ -146,7 +146,7 @@ const Apps = ({
      if (app.app_id)
        await handleCheckPluginDependencies(app.app_id)
      localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1')
      getRedirection(isCurrentWorkspaceEditor, { id: app.app_id!, mode: mode! }, push)
      getRedirection(isCurrentWorkspaceEditor, { id: app.app_id!, mode }, push)
    }
    catch {
      Toast.notify({ type: 'error', message: t('newApp.appCreateFailed', { ns: 'app' }) })

@@ -41,7 +41,7 @@ vi.mock('@/context/app-context', () => ({
  useAppContext: vi.fn(),
}))
vi.mock('@/context/i18n', () => ({
  useDocLink: () => () => '/guides',
  useDocLink: () => () => 'https://docs.dify.ai/en',
}))
vi.mock('@/hooks/use-theme', () => ({
  default: () => ({ theme: 'light' }),
@ -39,7 +39,6 @@ import { useAppContext } from '@/context/app-context'
|
||||
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
|
||||
import useTimestamp from '@/hooks/use-timestamp'
|
||||
import { fetchChatMessages, updateLogMessageAnnotations, updateLogMessageFeedbacks } from '@/service/log'
|
||||
import { AppSourceType } from '@/service/share'
|
||||
import { useChatConversationDetail, useCompletionConversationDetail } from '@/service/use-log'
|
||||
import { AppModeEnum } from '@/types/app'
|
||||
import { cn } from '@/utils/classnames'
|
||||
@ -639,12 +638,12 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) {
|
||||
</div>
|
||||
</div>
|
||||
<TextGeneration
|
||||
appSourceType={AppSourceType.webApp}
|
||||
className="mt-2"
|
||||
content={detail.message.answer}
|
||||
messageId={detail.message.id}
|
||||
isError={false}
|
||||
onRetry={noop}
|
||||
isInstalledApp={false}
|
||||
supportFeedback
|
||||
feedback={detail.message.feedbacks.find((item: any) => item.from_source === 'admin')}
|
||||
onFeedback={feedback => onFeedback(detail.message.id, feedback)}
|
||||
|
||||
@ -305,7 +305,7 @@ describe('CustomizeModal', () => {
|
||||
// Assert
|
||||
expect(mockWindowOpen).toHaveBeenCalledTimes(1)
|
||||
expect(mockWindowOpen).toHaveBeenCalledWith(
|
||||
expect.stringContaining('/use-dify/publish/developing-with-apis'),
|
||||
expect.stringContaining('/guides/application-publishing/developing-with-apis'),
|
||||
'_blank',
|
||||
)
|
||||
})
|
||||
|
||||
@ -29,7 +29,7 @@ import { Markdown } from '@/app/components/base/markdown'
|
||||
import NewAudioButton from '@/app/components/base/new-audio-button'
|
||||
import Toast from '@/app/components/base/toast'
|
||||
import { fetchTextGenerationMessage } from '@/service/debug'
|
||||
import { AppSourceType, fetchMoreLikeThis, updateFeedback } from '@/service/share'
|
||||
import { fetchMoreLikeThis, updateFeedback } from '@/service/share'
|
||||
import { cn } from '@/utils/classnames'
|
||||
import ResultTab from './result-tab'
|
||||
|
||||
@ -53,7 +53,7 @@ export type IGenerationItemProps = {
|
||||
onFeedback?: (feedback: FeedbackType) => void
|
||||
onSave?: (messageId: string) => void
|
||||
isMobile?: boolean
|
||||
appSourceType: AppSourceType
|
||||
isInstalledApp: boolean
|
||||
installedAppId?: string
|
||||
taskId?: string
|
||||
controlClearMoreLikeThis?: number
|
||||
@ -87,7 +87,7 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
onSave,
|
||||
depth = 1,
|
||||
isMobile,
|
||||
appSourceType,
|
||||
isInstalledApp,
|
||||
installedAppId,
|
||||
taskId,
|
||||
controlClearMoreLikeThis,
|
||||
@ -100,7 +100,6 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
const { t } = useTranslation()
|
||||
const params = useParams()
|
||||
const isTop = depth === 1
|
||||
const isTryApp = appSourceType === AppSourceType.tryApp
|
||||
const [completionRes, setCompletionRes] = useState('')
|
||||
const [childMessageId, setChildMessageId] = useState<string | null>(null)
|
||||
const [childFeedback, setChildFeedback] = useState<FeedbackType>({
|
||||
@ -114,7 +113,7 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
const setShowPromptLogModal = useAppStore(s => s.setShowPromptLogModal)
|
||||
|
||||
const handleFeedback = async (childFeedback: FeedbackType) => {
|
||||
await updateFeedback({ url: `/messages/${childMessageId}/feedbacks`, body: { rating: childFeedback.rating } }, appSourceType, installedAppId)
|
||||
await updateFeedback({ url: `/messages/${childMessageId}/feedbacks`, body: { rating: childFeedback.rating } }, isInstalledApp, installedAppId)
|
||||
setChildFeedback(childFeedback)
|
||||
}
|
||||
|
||||
@ -132,7 +131,7 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
onSave,
|
||||
isShowTextToSpeech,
|
||||
isMobile,
|
||||
appSourceType,
|
||||
isInstalledApp,
|
||||
installedAppId,
|
||||
controlClearMoreLikeThis,
|
||||
isWorkflow,
|
||||
@ -146,7 +145,7 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
return
|
||||
}
|
||||
startQuerying()
|
||||
const res: any = await fetchMoreLikeThis(messageId as string, appSourceType, installedAppId)
|
||||
const res: any = await fetchMoreLikeThis(messageId as string, isInstalledApp, installedAppId)
|
||||
setCompletionRes(res.answer)
|
||||
setChildFeedback({
|
||||
rating: null,
|
||||
@ -311,7 +310,7 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
)}
|
||||
{/* action buttons */}
|
||||
<div className="absolute bottom-1 right-2 flex items-center">
|
||||
{!isInWebApp && (appSourceType !== AppSourceType.installedApp) && !isResponding && (
|
||||
{!isInWebApp && !isInstalledApp && !isResponding && (
|
||||
<div className="ml-1 flex items-center gap-0.5 rounded-[10px] border-[0.5px] border-components-actionbar-border bg-components-actionbar-bg p-0.5 shadow-md backdrop-blur-sm">
|
||||
<ActionButton disabled={isError || !messageId} onClick={handleOpenLogModal}>
|
||||
<RiFileList3Line className="h-4 w-4" />
|
||||
@ -320,12 +319,12 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
</div>
|
||||
)}
|
||||
<div className="ml-1 flex items-center gap-0.5 rounded-[10px] border-[0.5px] border-components-actionbar-border bg-components-actionbar-bg p-0.5 shadow-md backdrop-blur-sm">
|
||||
{moreLikeThis && !isTryApp && (
|
||||
{moreLikeThis && (
|
||||
<ActionButton state={depth === MAX_DEPTH ? ActionButtonState.Disabled : ActionButtonState.Default} disabled={depth === MAX_DEPTH} onClick={handleMoreLikeThis}>
|
||||
<RiSparklingLine className="h-4 w-4" />
|
||||
</ActionButton>
|
||||
)}
|
||||
{isShowTextToSpeech && !isTryApp && (
|
||||
{isShowTextToSpeech && (
|
||||
<NewAudioButton
|
||||
id={messageId!}
|
||||
voice={config?.text_to_speech?.voice}
|
||||
@ -351,13 +350,13 @@ const GenerationItem: FC<IGenerationItemProps> = ({
|
||||
<RiReplay15Line className="h-4 w-4" />
|
||||
</ActionButton>
|
||||
)}
|
||||
{isInWebApp && !isWorkflow && !isTryApp && (
|
||||
{isInWebApp && !isWorkflow && (
|
||||
<ActionButton disabled={isError || !messageId} onClick={() => { onSave?.(messageId as string) }}>
|
||||
<RiBookmark3Line className="h-4 w-4" />
|
||||
</ActionButton>
|
||||
)}
|
||||
</div>
|
||||
{(supportFeedback || isInWebApp) && !isWorkflow && !isTryApp && !isError && messageId && (
|
||||
{(supportFeedback || isInWebApp) && !isWorkflow && !isError && messageId && (
|
||||
<div className="ml-1 flex items-center gap-0.5 rounded-[10px] border-[0.5px] border-components-actionbar-border bg-components-actionbar-bg p-0.5 shadow-md backdrop-blur-sm">
|
||||
{!feedback?.rating && (
|
||||
<>
|
||||
|
||||
@ -1,17 +1,7 @@
|
||||
'use client'
|
||||
import type { CreateAppModalProps } from '../explore/create-app-modal'
|
||||
import type { CurrentTryAppParams } from '@/context/explore-context'
|
||||
import { useCallback, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import { useEducationInit } from '@/app/education-apply/hooks'
|
||||
import AppListContext from '@/context/app-list-context'
|
||||
import useDocumentTitle from '@/hooks/use-document-title'
|
||||
import { useImportDSL } from '@/hooks/use-import-dsl'
|
||||
import { DSLImportMode } from '@/models/app'
|
||||
import { fetchAppDetail } from '@/service/explore'
|
||||
import DSLConfirmModal from '../app/create-from-dsl-modal/dsl-confirm-modal'
|
||||
import CreateAppModal from '../explore/create-app-modal'
|
||||
import TryApp from '../explore/try-app'
|
||||
import List from './list'
|
||||
|
||||
const Apps = () => {
|
||||
@ -20,124 +10,10 @@ const Apps = () => {
|
||||
useDocumentTitle(t('menus.apps', { ns: 'common' }))
|
||||
useEducationInit()
|
||||
|
||||
const [currentTryAppParams, setCurrentTryAppParams] = useState<CurrentTryAppParams | undefined>(undefined)
|
||||
const currApp = currentTryAppParams?.app
|
||||
const [isShowTryAppPanel, setIsShowTryAppPanel] = useState(false)
|
||||
const hideTryAppPanel = useCallback(() => {
|
||||
setIsShowTryAppPanel(false)
|
||||
}, [])
|
||||
const setShowTryAppPanel = (showTryAppPanel: boolean, params?: CurrentTryAppParams) => {
|
||||
if (showTryAppPanel)
|
||||
setCurrentTryAppParams(params)
|
||||
else
|
||||
setCurrentTryAppParams(undefined)
|
||||
setIsShowTryAppPanel(showTryAppPanel)
|
||||
}
|
||||
const [isShowCreateModal, setIsShowCreateModal] = useState(false)
|
||||
|
||||
const handleShowFromTryApp = useCallback(() => {
|
||||
setIsShowCreateModal(true)
|
||||
}, [])
|
||||
|
||||
const [controlRefreshList, setControlRefreshList] = useState(0)
|
||||
const [controlHideCreateFromTemplatePanel, setControlHideCreateFromTemplatePanel] = useState(0)
|
||||
const onSuccess = useCallback(() => {
|
||||
setControlRefreshList(prev => prev + 1)
|
||||
setControlHideCreateFromTemplatePanel(prev => prev + 1)
|
||||
}, [])
|
||||
|
||||
const [showDSLConfirmModal, setShowDSLConfirmModal] = useState(false)
|
||||
|
||||
const {
|
||||
handleImportDSL,
|
||||
handleImportDSLConfirm,
|
||||
versions,
|
||||
isFetching,
|
||||
} = useImportDSL()
|
||||
|
||||
const onConfirmDSL = useCallback(async () => {
|
||||
await handleImportDSLConfirm({
|
||||
onSuccess,
|
||||
})
|
||||
}, [handleImportDSLConfirm, onSuccess])
|
||||
|
||||
const onCreate: CreateAppModalProps['onConfirm'] = async ({
|
||||
name,
|
||||
icon_type,
|
||||
icon,
|
||||
icon_background,
|
||||
description,
|
||||
}) => {
|
||||
hideTryAppPanel()
|
||||
|
||||
const { export_data } = await fetchAppDetail(
|
||||
currApp?.app.id as string,
|
||||
)
|
||||
const payload = {
|
||||
mode: DSLImportMode.YAML_CONTENT,
|
||||
yaml_content: export_data,
|
||||
name,
|
||||
icon_type,
|
||||
icon,
|
||||
icon_background,
|
||||
description,
|
||||
}
|
||||
await handleImportDSL(payload, {
|
||||
onSuccess: () => {
|
||||
setIsShowCreateModal(false)
|
||||
},
|
||||
onPending: () => {
|
||||
setShowDSLConfirmModal(true)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return (
|
||||
<AppListContext.Provider value={{
|
||||
currentApp: currentTryAppParams,
|
||||
isShowTryAppPanel,
|
||||
setShowTryAppPanel,
|
||||
controlHideCreateFromTemplatePanel,
|
||||
}}
|
||||
>
|
||||
<div className="relative flex h-0 shrink-0 grow flex-col overflow-y-auto bg-background-body">
|
||||
<List controlRefreshList={controlRefreshList} />
|
||||
{isShowTryAppPanel && (
|
||||
<TryApp
|
||||
appId={currentTryAppParams?.appId || ''}
|
||||
category={currentTryAppParams?.app?.category}
|
||||
onClose={hideTryAppPanel}
|
||||
onCreate={handleShowFromTryApp}
|
||||
/>
|
||||
)}
|
||||
|
||||
{
|
||||
showDSLConfirmModal && (
|
||||
<DSLConfirmModal
|
||||
versions={versions}
|
||||
onCancel={() => setShowDSLConfirmModal(false)}
|
||||
onConfirm={onConfirmDSL}
|
||||
confirmDisabled={isFetching}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
{isShowCreateModal && (
|
||||
<CreateAppModal
|
||||
appIconType={currApp?.app.icon_type || 'emoji'}
|
||||
appIcon={currApp?.app.icon || ''}
|
||||
appIconBackground={currApp?.app.icon_background || ''}
|
||||
appIconUrl={currApp?.app.icon_url}
|
||||
appName={currApp?.app.name || ''}
|
||||
appDescription={currApp?.app.description || ''}
|
||||
show
|
||||
onConfirm={onCreate}
|
||||
confirmDisabled={isFetching}
|
||||
onHide={() => setIsShowCreateModal(false)}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</AppListContext.Provider>
|
||||
<div className="relative flex h-0 shrink-0 grow flex-col overflow-y-auto bg-background-body">
|
||||
<List />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
'use client'
|
||||
|
||||
import type { FC } from 'react'
|
||||
import {
|
||||
RiApps2Line,
|
||||
RiDragDropLine,
|
||||
@ -54,12 +53,7 @@ const CreateFromDSLModal = dynamic(() => import('@/app/components/app/create-fro
|
||||
ssr: false,
|
||||
})
|
||||
|
||||
type Props = {
|
||||
controlRefreshList?: number
|
||||
}
|
||||
const List: FC<Props> = ({
|
||||
controlRefreshList = 0,
|
||||
}) => {
|
||||
const List = () => {
|
||||
const { t } = useTranslation()
|
||||
const { systemFeatures } = useGlobalPublicStore()
|
||||
const router = useRouter()
|
||||
@ -116,13 +110,6 @@ const List: FC<Props> = ({
|
||||
refetch,
|
||||
} = useInfiniteAppList(appListQueryParams, { enabled: !isCurrentWorkspaceDatasetOperator })
|
||||
|
||||
useEffect(() => {
|
||||
if (controlRefreshList > 0) {
|
||||
refetch()
|
||||
}
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [controlRefreshList])
|
||||
|
||||
const anchorRef = useRef<HTMLDivElement>(null)
|
||||
const options = [
|
||||
{ value: 'all', text: t('types.all', { ns: 'app' }), icon: <RiApps2Line className="mr-1 h-[14px] w-[14px]" /> },
|
||||
|
||||
@ -1,18 +1,15 @@
|
||||
'use client'
|
||||
|
||||
import type { AppModeEnum } from '@/types/app'
|
||||
import dynamic from 'next/dynamic'
|
||||
import {
|
||||
useRouter,
|
||||
useSearchParams,
|
||||
} from 'next/navigation'
|
||||
import * as React from 'react'
|
||||
import { useEffect, useMemo, useState } from 'react'
|
||||
import { useMemo, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
import { useContextSelector } from 'use-context-selector'
|
||||
import { CreateFromDSLModalTab } from '@/app/components/app/create-from-dsl-modal'
|
||||
import { FileArrow01, FilePlus01, FilePlus02 } from '@/app/components/base/icons/src/vender/line/files'
|
||||
import AppListContext from '@/context/app-list-context'
|
||||
import { useProviderContext } from '@/context/provider-context'
|
||||
import { cn } from '@/utils/classnames'
|
||||
|
||||
@ -58,12 +55,6 @@ const CreateAppCard = ({
|
||||
return undefined
|
||||
}, [dslUrl])
|
||||
|
||||
const controlHideCreateFromTemplatePanel = useContextSelector(AppListContext, ctx => ctx.controlHideCreateFromTemplatePanel)
|
||||
useEffect(() => {
|
||||
if (controlHideCreateFromTemplatePanel > 0)
|
||||
queueMicrotask(() => setShowNewAppTemplateDialog(false))
|
||||
}, [controlHideCreateFromTemplatePanel])
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={ref}
|
||||
@ -106,7 +97,7 @@ const CreateAppCard = ({
|
||||
setShowNewAppTemplateDialog(true)
|
||||
setShowNewAppModal(false)
|
||||
}}
|
||||
defaultAppMode={selectedAppType !== 'all' ? selectedAppType as AppModeEnum : undefined}
|
||||
defaultAppMode={selectedAppType !== 'all' ? selectedAppType as any : undefined}
|
||||
/>
|
||||
)}
|
||||
{showNewAppTemplateDialog && (
|
||||
|
||||
@ -51,15 +51,11 @@ function getActionButtonState(state: ActionButtonState) {
|
||||
}
|
||||
}
|
||||
|
||||
const ActionButton = ({ className, size, state = ActionButtonState.Default, styleCss, children, ref, disabled, ...props }: ActionButtonProps) => {
|
||||
const ActionButton = ({ className, size, state = ActionButtonState.Default, styleCss, children, ref, ...props }: ActionButtonProps) => {
|
||||
return (
|
||||
<button
|
||||
type="button"
|
||||
className={cn(
|
||||
actionButtonVariants({ className, size }),
|
||||
getActionButtonState(state),
|
||||
disabled && 'cursor-not-allowed text-text-disabled hover:bg-transparent hover:text-text-disabled',
|
||||
)}
|
||||
className={cn(actionButtonVariants({ className, size }), getActionButtonState(state))}
|
||||
ref={ref}
|
||||
style={styleCss}
|
||||
{...props}
|
||||
|
||||
@ -1,59 +0,0 @@
|
||||
import {
|
||||
RiCloseLine,
|
||||
RiInformation2Fill,
|
||||
} from '@remixicon/react'
|
||||
import { cva } from 'class-variance-authority'
|
||||
import {
|
||||
memo,
|
||||
} from 'react'
|
||||
import { cn } from '@/utils/classnames'
|
||||
|
||||
type Props = {
|
||||
type?: 'info'
|
||||
message: string
|
||||
onHide: () => void
|
||||
className?: string
|
||||
}
|
||||
const bgVariants = cva(
|
||||
'',
|
||||
{
|
||||
variants: {
|
||||
type: {
|
||||
info: 'from-components-badge-status-light-normal-halo to-background-gradient-mask-transparent',
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
const Alert: React.FC<Props> = ({
|
||||
type = 'info',
|
||||
message,
|
||||
onHide,
|
||||
className,
|
||||
}) => {
|
||||
return (
|
||||
<div className={cn('pointer-events-none w-full', className)}>
|
||||
<div
|
||||
className="relative flex space-x-1 overflow-hidden rounded-xl border border-components-panel-border bg-components-panel-bg-blur p-3 shadow-lg"
|
||||
>
|
||||
<div className={cn('pointer-events-none absolute inset-0 bg-gradient-to-r opacity-[0.4]', bgVariants({ type }))}>
|
||||
</div>
|
||||
<div className="flex h-6 w-6 items-center justify-center">
|
||||
<RiInformation2Fill className="text-text-accent" />
|
||||
</div>
|
||||
<div className="p-1">
|
||||
<div className="system-xs-regular text-text-secondary">
|
||||
{message}
|
||||
</div>
|
||||
</div>
|
||||
<div
|
||||
className="pointer-events-auto flex h-6 w-6 cursor-pointer items-center justify-center"
|
||||
onClick={onHide}
|
||||
>
|
||||
<RiCloseLine className="h-4 w-4 text-text-tertiary" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
export default memo(Alert)
|
||||
@ -1,5 +1,5 @@
|
||||
import Toast from '@/app/components/base/toast'
|
||||
import { AppSourceType, textToAudioStream } from '@/service/share'
|
||||
import { textToAudioStream } from '@/service/share'
|
||||
|
||||
declare global {
|
||||
// eslint-disable-next-line ts/consistent-type-definitions
|
||||
@ -100,7 +100,7 @@ export default class AudioPlayer {
|
||||
|
||||
private async loadAudio() {
|
||||
try {
|
||||
const audioResponse: any = await textToAudioStream(this.url, this.isPublic ? AppSourceType.webApp : AppSourceType.installedApp, { content_type: 'audio/mpeg' }, {
|
||||
const audioResponse: any = await textToAudioStream(this.url, this.isPublic, { content_type: 'audio/mpeg' }, {
|
||||
message_id: this.msgId,
|
||||
streaming: true,
|
||||
voice: this.voice,
|
||||
|
||||
@ -1,228 +0,0 @@
|
||||
import type { UseEmblaCarouselType } from 'embla-carousel-react'
|
||||
import Autoplay from 'embla-carousel-autoplay'
|
||||
import useEmblaCarousel from 'embla-carousel-react'
|
||||
import * as React from 'react'
|
||||
import { cn } from '@/utils/classnames'
|
||||
|
||||
type CarouselApi = UseEmblaCarouselType[1]
|
||||
type UseCarouselParameters = Parameters<typeof useEmblaCarousel>
|
||||
type CarouselOptions = UseCarouselParameters[0]
|
||||
type CarouselPlugin = UseCarouselParameters[1]
|
||||
|
||||
type CarouselProps = {
|
||||
opts?: CarouselOptions
|
||||
plugins?: CarouselPlugin
|
||||
orientation?: 'horizontal' | 'vertical'
|
||||
}
|
||||
|
||||
type CarouselContextValue = {
|
||||
carouselRef: ReturnType<typeof useEmblaCarousel>[0]
|
||||
api: ReturnType<typeof useEmblaCarousel>[1]
|
||||
scrollPrev: () => void
|
||||
scrollNext: () => void
|
||||
selectedIndex: number
|
||||
canScrollPrev: boolean
|
||||
canScrollNext: boolean
|
||||
} & CarouselProps
|
||||
|
||||
const CarouselContext = React.createContext<CarouselContextValue | null>(null)
|
||||
|
||||
function useCarousel() {
|
||||
const context = React.useContext(CarouselContext)
|
||||
|
||||
if (!context)
|
||||
throw new Error('useCarousel must be used within a <Carousel />')
|
||||
|
||||
return context
|
||||
}
|
||||
|
||||
type TCarousel = {
|
||||
Content: typeof CarouselContent
|
||||
Item: typeof CarouselItem
|
||||
Previous: typeof CarouselPrevious
|
||||
Next: typeof CarouselNext
|
||||
Dot: typeof CarouselDot
|
||||
Plugin: typeof CarouselPlugins
|
||||
} & React.ForwardRefExoticComponent<
|
||||
React.HTMLAttributes<HTMLDivElement> & CarouselProps & React.RefAttributes<CarouselContextValue>
|
||||
>
|
||||
|
||||
const Carousel: TCarousel = React.forwardRef(
|
||||
({ orientation = 'horizontal', opts, plugins, className, children, ...props }, ref) => {
|
||||
const [carouselRef, api] = useEmblaCarousel(
|
||||
{ ...opts, axis: orientation === 'horizontal' ? 'x' : 'y' },
|
||||
plugins,
|
||||
)
|
||||
const [canScrollPrev, setCanScrollPrev] = React.useState(false)
|
||||
const [canScrollNext, setCanScrollNext] = React.useState(false)
|
||||
const [selectedIndex, setSelectedIndex] = React.useState(0)
|
||||
|
||||
const scrollPrev = React.useCallback(() => {
|
||||
api?.scrollPrev()
|
||||
}, [api])
|
||||
|
||||
const scrollNext = React.useCallback(() => {
|
||||
api?.scrollNext()
|
||||
}, [api])
|
||||
|
||||
React.useEffect(() => {
|
||||
if (!api)
|
||||
return
|
||||
|
||||
const onSelect = (api: CarouselApi) => {
|
||||
if (!api)
|
||||
return
|
||||
|
||||
queueMicrotask(() => {
|
||||
setSelectedIndex(api.selectedScrollSnap())
|
||||
setCanScrollPrev(api.canScrollPrev())
|
||||
setCanScrollNext(api.canScrollNext())
|
||||
})
|
||||
}
|
||||
|
||||
onSelect(api)
|
||||
api.on('reInit', onSelect)
|
||||
api.on('select', onSelect)
|
||||
|
||||
return () => {
|
||||
api?.off('select', onSelect)
|
||||
}
|
||||
}, [api])
|
||||
|
||||
React.useImperativeHandle(ref, () => ({
|
||||
carouselRef,
|
||||
api,
|
||||
opts,
|
||||
orientation,
|
||||
scrollPrev,
|
||||
scrollNext,
|
||||
selectedIndex,
|
||||
canScrollPrev,
|
||||
canScrollNext,
|
||||
}))
|
||||
|
||||
return (
|
||||
<CarouselContext.Provider
|
||||
value={{
|
||||
carouselRef,
|
||||
api,
|
||||
opts,
|
||||
orientation,
|
||||
scrollPrev,
|
||||
scrollNext,
|
||||
selectedIndex,
|
||||
canScrollPrev,
|
||||
canScrollNext,
|
||||
}}
|
||||
>
|
||||
<div
|
||||
ref={carouselRef}
|
||||
// onKeyDownCapture={handleKeyDown}
|
||||
className={cn('relative overflow-hidden', className)}
|
||||
role="region"
|
||||
aria-roledescription="carousel"
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
</div>
|
||||
</CarouselContext.Provider>
|
||||
)
|
||||
},
|
||||
) as TCarousel
|
||||
Carousel.displayName = 'Carousel'
|
||||
|
||||
const CarouselContent = React.forwardRef<HTMLDivElement, React.HTMLAttributes<HTMLDivElement>>(
|
||||
({ className, ...props }, ref) => {
|
||||
const { orientation } = useCarousel()
|
||||
|
||||
return (
|
||||
<div
|
||||
ref={ref}
|
||||
className={cn('flex', orientation === 'vertical' && 'flex-col', className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
},
|
||||
)
|
||||
CarouselContent.displayName = 'CarouselContent'
|
||||
|
||||
const CarouselItem = React.forwardRef<HTMLDivElement, React.HTMLAttributes<HTMLDivElement>>(
|
||||
({ className, ...props }, ref) => {
|
||||
return (
|
||||
<div
|
||||
ref={ref}
|
||||
role="group"
|
||||
aria-roledescription="slide"
|
||||
className={cn('min-w-0 shrink-0 grow-0 basis-full', className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
},
|
||||
)
|
||||
CarouselItem.displayName = 'CarouselItem'
|
||||
|
||||
type CarouselActionProps = {
|
||||
children?: React.ReactNode
|
||||
} & Omit<React.HTMLAttributes<HTMLButtonElement>, 'disabled' | 'onClick'>
|
||||
|
||||
const CarouselPrevious = React.forwardRef<HTMLButtonElement, CarouselActionProps>(
|
||||
({ children, ...props }, ref) => {
|
||||
const { scrollPrev, canScrollPrev } = useCarousel()
|
||||
|
||||
return (
|
||||
<button ref={ref} {...props} disabled={!canScrollPrev} onClick={scrollPrev}>
|
||||
{children}
|
||||
</button>
|
||||
)
|
||||
},
|
||||
)
|
||||
CarouselPrevious.displayName = 'CarouselPrevious'
|
||||
|
||||
const CarouselNext = React.forwardRef<HTMLButtonElement, CarouselActionProps>(
|
||||
({ children, ...props }, ref) => {
|
||||
const { scrollNext, canScrollNext } = useCarousel()
|
||||
|
||||
return (
|
||||
<button ref={ref} {...props} disabled={!canScrollNext} onClick={scrollNext}>
|
||||
{children}
|
||||
</button>
|
||||
)
|
||||
},
|
||||
)
|
||||
CarouselNext.displayName = 'CarouselNext'
|
||||
|
||||
const CarouselDot = React.forwardRef<HTMLButtonElement, CarouselActionProps>(
|
||||
({ children, ...props }, ref) => {
|
||||
const { api, selectedIndex } = useCarousel()
|
||||
|
||||
return api?.slideNodes().map((_, index) => {
|
||||
return (
|
||||
<button
|
||||
key={index}
|
||||
ref={ref}
|
||||
{...props}
|
||||
data-state={index === selectedIndex ? 'active' : 'inactive'}
|
||||
onClick={() => {
|
||||
api.scrollTo(index)
|
||||
}}
|
||||
>
|
||||
{children}
|
||||
</button>
|
||||
)
|
||||
})
|
||||
},
|
||||
)
|
||||
CarouselDot.displayName = 'CarouselDot'
|
||||
|
||||
const CarouselPlugins = {
|
||||
Autoplay,
|
||||
}
|
||||
|
||||
Carousel.Content = CarouselContent
|
||||
Carousel.Item = CarouselItem
|
||||
Carousel.Previous = CarouselPrevious
|
||||
Carousel.Next = CarouselNext
|
||||
Carousel.Dot = CarouselDot
|
||||
Carousel.Plugin = CarouselPlugins
|
||||
|
||||
export { Carousel, useCarousel }
|
||||
@ -12,7 +12,6 @@ import SuggestedQuestions from '@/app/components/base/chat/chat/answer/suggested
|
||||
import { Markdown } from '@/app/components/base/markdown'
|
||||
import { InputVarType } from '@/app/components/workflow/types'
|
||||
import {
|
||||
AppSourceType,
|
||||
fetchSuggestedQuestions,
|
||||
getUrl,
|
||||
stopChatMessageResponding,
|
||||
@ -53,11 +52,6 @@ const ChatWrapper = () => {
|
||||
initUserVariables,
|
||||
} = useChatWithHistoryContext()
|
||||
|
||||
const appSourceType = isInstalledApp ? AppSourceType.installedApp : AppSourceType.webApp
|
||||
|
||||
// Semantic variable for better code readability
|
||||
const isHistoryConversation = !!currentConversationId
|
||||
|
||||
const appConfig = useMemo(() => {
|
||||
const config = appParams || {}
|
||||
|
||||
@ -85,7 +79,7 @@ const ChatWrapper = () => {
|
||||
inputsForm: inputsForms,
|
||||
},
|
||||
appPrevChatTree,
|
||||
taskId => stopChatMessageResponding('', taskId, appSourceType, appId),
|
||||
taskId => stopChatMessageResponding('', taskId, isInstalledApp, appId),
|
||||
clearChatList,
|
||||
setClearChatList,
|
||||
)
|
||||
@ -144,11 +138,11 @@ const ChatWrapper = () => {
|
||||
}
|
||||
|
||||
handleSend(
|
||||
getUrl('chat-messages', appSourceType, appId || ''),
|
||||
getUrl('chat-messages', isInstalledApp, appId || ''),
|
||||
data,
|
||||
{
|
||||
onGetSuggestedQuestions: responseItemId => fetchSuggestedQuestions(responseItemId, appSourceType, appId),
|
||||
onConversationComplete: isHistoryConversation ? undefined : handleNewConversationCompleted,
|
||||
onGetSuggestedQuestions: responseItemId => fetchSuggestedQuestions(responseItemId, isInstalledApp, appId),
|
||||
onConversationComplete: currentConversationId ? undefined : handleNewConversationCompleted,
|
||||
isPublicAPI: !isInstalledApp,
|
||||
},
|
||||
)
|
||||
|
||||
@ -27,7 +27,6 @@ import { useWebAppStore } from '@/context/web-app-context'
|
||||
import { useAppFavicon } from '@/hooks/use-app-favicon'
|
||||
import { changeLanguage } from '@/i18n-config/client'
|
||||
import {
|
||||
AppSourceType,
|
||||
delConversation,
|
||||
pinConversation,
|
||||
renameConversation,
|
||||
@ -73,7 +72,6 @@ function getFormattedChatList(messages: any[]) {
|
||||
|
||||
export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
const isInstalledApp = useMemo(() => !!installedAppInfo, [installedAppInfo])
|
||||
const appSourceType = isInstalledApp ? AppSourceType.installedApp : AppSourceType.webApp
|
||||
const appInfo = useWebAppStore(s => s.appInfo)
|
||||
const appParams = useWebAppStore(s => s.appParams)
|
||||
const appMeta = useWebAppStore(s => s.appMeta)
|
||||
@ -179,7 +177,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
}, [currentConversationId, newConversationId])
|
||||
|
||||
const { data: appPinnedConversationData } = useShareConversations({
|
||||
appSourceType,
|
||||
isInstalledApp,
|
||||
appId,
|
||||
pinned: true,
|
||||
limit: 100,
|
||||
@ -192,7 +190,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
data: appConversationData,
|
||||
isLoading: appConversationDataLoading,
|
||||
} = useShareConversations({
|
||||
appSourceType,
|
||||
isInstalledApp,
|
||||
appId,
|
||||
pinned: false,
|
||||
limit: 100,
|
||||
@ -206,7 +204,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
isLoading: appChatListDataLoading,
|
||||
} = useShareChatList({
|
||||
conversationId: chatShouldReloadKey,
|
||||
appSourceType,
|
||||
isInstalledApp,
|
||||
appId,
|
||||
}, {
|
||||
enabled: !!chatShouldReloadKey,
|
||||
@ -336,11 +334,10 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
|
||||
const { data: newConversation } = useShareConversationName({
|
||||
conversationId: newConversationId,
|
||||
appSourceType,
|
||||
isInstalledApp,
|
||||
appId,
|
||||
}, {
|
||||
refetchOnWindowFocus: false,
|
||||
enabled: !!newConversationId,
|
||||
})
|
||||
const [originConversationList, setOriginConversationList] = useState<ConversationItem[]>([])
|
||||
useEffect(() => {
|
||||
@ -465,16 +462,16 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
}, [invalidateShareConversations])
|
||||
|
||||
const handlePinConversation = useCallback(async (conversationId: string) => {
|
||||
await pinConversation(appSourceType, appId, conversationId)
|
||||
await pinConversation(isInstalledApp, appId, conversationId)
|
||||
notify({ type: 'success', message: t('api.success', { ns: 'common' }) })
|
||||
handleUpdateConversationList()
|
||||
}, [appSourceType, appId, notify, t, handleUpdateConversationList])
|
||||
}, [isInstalledApp, appId, notify, t, handleUpdateConversationList])
|
||||
|
||||
const handleUnpinConversation = useCallback(async (conversationId: string) => {
|
||||
await unpinConversation(appSourceType, appId, conversationId)
|
||||
await unpinConversation(isInstalledApp, appId, conversationId)
|
||||
notify({ type: 'success', message: t('api.success', { ns: 'common' }) })
|
||||
handleUpdateConversationList()
|
||||
}, [appSourceType, appId, notify, t, handleUpdateConversationList])
|
||||
}, [isInstalledApp, appId, notify, t, handleUpdateConversationList])
|
||||
|
||||
const [conversationDeleting, setConversationDeleting] = useState(false)
|
||||
const handleDeleteConversation = useCallback(async (
|
||||
@ -488,7 +485,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
|
||||
try {
|
||||
setConversationDeleting(true)
|
||||
await delConversation(appSourceType, appId, conversationId)
|
||||
await delConversation(isInstalledApp, appId, conversationId)
|
||||
notify({ type: 'success', message: t('api.success', { ns: 'common' }) })
|
||||
onSuccess()
|
||||
}
|
||||
@ -523,7 +520,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
|
||||
setConversationRenaming(true)
|
||||
try {
|
||||
await renameConversation(appSourceType, appId, conversationId, newName)
|
||||
await renameConversation(isInstalledApp, appId, conversationId, newName)
|
||||
|
||||
notify({
|
||||
type: 'success',
|
||||
@ -553,9 +550,9 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
|
||||
}, [handleConversationIdInfoChange, invalidateShareConversations])
|
||||
|
||||
const handleFeedback = useCallback(async (messageId: string, feedback: Feedback) => {
|
||||
await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, appSourceType, appId)
|
||||
await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, isInstalledApp, appId)
|
||||
notify({ type: 'success', message: t('api.success', { ns: 'common' }) })
|
||||
}, [appSourceType, appId, t, notify])
|
||||
}, [isInstalledApp, appId, t, notify])
|
||||
|
||||
return {
|
||||
isInstalledApp,
|
||||
|
||||
@ -150,7 +150,7 @@ const Answer: FC<AnswerProps> = ({
|
||||
data={workflowProcess}
|
||||
item={item}
|
||||
hideProcessDetail={hideProcessDetail}
|
||||
readonly={hideProcessDetail && appData ? !appData.site?.show_workflow_steps : undefined}
|
||||
readonly={hideProcessDetail && appData ? !appData.site.show_workflow_steps : undefined}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import type { FC } from 'react'
|
||||
import type { ChatItem } from '../../types'
|
||||
import { memo } from 'react'
|
||||
import { cn } from '@/utils/classnames'
|
||||
import { useChatContext } from '../context'
|
||||
|
||||
type SuggestedQuestionsProps = {
|
||||
@ -10,7 +9,7 @@ type SuggestedQuestionsProps = {
|
||||
const SuggestedQuestions: FC<SuggestedQuestionsProps> = ({
|
||||
item,
|
||||
}) => {
|
||||
const { onSend, readonly } = useChatContext()
|
||||
const { onSend } = useChatContext()
|
||||
|
||||
const {
|
||||
isOpeningStatement,
|
||||
@ -25,11 +24,8 @@ const SuggestedQuestions: FC<SuggestedQuestionsProps> = ({
|
||||
{suggestedQuestions.filter(q => !!q && q.trim()).map((question, index) => (
|
||||
<div
|
||||
key={index}
|
||||
className={cn(
|
||||
'system-sm-medium mr-1 mt-1 inline-flex max-w-full shrink-0 cursor-pointer flex-wrap rounded-lg border-[0.5px] border-components-button-secondary-border bg-components-button-secondary-bg px-3.5 py-2 text-components-button-secondary-accent-text shadow-xs last:mr-0 hover:border-components-button-secondary-border-hover hover:bg-components-button-secondary-bg-hover',
|
||||
readonly && 'pointer-events-none opacity-50',
|
||||
)}
|
||||
onClick={() => !readonly && onSend?.(question)}
|
||||
className="system-sm-medium mr-1 mt-1 inline-flex max-w-full shrink-0 cursor-pointer flex-wrap rounded-lg border-[0.5px] border-components-button-secondary-border bg-components-button-secondary-bg px-3.5 py-2 text-components-button-secondary-accent-text shadow-xs last:mr-0 hover:border-components-button-secondary-border-hover hover:bg-components-button-secondary-bg-hover"
|
||||
onClick={() => onSend?.(question)}
|
||||
>
|
||||
{question}
|
||||
</div>
|
||||
|
||||
@ -5,7 +5,6 @@ import type {
|
||||
} from '../../types'
|
||||
import type { InputForm } from '../type'
|
||||
import type { FileUpload } from '@/app/components/base/features/types'
|
||||
import { noop } from 'es-toolkit/function'
|
||||
import { decode } from 'html-entities'
|
||||
import Recorder from 'js-audio-recorder'
|
||||
import {
|
||||
@ -31,7 +30,6 @@ import { useTextAreaHeight } from './hooks'
|
||||
import Operation from './operation'
|
||||
|
||||
type ChatInputAreaProps = {
|
||||
readonly?: boolean
|
||||
botName?: string
|
||||
showFeatureBar?: boolean
|
||||
showFileUpload?: boolean
|
||||
@ -47,7 +45,6 @@ type ChatInputAreaProps = {
|
||||
disabled?: boolean
|
||||
}
|
||||
const ChatInputArea = ({
|
||||
readonly,
|
||||
botName,
|
||||
showFeatureBar,
|
||||
showFileUpload,
|
||||
@ -173,7 +170,6 @@ const ChatInputArea = ({
|
||||
const operation = (
|
||||
<Operation
|
||||
ref={holdSpaceRef}
|
||||
readonly={readonly}
|
||||
fileConfig={visionConfig}
|
||||
speechToTextConfig={speechToTextConfig}
|
||||
onShowVoiceInput={handleShowVoiceInput}
|
||||
@ -209,7 +205,7 @@ const ChatInputArea = ({
|
||||
className={cn(
|
||||
'body-lg-regular w-full resize-none bg-transparent p-1 leading-6 text-text-primary outline-none',
|
||||
)}
|
||||
placeholder={decode(t(readonly ? 'chat.inputDisabledPlaceholder' : 'chat.inputPlaceholder', { ns: 'common', botName }) || '')}
|
||||
placeholder={decode(t('chat.inputPlaceholder', { ns: 'common', botName }) || '')}
|
||||
autoFocus
|
||||
minRows={1}
|
||||
value={query}
|
||||
@ -222,7 +218,6 @@ const ChatInputArea = ({
|
||||
onDragLeave={handleDragFileLeave}
|
||||
onDragOver={handleDragFileOver}
|
||||
onDrop={handleDropFile}
|
||||
readOnly={readonly}
|
||||
/>
|
||||
</div>
|
||||
{
|
||||
@ -244,14 +239,7 @@ const ChatInputArea = ({
|
||||
)
|
||||
}
|
||||
</div>
|
||||
{showFeatureBar && (
|
||||
<FeatureBar
|
||||
showFileUpload={showFileUpload}
|
||||
disabled={featureBarDisabled}
|
||||
onFeatureBarClick={readonly ? noop : onFeatureBarClick}
|
||||
hideEditEntrance={readonly}
|
||||
/>
|
||||
)}
|
||||
{showFeatureBar && <FeatureBar showFileUpload={showFileUpload} disabled={featureBarDisabled} onFeatureBarClick={onFeatureBarClick} />}
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
@ -8,7 +8,6 @@ import {
|
||||
RiMicLine,
|
||||
RiSendPlane2Fill,
|
||||
} from '@remixicon/react'
|
||||
import { noop } from 'es-toolkit/function'
|
||||
import { memo } from 'react'
|
||||
import ActionButton from '@/app/components/base/action-button'
|
||||
import Button from '@/app/components/base/button'
|
||||
@ -16,7 +15,6 @@ import { FileUploaderInChatInput } from '@/app/components/base/file-uploader'
|
||||
import { cn } from '@/utils/classnames'
|
||||
|
||||
type OperationProps = {
|
||||
readonly?: boolean
|
||||
fileConfig?: FileUpload
|
||||
speechToTextConfig?: EnableType
|
||||
onShowVoiceInput?: () => void
|
||||
@ -25,7 +23,6 @@ type OperationProps = {
|
||||
ref?: Ref<HTMLDivElement>
|
||||
}
|
||||
const Operation: FC<OperationProps> = ({
|
||||
readonly,
|
||||
ref,
|
||||
fileConfig,
|
||||
speechToTextConfig,
|
||||
@ -44,12 +41,11 @@ const Operation: FC<OperationProps> = ({
|
||||
ref={ref}
|
||||
>
|
||||
<div className="flex items-center space-x-1">
|
||||
{fileConfig?.enabled && <FileUploaderInChatInput readonly={readonly} fileConfig={fileConfig} />}
|
||||
{fileConfig?.enabled && <FileUploaderInChatInput fileConfig={fileConfig} />}
|
||||
{
|
||||
speechToTextConfig?.enabled && (
|
||||
<ActionButton
|
||||
size="l"
|
||||
disabled={readonly}
|
||||
onClick={onShowVoiceInput}
|
||||
>
|
||||
<RiMicLine className="h-5 w-5" />
|
||||
@ -60,7 +56,7 @@ const Operation: FC<OperationProps> = ({
|
||||
<Button
|
||||
className="ml-3 w-8 px-0"
|
||||
variant="primary"
|
||||
onClick={readonly ? noop : onSend}
|
||||
onClick={onSend}
|
||||
style={
|
||||
theme
|
||||
? {
|
||||
|
||||
@ -15,14 +15,10 @@ export type ChatContextValue = Pick<ChatProps, 'config'
|
||||
| 'onAnnotationEdited'
|
||||
| 'onAnnotationAdded'
|
||||
| 'onAnnotationRemoved'
|
||||
| 'disableFeedback'
|
||||
| 'onFeedback'> & {
|
||||
readonly?: boolean
|
||||
}
|
||||
| 'onFeedback'>
|
||||
|
||||
const ChatContext = createContext<ChatContextValue>({
|
||||
chatList: [],
|
||||
readonly: false,
|
||||
})
|
||||
|
||||
type ChatContextProviderProps = {
|
||||
@ -31,7 +27,6 @@ type ChatContextProviderProps = {
|
||||
|
||||
export const ChatContextProvider = ({
|
||||
children,
|
||||
readonly = false,
|
||||
config,
|
||||
isResponding,
|
||||
chatList,
|
||||
@ -43,13 +38,11 @@ export const ChatContextProvider = ({
|
||||
onAnnotationEdited,
|
||||
onAnnotationAdded,
|
||||
onAnnotationRemoved,
|
||||
disableFeedback,
|
||||
onFeedback,
|
||||
}: ChatContextProviderProps) => {
|
||||
return (
|
||||
<ChatContext.Provider value={{
|
||||
config,
|
||||
readonly,
|
||||
isResponding,
|
||||
chatList: chatList || [],
|
||||
showPromptLog,
|
||||
@ -60,7 +53,6 @@ export const ChatContextProvider = ({
|
||||
onAnnotationEdited,
|
||||
onAnnotationAdded,
|
||||
onAnnotationRemoved,
|
||||
disableFeedback,
|
||||
onFeedback,
|
||||
}}
|
||||
>
|
||||
|
||||
@ -36,8 +36,6 @@ import Question from './question'
|
||||
import TryToAsk from './try-to-ask'
|
||||
|
||||
export type ChatProps = {
|
||||
isTryApp?: boolean
|
||||
readonly?: boolean
|
||||
appData?: AppData
|
||||
chatList: ChatItem[]
|
||||
config?: ChatConfig
|
||||
@ -62,7 +60,6 @@ export type ChatProps = {
|
||||
onAnnotationAdded?: (annotationId: string, authorName: string, question: string, answer: string, index: number) => void
|
||||
onAnnotationRemoved?: (index: number) => void
|
||||
chatNode?: ReactNode
|
||||
disableFeedback?: boolean
|
||||
onFeedback?: (messageId: string, feedback: Feedback) => void
|
||||
chatAnswerContainerInner?: string
|
||||
hideProcessDetail?: boolean
|
||||
@ -78,8 +75,6 @@ export type ChatProps = {
|
||||
}
|
||||
|
||||
const Chat: FC<ChatProps> = ({
|
||||
isTryApp,
|
||||
readonly = false,
|
||||
appData,
|
||||
config,
|
||||
onSend,
|
||||
@ -103,7 +98,6 @@ const Chat: FC<ChatProps> = ({
|
||||
onAnnotationEdited,
|
||||
onAnnotationRemoved,
|
||||
chatNode,
|
||||
disableFeedback,
|
||||
onFeedback,
|
||||
chatAnswerContainerInner,
|
||||
hideProcessDetail,
|
||||
@ -251,7 +245,6 @@ const Chat: FC<ChatProps> = ({
|
||||
|
||||
return (
|
||||
<ChatContextProvider
|
||||
readonly={readonly}
|
||||
config={config}
|
||||
chatList={chatList}
|
||||
isResponding={isResponding}
|
||||
@ -263,18 +256,17 @@ const Chat: FC<ChatProps> = ({
|
||||
onAnnotationAdded={onAnnotationAdded}
|
||||
onAnnotationEdited={onAnnotationEdited}
|
||||
onAnnotationRemoved={onAnnotationRemoved}
|
||||
disableFeedback={disableFeedback}
|
||||
onFeedback={onFeedback}
|
||||
>
|
||||
<div className={cn('relative h-full', isTryApp && 'flex flex-col')}>
|
||||
<div className="relative h-full">
|
||||
<div
|
||||
ref={chatContainerRef}
|
||||
className={cn('relative h-full overflow-y-auto overflow-x-hidden', isTryApp && 'h-0 grow', chatContainerClassName)}
|
||||
className={cn('relative h-full overflow-y-auto overflow-x-hidden', chatContainerClassName)}
|
||||
>
|
||||
{chatNode}
|
||||
<div
|
||||
ref={chatContainerInnerRef}
|
||||
className={cn('w-full', !noSpacing && 'px-8', chatContainerInnerClassName, isTryApp && 'px-0')}
|
||||
className={cn('w-full', !noSpacing && 'px-8', chatContainerInnerClassName)}
|
||||
>
|
||||
{
|
||||
chatList.map((item, index) => {
|
||||
@ -318,7 +310,7 @@ const Chat: FC<ChatProps> = ({
|
||||
>
|
||||
<div
|
||||
ref={chatFooterInnerRef}
|
||||
className={cn('relative', chatFooterInnerClassName, isTryApp && 'px-0')}
|
||||
className={cn('relative', chatFooterInnerClassName)}
|
||||
>
|
||||
{
|
||||
!noStopResponding && isResponding && (
|
||||
@ -341,7 +333,7 @@ const Chat: FC<ChatProps> = ({
|
||||
{
|
||||
!noChatInput && (
|
||||
<ChatInputArea
|
||||
botName={appData?.site?.title || 'Bot'}
|
||||
botName={appData?.site.title || 'Bot'}
|
||||
disabled={inputDisabled}
|
||||
showFeatureBar={showFeatureBar}
|
||||
showFileUpload={showFileUpload}
|
||||
@ -354,7 +346,6 @@ const Chat: FC<ChatProps> = ({
|
||||
inputsForm={inputsForm}
|
||||
theme={themeBuilder?.theme}
|
||||
isResponding={isResponding}
|
||||
readonly={readonly}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
@ -13,7 +13,6 @@ import LogoAvatar from '@/app/components/base/logo/logo-embedded-chat-avatar'
|
||||
import { Markdown } from '@/app/components/base/markdown'
|
||||
import { InputVarType } from '@/app/components/workflow/types'
|
||||
import {
|
||||
AppSourceType,
|
||||
fetchSuggestedQuestions,
|
||||
getUrl,
|
||||
stopChatMessageResponding,
|
||||
@ -43,7 +42,6 @@ const ChatWrapper = () => {
|
||||
isInstalledApp,
|
||||
appId,
|
||||
appMeta,
|
||||
disableFeedback,
|
||||
handleFeedback,
|
||||
currentChatInstanceRef,
|
||||
themeBuilder,
|
||||
@ -52,9 +50,7 @@ const ChatWrapper = () => {
|
||||
setIsResponding,
|
||||
allInputsHidden,
|
||||
initUserVariables,
|
||||
appSourceType,
|
||||
} = useEmbeddedChatbotContext()
|
||||
|
||||
const appConfig = useMemo(() => {
|
||||
const config = appParams || {}
|
||||
|
||||
@ -82,7 +78,7 @@ const ChatWrapper = () => {
|
||||
inputsForm: inputsForms,
|
||||
},
|
||||
appPrevChatList,
|
||||
taskId => stopChatMessageResponding('', taskId, appSourceType, appId),
|
||||
taskId => stopChatMessageResponding('', taskId, isInstalledApp, appId),
|
||||
clearChatList,
|
||||
setClearChatList,
|
||||
)
|
||||
@ -138,13 +134,14 @@ const ChatWrapper = () => {
|
||||
conversation_id: currentConversationId,
|
||||
parent_message_id: (isRegenerate ? parentAnswer?.id : getLastAnswer(chatList)?.id) || null,
|
||||
}
|
||||
|
||||
handleSend(
|
||||
getUrl('chat-messages', appSourceType, appId || ''),
|
||||
getUrl('chat-messages', isInstalledApp, appId || ''),
|
||||
data,
|
||||
{
|
||||
onGetSuggestedQuestions: responseItemId => fetchSuggestedQuestions(responseItemId, appSourceType, appId),
|
||||
onGetSuggestedQuestions: responseItemId => fetchSuggestedQuestions(responseItemId, isInstalledApp, appId),
|
||||
onConversationComplete: currentConversationId ? undefined : handleNewConversationCompleted,
|
||||
isPublicAPI: appSourceType === AppSourceType.webApp,
|
||||
isPublicAPI: !isInstalledApp,
|
||||
},
|
||||
)
|
||||
}, [currentConversationId, currentConversationInputs, newConversationInputs, chatList, handleSend, isInstalledApp, appId, handleNewConversationCompleted])
|
||||
@ -162,8 +159,7 @@ const ChatWrapper = () => {
|
||||
return chatList.filter(item => !item.isOpeningStatement)
|
||||
}, [chatList, currentConversationId])
|
||||
|
||||
const isTryApp = appSourceType === AppSourceType.tryApp
|
||||
const [collapsed, setCollapsed] = useState(!!currentConversationId && !isTryApp) // try app always use the new chat
|
||||
const [collapsed, setCollapsed] = useState(!!currentConversationId)
|
||||
|
||||
const chatNode = useMemo(() => {
|
||||
if (allInputsHidden || !inputsForms.length)
|
||||
@ -188,8 +184,6 @@ const ChatWrapper = () => {
|
||||
return null
|
||||
if (!collapsed && inputsForms.length > 0 && !allInputsHidden)
|
||||
return null
|
||||
if (!appData?.site)
|
||||
return null
|
||||
if (welcomeMessage.suggestedQuestions && welcomeMessage.suggestedQuestions?.length > 0) {
|
||||
return (
|
||||
<div className={cn('flex items-center justify-center px-4 py-12', isMobile ? 'min-h-[30vh] py-0' : 'h-[50vh]')}>
|
||||
@ -223,7 +217,7 @@ const ChatWrapper = () => {
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}, [appData?.site, chatList, collapsed, currentConversationId, inputsForms.length, respondingState, allInputsHidden])
|
||||
}, [appData?.site.icon, appData?.site.icon_background, appData?.site.icon_type, appData?.site.icon_url, chatList, collapsed, currentConversationId, inputsForms.length, respondingState, allInputsHidden])
|
||||
|
||||
const answerIcon = isDify()
|
||||
? <LogoAvatar className="relative shrink-0" />
|
||||
@ -240,7 +234,6 @@ const ChatWrapper = () => {
|
||||
|
||||
return (
|
||||
<Chat
|
||||
isTryApp={isTryApp}
|
||||
appData={appData || undefined}
|
||||
config={appConfig}
|
||||
chatList={messageList}
|
||||
@ -260,7 +253,6 @@ const ChatWrapper = () => {
|
||||
</>
|
||||
)}
|
||||
allToolIcons={appMeta?.tool_icons || {}}
|
||||
disableFeedback={disableFeedback}
|
||||
onFeedback={handleFeedback}
|
||||
suggestedQuestions={suggestedQuestions}
|
||||
answerIcon={answerIcon}
|
||||
|
||||
@ -15,7 +15,6 @@ import type {
|
||||
} from '@/models/share'
|
||||
import { noop } from 'es-toolkit/function'
|
||||
import { createContext, useContext } from 'use-context-selector'
|
||||
import { AppSourceType } from '@/service/share'
|
||||
|
||||
export type EmbeddedChatbotContextValue = {
|
||||
appMeta: AppMeta | null
|
||||
@ -38,10 +37,8 @@ export type EmbeddedChatbotContextValue = {
|
||||
chatShouldReloadKey: string
|
||||
isMobile: boolean
|
||||
isInstalledApp: boolean
|
||||
appSourceType: AppSourceType
|
||||
allowResetChat: boolean
|
||||
appId?: string
|
||||
disableFeedback?: boolean
|
||||
handleFeedback: (messageId: string, feedback: Feedback) => void
|
||||
currentChatInstanceRef: RefObject<{ handleStop: () => void }>
|
||||
themeBuilder?: ThemeBuilder
|
||||
@ -77,7 +74,6 @@ export const EmbeddedChatbotContext = createContext<EmbeddedChatbotContextValue>
|
||||
handleNewConversationCompleted: noop,
|
||||
chatShouldReloadKey: '',
|
||||
isMobile: false,
|
||||
appSourceType: AppSourceType.webApp,
|
||||
isInstalledApp: false,
|
||||
allowResetChat: true,
|
||||
handleFeedback: noop,
|
||||
|
||||
@ -5,7 +5,6 @@ import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
|
||||
import { act, renderHook, waitFor } from '@testing-library/react'
|
||||
import { ToastProvider } from '@/app/components/base/toast'
|
||||
import {
|
||||
AppSourceType,
|
||||
fetchChatList,
|
||||
fetchConversations,
|
||||
generationConversationName,
|
||||
@ -146,7 +145,7 @@ describe('useEmbeddedChatbot', () => {
|
||||
mockFetchChatList.mockResolvedValue({ data: [] })
|
||||
|
||||
// Act
|
||||
const { result } = renderWithClient(() => useEmbeddedChatbot(AppSourceType.webApp))
|
||||
const { result } = renderWithClient(() => useEmbeddedChatbot())
|
||||
|
||||
// Assert
|
||||
await waitFor(() => {
|
||||
@ -178,7 +177,7 @@ describe('useEmbeddedChatbot', () => {
|
||||
mockFetchChatList.mockResolvedValue({ data: [] })
|
||||
mockGenerationConversationName.mockResolvedValue(generatedConversation)
|
||||
|
||||
const { result, queryClient } = renderWithClient(() => useEmbeddedChatbot(AppSourceType.webApp))
|
||||
const { result, queryClient } = renderWithClient(() => useEmbeddedChatbot())
|
||||
const invalidateSpy = vi.spyOn(queryClient, 'invalidateQueries')
|
||||
|
||||
// Act
|
||||
@ -208,7 +207,7 @@ describe('useEmbeddedChatbot', () => {
|
||||
mockFetchChatList.mockResolvedValue({ data: [] })
|
||||
mockGenerationConversationName.mockResolvedValue(createConversationItem({ id: 'conversation-1' }))
|
||||
|
||||
const { result } = renderWithClient(() => useEmbeddedChatbot(AppSourceType.webApp))
|
||||
const { result } = renderWithClient(() => useEmbeddedChatbot())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockFetchChatList).toHaveBeenCalledTimes(1)
|
||||
@ -238,7 +237,7 @@ describe('useEmbeddedChatbot', () => {
|
||||
mockFetchChatList.mockResolvedValue({ data: [] })
|
||||
mockGenerationConversationName.mockResolvedValue(createConversationItem({ id: 'conversation-new' }))
|
||||
|
||||
const { result } = renderWithClient(() => useEmbeddedChatbot(AppSourceType.webApp))
|
||||
const { result } = renderWithClient(() => useEmbeddedChatbot())
|
||||
|
||||
// Act
|
||||
act(() => {
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
import type { CitationItem, InputForm, ThoughtItem } from '../chat/type'
|
||||
import type {
|
||||
ChatConfig,
|
||||
ChatItem,
|
||||
@ -6,10 +5,9 @@ import type {
|
||||
} from '../types'
|
||||
import type { Locale } from '@/i18n-config'
|
||||
import type {
|
||||
AppData,
|
||||
// AppData,
|
||||
ConversationItem,
|
||||
} from '@/models/share'
|
||||
import type { FileResponse } from '@/types/workflow'
|
||||
import { useLocalStorageState } from 'ahooks'
|
||||
import { noop } from 'es-toolkit/function'
|
||||
import { produce } from 'immer'
|
||||
@ -26,111 +24,65 @@ import { addFileInfos, sortAgentSorts } from '@/app/components/tools/utils'
|
||||
import { InputVarType } from '@/app/components/workflow/types'
|
||||
import { useWebAppStore } from '@/context/web-app-context'
|
||||
import { changeLanguage } from '@/i18n-config/client'
|
||||
import { AppSourceType, updateFeedback } from '@/service/share'
|
||||
import { updateFeedback } from '@/service/share'
|
||||
import {
|
||||
useInvalidateShareConversations,
|
||||
useShareChatList,
|
||||
useShareConversationName,
|
||||
useShareConversations,
|
||||
} from '@/service/use-share'
|
||||
import { useGetTryAppInfo, useGetTryAppParams } from '@/service/use-try-app'
|
||||
import { TransferMethod } from '@/types/app'
|
||||
import { getProcessedFilesFromResponse } from '../../file-uploader/utils'
|
||||
import { CONVERSATION_ID_INFO } from '../constants'
|
||||
|
||||
import { buildChatItemTree, getProcessedInputsFromUrlParams, getProcessedSystemVariablesFromUrlParams, getProcessedUserVariablesFromUrlParams } from '../utils'
|
||||
|
||||
// Message file type from API response
|
||||
type MessageFileFromApi = {
|
||||
id: string
|
||||
belongs_to: 'user' | 'assistant'
|
||||
[key: string]: unknown
|
||||
}
|
||||
|
||||
// Message item type from API response
|
||||
type MessageItem = {
|
||||
id: string
|
||||
query: string
|
||||
answer: string
|
||||
message_files?: MessageFileFromApi[]
|
||||
parent_message_id?: string
|
||||
feedback?: Feedback
|
||||
agent_thoughts?: ThoughtItem[]
|
||||
retriever_resources?: CitationItem[]
|
||||
}
|
||||
|
||||
function getFormattedChatList(messages: MessageItem[]) {
|
||||
function getFormattedChatList(messages: any[]) {
|
||||
const newChatList: ChatItem[] = []
|
||||
messages.forEach((item) => {
|
||||
const questionFiles = item.message_files?.filter(file => file.belongs_to === 'user') || []
|
||||
const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
|
||||
newChatList.push({
|
||||
id: `question-${item.id}`,
|
||||
content: item.query,
|
||||
isAnswer: false,
|
||||
message_files: getProcessedFilesFromResponse(questionFiles.map(file => ({ ...file, related_id: file.id })) as unknown as FileResponse[]),
|
||||
message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
|
||||
parentMessageId: item.parent_message_id || undefined,
|
||||
})
|
||||
const answerFiles = item.message_files?.filter(file => file.belongs_to === 'assistant') || []
|
||||
const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
|
||||
newChatList.push({
|
||||
id: item.id,
|
||||
content: item.answer,
|
||||
agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : [], item.message_files as unknown as FileResponse[]),
|
||||
agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
|
||||
feedback: item.feedback,
|
||||
isAnswer: true,
|
||||
citation: item.retriever_resources,
|
||||
message_files: getProcessedFilesFromResponse(answerFiles.map(file => ({ ...file, related_id: file.id })) as unknown as FileResponse[]),
|
||||
message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
|
||||
parentMessageId: `question-${item.id}`,
|
||||
})
|
||||
})
|
||||
return newChatList
|
||||
}
|
||||
|
||||
export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: string) => {
const isInstalledApp = false // just can be webapp and try app
const isTryApp = appSourceType === AppSourceType.tryApp
const { data: tryAppInfo } = useGetTryAppInfo(isTryApp ? tryAppId! : '')
const webAppInfo = useWebAppStore(s => s.appInfo)
const appInfo = isTryApp ? tryAppInfo : webAppInfo
export const useEmbeddedChatbot = () => {
const isInstalledApp = false
const appInfo = useWebAppStore(s => s.appInfo)
const appMeta = useWebAppStore(s => s.appMeta)
const { data: tryAppParams } = useGetTryAppParams(isTryApp ? tryAppId! : '')
const webAppParams = useWebAppStore(s => s.appParams)
const appParams = isTryApp ? tryAppParams : webAppParams

const appId = useMemo(() => {
return isTryApp ? tryAppId : (appInfo as AppData | undefined)?.app_id
}, [appInfo, isTryApp, tryAppId])

const appParams = useWebAppStore(s => s.appParams)
const embeddedConversationId = useWebAppStore(s => s.embeddedConversationId)
const embeddedUserId = useWebAppStore(s => s.embeddedUserId)
const appId = useMemo(() => appInfo?.app_id, [appInfo])

const [userId, setUserId] = useState<string>()
const [conversationId, setConversationId] = useState<string>()

useEffect(() => {
if (isTryApp)
return
getProcessedSystemVariablesFromUrlParams().then(({ user_id, conversation_id }) => {
setUserId(user_id)
setConversationId(conversation_id)
})
}, [isTryApp])

// Sync embedded user/conversation IDs to internal state when external values change
const syncedUserId = embeddedUserId || undefined
const syncedConversationId = embeddedConversationId || undefined
useEffect(() => {
// eslint-disable-next-line react-hooks-extra/no-direct-set-state-in-use-effect
setUserId(syncedUserId)
}, [syncedUserId])
setUserId(embeddedUserId || undefined)
}, [embeddedUserId])

useEffect(() => {
// eslint-disable-next-line react-hooks-extra/no-direct-set-state-in-use-effect
setConversationId(syncedConversationId)
}, [syncedConversationId])
setConversationId(embeddedConversationId || undefined)
}, [embeddedConversationId])

useEffect(() => {
if (isTryApp)
return
const setLanguageFromParams = async () => {
// Check URL parameters for language override
const urlParams = new URLSearchParams(window.location.search)
@ -148,25 +100,18 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
// If locale is set as a system variable, use that
await changeLanguage(localeFromSysVar)
}
else if ((appInfo as unknown as AppData)?.site?.default_language) {
else if (appInfo?.site.default_language) {
// Otherwise use the default from app config
await changeLanguage((appInfo as unknown as AppData).site?.default_language)
await changeLanguage(appInfo.site.default_language)
}
}

setLanguageFromParams()
}, [appInfo, isTryApp])
}, [appInfo])

const [conversationIdInfo, setConversationIdInfo] = useLocalStorageState<Record<string, Record<string, string>>>(CONVERSATION_ID_INFO, {
defaultValue: {},
})
const removeConversationIdInfo = useCallback((appId: string) => {
setConversationIdInfo((prev) => {
const newInfo = { ...prev }
delete newInfo[appId]
return newInfo
})
}, [setConversationIdInfo])
const allowResetChat = !conversationId
const currentConversationId = useMemo(() => conversationIdInfo?.[appId || '']?.[userId || 'DEFAULT'] || conversationId || '', [appId, conversationIdInfo, userId, conversationId])
const handleConversationIdInfoChange = useCallback((changeConversationId: string) => {
@ -193,7 +138,7 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
}, [currentConversationId, newConversationId])

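The currentConversationId lookup above reads from a two-level map persisted in localStorage under CONVERSATION_ID_INFO: app id first, then user id with a 'DEFAULT' fallback. A self-contained sketch of that shape, with pure functions standing in for the useLocalStorageState wiring:

type ConversationIdInfo = Record<string, Record<string, string>>

// appId -> (userId | 'DEFAULT') -> conversationId
const readConversationId = (info: ConversationIdInfo, appId?: string, userId?: string): string =>
  info?.[appId || '']?.[userId || 'DEFAULT'] || ''

const writeConversationId = (info: ConversationIdInfo, appId: string, userId: string | undefined, conversationId: string): ConversationIdInfo => ({
  ...info,
  [appId]: { ...info[appId], [userId || 'DEFAULT']: conversationId },
})
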
const { data: appPinnedConversationData } = useShareConversations({
appSourceType,
isInstalledApp,
appId,
pinned: true,
limit: 100,
@ -202,7 +147,7 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
data: appConversationData,
isLoading: appConversationDataLoading,
} = useShareConversations({
appSourceType,
isInstalledApp,
appId,
pinned: false,
limit: 100,
@ -212,7 +157,7 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
isLoading: appChatListDataLoading,
} = useShareChatList({
conversationId: chatShouldReloadKey,
appSourceType,
isInstalledApp,
appId,
})
const invalidateShareConversations = useInvalidateShareConversations()
@ -232,22 +177,18 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
return appPinnedConversationData?.data || []
}, [appPinnedConversationData])
const { t } = useTranslation()
const newConversationInputsRef = useRef<Record<string, unknown>>({})
const [newConversationInputs, setNewConversationInputs] = useState<Record<string, unknown>>({})
const [initInputs, setInitInputs] = useState<Record<string, unknown>>({})
const [initUserVariables, setInitUserVariables] = useState<Record<string, unknown>>({})
const handleNewConversationInputsChange = useCallback((newInputs: Record<string, unknown>) => {
const newConversationInputsRef = useRef<Record<string, any>>({})
const [newConversationInputs, setNewConversationInputs] = useState<Record<string, any>>({})
const [initInputs, setInitInputs] = useState<Record<string, any>>({})
const [initUserVariables, setInitUserVariables] = useState<Record<string, any>>({})
const handleNewConversationInputsChange = useCallback((newInputs: Record<string, any>) => {
newConversationInputsRef.current = newInputs
// eslint-disable-next-line react-hooks-extra/no-direct-set-state-in-use-effect
setNewConversationInputs(newInputs)
}, [])
const inputsForms = useMemo(() => {
type FormItemRaw = { variable?: string, max_length?: number, default?: unknown, options?: string[], [key: string]: unknown }
type UserInputFormItem = { 'paragraph'?: FormItemRaw, 'number'?: FormItemRaw, 'checkbox'?: FormItemRaw, 'select'?: FormItemRaw & { options?: string[] }, 'file-list'?: FormItemRaw, 'file'?: FormItemRaw, 'json_object'?: FormItemRaw, 'text-input'?: FormItemRaw, 'default'?: unknown, 'external_data_tool'?: unknown }
return (appParams?.user_input_form || []).filter((item: UserInputFormItem) => !item.external_data_tool).map((item: UserInputFormItem) => {
return (appParams?.user_input_form || []).filter((item: any) => !item.external_data_tool).map((item: any) => {
if (item.paragraph) {
const varKey = item.paragraph.variable
let value = varKey ? initInputs[varKey] as string | undefined : undefined
let value = initInputs[item.paragraph.variable]
if (value && item.paragraph.max_length && value.length > item.paragraph.max_length)
value = value.slice(0, item.paragraph.max_length)

@ -258,8 +199,7 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
}
}
if (item.number) {
const varKey = item.number.variable
const convertedNumber = varKey ? Number(initInputs[varKey]) : undefined
const convertedNumber = Number(initInputs[item.number.variable])
return {
...item.number,
default: convertedNumber || item.default || item.number.default,
@ -268,8 +208,7 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
}

if (item.checkbox) {
const varKey = item.checkbox.variable
const preset = varKey ? initInputs[varKey] === true : false
const preset = initInputs[item.checkbox.variable] === true
return {
...item.checkbox,
default: preset || item.default || item.checkbox.default,
@ -278,13 +217,10 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
}

if (item.select) {
const varKey = item.select.variable
const options = item.select.options
const inputValue = varKey ? initInputs[varKey] : undefined
const isInputInOptions = options?.includes(inputValue as string) ?? false
const isInputInOptions = item.select.options.includes(initInputs[item.select.variable])
return {
...item.select,
default: (isInputInOptions ? inputValue : undefined) || item.select.default,
default: (isInputInOptions ? initInputs[item.select.variable] : undefined) || item.select.default,
type: 'select',
}
}
@ -310,21 +246,16 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
}
}

const textInput = item['text-input']
if (!textInput)
return null

const textVarKey = textInput.variable
let value = textVarKey ? initInputs[textVarKey] as string | undefined : undefined
if (value && textInput.max_length && value.length > textInput.max_length)
value = value.slice(0, textInput.max_length)
let value = initInputs[item['text-input'].variable]
if (value && item['text-input'].max_length && value.length > item['text-input'].max_length)
value = value.slice(0, item['text-input'].max_length)

return {
...textInput,
default: value || item.default || textInput.default,
...item['text-input'],
default: value || item.default || item['text-input'].default,
type: 'text-input',
}
}).filter(Boolean) as InputForm[]
})
}, [initInputs, appParams])

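Every branch of the inputsForms memo above follows the same pattern: unwrap the control-specific config, seed its default from the URL-derived initInputs (clamped or validated per control), and tag the result with a flat type field. A hedged sketch of the text-input case in isolation (FormItemRaw here is a simplified assumption, mirroring the type alias in the diff):

type FormItemRaw = { variable?: string, max_length?: number, default?: unknown }

// Clamp the URL-seeded value to max_length, then fall back through the
// declared defaults, and flatten the control kind into `type`.
const normalizeTextInput = (raw: FormItemRaw, initInputs: Record<string, unknown>) => {
  const varKey = raw.variable
  let value = varKey ? initInputs[varKey] as string | undefined : undefined
  if (value && raw.max_length && value.length > raw.max_length)
    value = value.slice(0, raw.max_length)
  return { ...raw, default: value || raw.default, type: 'text-input' as const }
}
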
const allInputsHidden = useMemo(() => {
@ -334,18 +265,16 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
useEffect(() => {
// init inputs from url params
(async () => {
if (isTryApp)
return
const inputs = await getProcessedInputsFromUrlParams()
const userVariables = await getProcessedUserVariablesFromUrlParams()
setInitInputs(inputs)
setInitUserVariables(userVariables)
})()
}, [isTryApp])
}, [])
useEffect(() => {
const conversationInputs: Record<string, unknown> = {}
const conversationInputs: Record<string, any> = {}

inputsForms.forEach((item: InputForm) => {
inputsForms.forEach((item: any) => {
conversationInputs[item.variable] = item.default || null
})
handleNewConversationInputsChange(conversationInputs)
@ -353,19 +282,16 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri

const { data: newConversation } = useShareConversationName({
conversationId: newConversationId,
appSourceType,
isInstalledApp,
appId,
}, {
refetchOnWindowFocus: false,
enabled: !isTryApp,
})
const [originConversationList, setOriginConversationList] = useState<ConversationItem[]>([])
const conversationData = appConversationData?.data
useEffect(() => {
if (conversationData && !appConversationDataLoading)
// eslint-disable-next-line react-hooks-extra/no-direct-set-state-in-use-effect
setOriginConversationList(conversationData)
}, [conversationData, appConversationDataLoading])
if (appConversationData?.data && !appConversationDataLoading)
setOriginConversationList(appConversationData?.data)
}, [appConversationData, appConversationDataLoading])
const conversationList = useMemo(() => {
const data = originConversationList.slice()

@ -382,8 +308,7 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri

useEffect(() => {
if (newConversation) {
// eslint-disable-next-line react-hooks-extra/no-direct-set-state-in-use-effect
setOriginConversationList(prev => produce(prev, (draft) => {
setOriginConversationList(produce((draft) => {
const index = draft.findIndex(item => item.id === newConversation.id)

if (index > -1)
@ -408,12 +333,11 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
return newConversationInputsRef.current || {}
return appChatListData.data.slice().pop().inputs || {}
}, [appChatListData, currentConversationId])
const [currentConversationInputs, setCurrentConversationInputs] = useState<Record<string, unknown>>(currentConversationLatestInputs || {})
const [currentConversationInputs, setCurrentConversationInputs] = useState<Record<string, any>>(currentConversationLatestInputs || {})
useEffect(() => {
if (currentConversationItem && !isTryApp)
// eslint-disable-next-line react-hooks-extra/no-direct-set-state-in-use-effect
if (currentConversationItem)
setCurrentConversationInputs(currentConversationLatestInputs || {})
}, [currentConversationItem, currentConversationLatestInputs, isTryApp])
}, [currentConversationItem, currentConversationLatestInputs])

const { notify } = useToastContext()
const checkInputsRequired = useCallback((silent?: boolean) => {
@ -435,9 +359,9 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
hasEmptyInput = label as string

if ((type === InputVarType.singleFile || type === InputVarType.multiFiles) && newConversationInputsRef.current[variable] && !silent) {
const files = newConversationInputsRef.current[variable] as { transferMethod?: TransferMethod, uploadedId?: string } | { transferMethod?: TransferMethod, uploadedId?: string }[]
const files = newConversationInputsRef.current[variable]
if (Array.isArray(files))
fileIsUploading = !!files.find(item => item.transferMethod === TransferMethod.local_file && !item.uploadedId)
fileIsUploading = files.find(item => item.transferMethod === TransferMethod.local_file && !item.uploadedId)
else
fileIsUploading = files.transferMethod === TransferMethod.local_file && !files.uploadedId
}
@ -456,7 +380,7 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri

return true
}, [inputsForms, notify, t, allInputsHidden])
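The file check inside checkInputsRequired treats a locally-transferred file without an uploadedId as still uploading, for both single-file and multi-file variables. The predicate in isolation (FileEntity is a simplified assumption of the stored shape):

enum TransferMethod { local_file = 'local_file', remote_url = 'remote_url' }
type FileEntity = { transferMethod?: TransferMethod, uploadedId?: string }

// A local file that has not yet received an uploadedId is still in flight.
const isUploading = (file: FileEntity): boolean =>
  file.transferMethod === TransferMethod.local_file && !file.uploadedId

const fileIsUploading = (value: FileEntity | FileEntity[]): boolean =>
  Array.isArray(value) ? value.some(isUploading) : isUploading(value)
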
const handleStartChat = useCallback((callback?: () => void) => {
const handleStartChat = useCallback((callback?: any) => {
if (checkInputsRequired()) {
setShowNewConversationItemInList(true)
callback?.()
@ -471,17 +395,12 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
setClearChatList(false)
}, [handleConversationIdInfoChange, setClearChatList])
const handleNewConversation = useCallback(async () => {
if (isTryApp) {
setClearChatList(true)
return
}

currentChatInstanceRef.current.handleStop()
setShowNewConversationItemInList(true)
handleChangeConversation('')
handleNewConversationInputsChange(await getProcessedInputsFromUrlParams())
setClearChatList(true)
}, [isTryApp, setShowNewConversationItemInList, handleNewConversationInputsChange, setClearChatList, handleChangeConversation])
}, [handleChangeConversation, setShowNewConversationItemInList, handleNewConversationInputsChange, setClearChatList])

const handleNewConversationCompleted = useCallback((newConversationId: string) => {
setNewConversationId(newConversationId)
@ -491,18 +410,16 @@ export const useEmbeddedChatbot = (appSourceType: AppSourceType, tryAppId?: stri
}, [handleConversationIdInfoChange, invalidateShareConversations])

const handleFeedback = useCallback(async (messageId: string, feedback: Feedback) => {
await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, appSourceType, appId)
await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating, content: feedback.content } }, isInstalledApp, appId)
notify({ type: 'success', message: t('api.success', { ns: 'common' }) })
}, [appSourceType, appId, t, notify])
}, [isInstalledApp, appId, t, notify])

return {
appSourceType,
isInstalledApp,
allowResetChat,
appId,
currentConversationId,
currentConversationItem,
removeConversationIdInfo,
handleConversationIdInfoChange,
appData: appInfo,
appParams: appParams || {} as ChatConfig,

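The handleFeedback change is representative of the whole hunk: the boolean isInstalledApp argument to the share-service calls is generalized into an AppSourceType discriminator. A hypothetical sketch of what such a discriminator enables (webApp and tryApp appear in the diff; installedApp and the URL prefixes are illustrative assumptions, not the real service routing):

enum AppSourceType { webApp = 'webApp', installedApp = 'installedApp', tryApp = 'tryApp' }

// Illustrative only: choose a request prefix per app source.
const apiPrefix = (source: AppSourceType, appId: string): string => {
  switch (source) {
    case AppSourceType.installedApp: return `/installed-apps/${appId}` // assumed
    case AppSourceType.tryApp: return `/trial-apps/${appId}` // assumed
    default: return '' // plain web-app shares hit the bare path
  }
}
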
@ -1,5 +1,4 @@
'use client'
import type { AppData } from '@/models/share'
import {
useEffect,
} from 'react'
@ -12,7 +11,6 @@ import LogoHeader from '@/app/components/base/logo/logo-embedded-chat-header'
import { useGlobalPublicStore } from '@/context/global-public-context'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
import useDocumentTitle from '@/hooks/use-document-title'
import { AppSourceType } from '@/service/share'
import { cn } from '@/utils/classnames'
import {
EmbeddedChatbotContext,
@ -134,12 +132,11 @@ const EmbeddedChatbotWrapper = () => {
setCurrentConversationInputs,
allInputsHidden,
initUserVariables,
} = useEmbeddedChatbot(AppSourceType.webApp)
} = useEmbeddedChatbot()

return (
<EmbeddedChatbotContext.Provider value={{
appSourceType: AppSourceType.webApp,
appData: (appData as AppData) || null,
appData,
appParams,
appMeta,
appChatListDataLoading,

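The wrapper pins appSourceType once at the provider, so descendants such as the inputs form read it from context instead of receiving it as a prop. A trimmed sketch of the pattern (the real EmbeddedChatbotContext carries far more state than this one field):

import { createContext, useContext } from 'react'
import { AppSourceType } from '@/service/share'

const AppSourceContext = createContext<AppSourceType>(AppSourceType.webApp)
export const useAppSource = () => useContext(AppSourceContext)

// Usage: wrap the tree once, e.g.
// <AppSourceContext.Provider value={AppSourceType.webApp}>{children}</AppSourceContext.Provider>
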
@ -4,7 +4,6 @@ import Button from '@/app/components/base/button'
import InputsFormContent from '@/app/components/base/chat/embedded-chatbot/inputs-form/content'
import Divider from '@/app/components/base/divider'
import { Message3Fill } from '@/app/components/base/icons/src/public/other'
import { AppSourceType } from '@/service/share'
import { cn } from '@/utils/classnames'
import { useEmbeddedChatbotContext } from '../context'

@ -19,7 +18,6 @@ const InputsFormNode = ({
}: Props) => {
const { t } = useTranslation()
const {
appSourceType,
isMobile,
currentConversationId,
themeBuilder,
@ -27,17 +25,15 @@
allInputsHidden,
inputsForms,
} = useEmbeddedChatbotContext()
const isTryApp = appSourceType === AppSourceType.tryApp

if (allInputsHidden || inputsForms.length === 0)
return null

return (
<div className={cn('mb-6 flex flex-col items-center px-4 pt-6', isMobile && 'mb-4 pt-4', isTryApp && 'mb-0 px-0')}>
<div className={cn('mb-6 flex flex-col items-center px-4 pt-6', isMobile && 'mb-4 pt-4')}>
<div className={cn(
'w-full max-w-[672px] rounded-2xl border-[0.5px] border-components-panel-border bg-components-panel-bg shadow-md',
collapsed && 'border border-components-card-border bg-components-card-bg shadow-none',
isTryApp && 'max-w-[auto]',
)}
>
<div className={cn(

@ -33,7 +33,7 @@ const ViewFormDropdown = ({ iconColor }: Props) => {
<RiChatSettingsLine className={cn('h-[18px] w-[18px]', iconColor)} />
</ActionButton>
</PortalToFollowElemTrigger>
<PortalToFollowElemContent className="z-[99]">
<PortalToFollowElemContent className="z-50">
<div className="w-[400px] rounded-2xl border-[0.5px] border-components-panel-border bg-components-panel-bg shadow-lg backdrop-blur-sm">
<div className="flex items-center gap-3 rounded-t-2xl border-b border-divider-subtle px-6 py-4">
<Message3Fill className="h-6 w-6 shrink-0" />

@ -3,8 +3,10 @@ import {
RiClipboardFill,
RiClipboardLine,
} from '@remixicon/react'
import { useClipboard } from 'foxact/use-clipboard'
import { useCallback } from 'react'
import copy from 'copy-to-clipboard'
import { debounce } from 'es-toolkit/compat'
import * as React from 'react'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'
import ActionButton from '@/app/components/base/action-button'
import Tooltip from '@/app/components/base/tooltip'
@ -19,27 +21,32 @@ const prefixEmbedded = 'overview.appInfo.embedded'

const CopyFeedback = ({ content }: Props) => {
const { t } = useTranslation()
const { copied, copy, reset } = useClipboard()
const [isCopied, setIsCopied] = useState<boolean>(false)

const handleCopy = useCallback(() => {
const onClickCopy = debounce(() => {
copy(content)
}, [copy, content])
setIsCopied(true)
}, 100)

const onMouseLeave = debounce(() => {
setIsCopied(false)
}, 100)

return (
<Tooltip
popupContent={
(copied
(isCopied
? t(`${prefixEmbedded}.copied`, { ns: 'appOverview' })
: t(`${prefixEmbedded}.copy`, { ns: 'appOverview' })) || ''
}
>
<ActionButton>
<div
onClick={handleCopy}
onMouseLeave={reset}
onClick={onClickCopy}
onMouseLeave={onMouseLeave}
>
{copied && <RiClipboardFill className="h-4 w-4" />}
{!copied && <RiClipboardLine className="h-4 w-4" />}
{isCopied && <RiClipboardFill className="h-4 w-4" />}
{!isCopied && <RiClipboardLine className="h-4 w-4" />}
</div>
</ActionButton>
</Tooltip>
@ -50,16 +57,21 @@ export default CopyFeedback

export const CopyFeedbackNew = ({ content, className }: Pick<Props, 'className' | 'content'>) => {
const { t } = useTranslation()
const { copied, copy, reset } = useClipboard()
const [isCopied, setIsCopied] = useState<boolean>(false)

const handleCopy = useCallback(() => {
const onClickCopy = debounce(() => {
copy(content)
}, [copy, content])
setIsCopied(true)
}, 100)

const onMouseLeave = debounce(() => {
setIsCopied(false)
}, 100)

return (
<Tooltip
popupContent={
(copied
(isCopied
? t(`${prefixEmbedded}.copied`, { ns: 'appOverview' })
: t(`${prefixEmbedded}.copy`, { ns: 'appOverview' })) || ''
}
@ -69,9 +81,9 @@ export const CopyFeedbackNew = ({ content, className }: Pick<Props, 'className'
}`}
>
<div
onClick={handleCopy}
onMouseLeave={reset}
className={`h-full w-full ${copyStyle.copyIcon} ${copied ? copyStyle.copied : ''
onClick={onClickCopy}
onMouseLeave={onMouseLeave}
className={`h-full w-full ${copyStyle.copyIcon} ${isCopied ? copyStyle.copied : ''
}`}
>
</div>

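Both sides of this hunk implement the same interaction: copy on click, flip a "copied" flag for the tooltip, reset it on mouse-leave. One side delegates to foxact's useClipboard hook; the other hand-rolls the flag with copy-to-clipboard plus two 100 ms debounces so rapid click/leave sequences do not flicker. A minimal extraction of the hand-rolled variant as a reusable hook (useCopied is a hypothetical name; the libraries and the 100 ms delay come from the diff):

import copy from 'copy-to-clipboard'
import { debounce } from 'es-toolkit/compat'
import { useMemo, useState } from 'react'

export const useCopied = (content: string) => {
  const [copied, setCopied] = useState(false)
  // Debounce both transitions so the tooltip state settles calmly.
  const onClickCopy = useMemo(() => debounce(() => {
    copy(content)
    setCopied(true)
  }, 100), [content])
  const onMouseLeave = useMemo(() => debounce(() => setCopied(false), 100), [])
  return { copied, onClickCopy, onMouseLeave }
}
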
@ -1,6 +1,8 @@
'use client'
import { useClipboard } from 'foxact/use-clipboard'
import { useCallback } from 'react'
import copy from 'copy-to-clipboard'
import { debounce } from 'es-toolkit/compat'
import * as React from 'react'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'
import {
Copy,
@ -16,24 +18,29 @@ const prefixEmbedded = 'overview.appInfo.embedded'

const CopyIcon = ({ content }: Props) => {
const { t } = useTranslation()
const { copied, copy, reset } = useClipboard()
const [isCopied, setIsCopied] = useState<boolean>(false)

const handleCopy = useCallback(() => {
const onClickCopy = debounce(() => {
copy(content)
}, [copy, content])
setIsCopied(true)
}, 100)

const onMouseLeave = debounce(() => {
setIsCopied(false)
}, 100)

return (
<Tooltip
popupContent={
(copied
(isCopied
? t(`${prefixEmbedded}.copied`, { ns: 'appOverview' })
: t(`${prefixEmbedded}.copy`, { ns: 'appOverview' })) || ''
}
>
<div onMouseLeave={reset}>
{!copied
<div onMouseLeave={onMouseLeave}>
{!isCopied
? (
<Copy className="mx-1 h-3.5 w-3.5 cursor-pointer text-text-tertiary" onClick={handleCopy} />
<Copy className="mx-1 h-3.5 w-3.5 cursor-pointer text-text-tertiary" onClick={onClickCopy} />
)
: (
<CopyCheck className="mx-1 h-3.5 w-3.5 text-text-tertiary" />

Some files were not shown because too many files have changed in this diff.