feat: Optimize code (reuse service-layer query helpers and scoped sessions)

FFXN
2026-01-28 17:09:31 +08:00
parent 6db70ffd9e
commit a300bc5616
7 changed files with 392 additions and 375 deletions

View File

@@ -42,7 +42,7 @@ from libs.datetime_utils import naive_utc_now
 from libs.login import current_account_with_tenant, login_required
 from models import DatasetProcessRule, Document, DocumentSegment, UploadFile
 from models.dataset import DocumentPipelineExecutionLog
-from services.dataset_service import DatasetService, DocumentService
+from services.dataset_service import DatasetService, DocumentService, SegmentService
 from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig, ProcessRule, RetrievalModel
 from services.file_service import FileService
 from tasks.generate_summary_index_task import generate_summary_index_task
@@ -1351,14 +1351,7 @@ class DocumentGenerateSummaryApi(Resource):
             raise ValueError("Summary index is not enabled for this dataset. Please enable it in the dataset settings.")

         # Verify all documents exist and belong to the dataset
-        documents = (
-            db.session.query(Document)
-            .filter(
-                Document.id.in_(document_list),
-                Document.dataset_id == dataset_id,
-            )
-            .all()
-        )
+        documents = DocumentService.get_documents_by_ids(dataset_id, document_list)
         if len(documents) != len(document_list):
             found_ids = {doc.id for doc in documents}
@@ -1422,15 +1415,11 @@ class DocumentSummaryStatusApi(DocumentResource):
             raise Forbidden(str(e))

         # Get all segments for this document
-        segments = (
-            db.session.query(DocumentSegment)
-            .filter(
-                DocumentSegment.document_id == document_id,
-                DocumentSegment.dataset_id == dataset_id,
-                DocumentSegment.status == "completed",
-                DocumentSegment.enabled == True,
-            )
-            .all()
+        segments = SegmentService.get_segments_by_document_and_dataset(
+            document_id=document_id,
+            dataset_id=dataset_id,
+            status="completed",
+            enabled=True,
         )
         total_segments = len(segments)
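
Note: DocumentService.get_documents_by_ids is called above but its definition is not part of this diff. A minimal sketch of what the call site implies, mirroring the inline query it replaces (the method name comes from the call site; the body shown here is an assumption):

@classmethod
def get_documents_by_ids(cls, dataset_id: str, document_ids: list[str]) -> list[Document]:
    # Hypothetical body: the same filter the controller previously ran inline.
    return (
        db.session.query(Document)
        .filter(
            Document.id.in_(document_ids),
            Document.dataset_id == dataset_id,
        )
        .all()
    )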

View File

@@ -7,6 +7,7 @@ from collections.abc import Mapping
 from typing import Any

 from configs import dify_config
+from core.db.session_factory import session_factory
 from core.entities.knowledge_entities import PreviewDetail
 from core.model_manager import ModelInstance
 from core.rag.cleaner.clean_processor import CleanProcessor
@@ -148,17 +149,18 @@ class ParentChildIndexProcessor(BaseIndexProcessor):
         if delete_summaries:
             if node_ids:
                 # Find segments by index_node_id
-                segments = (
-                    db.session.query(DocumentSegment)
-                    .filter(
-                        DocumentSegment.dataset_id == dataset.id,
-                        DocumentSegment.index_node_id.in_(node_ids),
+                with session_factory.create_session() as session:
+                    segments = (
+                        session.query(DocumentSegment)
+                        .filter(
+                            DocumentSegment.dataset_id == dataset.id,
+                            DocumentSegment.index_node_id.in_(node_ids),
+                        )
+                        .all()
                     )
-                    .all()
-                )
-                segment_ids = [segment.id for segment in segments]
-                if segment_ids:
-                    SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids)
+                    segment_ids = [segment.id for segment in segments]
+                    if segment_ids:
+                        SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids)
             else:
                 # Delete all summaries for the dataset
                 SummaryIndexService.delete_summaries_for_segments(dataset, None)
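
Editor's note: the processor now opens a short-lived session from session_factory instead of touching the shared db.session. A minimal sketch of the pattern, assuming create_session returns a context-managed SQLAlchemy Session (the helper name below is illustrative):

def find_segment_ids(dataset_id: str, node_ids: list[str]) -> list[str]:
    # The session is opened, used, and released in one scope, so no explicit
    # close() is needed on either the success or the error path.
    with session_factory.create_session() as session:
        segments = (
            session.query(DocumentSegment)
            .filter(
                DocumentSegment.dataset_id == dataset_id,
                DocumentSegment.index_node_id.in_(node_ids),
            )
            .all()
        )
        return [segment.id for segment in segments]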

View File

@@ -11,6 +11,7 @@ import pandas as pd
 from flask import Flask, current_app
 from werkzeug.datastructures import FileStorage

+from core.db.session_factory import session_factory
 from core.entities.knowledge_entities import PreviewDetail
 from core.llm_generator.llm_generator import LLMGenerator
 from core.rag.cleaner.clean_processor import CleanProcessor
@@ -24,7 +25,6 @@ from core.rag.index_processor.index_processor_base import BaseIndexProcessor
 from core.rag.models.document import AttachmentDocument, Document, QAStructureChunk
 from core.rag.retrieval.retrieval_methods import RetrievalMethod
 from core.tools.utils.text_processing_utils import remove_leading_symbols
-from extensions.ext_database import db
 from libs import helper
 from models.account import Account
 from models.dataset import Dataset, DocumentSegment
@@ -156,17 +156,18 @@ class QAIndexProcessor(BaseIndexProcessor):
         if delete_summaries:
             if node_ids:
                 # Find segments by index_node_id
-                segments = (
-                    db.session.query(DocumentSegment)
-                    .filter(
-                        DocumentSegment.dataset_id == dataset.id,
-                        DocumentSegment.index_node_id.in_(node_ids),
+                with session_factory.create_session() as session:
+                    segments = (
+                        session.query(DocumentSegment)
+                        .filter(
+                            DocumentSegment.dataset_id == dataset.id,
+                            DocumentSegment.index_node_id.in_(node_ids),
+                        )
+                        .all()
                     )
-                    .all()
-                )
-                segment_ids = [segment.id for segment in segments]
-                if segment_ids:
-                    SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids)
+                    segment_ids = [segment.id for segment in segments]
+                    if segment_ids:
+                        SummaryIndexService.delete_summaries_for_segments(dataset, segment_ids)
             else:
                 # Delete all summaries for the dataset
                 SummaryIndexService.delete_summaries_for_segments(dataset, None)

View File

@@ -3811,6 +3811,39 @@ class SegmentService:
         )
         return result if isinstance(result, DocumentSegment) else None

+    @classmethod
+    def get_segments_by_document_and_dataset(
+        cls,
+        document_id: str,
+        dataset_id: str,
+        status: str | None = None,
+        enabled: bool | None = None,
+    ) -> Sequence[DocumentSegment]:
+        """
+        Get segments for a document in a dataset with optional filtering.
+
+        Args:
+            document_id: Document ID
+            dataset_id: Dataset ID
+            status: Optional status filter (e.g., "completed")
+            enabled: Optional enabled filter (True/False)
+
+        Returns:
+            Sequence of DocumentSegment instances
+        """
+        query = select(DocumentSegment).where(
+            DocumentSegment.document_id == document_id,
+            DocumentSegment.dataset_id == dataset_id,
+        )
+        if status is not None:
+            query = query.where(DocumentSegment.status == status)
+        if enabled is not None:
+            query = query.where(DocumentSegment.enabled == enabled)
+        return db.session.scalars(query).all()
+
+
 class DatasetCollectionBindingService:
     @classmethod
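
A brief usage sketch of the new helper (object names are illustrative): both filters are optional, so omitting them returns every segment of the document, while the summary-status endpoint above passes status="completed" and enabled=True.

# All segments of a document:
segments = SegmentService.get_segments_by_document_and_dataset(
    document_id=document.id,
    dataset_id=dataset.id,
)

# Only completed, enabled segments (the filter used by DocumentSummaryStatusApi):
ready = SegmentService.get_segments_by_document_and_dataset(
    document_id=document.id,
    dataset_id=dataset.id,
    status="completed",
    enabled=True,
)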

View File

@@ -542,42 +542,42 @@ class SummaryIndexService:
             )
             session.commit()  # Commit initial records

-        summary_records = []
+            summary_records = []

-        for segment in segments:
-            # For parent-child mode, only process parent chunks
-            # In parent-child mode, all DocumentSegments are parent chunks,
-            # so we process all of them. Child chunks are stored in ChildChunk table
-            # and are not DocumentSegments, so they won't be in the segments list.
-            # This check is mainly for clarity and future-proofing.
-            if only_parent_chunks:
-                # In parent-child mode, all segments in the query are parent chunks
-                # Child chunks are not DocumentSegments, so they won't appear here
-                # We can process all segments
-                pass
+            for segment in segments:
+                # For parent-child mode, only process parent chunks
+                # In parent-child mode, all DocumentSegments are parent chunks,
+                # so we process all of them. Child chunks are stored in ChildChunk table
+                # and are not DocumentSegments, so they won't be in the segments list.
+                # This check is mainly for clarity and future-proofing.
+                if only_parent_chunks:
+                    # In parent-child mode, all segments in the query are parent chunks
+                    # Child chunks are not DocumentSegments, so they won't appear here
+                    # We can process all segments
+                    pass

-            try:
-                summary_record = SummaryIndexService.generate_and_vectorize_summary(
-                    segment, dataset, summary_index_setting
-                )
-                summary_records.append(summary_record)
-            except Exception as e:
-                logger.exception("Failed to generate summary for segment %s", segment.id)
-                # Update summary record with error status
-                SummaryIndexService.update_summary_record_error(
-                    segment=segment,
-                    dataset=dataset,
-                    error=str(e),
-                )
-                # Continue with other segments
-                continue
+                try:
+                    summary_record = SummaryIndexService.generate_and_vectorize_summary(
+                        segment, dataset, summary_index_setting
+                    )
+                    summary_records.append(summary_record)
+                except Exception as e:
+                    logger.exception("Failed to generate summary for segment %s", segment.id)
+                    # Update summary record with error status
+                    SummaryIndexService.update_summary_record_error(
+                        segment=segment,
+                        dataset=dataset,
+                        error=str(e),
+                    )
+                    # Continue with other segments
+                    continue

-        logger.info(
-            "Completed summary generation for document %s: %s summaries generated and vectorized",
-            document.id,
-            len(summary_records),
-        )
-        return summary_records
+            logger.info(
+                "Completed summary generation for document %s: %s summaries generated and vectorized",
+                document.id,
+                len(summary_records),
+            )
+            return summary_records

     @staticmethod
     def disable_summaries_for_segments(
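
The loop above isolates failures per segment: a failed summary is recorded on its summary row and skipped instead of aborting the whole document. A stripped-down sketch of that pattern, using only the service calls visible in this hunk:

summary_records = []
for segment in segments:
    try:
        summary_records.append(
            SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting)
        )
    except Exception as e:
        # Record the failure, then continue with the remaining segments.
        SummaryIndexService.update_summary_record_error(segment=segment, dataset=dataset, error=str(e))
        continue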

View File

@@ -6,7 +6,7 @@ import time
 import click
 from celery import shared_task

-from extensions.ext_database import db
+from core.db.session_factory import session_factory
 from models.dataset import Dataset, DocumentSegment
 from models.dataset import Document as DatasetDocument
 from services.summary_index_service import SummaryIndexService
@@ -37,76 +37,72 @@ def generate_summary_index_task(dataset_id: str, document_id: str, segment_ids:
     start_at = time.perf_counter()
     try:
-        dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
-        if not dataset:
-            logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red"))
-            db.session.close()
-            return
+        with session_factory.create_session() as session:
+            dataset = session.query(Dataset).where(Dataset.id == dataset_id).first()
+            if not dataset:
+                logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red"))
+                return

-        document = db.session.query(DatasetDocument).where(DatasetDocument.id == document_id).first()
-        if not document:
-            logger.error(click.style(f"Document not found: {document_id}", fg="red"))
-            db.session.close()
-            return
+            document = session.query(DatasetDocument).where(DatasetDocument.id == document_id).first()
+            if not document:
+                logger.error(click.style(f"Document not found: {document_id}", fg="red"))
+                return

-        # Only generate summary index for high_quality indexing technique
-        if dataset.indexing_technique != "high_quality":
-            logger.info(
-                click.style(
-                    f"Skipping summary generation for dataset {dataset_id}: "
-                    f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'",
-                    fg="cyan",
-                )
-            )
-            db.session.close()
-            return
+            # Only generate summary index for high_quality indexing technique
+            if dataset.indexing_technique != "high_quality":
+                logger.info(
+                    click.style(
+                        f"Skipping summary generation for dataset {dataset_id}: "
+                        f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'",
+                        fg="cyan",
+                    )
+                )
+                return

-        # Check if summary index is enabled
-        summary_index_setting = dataset.summary_index_setting
-        if not summary_index_setting or not summary_index_setting.get("enable"):
-            logger.info(
-                click.style(
-                    f"Summary index is disabled for dataset {dataset_id}",
-                    fg="cyan",
-                )
-            )
-            db.session.close()
-            return
+            # Check if summary index is enabled
+            summary_index_setting = dataset.summary_index_setting
+            if not summary_index_setting or not summary_index_setting.get("enable"):
+                logger.info(
+                    click.style(
+                        f"Summary index is disabled for dataset {dataset_id}",
+                        fg="cyan",
+                    )
+                )
+                return

-        # Determine if only parent chunks should be processed
-        only_parent_chunks = dataset.chunk_structure == "parent_child_index"
+            # Determine if only parent chunks should be processed
+            only_parent_chunks = dataset.chunk_structure == "parent_child_index"

-        # Generate summaries
-        summary_records = SummaryIndexService.generate_summaries_for_document(
-            dataset=dataset,
-            document=document,
-            summary_index_setting=summary_index_setting,
-            segment_ids=segment_ids,
-            only_parent_chunks=only_parent_chunks,
-        )
+            # Generate summaries
+            summary_records = SummaryIndexService.generate_summaries_for_document(
+                dataset=dataset,
+                document=document,
+                summary_index_setting=summary_index_setting,
+                segment_ids=segment_ids,
+                only_parent_chunks=only_parent_chunks,
+            )

-        end_at = time.perf_counter()
-        logger.info(
-            click.style(
-                f"Summary index generation completed for document {document_id}: "
-                f"{len(summary_records)} summaries generated, latency: {end_at - start_at}",
-                fg="green",
-            )
-        )
+            end_at = time.perf_counter()
+            logger.info(
+                click.style(
+                    f"Summary index generation completed for document {document_id}: "
+                    f"{len(summary_records)} summaries generated, latency: {end_at - start_at}",
+                    fg="green",
+                )
+            )
     except Exception as e:
         logger.exception("Failed to generate summary index for document %s", document_id)
         # Update document segments with error status if needed
         if segment_ids:
-            db.session.query(DocumentSegment).filter(
-                DocumentSegment.id.in_(segment_ids),
-                DocumentSegment.dataset_id == dataset_id,
-            ).update(
-                {
-                    DocumentSegment.error: f"Summary generation failed: {str(e)}",
-                },
-                synchronize_session=False,
-            )
-            db.session.commit()
-    finally:
-        db.session.close()
+            with session_factory.create_session() as session:
+                session.query(DocumentSegment).filter(
+                    DocumentSegment.id.in_(segment_ids),
+                    DocumentSegment.dataset_id == dataset_id,
+                ).update(
+                    {
+                        DocumentSegment.error: f"Summary generation failed: {str(e)}",
+                    },
+                    synchronize_session=False,
+                )
+                session.commit()
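
The context manager is what makes all of the removed db.session.close() calls unnecessary: the session is released when the with block exits, on every return path and on exceptions. A minimal before/after sketch (function names are illustrative):

# Before: every early return had to remember to close the shared session.
def load_dataset_old(dataset_id: str):
    dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
    if not dataset:
        db.session.close()  # easy to forget on a new return path
        return None
    db.session.close()
    return dataset

# After: the scoped session closes itself on every path.
def load_dataset_new(dataset_id: str):
    with session_factory.create_session() as session:
        return session.query(Dataset).where(Dataset.id == dataset_id).first()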

View File

@@ -8,7 +8,7 @@ import click
 from celery import shared_task
 from sqlalchemy import or_, select

-from extensions.ext_database import db
+from core.db.session_factory import session_factory
 from models.dataset import Dataset, DocumentSegment, DocumentSegmentSummary
 from models.dataset import Document as DatasetDocument
 from services.summary_index_service import SummaryIndexService
@@ -45,275 +45,271 @@ def regenerate_summary_index_task(
     start_at = time.perf_counter()
     try:
-        dataset = db.session.query(Dataset).filter_by(id=dataset_id).first()
-        if not dataset:
-            logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red"))
-            db.session.close()
-            return
+        with session_factory.create_session() as session:
+            dataset = session.query(Dataset).filter_by(id=dataset_id).first()
+            if not dataset:
+                logger.error(click.style(f"Dataset not found: {dataset_id}", fg="red"))
+                return

-        # Only regenerate summary index for high_quality indexing technique
-        if dataset.indexing_technique != "high_quality":
-            logger.info(
-                click.style(
-                    f"Skipping summary regeneration for dataset {dataset_id}: "
-                    f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'",
-                    fg="cyan",
-                )
-            )
-            db.session.close()
-            return
+            # Only regenerate summary index for high_quality indexing technique
+            if dataset.indexing_technique != "high_quality":
+                logger.info(
+                    click.style(
+                        f"Skipping summary regeneration for dataset {dataset_id}: "
+                        f"indexing_technique is {dataset.indexing_technique}, not 'high_quality'",
+                        fg="cyan",
+                    )
+                )
+                return

-        # Check if summary index is enabled (only for summary_model change)
-        # For embedding_model change, we still re-vectorize existing summaries even if setting is disabled
-        summary_index_setting = dataset.summary_index_setting
-        if not regenerate_vectors_only:
-            # For summary_model change, require summary_index_setting to be enabled
-            if not summary_index_setting or not summary_index_setting.get("enable"):
-                logger.info(
-                    click.style(
-                        f"Summary index is disabled for dataset {dataset_id}",
-                        fg="cyan",
-                    )
-                )
-                db.session.close()
-                return
+            # Check if summary index is enabled (only for summary_model change)
+            # For embedding_model change, we still re-vectorize existing summaries even if setting is disabled
+            summary_index_setting = dataset.summary_index_setting
+            if not regenerate_vectors_only:
+                # For summary_model change, require summary_index_setting to be enabled
+                if not summary_index_setting or not summary_index_setting.get("enable"):
+                    logger.info(
+                        click.style(
+                            f"Summary index is disabled for dataset {dataset_id}",
+                            fg="cyan",
+                        )
+                    )
+                    return

-        total_segments_processed = 0
-        total_segments_failed = 0
+            total_segments_processed = 0
+            total_segments_failed = 0

-        if regenerate_vectors_only:
-            # For embedding_model change: directly query all segments with existing summaries
-            # Don't require document indexing_status == "completed"
-            # Include summaries with status "completed" or "error" (if they have content)
-            segments_with_summaries = (
-                db.session.query(DocumentSegment, DocumentSegmentSummary)
-                .join(
-                    DocumentSegmentSummary,
-                    DocumentSegment.id == DocumentSegmentSummary.chunk_id,
-                )
-                .join(
-                    DatasetDocument,
-                    DocumentSegment.document_id == DatasetDocument.id,
-                )
-                .where(
-                    DocumentSegment.dataset_id == dataset_id,
-                    DocumentSegment.status == "completed",  # Segment must be completed
-                    DocumentSegment.enabled == True,
-                    DocumentSegmentSummary.dataset_id == dataset_id,
-                    DocumentSegmentSummary.summary_content.isnot(None),  # Must have summary content
-                    # Include completed summaries or error summaries (with content)
-                    or_(
-                        DocumentSegmentSummary.status == "completed",
-                        DocumentSegmentSummary.status == "error",
-                    ),
-                    DatasetDocument.enabled == True,  # Document must be enabled
-                    DatasetDocument.archived == False,  # Document must not be archived
-                    DatasetDocument.doc_form != "qa_model",  # Skip qa_model documents
-                )
-                .order_by(DocumentSegment.document_id.asc(), DocumentSegment.position.asc())
-                .all()
-            )
+            if regenerate_vectors_only:
+                # For embedding_model change: directly query all segments with existing summaries
+                # Don't require document indexing_status == "completed"
+                # Include summaries with status "completed" or "error" (if they have content)
+                segments_with_summaries = (
+                    session.query(DocumentSegment, DocumentSegmentSummary)
+                    .join(
+                        DocumentSegmentSummary,
+                        DocumentSegment.id == DocumentSegmentSummary.chunk_id,
+                    )
+                    .join(
+                        DatasetDocument,
+                        DocumentSegment.document_id == DatasetDocument.id,
+                    )
+                    .where(
+                        DocumentSegment.dataset_id == dataset_id,
+                        DocumentSegment.status == "completed",  # Segment must be completed
+                        DocumentSegment.enabled == True,
+                        DocumentSegmentSummary.dataset_id == dataset_id,
+                        DocumentSegmentSummary.summary_content.isnot(None),  # Must have summary content
+                        # Include completed summaries or error summaries (with content)
+                        or_(
+                            DocumentSegmentSummary.status == "completed",
+                            DocumentSegmentSummary.status == "error",
+                        ),
+                        DatasetDocument.enabled == True,  # Document must be enabled
+                        DatasetDocument.archived == False,  # Document must not be archived
+                        DatasetDocument.doc_form != "qa_model",  # Skip qa_model documents
+                    )
+                    .order_by(DocumentSegment.document_id.asc(), DocumentSegment.position.asc())
+                    .all()
+                )

-            if not segments_with_summaries:
-                logger.info(
-                    click.style(
-                        f"No segments with summaries found for re-vectorization in dataset {dataset_id}",
-                        fg="cyan",
-                    )
-                )
-                db.session.close()
-                return
+                if not segments_with_summaries:
+                    logger.info(
+                        click.style(
+                            f"No segments with summaries found for re-vectorization in dataset {dataset_id}",
+                            fg="cyan",
+                        )
+                    )
+                    return

-            logger.info(
-                "Found %s segments with summaries for re-vectorization in dataset %s",
-                len(segments_with_summaries),
-                dataset_id,
-            )
+                logger.info(
+                    "Found %s segments with summaries for re-vectorization in dataset %s",
+                    len(segments_with_summaries),
+                    dataset_id,
+                )

-            # Group by document for logging
-            segments_by_document = defaultdict(list)
-            for segment, summary_record in segments_with_summaries:
-                segments_by_document[segment.document_id].append((segment, summary_record))
+                # Group by document for logging
+                segments_by_document = defaultdict(list)
+                for segment, summary_record in segments_with_summaries:
+                    segments_by_document[segment.document_id].append((segment, summary_record))

-            logger.info(
-                "Segments grouped into %s documents for re-vectorization",
-                len(segments_by_document),
-            )
+                logger.info(
+                    "Segments grouped into %s documents for re-vectorization",
+                    len(segments_by_document),
+                )

-            for document_id, segment_summary_pairs in segments_by_document.items():
-                logger.info(
-                    "Re-vectorizing summaries for %s segments in document %s",
-                    len(segment_summary_pairs),
-                    document_id,
-                )
-                for segment, summary_record in segment_summary_pairs:
-                    try:
-                        # Delete old vector
-                        if summary_record.summary_index_node_id:
-                            try:
-                                from core.rag.datasource.vdb.vector_factory import Vector
-
-                                vector = Vector(dataset)
-                                vector.delete_by_ids([summary_record.summary_index_node_id])
-                            except Exception as e:
-                                logger.warning(
-                                    "Failed to delete old summary vector for segment %s: %s",
-                                    segment.id,
-                                    str(e),
-                                )
-                        # Re-vectorize with new embedding model
-                        SummaryIndexService.vectorize_summary(summary_record, segment, dataset)
-                        db.session.commit()
-                        total_segments_processed += 1
-                    except Exception as e:
-                        logger.error(
-                            "Failed to re-vectorize summary for segment %s: %s",
-                            segment.id,
-                            str(e),
-                            exc_info=True,
-                        )
-                        total_segments_failed += 1
-                        # Update summary record with error status
-                        summary_record.status = "error"
-                        summary_record.error = f"Re-vectorization failed: {str(e)}"
-                        db.session.add(summary_record)
-                        db.session.commit()
-                        continue
+                for document_id, segment_summary_pairs in segments_by_document.items():
+                    logger.info(
+                        "Re-vectorizing summaries for %s segments in document %s",
+                        len(segment_summary_pairs),
+                        document_id,
+                    )
+                    for segment, summary_record in segment_summary_pairs:
+                        try:
+                            # Delete old vector
+                            if summary_record.summary_index_node_id:
+                                try:
+                                    from core.rag.datasource.vdb.vector_factory import Vector
+
+                                    vector = Vector(dataset)
+                                    vector.delete_by_ids([summary_record.summary_index_node_id])
+                                except Exception as e:
+                                    logger.warning(
+                                        "Failed to delete old summary vector for segment %s: %s",
+                                        segment.id,
+                                        str(e),
+                                    )
+                            # Re-vectorize with new embedding model
+                            SummaryIndexService.vectorize_summary(summary_record, segment, dataset)
+                            session.commit()
+                            total_segments_processed += 1
+                        except Exception as e:
+                            logger.error(
+                                "Failed to re-vectorize summary for segment %s: %s",
+                                segment.id,
+                                str(e),
+                                exc_info=True,
+                            )
+                            total_segments_failed += 1
+                            # Update summary record with error status
+                            summary_record.status = "error"
+                            summary_record.error = f"Re-vectorization failed: {str(e)}"
+                            session.add(summary_record)
+                            session.commit()
+                            continue
-        else:
-            # For summary_model change: require document indexing_status == "completed"
-            # Get all documents with completed indexing status
-            dataset_documents = db.session.scalars(
-                select(DatasetDocument).where(
-                    DatasetDocument.dataset_id == dataset_id,
-                    DatasetDocument.indexing_status == "completed",
-                    DatasetDocument.enabled == True,
-                    DatasetDocument.archived == False,
-                )
-            ).all()
+            else:
+                # For summary_model change: require document indexing_status == "completed"
+                # Get all documents with completed indexing status
+                dataset_documents = session.scalars(
+                    select(DatasetDocument).where(
+                        DatasetDocument.dataset_id == dataset_id,
+                        DatasetDocument.indexing_status == "completed",
+                        DatasetDocument.enabled == True,
+                        DatasetDocument.archived == False,
+                    )
+                ).all()

-            if not dataset_documents:
-                logger.info(
-                    click.style(
-                        f"No documents found for summary regeneration in dataset {dataset_id}",
-                        fg="cyan",
-                    )
-                )
-                db.session.close()
-                return
+                if not dataset_documents:
+                    logger.info(
+                        click.style(
+                            f"No documents found for summary regeneration in dataset {dataset_id}",
+                            fg="cyan",
+                        )
+                    )
+                    return

-            logger.info(
-                "Found %s documents for summary regeneration in dataset %s",
-                len(dataset_documents),
-                dataset_id,
-            )
+                logger.info(
+                    "Found %s documents for summary regeneration in dataset %s",
+                    len(dataset_documents),
+                    dataset_id,
+                )

-            for dataset_document in dataset_documents:
-                # Skip qa_model documents
-                if dataset_document.doc_form == "qa_model":
-                    continue
-                try:
-                    # Get all segments with existing summaries
-                    segments = (
-                        db.session.query(DocumentSegment)
-                        .join(
-                            DocumentSegmentSummary,
-                            DocumentSegment.id == DocumentSegmentSummary.chunk_id,
-                        )
-                        .where(
-                            DocumentSegment.document_id == dataset_document.id,
-                            DocumentSegment.dataset_id == dataset_id,
-                            DocumentSegment.status == "completed",
-                            DocumentSegment.enabled == True,
-                            DocumentSegmentSummary.dataset_id == dataset_id,
-                        )
-                        .order_by(DocumentSegment.position.asc())
-                        .all()
-                    )
-                    if not segments:
-                        continue
-                    logger.info(
-                        "Regenerating summaries for %s segments in document %s",
-                        len(segments),
-                        dataset_document.id,
-                    )
-                    for segment in segments:
-                        summary_record = None
-                        try:
-                            # Get existing summary record
-                            summary_record = (
-                                db.session.query(DocumentSegmentSummary)
-                                .filter_by(
-                                    chunk_id=segment.id,
-                                    dataset_id=dataset_id,
-                                )
-                                .first()
-                            )
-                            if not summary_record:
-                                logger.warning("Summary record not found for segment %s, skipping", segment.id)
-                                continue
-                            # Regenerate both summary content and vectors (for summary_model change)
-                            SummaryIndexService.generate_and_vectorize_summary(segment, dataset, summary_index_setting)
-                            db.session.commit()
-                            total_segments_processed += 1
-                        except Exception as e:
-                            logger.error(
-                                "Failed to regenerate summary for segment %s: %s",
-                                segment.id,
-                                str(e),
-                                exc_info=True,
-                            )
-                            total_segments_failed += 1
-                            # Update summary record with error status
-                            if summary_record:
-                                summary_record.status = "error"
-                                summary_record.error = f"Regeneration failed: {str(e)}"
-                                db.session.add(summary_record)
-                                db.session.commit()
-                            continue
-                except Exception as e:
-                    logger.error(
-                        "Failed to process document %s for summary regeneration: %s",
-                        dataset_document.id,
-                        str(e),
-                        exc_info=True,
-                    )
-                    continue
+                for dataset_document in dataset_documents:
+                    # Skip qa_model documents
+                    if dataset_document.doc_form == "qa_model":
+                        continue
+                    try:
+                        # Get all segments with existing summaries
+                        segments = (
+                            session.query(DocumentSegment)
+                            .join(
+                                DocumentSegmentSummary,
+                                DocumentSegment.id == DocumentSegmentSummary.chunk_id,
+                            )
+                            .where(
+                                DocumentSegment.document_id == dataset_document.id,
+                                DocumentSegment.dataset_id == dataset_id,
+                                DocumentSegment.status == "completed",
+                                DocumentSegment.enabled == True,
+                                DocumentSegmentSummary.dataset_id == dataset_id,
+                            )
+                            .order_by(DocumentSegment.position.asc())
+                            .all()
+                        )
+                        if not segments:
+                            continue
+                        logger.info(
+                            "Regenerating summaries for %s segments in document %s",
+                            len(segments),
+                            dataset_document.id,
+                        )
+                        for segment in segments:
+                            summary_record = None
+                            try:
+                                # Get existing summary record
+                                summary_record = (
+                                    session.query(DocumentSegmentSummary)
+                                    .filter_by(
+                                        chunk_id=segment.id,
+                                        dataset_id=dataset_id,
+                                    )
+                                    .first()
+                                )
+                                if not summary_record:
+                                    logger.warning("Summary record not found for segment %s, skipping", segment.id)
+                                    continue
+                                # Regenerate both summary content and vectors (for summary_model change)
+                                SummaryIndexService.generate_and_vectorize_summary(
+                                    segment, dataset, summary_index_setting
+                                )
+                                session.commit()
+                                total_segments_processed += 1
+                            except Exception as e:
+                                logger.error(
+                                    "Failed to regenerate summary for segment %s: %s",
+                                    segment.id,
+                                    str(e),
+                                    exc_info=True,
+                                )
+                                total_segments_failed += 1
+                                # Update summary record with error status
+                                if summary_record:
+                                    summary_record.status = "error"
+                                    summary_record.error = f"Regeneration failed: {str(e)}"
+                                    session.add(summary_record)
+                                    session.commit()
+                                continue
+                    except Exception as e:
+                        logger.error(
+                            "Failed to process document %s for summary regeneration: %s",
+                            dataset_document.id,
+                            str(e),
+                            exc_info=True,
+                        )
+                        continue

-        end_at = time.perf_counter()
-        if regenerate_vectors_only:
-            logger.info(
-                click.style(
-                    f"Summary re-vectorization completed for dataset {dataset_id}: "
-                    f"{total_segments_processed} segments processed successfully, "
-                    f"{total_segments_failed} segments failed, "
-                    f"latency: {end_at - start_at:.2f}s",
-                    fg="green",
-                )
-            )
-        else:
-            logger.info(
-                click.style(
-                    f"Summary index regeneration completed for dataset {dataset_id}: "
-                    f"{total_segments_processed} segments processed successfully, "
-                    f"{total_segments_failed} segments failed, "
-                    f"latency: {end_at - start_at:.2f}s",
-                    fg="green",
-                )
-            )
+            end_at = time.perf_counter()
+            if regenerate_vectors_only:
+                logger.info(
+                    click.style(
+                        f"Summary re-vectorization completed for dataset {dataset_id}: "
+                        f"{total_segments_processed} segments processed successfully, "
+                        f"{total_segments_failed} segments failed, "
+                        f"latency: {end_at - start_at:.2f}s",
+                        fg="green",
+                    )
+                )
+            else:
+                logger.info(
+                    click.style(
+                        f"Summary index regeneration completed for dataset {dataset_id}: "
+                        f"{total_segments_processed} segments processed successfully, "
+                        f"{total_segments_failed} segments failed, "
+                        f"latency: {end_at - start_at:.2f}s",
+                        fg="green",
+                    )
+                )
     except Exception:
         logger.exception("Regenerate summary index failed for dataset %s", dataset_id)
-    finally:
-        db.session.close()
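
For the embedding-model path, each summary keeps its content and is only re-embedded: the stale vector is deleted best-effort, then a fresh one is written. A condensed sketch of that per-segment flow, built from the calls visible in this hunk (the helper name and signature are illustrative):

def revectorize_summary(segment, summary_record, dataset, session) -> None:
    # Best-effort removal of the stale vector; a failure here is only logged.
    if summary_record.summary_index_node_id:
        try:
            from core.rag.datasource.vdb.vector_factory import Vector

            Vector(dataset).delete_by_ids([summary_record.summary_index_node_id])
        except Exception as e:
            logger.warning("Failed to delete old summary vector for segment %s: %s", segment.id, str(e))
    # Re-embed the existing summary content with the dataset's current embedding model.
    SummaryIndexService.vectorize_summary(summary_record, segment, dataset)
    session.commit()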