feat: fix i18n missing keys and merge upstream/main (#24615)

Signed-off-by: -LAN- <laipz8200@outlook.com>
Signed-off-by: kenwoodjw <blackxin55+@gmail.com>
Signed-off-by: Yongtao Huang <yongtaoh2022@gmail.com>
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
Signed-off-by: zhanluxianshen <zhanluxianshen@163.com>
Co-authored-by: -LAN- <laipz8200@outlook.com>
Co-authored-by: GuanMu <ballmanjq@gmail.com>
Co-authored-by: Davide Delbianco <davide.delbianco@outlook.com>
Co-authored-by: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com>
Co-authored-by: kenwoodjw <blackxin55+@gmail.com>
Co-authored-by: Yongtao Huang <yongtaoh2022@gmail.com>
Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com>
Co-authored-by: Qiang Lee <18018968632@163.com>
Co-authored-by: 李强04 <liqiang04@gaotu.cn>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Asuka Minato <i@asukaminato.eu.org>
Co-authored-by: Matri Qi <matrixdom@126.com>
Co-authored-by: huayaoyue6 <huayaoyue@163.com>
Co-authored-by: Bowen Liang <liangbowen@gf.com.cn>
Co-authored-by: znn <jubinkumarsoni@gmail.com>
Co-authored-by: crazywoola <427733928@qq.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: yihong <zouzou0208@gmail.com>
Co-authored-by: Muke Wang <shaodwaaron@gmail.com>
Co-authored-by: wangmuke <wangmuke@kingsware.cn>
Co-authored-by: Wu Tianwei <30284043+WTW0313@users.noreply.github.com>
Co-authored-by: quicksand <quicksandzn@gmail.com>
Co-authored-by: 非法操作 <hjlarry@163.com>
Co-authored-by: zxhlyh <jasonapring2015@outlook.com>
Co-authored-by: Eric Guo <eric.guocz@gmail.com>
Co-authored-by: Zhedong Cen <cenzhedong2@126.com>
Co-authored-by: jiangbo721 <jiangbo721@163.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: hjlarry <25834719+hjlarry@users.noreply.github.com>
Co-authored-by: lxsummer <35754229+lxjustdoit@users.noreply.github.com>
Co-authored-by: 湛露先生 <zhanluxianshen@163.com>
Co-authored-by: Guangdong Liu <liugddx@gmail.com>
Co-authored-by: QuantumGhost <obelisk.reg+git@gmail.com>
Co-authored-by: Claude <noreply@anthropic.com>
Co-authored-by: Yessenia-d <yessenia.contact@gmail.com>
Co-authored-by: huangzhuo1949 <167434202+huangzhuo1949@users.noreply.github.com>
Co-authored-by: huangzhuo <huangzhuo1@xiaomi.com>
Co-authored-by: 17hz <0x149527@gmail.com>
Co-authored-by: Amy <1530140574@qq.com>
Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: Nite Knite <nkCoding@gmail.com>
Co-authored-by: Yeuoly <45712896+Yeuoly@users.noreply.github.com>
Co-authored-by: Petrus Han <petrus.hanks@gmail.com>
Co-authored-by: iamjoel <2120155+iamjoel@users.noreply.github.com>
Co-authored-by: Kalo Chin <frog.beepers.0n@icloud.com>
Co-authored-by: Ujjwal Maurya <ujjwalsbx@gmail.com>
Co-authored-by: Maries <xh001x@hotmail.com>
Authored by lyzno1 on 2025-08-27 15:07:28 +08:00; committed by GitHub
parent a63d1e87b1 · commit 5bbf685035
625 changed files with 23778 additions and 10693 deletions

View File

@@ -21,7 +21,7 @@ from models.model import (
 from models.web import SavedMessage
 from services.feature_service import FeatureService
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 @app.celery.task(queue="dataset")
@@ -47,10 +47,9 @@ def clean_messages():
         if not messages:
             break
         for message in messages:
-            plan_sandbox_clean_message_day = message.created_at
             app = db.session.query(App).filter_by(id=message.app_id).first()
             if not app:
-                _logger.warning(
+                logger.warning(
                     "Expected App record to exist, but none was found, app_id=%s, message_id=%s",
                     message.app_id,
                     message.id,
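
Note: the change here is twofold: the module logger is renamed from _logger to the conventional logger, and the unused plan_sandbox_clean_message_day assignment is dropped as dead code. A minimal sketch of the module-level logger convention, with an illustrative process_record function that is not part of the diff:

    import logging

    # One module-level logger named after the module is the stdlib convention;
    # handlers and levels are configured once at application startup, not here.
    logger = logging.getLogger(__name__)


    def process_record(record_id: str) -> None:
        # %-style lazy formatting defers interpolation until the record is
        # actually emitted at an enabled level.
        logger.warning("Expected record to exist, but none was found, record_id=%s", record_id)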

View File

@@ -45,6 +45,7 @@ def clean_unused_datasets_task():
         plan_filter = config["plan_filter"]
         add_logs = config["add_logs"]
 
+        page = 1
         while True:
             try:
                 # Subquery for counting new documents
@@ -86,12 +87,12 @@ def clean_unused_datasets_task():
                     .order_by(Dataset.created_at.desc())
                 )
-                datasets = db.paginate(stmt, page=1, per_page=50)
+                datasets = db.paginate(stmt, page=page, per_page=50, error_out=False)
             except SQLAlchemyError:
                 raise
 
-            if datasets.items is None or len(datasets.items) == 0:
+            if datasets is None or datasets.items is None or len(datasets.items) == 0:
                 break
 
             for dataset in datasets:
@@ -150,5 +151,7 @@ def clean_unused_datasets_task():
                 except Exception as e:
                     click.echo(click.style(f"clean dataset index error: {e.__class__.__name__} {str(e)}", fg="red"))
 
+            page += 1
+
     end_at = time.perf_counter()
     click.echo(click.style(f"Cleaned unused dataset from db success latency: {end_at - start_at}", fg="green"))

View File

@@ -19,7 +19,7 @@ from models.model import (
 )
 from models.workflow import ConversationVariable, WorkflowAppLog, WorkflowNodeExecutionModel, WorkflowRun
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 MAX_RETRIES = 3
@@ -39,9 +39,9 @@ def clean_workflow_runlogs_precise():
     try:
         total_workflow_runs = db.session.query(WorkflowRun).where(WorkflowRun.created_at < cutoff_date).count()
         if total_workflow_runs == 0:
-            _logger.info("No expired workflow run logs found")
+            logger.info("No expired workflow run logs found")
             return
-        _logger.info("Found %s expired workflow run logs to clean", total_workflow_runs)
+        logger.info("Found %s expired workflow run logs to clean", total_workflow_runs)
 
         total_deleted = 0
         failed_batches = 0
@@ -66,20 +66,20 @@ def clean_workflow_runlogs_precise():
             else:
                 failed_batches += 1
                 if failed_batches >= MAX_RETRIES:
-                    _logger.error("Failed to delete batch after %s retries, aborting cleanup for today", MAX_RETRIES)
+                    logger.error("Failed to delete batch after %s retries, aborting cleanup for today", MAX_RETRIES)
                     break
                 else:
                     # Calculate incremental delay times: 5, 10, 15 minutes
                     retry_delay_minutes = failed_batches * 5
-                    _logger.warning("Batch deletion failed, retrying in %s minutes...", retry_delay_minutes)
+                    logger.warning("Batch deletion failed, retrying in %s minutes...", retry_delay_minutes)
                     time.sleep(retry_delay_minutes * 60)
                     continue
 
-        _logger.info("Cleanup completed: %s expired workflow run logs deleted", total_deleted)
+        logger.info("Cleanup completed: %s expired workflow run logs deleted", total_deleted)
     except Exception as e:
         db.session.rollback()
-        _logger.exception("Unexpected error in workflow log cleanup")
+        logger.exception("Unexpected error in workflow log cleanup")
         raise
 
     end_at = time.perf_counter()
@@ -151,5 +151,5 @@ def _delete_batch_with_retry(workflow_run_ids: list[str], attempt_count: int) -> bool:
     except Exception as e:
         db.session.rollback()
-        _logger.exception("Batch deletion failed (attempt %s)", attempt_count + 1)
+        logger.exception("Batch deletion failed (attempt %s)", attempt_count + 1)
         return False
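
Note: the logic above bounds each cleanup run at MAX_RETRIES consecutive failures and sleeps for a linearly growing delay before re-attempting a failed batch. A self-contained sketch of that retry shape, using illustrative names rather than the task's actual helpers:

    import time
    from collections.abc import Callable

    MAX_RETRIES = 3


    def delete_with_backoff(batches: list[list[str]], delete_batch: Callable[[list[str]], bool]) -> int:
        """Retry failed batches with a linearly growing delay between attempts."""
        total_deleted = 0
        failed_batches = 0
        i = 0
        while i < len(batches):
            if delete_batch(batches[i]):
                total_deleted += len(batches[i])
                failed_batches = 0  # reset the failure streak after a success
                i += 1
            else:
                failed_batches += 1
                if failed_batches >= MAX_RETRIES:
                    break  # give up for today; the next scheduled run retries
                # incremental delay (5, then 10 minutes) before the same batch
                time.sleep(failed_batches * 5 * 60)
        return total_deleted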

View File

@@ -13,6 +13,8 @@ from models.account import Account, Tenant, TenantAccountJoin
 from models.dataset import Dataset, DatasetAutoDisableLog
 from services.feature_service import FeatureService
 
+logger = logging.getLogger(__name__)
+
 
 @app.celery.task(queue="dataset")
 def mail_clean_document_notify_task():
@@ -24,7 +26,7 @@ def mail_clean_document_notify_task():
     if not mail.is_inited():
         return
 
-    logging.info(click.style("Start send document clean notify mail", fg="green"))
+    logger.info(click.style("Start send document clean notify mail", fg="green"))
     start_at = time.perf_counter()
 
     # send document clean notify mail
@@ -89,8 +91,6 @@ def mail_clean_document_notify_task():
                 dataset_auto_disable_log.notified = True
         db.session.commit()
         end_at = time.perf_counter()
-        logging.info(
-            click.style(f"Send document clean notify mail succeeded: latency: {end_at - start_at}", fg="green")
-        )
+        logger.info(click.style(f"Send document clean notify mail succeeded: latency: {end_at - start_at}", fg="green"))
     except Exception:
-        logging.exception("Send document clean notify mail failed")
+        logger.exception("Send document clean notify mail failed")

View File

@@ -18,6 +18,8 @@ celery_redis = Redis(
     db=int(redis_config.get("virtual_host")) if redis_config.get("virtual_host") else 1,
 )
 
+logger = logging.getLogger(__name__)
+
 
 @app.celery.task(queue="monitor")
 def queue_monitor_task():
@@ -25,27 +27,27 @@ def queue_monitor_task():
     threshold = dify_config.QUEUE_MONITOR_THRESHOLD
     if threshold is None:
-        logging.warning(click.style("QUEUE_MONITOR_THRESHOLD is not configured, skipping monitoring", fg="yellow"))
+        logger.warning(click.style("QUEUE_MONITOR_THRESHOLD is not configured, skipping monitoring", fg="yellow"))
         return
 
     try:
         queue_length = celery_redis.llen(f"{queue_name}")
-        logging.info(click.style(f"Start monitor {queue_name}", fg="green"))
+        logger.info(click.style(f"Start monitor {queue_name}", fg="green"))
 
         if queue_length is None:
-            logging.error(
+            logger.error(
                 click.style(f"Failed to get queue length for {queue_name} - Redis may be unavailable", fg="red")
             )
             return
 
-        logging.info(click.style(f"Queue length: {queue_length}", fg="green"))
+        logger.info(click.style(f"Queue length: {queue_length}", fg="green"))
 
         if queue_length >= threshold:
             warning_msg = f"Queue {queue_name} task count exceeded the limit.: {queue_length}/{threshold}"
             logging.warning(click.style(warning_msg, fg="red"))
-            alter_emails = dify_config.QUEUE_MONITOR_ALERT_EMAILS
-            if alter_emails:
-                to_list = alter_emails.split(",")
+            alert_emails = dify_config.QUEUE_MONITOR_ALERT_EMAILS
+            if alert_emails:
+                to_list = alert_emails.split(",")
                 email_service = get_email_i18n_service()
                 for to in to_list:
                     try:
@@ -62,10 +64,10 @@ def queue_monitor_task():
                             },
                         )
                     except Exception as e:
-                        logging.exception(click.style("Exception occurred during sending email", fg="red"))
+                        logger.exception(click.style("Exception occurred during sending email", fg="red"))
     except Exception as e:
-        logging.exception(click.style("Exception occurred during queue monitoring", fg="red"))
+        logger.exception(click.style("Exception occurred during queue monitoring", fg="red"))
     finally:
         if db.session.is_active:
             db.session.close()
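
Note: the substantive fix in this file is the alter_emails → alert_emails rename, matching the QUEUE_MONITOR_ALERT_EMAILS config key it reads; the rest is the same root-logger-to-module-logger migration. A rough sketch of the monitoring shape, assuming the redis-py client and a hypothetical send_alert notifier in place of the real email service:

    import logging

    import redis  # assumes the redis-py client

    logger = logging.getLogger(__name__)


    def check_queue(client: redis.Redis, queue_name: str, threshold: int, alert_emails: str | None) -> None:
        """Warn and fan out alerts when a Celery queue backs up past `threshold`."""
        queue_length = client.llen(queue_name)  # Celery queues are plain Redis lists
        logger.info("Queue length for %s: %s", queue_name, queue_length)
        if queue_length >= threshold:
            logger.warning("Queue %s task count exceeded the limit: %s/%s", queue_name, queue_length, threshold)
            if alert_emails:
                # the config value is a comma-separated recipient list
                for to in alert_emails.split(","):
                    send_alert(to, queue_name, queue_length)


    def send_alert(to: str, queue_name: str, queue_length: int) -> None:
        # hypothetical notifier standing in for the real email service
        logger.info("would alert %s about %s (%s pending)", to, queue_name, queue_length)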