feat: Adapt EvaluationMetricName.

This commit is contained in:
FFXN
2026-03-17 16:08:57 +08:00
parent b7baeb34e1
commit e6e668d1d9
2 changed files with 8 additions and 2 deletions

View File

@@ -64,6 +64,12 @@ WORKFLOW_METRIC_NAMES: list[EvaluationMetricName] = [
EvaluationMetricName.ANSWER_CORRECTNESS,
]
# Map each metric's string value to the workflow node type it evaluates.
# Later entries win on duplicate keys, matching the original merge order
# (llm, then knowledge-retrieval, then agent).
METRIC_NODE_TYPE_MAPPING: dict[str, str] = {
    metric.value: node_type
    for node_type, metric_group in (
        ("llm", LLM_METRIC_NAMES),
        ("knowledge-retrieval", RETRIEVAL_METRIC_NAMES),
        ("agent", AGENT_METRIC_NAMES),
    )
    for metric in metric_group
}
class EvaluationMetric(BaseModel):
name: str

View File

@@ -11,12 +11,12 @@ from sqlalchemy.orm import Session
from configs import dify_config
from core.evaluation.entities.evaluation_entity import (
EVALUATION_METRICS,
METRIC_NODE_TYPE_MAPPING,
DefaultMetric,
EvaluationCategory,
EvaluationConfigData,
EvaluationDatasetInput,
EvaluationMetricName,
EvaluationRunData,
EvaluationRunRequest,
NodeInfo,
@@ -429,7 +429,7 @@ class EvaluationService:
@staticmethod
def get_available_metrics() -> list[str]:
    """Return the centrally-defined list of evaluation metric names.

    Returns:
        The string ``value`` of every ``EvaluationMetricName`` enum member,
        in enum declaration order.
    """
    # The enum is the single source of truth; the superseded
    # `return list(EVALUATION_METRICS)` line (stale diff residue that made
    # the line below unreachable) has been removed.
    return [m.value for m in EvaluationMetricName]
@classmethod
def get_nodes_for_metrics(