refactor(web): remove unused metric property

This commit is contained in:
JzoNg
2026-04-09 17:30:10 +08:00
parent d1a80a85c0
commit 5069694bba
5 changed files with 2 additions and 30 deletions

View File

@@ -54,9 +54,8 @@ export const useMetricSelectorData = ({
const resolvedMetrics = useMemo(() => {
const metricsMap = new Map(config.builtinMetrics.map(metric => [metric.id, metric] as const))
const defaultGroup = config.builtinMetrics[0]?.group ?? 'other'
return availableMetricIds.map(metricId => metricsMap.get(metricId) ?? buildMetricOption(metricId, defaultGroup))
return availableMetricIds.map(metricId => metricsMap.get(metricId) ?? buildMetricOption(metricId))
}, [availableMetricIds, config.builtinMetrics])
useEffect(() => {

View File

@@ -14,12 +14,10 @@ const humanizeMetricId = (metricId: string) => {
.join(' ')
}
export const buildMetricOption = (metricId: string, fallbackGroup: string): MetricOption => ({
export const buildMetricOption = (metricId: string): MetricOption => ({
id: metricId,
label: humanizeMetricId(metricId),
description: '',
group: fallbackGroup,
badges: ['Built-in'],
})
export const getMetricVisual = (metricId: string): { icon: string, tone: MetricVisualTone } => {

View File

@@ -29,43 +29,31 @@ const builtinMetrics: MetricOption[] = [
id: 'answer-correctness',
label: 'Answer Correctness',
description: 'Compares the response with the expected answer and scores factual alignment.',
group: 'quality',
badges: ['LLM', 'Built-in'],
},
{
id: 'faithfulness',
label: 'Faithfulness',
description: 'Checks whether the answer stays grounded in the retrieved evidence.',
group: 'quality',
badges: ['LLM', 'Retrieval'],
},
{
id: 'relevance',
label: 'Relevance',
description: 'Evaluates how directly the answer addresses the original request.',
group: 'quality',
badges: ['LLM'],
},
{
id: 'latency',
label: 'Latency',
description: 'Captures runtime responsiveness for the full execution path.',
group: 'operations',
badges: ['System'],
},
{
id: 'token-usage',
label: 'Token Usage',
description: 'Tracks prompt and completion token consumption for the run.',
group: 'operations',
badges: ['System'],
},
{
id: 'tool-success-rate',
label: 'Tool Success Rate',
description: 'Measures whether each required tool invocation finishes without failure.',
group: 'operations',
badges: ['Workflow'],
},
]
@@ -74,22 +62,16 @@ const pipelineBuiltinMetrics: MetricOption[] = [
id: 'context-precision',
label: 'Context Precision',
description: 'Measures whether retrieved chunks stay tightly aligned to the request.',
group: 'quality',
badges: ['Retrieval'],
},
{
id: 'context-recall',
label: 'Context Recall',
description: 'Checks whether the retrieval result includes the evidence needed to answer.',
group: 'quality',
badges: ['Retrieval'],
},
{
id: 'context-relevance',
label: 'Context Relevance',
description: 'Scores how useful the retrieved context is for downstream generation.',
group: 'quality',
badges: ['Retrieval'],
},
]

View File

@@ -41,8 +41,6 @@ const resolveMetricOption = (resourceType: EvaluationResourceType, metricId: str
id: metricId,
label: humanizeMetricId(metricId),
description: '',
group: config.builtinMetrics[0]?.group ?? 'other',
badges: ['Built-in'],
}
}
@@ -225,7 +223,6 @@ export function createBuiltinMetric(
kind: 'builtin',
label: metric.label,
description: metric.description,
badges: metric.badges,
threshold,
nodeInfoList,
}
@@ -246,7 +243,6 @@ export function createCustomMetric(): EvaluationMetric {
kind: 'custom-workflow',
label: 'Custom Evaluator',
description: 'Map workflow variables to your evaluation inputs.',
badges: ['Workflow'],
customConfig: {
workflowId: null,
workflowAppId: null,

View File

@@ -37,8 +37,6 @@ export type MetricOption = {
id: string
label: string
description: string
group: string
badges: string[]
}
export type EvaluationWorkflowOption = {
@@ -81,7 +79,6 @@ export type EvaluationMetric = {
kind: MetricKind
label: string
description: string
badges: string[]
threshold?: number
nodeInfoList?: NodeInfo[]
customConfig?: CustomMetricConfig