refactor(web): evaluation config schema update

This commit is contained in:
JzoNg
2026-04-09 17:17:15 +08:00
parent 5c93d74dec
commit d1a80a85c0
6 changed files with 28 additions and 50 deletions

View File

@ -245,12 +245,10 @@ describe('Evaluation', () => {
data: {
evaluation_model: null,
evaluation_model_provider: null,
metrics_config: {
default_metrics: [{
metric: 'context-precision',
}],
customized_metrics: null,
},
default_metrics: [{
metric: 'context-precision',
}],
customized_metrics: null,
judgement_conditions: null,
},
})

View File

@ -132,18 +132,16 @@ describe('evaluation store', () => {
const config: EvaluationConfig = {
evaluation_model: 'gpt-4o-mini',
evaluation_model_provider: 'openai',
metrics_config: {
default_metrics: [{
metric: 'faithfulness',
node_info_list: [
{ node_id: 'node-1', title: 'Retriever', type: 'retriever' },
],
}],
customized_metrics: {
evaluation_workflow_id: 'workflow-precision-review',
input_fields: {
'app.input.query': 'query',
},
default_metrics: [{
metric: 'faithfulness',
node_info_list: [
{ node_id: 'node-1', title: 'Retriever', type: 'retriever' },
],
}],
customized_metrics: {
evaluation_workflow_id: 'workflow-precision-review',
input_fields: {
'app.input.query': 'query',
},
},
judgement_conditions: [{

View File

@ -3,7 +3,7 @@ import type { MetricVisualTone } from './types'
import type { EvaluationTargetType, NodeInfo } from '@/types/evaluation'
export const toEvaluationTargetType = (resourceType: 'apps' | 'snippets'): EvaluationTargetType => {
return resourceType === 'snippets' ? 'snippets' : 'app'
return resourceType === 'snippets' ? 'snippets' : 'apps'
}
const humanizeMetricId = (metricId: string) => {

View File

@ -16,7 +16,6 @@ import type {
EvaluationDefaultMetric,
EvaluationJudgementConditionGroup,
EvaluationJudgementConditionItem,
EvaluationMetricsConfig,
NodeInfo,
} from '@/types/evaluation'
import { getComparisonOperators, getDefaultOperator, getEvaluationMockConfig } from './mock'
@ -71,7 +70,7 @@ const normalizeNodeInfoList = (value: NodeInfo[] | undefined): NodeInfo[] => {
const normalizeDefaultMetrics = (
resourceType: EvaluationResourceType,
value: EvaluationDefaultMetric[] | undefined,
value: EvaluationDefaultMetric[] | null | undefined,
): EvaluationMetric[] => {
if (!value?.length)
return []
@ -292,9 +291,8 @@ export const buildStateFromEvaluationConfig = (
resourceType: EvaluationResourceType,
config: EvaluationConfig,
): EvaluationResourceState => {
const metricsConfig: EvaluationMetricsConfig = config.metrics_config ?? {}
const defaultMetrics = normalizeDefaultMetrics(resourceType, metricsConfig.default_metrics)
const customMetrics = normalizeCustomMetric(metricsConfig.customized_metrics)
const defaultMetrics = normalizeDefaultMetrics(resourceType, config.default_metrics)
const customMetrics = normalizeCustomMetric(config.customized_metrics)
return {
...buildInitialState(resourceType),

View File

@ -29,30 +29,14 @@ const normalizeAvailableEvaluationWorkflowsParams = (params: AvailableEvaluation
}
}
const toEvaluationTargetType = (resourceType: Exclude<EvaluationResourceType, 'datasets'>) => {
return resourceType === 'snippets' ? 'snippets' : 'app'
}
const getEvaluationConfigQueryOptions = (
resourceType: EvaluationResourceType,
resourceId: string,
) => {
if (resourceType === 'datasets') {
return consoleQuery.datasetEvaluation.config.queryOptions({
input: {
params: {
datasetId: resourceId,
},
},
enabled: !!resourceId,
refetchOnWindowFocus: false,
})
}
return consoleQuery.evaluation.config.queryOptions({
input: {
params: {
targetType: toEvaluationTargetType(resourceType),
targetType: resourceType,
targetId: resourceId,
},
},

View File

@ -1,9 +1,4 @@
export type EvaluationTargetType = 'app' | 'snippets'
export type EvaluationMetricsConfig = {
default_metrics?: EvaluationDefaultMetric[]
customized_metrics?: EvaluationCustomizedMetric | null
}
export type EvaluationTargetType = 'apps' | 'snippets' | 'datasets'
export type EvaluationConditionValue = string | number | boolean | null
@ -31,7 +26,8 @@ export type EvaluationJudgementConditions
export type EvaluationConfig = {
evaluation_model: string | null
evaluation_model_provider: string | null
metrics_config: EvaluationMetricsConfig | null
default_metrics?: EvaluationDefaultMetric[] | null
customized_metrics?: EvaluationCustomizedMetric | null
judgement_conditions: EvaluationJudgementConditions | null
}
@ -43,19 +39,23 @@ export type NodeInfo = {
export type EvaluationDefaultMetric = {
metric?: string
value_type?: string
node_info_list?: NodeInfo[]
}
export type EvaluationCustomizedMetric = {
evaluation_workflow_id?: string
input_fields?: Record<string, string | null | undefined>
output_fields?: Array<Record<string, string | null | undefined>>
output_fields?: Array<{
variable?: string
value_type?: string
}>
}
export type EvaluationConfigData = {
evaluation_model?: string
evaluation_model_provider?: string
default_metrics?: EvaluationDefaultMetric[]
default_metrics?: EvaluationDefaultMetric[] | null
customized_metrics?: EvaluationCustomizedMetric | null
judgment_config?: EvaluationJudgementConditions | null
}