feat: Adds the tenant model ID field to the interface definition. (#13274)

### What problem does this PR solve?

feat: Adds the tenant model ID field to the interface definition

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
This commit is contained in:
chanx
2026-03-05 17:27:34 +08:00
committed by GitHub
parent 62cb292635
commit 35fc5edc93
7 changed files with 217 additions and 3 deletions

View File

@@ -108,6 +108,7 @@ export interface IGenerateForm {
cite?: boolean;
prompt: number;
llm_id: string;
tenant_llm_id?: string;
parameters: { key: string; component_id: string };
}
@@ -143,6 +144,7 @@ export interface IRetrievalForm {
top_n?: number;
top_k?: number;
rerank_id?: string;
tenant_rerank_id?: string;
empty_response?: string;
kb_ids: string[];
}

View File

@@ -35,6 +35,7 @@ export interface Variable {
temperature?: number;
top_p?: number;
llm_id?: string;
tenant_llm_id?: string;
}
export interface IDialog {
@@ -48,6 +49,7 @@ export interface IDialog {
kb_names: string[];
language: string;
llm_id: string;
tenant_llm_id?: string;
llm_setting: Variable;
llm_setting_type: string;
name: string;

View File

@@ -117,6 +117,13 @@ export interface ITenantInfo {
speech2text_id: string;
rerank_id?: string;
tts_id: string;
// Tenant model IDs
tenant_asr_id?: string;
tenant_embd_id?: string;
tenant_img2txt_id?: string;
tenant_llm_id?: string;
tenant_rerank_id?: string;
tenant_tts_id?: string;
}
// Tag union for the content type of a document chunk.
export type ChunkDocType = 'image' | 'table' | 'text';

View File

@@ -0,0 +1,59 @@
// LLM list cache utility: persists the fetched LLM catalogue in
// localStorage so repeated lookups can avoid refetching within a short window.

// Shape of the persisted cache entry.
interface LlmCache {
  data: Record<string, any>;
  timestamp: number; // epoch millis when the entry was written
}

const CACHE_KEY = 'ragflow_llm_list_cache';
const CACHE_DURATION = 5 * 60 * 1000; // 5 minutes

/**
 * Read the cached LLM list.
 *
 * @returns the cached data, or `null` when the cache is absent, expired,
 *          or unreadable. Corrupt/expired entries are cleared as a side
 *          effect so the next read is a clean miss.
 */
export function getCachedLlmList(): Record<string, any> | null {
  try {
    const cached = localStorage.getItem(CACHE_KEY);
    if (!cached) return null;

    const parsed: unknown = JSON.parse(cached);
    // Validate the persisted shape. Without this, an entry missing a numeric
    // `timestamp` slips through: `Date.now() - undefined` is NaN, and
    // `NaN > CACHE_DURATION` is false, so garbage data would never expire.
    if (
      typeof parsed !== 'object' ||
      parsed === null ||
      typeof (parsed as LlmCache).timestamp !== 'number' ||
      typeof (parsed as LlmCache).data !== 'object' ||
      (parsed as LlmCache).data === null
    ) {
      clearLlmCache();
      return null;
    }

    const cache = parsed as LlmCache;
    // Check if cache is expired
    if (Date.now() - cache.timestamp > CACHE_DURATION) {
      clearLlmCache();
      return null;
    }
    return cache.data;
  } catch (error) {
    console.error('Error getting cached LLM list:', error);
    clearLlmCache();
    return null;
  }
}

/**
 * Persist the LLM list with the current time as its freshness stamp.
 * Failures (storage quota, serialization) are logged and swallowed —
 * caching is best-effort.
 */
export function setCachedLlmList(data: Record<string, any>): void {
  try {
    const cache: LlmCache = {
      data,
      timestamp: Date.now(),
    };
    localStorage.setItem(CACHE_KEY, JSON.stringify(cache));
  } catch (error) {
    console.error('Error setting cached LLM list:', error);
  }
}

/** Remove the cached LLM list entry, ignoring storage failures. */
export function clearLlmCache(): void {
  try {
    localStorage.removeItem(CACHE_KEY);
  } catch (error) {
    console.error('Error clearing LLM cache:', error);
  }
}

/** @returns true when a fresh (non-expired) LLM list is currently cached. */
export function isLlmListCached(): boolean {
  return getCachedLlmList() !== null;
}

View File

@@ -1,4 +1,5 @@
import { IThirdOAIModel } from '@/interfaces/database/llm';
import { getCachedLlmList } from './llm-cache';
export const getLLMIconName = (fid: string, llm_name: string) => {
if (fid === 'FastEmbed') {
@@ -22,3 +23,117 @@ export function getRealModelName(llmName: string) {
/** Compose the canonical "<name>@<factory>" identifier for a model. */
export function buildLlmUuid(llm: IThirdOAIModel) {
  const { llm_name, fid } = llm;
  return [llm_name, fid].join('@');
}
/**
 * Get the tenant-scoped model ID for a model name / factory pair from the
 * LLM list.
 *
 * Two catalogue shapes are supported:
 * - `/v1/llm/my_llms`: `{ [provider]: { llm: [{ name, id, ... }] } }`
 *   where the provider key is the factory ID;
 * - `/v1/llm/list`:    `{ [provider]: [{ llm_name, fid, id, ... }] }`
 *   where each item carries its own factory ID (`fid`).
 *
 * @returns the matching model's `id`, or `''` when no match is found.
 */
export function getTenantModelId(
  llmList: Record<string, any>,
  modelName: string,
  factoryId: string,
): string {
  // Iterate through all providers in the LLM list
  for (const [provider, entry] of Object.entries(llmList)) {
    // `?.` guards against a null/undefined bucket value, which the original
    // `entry.llm` access would throw on.
    if (entry?.llm && Array.isArray(entry.llm)) {
      // Handle /v1/llm/my_llms format. The provider key IS the factory ID,
      // so the check is loop-invariant per bucket — hoisted out of find().
      if (provider !== factoryId) continue;
      const model = entry.llm.find((m: any) => m.name === modelName);
      if (model?.id) {
        return model.id;
      }
    } else if (Array.isArray(entry)) {
      // Handle /v1/llm/list format
      const model = entry.find(
        (m: any) => m.llm_name === modelName && m.fid === factoryId,
      );
      if (model?.id) {
        return model.id;
      }
    }
  }
  return '';
}
/**
 * Extract the model name and factory ID from a model UUID of the form
 * "<model_name>@<factory_id>" (as produced by `buildLlmUuid`).
 *
 * Splits at the LAST '@' so model names that themselves contain '@'
 * survive intact — the original `uuid.split('@')` destructure truncated
 * such names at the first '@'. When no separator is present the whole
 * string is the model name and the factory ID is '' (the original returned
 * `undefined` here, violating the declared return type).
 */
export function parseModelUuid(uuid: string): {
  modelName: string;
  factoryId: string;
} {
  const sep = uuid.lastIndexOf('@');
  if (sep < 0) {
    return { modelName: uuid, factoryId: '' };
  }
  return { modelName: uuid.slice(0, sep), factoryId: uuid.slice(sep + 1) };
}
// Model parameter to tenant parameter mapping.
// Keys are the model-ID field names found in outgoing request payloads;
// values are the tenant-scoped field names to add alongside them (matching
// the tenant_*_id fields added to ITenantInfo in this change).
type ModelParamMap = {
  [key: string]: string;
};

const modelParamMap: ModelParamMap = {
  llm_id: 'tenant_llm_id',
  embd_id: 'tenant_embd_id',
  asr_id: 'tenant_asr_id',
  tts_id: 'tenant_tts_id',
  img2txt_id: 'tenant_img2txt_id',
  rerank_id: 'tenant_rerank_id',
};
// API endpoint whitelist — only requests whose URL contains one of these
// paths will have tenant model-ID parameters injected.
const API_WHITELIST = [
  '/v1/user/set_tenant_info',
  '/v1/dialog/set',
  '/v1/canvas/set',
  '/v1/canvas/setting',
  '/v1/search/update',
  '/api/v1/memories',
  '/v1/kb/create',
  '/v1/kb/update',
  '/v1/dataflow/set',
];

/**
 * Check whether a request URL targets a whitelisted endpoint.
 *
 * @returns true when `url` contains any entry of API_WHITELIST as a substring.
 */
export function isUrlInWhitelist(url: string): boolean {
  for (const endpoint of API_WHITELIST) {
    if (url.includes(endpoint)) {
      return true;
    }
  }
  return false;
}
// Add tenant model ID parameters to request data.
//
// For every model-ID field present in `data` (per modelParamMap, e.g.
// `llm_id`), resolves the tenant-scoped model ID via the cached LLM list and
// adds it under the mapped name (e.g. `tenant_llm_id`). Recurses into arrays
// and nested objects. Returns the input untouched when:
//  - `data` is not an object,
//  - `url` is given but not whitelisted (isUrlInWhitelist),
//  - no LLM list is cached.
// NOTE(review): objects are shallow-copied per level before modification, so
// the caller's `data` is not mutated.
export function addTenantParams(data: any, url?: string): any {
  if (!data || typeof data !== 'object') return data;

  // If URL is provided and not in whitelist, return original data
  if (url && !isUrlInWhitelist(url)) {
    return data;
  }

  // No cached catalogue — nothing to resolve tenant IDs against.
  const llmList = getCachedLlmList();
  if (!llmList) return data;

  // Handle arrays: map each element through the same transformation.
  if (Array.isArray(data)) {
    return data.map((item) => addTenantParams(item, url));
  }

  const newData = { ...data };

  // Iterate through model parameters and add corresponding tenant parameters.
  // Truthiness check means empty-string / null model IDs are skipped.
  for (const [paramName, tenantParamName] of Object.entries(modelParamMap)) {
    if (newData[paramName]) {
      try {
        const { modelName, factoryId } = parseModelUuid(newData[paramName]);
        const tenantModelId = getTenantModelId(llmList, modelName, factoryId);
        if (tenantModelId) {
          newData[tenantParamName] = tenantModelId;
        }
      } catch (error) {
        // Best-effort: a malformed model ID must not break the request.
        console.error(`Error processing ${paramName}:`, error);
      }
    }
  }

  // Recursively process nested objects. Values under model-param keys are
  // skipped — presumably those are plain ID strings; TODO confirm no object
  // payload ever lives under a modelParamMap key.
  for (const [key, value] of Object.entries(newData)) {
    if (value && typeof value === 'object' && !modelParamMap[key]) {
      newData[key] = addTenantParams(value, url);
    }
  }

  return newData;
}

View File

@@ -8,6 +8,8 @@ import authorizationUtil, {
import notification from '@/utils/notification';
import axios from 'axios';
import { convertTheKeysOfTheObjectToSnake } from './common-util';
import { setCachedLlmList } from './llm-cache';
import { addTenantParams } from './llm-util';
const FAILED_TO_FETCH = 'Failed to fetch';
@@ -82,9 +84,13 @@ request.interceptors.request.use(
const data = convertTheKeysOfTheObjectToSnake(config.data);
const params = convertTheKeysOfTheObjectToSnake(config.params);
const newConfig = { ...config, data, params };
// Add tenant parameters to data
const dataWithTenantParams = addTenantParams(data, config.url);
if (!newConfig.skipToken) {
const newConfig = { ...config, data: dataWithTenantParams, params };
// Skip token if explicitly requested
if (!(newConfig as any).skipToken) {
newConfig.headers.set(Authorization, getAuthorization());
}
@@ -105,6 +111,15 @@ request.interceptors.response.use(
return response;
}
const data = response?.data;
// Update LLM list cache when fetching my_llm or llm_list
if (data?.code === 0 && data?.data) {
const url = response?.config?.url || '';
if (url.includes('/v1/llm/my_llms') || url.includes('/v1/llm/list')) {
setCachedLlmList(data.data);
}
}
if (data?.code === 100) {
message.error(data?.message);
} else if (data?.code === 401) {

View File

@@ -9,6 +9,8 @@ import authorizationUtil, {
import notification from '@/utils/notification';
import { RequestMethod, extend } from 'umi-request';
import { convertTheKeysOfTheObjectToSnake } from './common-util';
import { setCachedLlmList } from './llm-cache';
import { addTenantParams } from './llm-util';
const FAILED_TO_FETCH = 'Failed to fetch';
@@ -82,11 +84,14 @@ request.interceptors.request.use((url: string, options: any) => {
const data = convertTheKeysOfTheObjectToSnake(options.data);
const params = convertTheKeysOfTheObjectToSnake(options.params);
// Add tenant parameters to data
const dataWithTenantParams = addTenantParams(data, url);
return {
url,
options: {
...options,
data,
data: dataWithTenantParams,
params,
headers: {
...(options.skipToken
@@ -109,6 +114,15 @@ request.interceptors.response.use(async (response: Response, options) => {
}
const data: ResponseType = await response?.clone()?.json();
// Update LLM list cache when fetching my_llm or llm_list
if (data?.code === 0 && data?.data) {
const url = response?.url || '';
if (url.includes('/v1/llm/my_llms') || url.includes('/v1/llm/list')) {
setCachedLlmList(data.data);
}
}
if (data?.code === 100) {
message.error(data?.message);
} else if (data?.code === 401) {