Feat: Optimize the style of the chat page. (#13429)

### What problem does this PR solve?

Feat: Optimize the style of the chat page.
### Type of change


- [x] New Feature (non-breaking change which adds functionality)
This commit is contained in:
balibabu
2026-03-06 11:42:25 +08:00
committed by GitHub
parent 6023eb27ac
commit 6849d35bf5
10 changed files with 59 additions and 52 deletions

View File

@ -14,6 +14,7 @@ import {
} from '@/components/file-upload';
import { Button } from '@/components/ui/button';
import { Textarea } from '@/components/ui/textarea';
import { cn } from '@/lib/utils';
import { t } from 'i18next';
import {
Atom,
@ -246,8 +247,10 @@ export function NextMessageInput({
<Button
type="button"
size="sm"
variant={enableThinking ? 'accent' : 'transparent'}
className="border-0 h-7 text-sm"
variant={'outline'}
className={cn('border-0 h-7 text-sm bg-bg-card', {
'bg-text-primary text-bg-base': enableThinking,
})}
onClick={handleThinkingToggle}
data-testid="chat-detail-thinking-toggle"
>

View File

@ -56,7 +56,10 @@ export const AssistantGroupButton = ({
return (
<>
<div className="flex gap-1" role="toolbar">
<div
className="flex gap-1 opacity-0 transition-opacity group-hover:opacity-100"
role="toolbar"
>
<CopyToClipboard text={content} className="border-0" size="icon-xs" />
{showLoudspeaker && (
@ -152,7 +155,7 @@ export const UserGroupButton = ({
const { t } = useTranslation();
return (
<div className="flex gap-1">
<div className="flex gap-1 opacity-0 transition-opacity group-hover:opacity-100">
<CopyToClipboard text={content} className="border-0" size="icon-xs" />
{regenerateMessage && (

View File

@ -99,7 +99,7 @@ const MessageItem = ({
})}
>
<div
className={classNames(styles.messageItemContent, {
className={classNames(styles.messageItemContent, 'group', {
[styles.messageItemContentReverse]: item.role === MessageType.User,
})}
>

View File

@ -55,6 +55,8 @@ const buttonVariants = cva(
focus-visible:text-text-primary focus-visible:bg-border-button focus-visible:border-border-button
`,
icon: 'bg-transparent text-foreground hover:bg-transparent/80',
transparent: `
text-text-secondary bg-transparent border-0.5 border-border-button
hover:text-text-primary hover:bg-border-button

View File

@ -95,7 +95,7 @@ export default {
title: 'A leading RAG engine for LLM context',
start: "Let's get started",
description:
'Sign up for free to explore top RAG technology. Create knowledge bases and AIs to empower your business.',
'Sign up for free to explore top RAG technology. Create datasets and AIs to empower your business.',
review: 'from 500+ reviews',
seeAll: 'See all',
},
@ -198,7 +198,7 @@ Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default lim
},
knowledgeList: {
welcome: 'Welcome back',
description: 'Which knowledge bases will you use today?',
description: 'Which datasets will you use today?',
createKnowledgeBase: 'Create dataset',
name: 'Name',
namePlaceholder: 'Please input name.',
@ -404,7 +404,7 @@ Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default lim
'A delimiter or separator can consist of one or multiple special characters. If it is multiple characters, ensure they are enclosed in backticks( ``). For example, if you configure your delimiters like this: \\n`##`;, then your texts will be separated at line breaks, double hash symbols (##), and semicolons.',
html4excel: 'Excel to HTML',
html4excelTip: `Use with the General chunking method. When disabled, spreadsheets (XLSX or XLS(Excel 97-2003)) in the knowledge base will be parsed into key-value pairs. When enabled, they will be parsed into HTML tables, splitting every 12 rows if the original table has more than 12 rows. See https://ragflow.io/docs/dev/enable_excel2html for details.`,
html4excelTip: `Use with the General chunking method. When disabled, spreadsheets (XLSX or XLS(Excel 97-2003)) in the dataset will be parsed into key-value pairs. When enabled, they will be parsed into HTML tables, splitting every 12 rows if the original table has more than 12 rows. See https://ragflow.io/docs/dev/enable_excel2html for details.`,
autoKeywords: 'Auto-keyword',
autoKeywordsTip: `Automatically extract N keywords for each chunk to increase their ranking for queries containing those keywords. Be aware that extra tokens will be consumed by the indexing model specified in 'Configuration'. You can check or update the added keywords for a chunk from the chunk list. For details, see https://ragflow.io/docs/dev/autokeyword_autoquestion.`,
autoQuestions: 'Auto-question',
@ -518,8 +518,8 @@ Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default lim
builtIn: 'Built-in',
titleDescription:
'Update your memory configuration here, particularly the LLM and prompts.',
name: 'Knowledge base name',
photo: 'Knowledge base photo',
name: 'Dataset name',
photo: 'Dataset photo',
photoTip: 'You can upload an image up to 4 MB.',
description: 'Description',
language: 'Document language',
@ -530,9 +530,9 @@ Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default lim
chunkTokenNumber: 'Recommended chunk size',
chunkTokenNumberMessage: 'Chunk token number for text is required',
embeddingModelTip:
'The default embedding model used by the knowledge base. Once the knowledge base has chunks, when switching the embedding model, the system randomly samples a few chunks for a compatibility check, re-embeds them with the new embedding model, and computes cosine similarity between the new and old vectors. Switching is allowed only when the average similarity of the sample is ≥ 0.9. Otherwise, you must delete all chunks in the knowledge base before you can change it.',
'The default embedding model used by the dataset. Once the dataset has chunks, when switching the embedding model, the system randomly samples a few chunks for a compatibility check, re-embeds them with the new embedding model, and computes cosine similarity between the new and old vectors. Switching is allowed only when the average similarity of the sample is ≥ 0.9. Otherwise, you must delete all chunks in the dataset before you can change it.',
permissionsTip:
"If it is set to 'Team', all your team members will be able to manage the knowledge base.",
"If it is set to 'Team', all your team members will be able to manage the dataset.",
chunkTokenNumberTip:
      'It sets the token threshold for creating a chunk. A segment with fewer tokens than this threshold will be combined with the following segments until the token count exceeds the threshold, at which point a chunk is created. No new chunk is created unless a delimiter is encountered, even if the threshold is exceeded.',
chunkMethod: 'Chunking method',
@ -569,7 +569,7 @@ Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default lim
'The following screenshots are provided for clarification.',
dialogueExamplesTitle: 'view',
methodEmpty:
'This will display a visual explanation of the knowledge base categories',
'This will display a visual explanation of the dataset categories',
book: `<p>Supported file formats are <b>DOCX</b>, <b>PDF</b>, <b>TXT</b>.</p><p>
For each book in PDF, please set the <i>page ranges</i> to remove unwanted information and reduce analysis time.</p>`,
laws: `<p>Supported file formats are <b>DOCX</b>, <b>PDF</b>, <b>TXT</b>.</p><p>
@ -647,9 +647,9 @@ Example: A 1 KB message with 1024-dim embedding uses ~9 KB. The 5 MB default lim
<p>This approach chunks files using the 'naive'/'General' method. It splits a document into segments and then combines adjacent segments until the token count exceeds the threshold specified by 'Chunk token number for text', at which point a chunk is created.</p>
<p>The chunks are then fed to the LLM to extract entities and relationships for a knowledge graph and a mind map.</p>
<p>Ensure that you set the <b>Entity types</b>.</p>`,
tag: `<p>A knowledge base using the 'Tag' chunking method functions as a tag set. Other knowledge bases use it to tag their chunks, and queries to these knowledge bases are also tagged using this tag set.</p>
tag: `<p>A dataset using the 'Tag' chunking method functions as a tag set. Other datasets use it to tag their chunks, and queries to these datasets are also tagged using this tag set.</p>
<p>A tag set will <b>NOT</b> be directly involved in a Retrieval-Augmented Generation (RAG) process.</p>
<p>Each chunk in this knowledge base is an independent description-tag pair.</p>
<p>Each chunk in this dataset is an independent description-tag pair.</p>
<p>Supported file formats include <b>XLSX</b> and <b>CSV/TXT</b>:</p>
<p>If a file is in <b>XLSX</b> format, it should contain two columns without headers: one for tag descriptions and the other for tag names, with the Description column preceding the Tag column. Multiple sheets are acceptable, provided the columns are properly structured.</p>
<p>If a file is in <b>CSV/TXT</b> format, it must be UTF-8 encoded with TAB as the delimiter to separate descriptions and tags.</p>
@ -681,7 +681,7 @@ The above is the content you need to summarize.`,
entityTypes: 'Entity types',
vietnamese: 'Vietnamese',
pageRank: 'Page rank',
pageRankTip: `You can assign a higher PageRank score to specific knowledge bases during retrieval. The corresponding score is added to the hybrid similarity scores of retrieved chunks from these knowledge bases, increasing their ranking. See https://ragflow.io/docs/dev/set_page_rank for details.`,
pageRankTip: `You can assign a higher PageRank score to specific datasets during retrieval. The corresponding score is added to the hybrid similarity scores of retrieved chunks from these datasets, increasing their ranking. See https://ragflow.io/docs/dev/set_page_rank for details.`,
tagName: 'Tag',
frequency: 'Frequency',
searchTags: 'Search tags',
@ -689,12 +689,12 @@ The above is the content you need to summarize.`,
tagTable: 'Table',
tagSet: 'Tag sets',
tagSetTip: `
<p> Select one or multiple tag knowledge bases to auto-tag chunks in your knowledge base. See https://ragflow.io/docs/dev/use_tag_sets for details.</p>
<p> Select one or multiple tag datasets to auto-tag chunks in your dataset. See https://ragflow.io/docs/dev/use_tag_sets for details.</p>
<p>The user query will also be auto-tagged.</p>
This auto-tagging feature enhances retrieval by adding another layer of domain-specific knowledge to the existing dataset.
<p>Difference between auto-tag and auto-keyword:</p>
<ul>
<li>A tag knowledge base is a user-defined close set, whereas keywords extracted by the LLM can be regarded as an open set.</li>
  <li>A tag dataset is a user-defined closed set, whereas keywords extracted by the LLM can be regarded as an open set.</li>
<li>You must upload tag sets in specified formats before running the auto-tag feature.</li>
<li>The auto-keyword feature is dependent on the LLM and consumes a significant number of tokens.</li>
</ul>
@ -704,7 +704,7 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
addTag: 'Add tag',
useGraphRag: 'Knowledge graph',
useGraphRagTip:
'Construct a knowledge graph over file chunks of the current knowledge base to enhance multi-hop question-answering involving nested logic. See https://ragflow.io/docs/dev/construct_knowledge_graph for details.',
'Construct a knowledge graph over file chunks of the current dataset to enhance multi-hop question-answering involving nested logic. See https://ragflow.io/docs/dev/construct_knowledge_graph for details.',
graphRagMethod: 'Method',
graphRagMethodTip: `
Light: (Default) Use prompts provided by github.com/HKUDS/LightRAG to extract entities and relationships. This option consumes fewer tokens, less memory, and fewer computational resources.</br>
@ -770,17 +770,17 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
sendPlaceholder: 'Message the assistant...',
chatConfiguration: 'Chat configuration',
chatConfigurationDescription:
' Set up a chat assistant for your selected datasets (knowledge bases) here! 💕',
      ' Set up a chat assistant for your selected datasets here! 💕',
assistantName: 'Assistant name',
assistantNameMessage: 'Assistant name is required',
namePlaceholder: 'e.g. Resume Jarvis',
assistantAvatar: 'Assistant avatar',
language: 'Language',
emptyResponse: 'Empty response',
emptyResponseTip: `Set this as a response if no results are retrieved from the datasets for your query, or leave this field blank to allow the LLM to improvise when nothing is found.`,
emptyResponseMessage: `Empty response will be triggered when nothing relevant is retrieved from datasets. You must clear the 'Empty response' field if no dataset is selected.`,
emptyResponsePlaceholder:
'The answer you are looking for is not found in the dataset!',
emptyResponseTip: `Set this as a response if no results are retrieved from the knowledge bases for your query, or leave this field blank to allow the LLM to improvise when nothing is found.`,
emptyResponseMessage: `Empty response will be triggered when nothing relevant is retrieved from knowledge bases. You must clear the 'Empty response' field if no knowledge base is selected.`,
setAnOpener: 'Opening greeting',
setAnOpenerInitial: `Hi! I'm your assistant. What can I do for you?`,
setAnOpenerTip: 'Set an opening greeting for users.',
@ -788,7 +788,7 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
knowledgeBasesPlaceholder: 'Select value',
knowledgeBasesMessage: 'Please select',
knowledgeBasesTip:
'Select the datasets to associate with this chat assistant. An empty knowledge base will not appear in the dropdown list.',
'Select the datasets to associate with this chat assistant. An empty dataset will not appear in the dropdown list.',
system: 'System prompt',
systemPlaceholder: `You are an intelligent assistant. Your primary function is to answer questions based strictly on the provided knowledge base.
@ -800,9 +800,9 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
systemInitialValue: `You are an intelligent assistant. Your primary function is to answer questions based strictly on the provided knowledge base.
**Essential Rules:**
- Your answer must be derived **solely** from this knowledge base: \`{knowledge}\`.
- Your answer must be derived **solely** from this dataset: \`{knowledge}\`.
- **When information is available**: Summarize the content to give a detailed answer.
- **When information is unavailable**: Your response must contain this exact sentence: "The answer you are looking for is not found in the knowledge base!"
- **When information is unavailable**: Your response must contain this exact sentence: "The answer you are looking for is not found in the dataset!"
- **Always consider** the entire conversation history.`,
systemMessage: 'Please input!',
systemTip:
@ -810,7 +810,7 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
topN: 'Top N',
topNTip: `Not all chunks with similarity score above the 'similarity threshold' will be sent to the LLM. This selects 'Top N' chunks from the retrieved ones.`,
variable: 'Variable',
variableTip: `Used together with RAGFlow's chat assistant management APIs, variables can help develop more flexible system prompt strategies. The defined variables will be used by 'System prompt' as part of the prompts for the LLM. {knowledge} is a reserved special variable representing chunks retrieved from specified knowledge base(s), and all variables should be enclosed in curly braces {} in the 'System prompt'. See https://ragflow.io/docs/dev/set_chat_variables for details.`,
variableTip: `Used together with RAGFlow's chat assistant management APIs, variables can help develop more flexible system prompt strategies. The defined variables will be used by 'System prompt' as part of the prompts for the LLM. {knowledge} is a reserved special variable representing chunks retrieved from specified dataset(s), and all variables should be enclosed in curly braces {} in the 'System prompt'. See https://ragflow.io/docs/dev/set_chat_variables for details.`,
add: 'Add',
key: 'Key',
optional: 'Optional',
@ -897,7 +897,7 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
descriptionPlaceholder: "I'm a chat assistant.",
useKnowledgeGraph: 'Use knowledge graph',
useKnowledgeGraphTip:
'Whether to use knowledge graph(s) in the specified knowledge base(s) during retrieval for multi-hop question answering. When enabled, this would involve iterative searches across entity, relationship, and community report chunks, greatly increasing retrieval time.',
'Whether to use knowledge graph(s) in the specified dataset(s) during retrieval for multi-hop question answering. When enabled, this would involve iterative searches across entity, relationship, and community report chunks, greatly increasing retrieval time.',
keyword: 'Keyword analysis',
    keywordTip: `Use LLM to analyze the user's questions and extract keywords which will be emphasized during the relevance computation. Works well with lengthy queries but will increase response time.`,
languageTip:
@ -908,7 +908,7 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
reasoning: 'Reasoning',
reasoningTip: `Whether to enable a reasoning workflow during question answering, as seen in models like Deepseek-R1 or OpenAI o1. When enabled, this allows the model to access external knowledge and tackle complex questions in a step-by-step manner, leveraging techniques like chain-of-thought reasoning. This approach enhances the model's ability to provide accurate responses by breaking down problems into manageable steps, improving performance on tasks that require logical reasoning and multi-step thinking.`,
tavilyApiKeyTip:
'If an API key is correctly set here, Tavily-based web searches will be used to supplement knowledge base retrieval.',
'If an API key is correctly set here, Tavily-based web searches will be used to supplement dataset retrieval.',
tavilyApiKeyMessage: 'Please enter your Tavily API Key',
tavilyApiKeyHelp: 'How to get it?',
crossLanguage: 'Cross-language search',
@ -1223,13 +1223,13 @@ Example: Virtual Hosted Style`,
modify: 'Modify',
systemModelSettings: 'Set default models',
chatModel: 'LLM',
chatModelTip: 'The default LLM for each newly created knowledge base.',
chatModelTip: 'The default LLM for each newly created dataset.',
embeddingModel: 'Embedding',
embeddingModelTip:
'The default embedding model for each newly created knowledge base. If you cannot find an embedding model from the dropdown, check if you are using RAGFlow slim edition (which does not include embedding models) or check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.',
'The default embedding model for each newly created dataset. If you cannot find an embedding model from the dropdown, check if you are using RAGFlow slim edition (which does not include embedding models) or check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.',
img2txtModel: 'VLM',
img2txtModelTip:
'The default VLM for each newly created knowledge base. It describes a picture or video. If you cannot find a model from the dropdown, check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.',
'The default VLM for each newly created dataset. It describes a picture or video. If you cannot find a model from the dropdown, check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.',
sequence2txtModel: 'ASR',
sequence2txtModelTip:
'The default ASR model for each newly created dataset. Use this model to translate voices to corresponding text.',
@ -1606,7 +1606,7 @@ Example: Virtual Hosted Style`,
title: 'ID:',
beginDescription: 'This is where the flow begins.',
answerDescription: `A component that serves as the interface between human and bot, receiving user inputs and displaying the agent's responses.`,
retrievalDescription: `A component that retrieves information from specified knowledge bases (datasets). Ensure that the knowledge bases you select use the same embedding model.`,
    retrievalDescription: `A component that retrieves information from specified datasets. Ensure that the datasets you select use the same embedding model.`,
generateDescription: `A component that prompts the LLM to generate responses. Ensure the prompt is set correctly.`,
categorizeDescription: `A component that uses the LLM to classify user inputs into predefined categories. Ensure you specify the name, description, and examples for each category, along with the corresponding next component.`,
relevantDescription: `A component that uses the LLM to assess whether the upstream output is relevant to the user's latest query. Ensure you specify the next component for each judge result.`,
@ -1615,7 +1615,7 @@ Example: Virtual Hosted Style`,
'This component returns the final data output of the workflow along with predefined message content. ',
keywordDescription: `A component that retrieves top N search results from user's input. Ensure the TopN value is set properly before use.`,
switchDescription: `A component that evaluates conditions based on the output of previous components and directs the flow of execution accordingly. It allows for complex branching logic by defining cases and specifying actions for each case or default action if no conditions are met.`,
wikipediaDescription: `A component that searches from wikipedia.org, using TopN to specify the number of search results. It supplements the existing knowledge bases.`,
wikipediaDescription: `A component that searches from wikipedia.org, using TopN to specify the number of search results. It supplements the existing datasets.`,
promptText: `Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:
{input}
The above is the content you need to summarize.`,
@ -1638,10 +1638,10 @@ Example: Virtual Hosted Style`,
keywordExtract: 'Keyword',
keywordExtractDescription: `A component that extracts keywords from a user query, with Top N specifying the number of keywords to extract.`,
baidu: 'Baidu',
baiduDescription: `A component that searches from baidu.com, using TopN to specify the number of search results. It supplements the existing knowledge bases.`,
baiduDescription: `A component that searches from baidu.com, using TopN to specify the number of search results. It supplements the existing datasets.`,
duckDuckGo: 'DuckDuckGo',
duckDuckGoDescription:
'A component that searches from duckduckgo.com, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases.',
'A component that searches from duckduckgo.com, allowing you to specify the number of search results using TopN. It supplements the existing datasets.',
searXNG: 'SearXNG',
searXNGDescription:
'A component that searches via your provided SearXNG instance URL. Specify TopN and the instance URL.',
@ -1676,23 +1676,23 @@ Example: Virtual Hosted Style`,
wikipedia: 'Wikipedia',
pubMed: 'PubMed',
pubMedDescription:
'A component that searches from https://pubmed.ncbi.nlm.nih.gov/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases.',
'A component that searches from https://pubmed.ncbi.nlm.nih.gov/, allowing you to specify the number of search results using TopN. It supplements the existing datasets.',
email: 'Email',
emailTip:
'E-mail is a required field. You must input an E-mail address here.',
arXiv: 'ArXiv',
arXivDescription:
'A component that searches from https://arxiv.org/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases.',
'A component that searches from https://arxiv.org/, allowing you to specify the number of search results using TopN. It supplements the existing datasets.',
sortBy: 'Sort by',
submittedDate: 'Submitted date',
lastUpdatedDate: 'Last updated date',
relevance: 'Relevance',
google: 'Google',
googleDescription:
'A component that searches from https://www.google.com/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases. Please note that this requires an API key from serpapi.com.',
'A component that searches from https://www.google.com/, allowing you to specify the number of search results using TopN. It supplements the existing datasets. Please note that this requires an API key from serpapi.com.',
bing: 'Bing',
bingDescription:
'A component that searches from https://www.bing.com/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases. Please note that this requires an API key from microsoft.com.',
'A component that searches from https://www.bing.com/, allowing you to specify the number of search results using TopN. It supplements the existing datasets. Please note that this requires an API key from microsoft.com.',
apiKey: 'API KEY',
country: 'Country & region',
language: 'Language',
@ -2089,8 +2089,8 @@ This delimiter is used to split the input text into several text pieces echo of
promptMessage: 'Prompt is required',
infor: 'Information run',
knowledgeBasesTip:
'Select the knowledge bases to associate with this chat assistant, or choose variables containing knowledge base IDs below.',
knowledgeBaseVars: 'Knowledge base variables',
'Select the datasets to associate with this chat assistant, or choose variables containing dataset IDs below.',
knowledgeBaseVars: 'Dataset variables',
code: 'Code',
codeDescription: 'It allows developers to write custom Python logic.',
dataOperations: 'Data operations',

View File

@ -12,7 +12,7 @@ export function ChunkToolbar({ text }: ChunkToolbarProps) {
{text}
</span>
<div className="flex items-center gap-3">
<Button variant={'icon'} size={'icon'}>
<Button variant={'ghost'} size={'icon'}>
<Copy />
</Button>
<Button variant={'outline'} size={'sm'}>

View File

@ -72,7 +72,7 @@ export default function ChunkPage() {
></Segmented>
</div>
<div className="flex items-center gap-2">
<Button variant={'icon'} size={'icon'}>
<Button variant={'ghost'} size={'icon'}>
<EllipsisVertical />
</Button>
<Button size={'sm'}>

View File

@ -20,7 +20,7 @@ interface ChunkResultBarProps {
handleInputChange: (e: React.ChangeEvent<HTMLInputElement>) => void;
searchString: string;
}
export default ({
export default function ChunkResultBar({
changeChunkTextMode,
available,
selectAllChunk,
@ -28,7 +28,7 @@ export default ({
createChunk,
handleInputChange,
searchString,
}: ChunkResultBarProps) => {
}: ChunkResultBarProps) {
const { t } = useTranslate('chunk');
const [textSelectValue, setTextSelectValue] = useState<string | number>(
ChunkTextMode.Full,
@ -99,4 +99,4 @@ export default ({
<div className="w-[20px]"></div> */}
</div>
);
};
}

View File

@ -40,8 +40,11 @@ export default function Chat() {
const { data: dialogList } = useFetchConversationList();
const currentConversationName = useMemo(() => {
return dialogList.find((x) => x.id === conversationId)?.name;
}, [conversationId, dialogList]);
return (
dialogList.find((x) => x.id === conversationId)?.name ||
t('chat.newConversation')
);
}, [conversationId, dialogList, t]);
const fetchConversation: typeof handleConversationCardClick = useCallback(
async (conversationId, isNew) => {

View File

@ -155,10 +155,6 @@ module.exports = {
DEFAULT: 'var(--colors-background-inverse-standard)',
foreground: 'var(--colors-background-inverse-standard-foreground)',
},
'colors-background-inverse-standard': {
DEFAULT: 'var(--colors-background-inverse-standard)',
foreground: 'var(--background-inverse-standard-foreground)',
},
'colors-background-inverse-strong': {
DEFAULT: 'var(--colors-background-inverse-strong)',
foreground: 'var(--background-inverse-standard-foreground)',