feat: Add Avian as an LLM provider (#13256)

### What problem does this PR solve?

This PR adds [Avian](https://avian.io) as a new LLM provider to RAGFlow.
Avian provides an OpenAI-compatible API with competitive pricing,
offering access to models like DeepSeek V3.2, Kimi K2.5, GLM-5, and
MiniMax M2.5.

**Provider details:**
- API Base URL: `https://api.avian.io/v1`
- Auth: Bearer token via API key
- OpenAI-compatible (chat completions, streaming, function calling)
- Models:
  - `deepseek/deepseek-v3.2` — 164K context, $0.26/$0.38 per 1M tokens
  - `moonshotai/kimi-k2.5` — 131K context, $0.45/$2.20 per 1M tokens
  - `z-ai/glm-5` — 131K context, $0.30/$2.55 per 1M tokens
  - `minimax/minimax-m2.5` — 1M context, $0.30/$1.10 per 1M tokens

**Changes:**
- `rag/llm/chat_model.py` — Add `AvianChat` class extending `Base`
- `rag/llm/__init__.py` — Register in `SupportedLiteLLMProvider`,
`FACTORY_DEFAULT_BASE_URL`, `LITELLM_PROVIDER_PREFIX`
- `conf/llm_factories.json` — Add Avian factory with model definitions
- `web/src/constants/llm.ts` — Add to `LLMFactory` enum, `IconMap`,
`APIMapUrl`
- `web/src/components/svg-icon.tsx` — Register SVG icon
- `web/src/assets/svg/llm/avian.svg` — Provider icon
- `docs/references/supported_models.mdx` — Add to supported models table

This follows the same pattern as other OpenAI-compatible providers
(e.g., n1n #12680, TokenPony).

cc @KevinHuSh @JinHai-CN

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
- [x] Documentation Update
This commit is contained in:
avianion
2026-02-27 09:36:55 +00:00
committed by GitHub
parent bb59a27e55
commit 5f53fbe0f1
7 changed files with 58 additions and 0 deletions

View File

@ -5641,6 +5641,42 @@
"is_tools": true
}
]
},
{
"name": "Avian",
"logo": "",
"tags": "LLM",
"status": "1",
"llm": [
{
"llm_name": "deepseek/deepseek-v3.2",
"tags": "LLM,CHAT,164K",
"max_tokens": 164000,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "moonshotai/kimi-k2.5",
"tags": "LLM,CHAT,131K",
"max_tokens": 131000,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "z-ai/glm-5",
"tags": "LLM,CHAT,131K",
"max_tokens": 131000,
"model_type": "chat",
"is_tools": true
},
{
"llm_name": "minimax/minimax-m2.5",
"tags": "LLM,CHAT,1M",
"max_tokens": 1000000,
"model_type": "chat",
"is_tools": true
}
]
}
]
}

View File

@ -18,6 +18,7 @@ A complete list of models supported by RAGFlow, which will continue to expand.
| Provider | LLM | Image2Text | Speech2text | TTS | Embedding | Rerank | OCR |
| --------------------- | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
| Anthropic | :heavy_check_mark: | | | | | | |
| Avian | :heavy_check_mark: | | | | | | |
| Azure-OpenAI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | |
| BaiChuan | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| BaiduYiyan | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | |

View File

@ -58,6 +58,7 @@ class SupportedLiteLLMProvider(StrEnum):
Azure_OpenAI = "Azure-OpenAI"
n1n = "n1n"
HunYuan = "Tencent Hunyuan"
Avian = "Avian"
FACTORY_DEFAULT_BASE_URL = {
@ -85,6 +86,7 @@ FACTORY_DEFAULT_BASE_URL = {
SupportedLiteLLMProvider.OpenAI: "https://api.openai.com/v1",
SupportedLiteLLMProvider.n1n: "https://api.n1n.ai/v1",
SupportedLiteLLMProvider.HunYuan: "https://api.hunyuan.cloud.tencent.com/v1",
SupportedLiteLLMProvider.Avian: "https://api.avian.io/v1",
}
@ -124,6 +126,7 @@ LITELLM_PROVIDER_PREFIX = {
SupportedLiteLLMProvider.Azure_OpenAI: "azure/",
SupportedLiteLLMProvider.n1n: "openai/",
SupportedLiteLLMProvider.HunYuan: "openai/",
SupportedLiteLLMProvider.Avian: "openai/",
}
ChatModel = globals().get("ChatModel", {})

View File

@ -1096,6 +1096,15 @@ class N1nChat(Base):
super().__init__(key, model_name, base_url, **kwargs)
class AvianChat(Base):
    """Chat client for the Avian provider (https://avian.io).

    Avian exposes an OpenAI-compatible chat-completions API, so this class
    only pins the factory name and the default endpoint; all request logic
    lives in ``Base``.
    """

    _FACTORY_NAME = "Avian"

    def __init__(self, key, model_name, base_url="https://api.avian.io/v1", **kwargs):
        # Callers may pass an explicitly falsy base_url (None or "");
        # short-circuit back to the provider's public endpoint in that case.
        base_url = base_url or "https://api.avian.io/v1"
        super().__init__(key, model_name, base_url, **kwargs)
class LiteLLMBase(ABC):
_FACTORY_NAME = [
"Tongyi-Qianwen",

View File

@ -0,0 +1,5 @@
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" viewBox="0 0 48 48" fill="none">
<rect width="48" height="48" rx="10" fill="#1a1a2e"/>
<path d="M14 30 C14 30 18 18 28 16 C28 16 24 22 26 26 C28 22 32 20 36 20 C34 24 30 28 26 30 C28 32 30 32 34 31 C30 34 24 36 20 34 C16 32 14 30 14 30Z" fill="#4cc9f0"/>
<circle cx="29" cy="18" r="1.5" fill="#ffffff"/>
</svg>

After

Width:  |  Height:  |  Size: 380 B

View File

@ -84,6 +84,7 @@ const svgIcons = [
LLMFactory.PaddleOCR,
LLMFactory.N1n,
// LLMFactory.DeerAPI,
LLMFactory.Avian,
];
export const LlmIcon = ({

View File

@ -63,6 +63,7 @@ export enum LLMFactory {
MinerU = 'MinerU',
PaddleOCR = 'PaddleOCR',
N1n = 'n1n',
Avian = 'Avian',
}
// Please lowercase the file name
@ -131,6 +132,7 @@ export const IconMap = {
[LLMFactory.MinerU]: 'mineru',
[LLMFactory.PaddleOCR]: 'paddleocr',
[LLMFactory.N1n]: 'n1n',
[LLMFactory.Avian]: 'avian',
};
export const APIMapUrl = {
@ -183,4 +185,5 @@ export const APIMapUrl = {
[LLMFactory.DeepInfra]: 'https://deepinfra.com/dash/api_keys',
[LLMFactory.PaddleOCR]: 'https://www.paddleocr.ai/latest/',
[LLMFactory.N1n]: 'https://docs.n1n.ai',
[LLMFactory.Avian]: 'https://avian.io',
};