mirror of
https://github.com/infiniflow/ragflow.git
synced 2026-05-06 10:17:49 +08:00
feat: Add Avian as an LLM provider (#13256)
### What problem does this PR solve? This PR adds [Avian](https://avian.io) as a new LLM provider to RAGFlow. Avian provides an OpenAI-compatible API with competitive pricing, offering access to models like DeepSeek V3.2, Kimi K2.5, GLM-5, and MiniMax M2.5. **Provider details:** - API Base URL: `https://api.avian.io/v1` - Auth: Bearer token via API key - OpenAI-compatible (chat completions, streaming, function calling) - Models: - `deepseek/deepseek-v3.2` — 164K context, $0.26/$0.38 per 1M tokens - `moonshotai/kimi-k2.5` — 131K context, $0.45/$2.20 per 1M tokens - `z-ai/glm-5` — 131K context, $0.30/$2.55 per 1M tokens - `minimax/minimax-m2.5` — 1M context, $0.30/$1.10 per 1M tokens **Changes:** - `rag/llm/chat_model.py` — Add `AvianChat` class extending `Base` - `rag/llm/__init__.py` — Register in `SupportedLiteLLMProvider`, `FACTORY_DEFAULT_BASE_URL`, `LITELLM_PROVIDER_PREFIX` - `conf/llm_factories.json` — Add Avian factory with model definitions - `web/src/constants/llm.ts` — Add to `LLMFactory` enum, `IconMap`, `APIMapUrl` - `web/src/components/svg-icon.tsx` — Register SVG icon - `web/src/assets/svg/llm/avian.svg` — Provider icon - `docs/references/supported_models.mdx` — Add to supported models table This follows the same pattern as other OpenAI-compatible providers (e.g., n1n #12680, TokenPony). cc @KevinHuSh @JinHai-CN ### Type of change - [x] New Feature (non-breaking change which adds functionality) - [x] Documentation Update
This commit is contained in:
@ -58,6 +58,7 @@ class SupportedLiteLLMProvider(StrEnum):
|
||||
Azure_OpenAI = "Azure-OpenAI"
|
||||
n1n = "n1n"
|
||||
HunYuan = "Tencent Hunyuan"
|
||||
Avian = "Avian"
|
||||
|
||||
|
||||
FACTORY_DEFAULT_BASE_URL = {
|
||||
@ -85,6 +86,7 @@ FACTORY_DEFAULT_BASE_URL = {
|
||||
SupportedLiteLLMProvider.OpenAI: "https://api.openai.com/v1",
|
||||
SupportedLiteLLMProvider.n1n: "https://api.n1n.ai/v1",
|
||||
SupportedLiteLLMProvider.HunYuan: "https://api.hunyuan.cloud.tencent.com/v1",
|
||||
SupportedLiteLLMProvider.Avian: "https://api.avian.io/v1",
|
||||
}
|
||||
|
||||
|
||||
@ -124,6 +126,7 @@ LITELLM_PROVIDER_PREFIX = {
|
||||
SupportedLiteLLMProvider.Azure_OpenAI: "azure/",
|
||||
SupportedLiteLLMProvider.n1n: "openai/",
|
||||
SupportedLiteLLMProvider.HunYuan: "openai/",
|
||||
SupportedLiteLLMProvider.Avian: "openai/",
|
||||
}
|
||||
|
||||
ChatModel = globals().get("ChatModel", {})
|
||||
|
||||
@ -1096,6 +1096,15 @@ class N1nChat(Base):
|
||||
super().__init__(key, model_name, base_url, **kwargs)
|
||||
|
||||
|
||||
class AvianChat(Base):
    """Chat model served by Avian's OpenAI-compatible API.

    Thin factory wrapper: everything is delegated to ``Base``; this class
    only pins the factory name and the provider's default endpoint.
    """

    _FACTORY_NAME = "Avian"

    def __init__(self, key, model_name, base_url="https://api.avian.io/v1", **kwargs):
        # Callers may explicitly pass a falsy base_url (None / "");
        # fall back to the provider default in that case as well.
        super().__init__(key, model_name, base_url or "https://api.avian.io/v1", **kwargs)
|
||||
|
||||
|
||||
class LiteLLMBase(ABC):
|
||||
_FACTORY_NAME = [
|
||||
"Tongyi-Qianwen",
|
||||
|
||||
Reference in New Issue
Block a user