Consolidate Llama model usage in tests (#13094)

This commit is contained in:
Harry Mellor
2025-02-14 06:18:03 +00:00
committed by GitHub
parent 40932d7a05
commit f2b20fe491
22 changed files with 45 additions and 53 deletions

View File

@@ -14,7 +14,7 @@ from vllm.entrypoints.openai.serving_models import (BaseModelPath,
OpenAIServingModels)
from vllm.lora.request import LoRARequest
-MODEL_NAME = "meta-llama/Llama-2-7b"
+MODEL_NAME = "meta-llama/Llama-3.2-1B-Instruct"
BASE_MODEL_PATHS = [BaseModelPath(name=MODEL_NAME, model_path=MODEL_NAME)]
LORA_LOADING_SUCCESS_MESSAGE = (
"Success: LoRA adapter '{lora_name}' added successfully.")

View File

@@ -5,7 +5,7 @@ import pytest
from ...utils import RemoteOpenAIServer
-MODEL_NAME = "meta-llama/Llama-3.2-1B"
+MODEL_NAME = "meta-llama/Llama-3.2-1B-Instruct"
@pytest.mark.asyncio