Fix vllm:prompt_tokens_total metric calculation (#2869)

Ronen Schaffer
2024-02-19 09:55:41 +02:00
committed by GitHub
parent 86fd8bb0ac
commit e433c115bc
3 changed files with 41 additions and 6 deletions


@@ -13,12 +13,10 @@ _TEST_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "example.txt")]
 _LONG_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "summary.txt")]
-def _read_prompts(filename: str) -> str:
-    prompts = []
+def _read_prompts(filename: str) -> List[str]:
     with open(filename, "r") as f:
-        prompt = f.readline()
-        prompts.append(prompt)
-    return prompts
+        prompts = f.readlines()
+    return prompts
 @pytest.fixture
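
The hunk above rewrites the test helper so it returns every prompt in the file rather than only the first line. A minimal sketch of the new behaviour, assuming a prompts file with one prompt per line (the path in the usage line is illustrative):

from typing import List

def _read_prompts(filename: str) -> List[str]:
    # Read all prompts, one per line, instead of only the first line.
    with open(filename, "r") as f:
        prompts = f.readlines()
    return prompts

# Illustrative usage; the path is hypothetical.
prompts = _read_prompts("tests/prompts/example.txt")
print(len(prompts))  # every prompt in the file, not just the first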
@@ -165,6 +163,7 @@ class VllmRunner:
         model_name: str,
         tokenizer_name: Optional[str] = None,
         dtype: str = "half",
+        disable_log_stats: bool = True,
         tensor_parallel_size: int = 1,
     ) -> None:
         self.model = LLM(
@@ -173,6 +172,7 @@
             trust_remote_code=True,
             dtype=dtype,
             swap_space=0,
+            disable_log_stats=disable_log_stats,
             tensor_parallel_size=tensor_parallel_size,
         )
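
The two hunks above add a disable_log_stats flag to the test runner and pass it through to the LLM constructor, so a metrics test can keep stats logging enabled. A hypothetical usage sketch, assuming the VllmRunner shown above; the model name is illustrative and the closing comment only describes the intent, it is not taken from this diff:

# Keep stats enabled so counters such as vllm:prompt_tokens_total are recorded.
runner = VllmRunner(
    "facebook/opt-125m",       # illustrative model name
    dtype="half",
    disable_log_stats=False,   # override the True default for metric tests
)
# A metric test would then run generation and compare the logged prompt
# token count against the tokenized lengths of the input prompts.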