Port metrics from aioprometheus to prometheus_client (#2730)

Harry Mellor
2024-02-25 19:54:00 +00:00
committed by GitHub
parent f7c1234990
commit ef978fe411
9 changed files with 130 additions and 84 deletions
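
The core API difference driving this port: aioprometheus collectors take a labels dict on every call and expose `set_value`/`get_value`, while prometheus_client binds labels up front via `.labels()` and has no public reset or read, which is why the updated tests below reach into the private `._value`. A minimal sketch of the two styles (metric names here are illustrative, not vLLM's):

```python
# aioprometheus style (before): a labels dict accompanies every call.
#
#     from aioprometheus import Counter
#     counter = Counter("prompt_tokens", "Number of prompt tokens.")
#     counter.add({}, 5)          # increment with an explicit labels dict
#     counter.set_value({}, 0)    # reset, as the old tests did
#     counter.get_value({})       # read back

# prometheus_client style (after): label names are declared at
# construction and bound per-series with .labels(); tests read the
# private ._value attribute since there is no public getter.
from prometheus_client import Counter

counter = Counter(
    "example_prompt_tokens",          # illustrative name, not vLLM's
    "Number of prompt tokens processed.",
    labelnames=["model_name"])

labels = {"model_name": "facebook/opt-125m"}
counter.labels(**labels).inc(5)
assert counter.labels(**labels)._value.get() == 5
```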


@@ -165,6 +165,7 @@ class VllmRunner:
         dtype: str = "half",
         disable_log_stats: bool = True,
         tensor_parallel_size: int = 1,
+        **kwargs,
     ) -> None:
         self.model = LLM(
             model=model_name,
@@ -174,6 +175,7 @@ class VllmRunner:
             swap_space=0,
             disable_log_stats=disable_log_stats,
             tensor_parallel_size=tensor_parallel_size,
+            **kwargs,
         )
 
     def generate(
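
`VllmRunner` now forwards arbitrary keyword arguments to the underlying `LLM` constructor, which is what lets the metrics tests below pass `gpu_memory_utilization=0.4` without the fixture growing a dedicated parameter. A minimal sketch of the pass-through pattern (`Runner` and `engine_kwargs` are illustrative names, not vLLM code):

```python
# Simplified sketch of the **kwargs pass-through added above.
class Runner:
    def __init__(self, model_name: str, dtype: str = "half", **kwargs) -> None:
        # Extra keyword arguments flow straight through to the engine,
        # so new engine options need no changes to the test fixture.
        self.engine_kwargs = dict(model=model_name, dtype=dtype, **kwargs)

runner = Runner("facebook/opt-125m", gpu_memory_utilization=0.4)
assert runner.engine_kwargs["gpu_memory_utilization"] == 0.4
```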


@@ -1,5 +1,4 @@
 import pytest
-import vllm.engine.metrics
 
 MODELS = [
     "facebook/opt-125m",
@@ -16,10 +15,10 @@ def test_metric_counter_prompt_tokens(
     dtype: str,
     max_tokens: int,
 ) -> None:
-    # Reset metric
-    vllm.engine.metrics.counter_prompt_tokens.set_value({}, 0)
-
-    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_model = vllm_runner(model,
+                             dtype=dtype,
+                             disable_log_stats=False,
+                             gpu_memory_utilization=0.4)
     tokenizer = vllm_model.model.get_tokenizer()
     prompt_token_counts = [len(tokenizer.encode(p)) for p in example_prompts]
     # This test needs at least 2 prompts in a batch of different lengths to verify their token count is correct despite padding.
@@ -29,7 +28,9 @@ def test_metric_counter_prompt_tokens(
     vllm_prompt_token_count = sum(prompt_token_counts)
 
     _ = vllm_model.generate_greedy(example_prompts, max_tokens)
-    metric_count = vllm.engine.metrics.counter_prompt_tokens.get_value({})
+    stat_logger = vllm_model.model.llm_engine.stat_logger
+    metric_count = stat_logger.metrics.counter_prompt_tokens.labels(
+        **stat_logger.labels)._value.get()
 
     assert vllm_prompt_token_count == metric_count, (
         f"prompt token count: {vllm_prompt_token_count!r}\nmetric: {metric_count!r}")
@@ -46,13 +47,15 @@ def test_metric_counter_generation_tokens(
     dtype: str,
     max_tokens: int,
 ) -> None:
-    # Reset metric
-    vllm.engine.metrics.counter_generation_tokens.set_value({}, 0)
-
-    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_model = vllm_runner(model,
+                             dtype=dtype,
+                             disable_log_stats=False,
+                             gpu_memory_utilization=0.4)
     vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
     tokenizer = vllm_model.model.get_tokenizer()
-    metric_count = vllm.engine.metrics.counter_generation_tokens.get_value({})
+    stat_logger = vllm_model.model.llm_engine.stat_logger
+    metric_count = stat_logger.metrics.counter_generation_tokens.labels(
+        **stat_logger.labels)._value.get()
     vllm_generation_count = 0
     for i in range(len(example_prompts)):
         vllm_output_ids, vllm_output_str = vllm_outputs[i]
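
Instead of resetting module-level counters, the tests now introspect the engine's stat logger, which owns the prometheus_client metrics and the label set they are recorded under. A plausible shape for that object, inferred only from the calls in the diff above (the real vLLM class differs in detail):

```python
# Inferred sketch, not vLLM source: just enough structure to satisfy
# stat_logger.metrics.<counter>.labels(**stat_logger.labels)._value.get().
from prometheus_client import CollectorRegistry, Counter

class Metrics:
    # One Counter per tracked quantity, sharing the logger's label names.
    def __init__(self, labelnames, registry):
        self.counter_prompt_tokens = Counter(
            "sketch_prompt_tokens", "Number of prefill tokens processed.",
            labelnames=labelnames, registry=registry)
        self.counter_generation_tokens = Counter(
            "sketch_generation_tokens", "Number of generation tokens processed.",
            labelnames=labelnames, registry=registry)

class StatLogger:
    # Sketch of the object reached via llm_engine.stat_logger.
    def __init__(self, labels):
        self.labels = labels                      # e.g. {"model_name": ...}
        # A private registry avoids duplicate registration in the
        # process-wide default REGISTRY when tests re-create loggers.
        self.metrics = Metrics(labelnames=list(labels),
                               registry=CollectorRegistry())

# Reading a counter the way the new tests do:
stat_logger = StatLogger(labels={"model_name": "facebook/opt-125m"})
stat_logger.metrics.counter_prompt_tokens.labels(**stat_logger.labels).inc(7)
assert stat_logger.metrics.counter_prompt_tokens.labels(
    **stat_logger.labels)._value.get() == 7
```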