[Model] Add reasoning_parser and tool_parser for Ernie45 thinking (#25027)

Signed-off-by: wangyafeng <wangyafeng@baidu.com>
This commit is contained in:
CSWYF3634076
2025-10-13 15:55:20 +08:00
committed by GitHub
parent 98f30b8cba
commit 782505ed8e
7 changed files with 870 additions and 0 deletions

View File

@ -0,0 +1,124 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from transformers import AutoTokenizer
from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager
# Name under which the parser is registered with ReasoningParserManager.
parser_name = "ernie45"
# Hugging Face model whose tokenizer drives the extraction tests.
REASONING_MODEL_NAME = "baidu/ERNIE-4.5-21B-A3B-Thinking"
@pytest.fixture(scope="module")
def ernie45_tokenizer():
    """Load the ERNIE-4.5 thinking-model tokenizer once for the whole module."""
    tokenizer = AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)
    return tokenizer
# Output containing </think>, non-streaming case.
WITH_THINK = {
    "output": "abc</think>def",
    "reasoning_content": "abc",
    "content": "def",
}
# Output containing </think>, streaming case.
WITH_THINK_STREAM = {
    "output": "abc</think>def",
    "reasoning_content": "abc",
    "content": "def",
}
# without </think>, all is reasoning_content
WITHOUT_THINK = {
    "output": "abc",
    "reasoning_content": "abc",
    "content": None,
}
# without </think>, all is reasoning_content
WITHOUT_THINK_STREAM = {
    "output": "abc",
    "reasoning_content": "abc",
    "content": None,
}
# Output ends exactly at </think>: everything is reasoning, no content.
COMPLETE_REASONING = {
    "output": "abc</think>",
    "reasoning_content": "abc",
    "content": None,
}
# Newlines inside both the reasoning and the content sections.
MULTILINE_REASONING = {
    "output": "abc\nABC</think>def\nDEF",
    "reasoning_content": "abc\nABC",
    "content": "def\nDEF",
}
# (streaming flag, expected-extraction dict) pairs fed to test_reasoning;
# ids keep the pytest output readable.
TEST_CASES = [
    pytest.param(
        False,
        WITH_THINK,
        id="with_think",
    ),
    pytest.param(
        True,
        WITH_THINK_STREAM,
        id="with_think_stream",
    ),
    pytest.param(
        False,
        WITHOUT_THINK,
        id="without_think",
    ),
    pytest.param(
        True,
        WITHOUT_THINK_STREAM,
        id="without_think_stream",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_stream",
    ),
    pytest.param(
        False,
        MULTILINE_REASONING,
        id="multiline_reasoning",
    ),
    pytest.param(
        True,
        MULTILINE_REASONING,
        id="multiline_reasoning_stream",
    ),
]
@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    ernie45_tokenizer,
):
    """Check reasoning/content splitting for streaming and non-streaming modes."""
    # Re-tokenize the raw output so streaming mode is fed realistic per-token
    # string chunks, as the engine would produce them.
    output = ernie45_tokenizer.tokenize(param_dict["output"])
    output_tokens: list[str] = []
    for token in output:
        # Drop tokens that decode to an empty string so every chunk is non-empty.
        one_token = ernie45_tokenizer.convert_tokens_to_string([token])
        if one_token:
            output_tokens.append(one_token)

    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        ernie45_tokenizer
    )

    # Fix: removed a leftover debug `print()` between extraction and asserts.
    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )

    assert reasoning == param_dict["reasoning_content"]
    assert content == param_dict["content"]

View File

@ -0,0 +1,359 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
import json
from collections.abc import Generator
import pytest
from vllm.entrypoints.openai.protocol import (
ChatCompletionRequest,
DeltaMessage,
FunctionCall,
ToolCall,
)
from vllm.entrypoints.openai.tool_parsers import Ernie45ToolParser
from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
from vllm.transformers_utils.tokenizer import AnyTokenizer, get_tokenizer
# Thinking-capable ERNIE model; only its tokenizer is fetched by these tests.
MODEL = "baidu/ERNIE-4.5-21B-A3B-Thinking"
@pytest.fixture(scope="module")
def ernie45_tokenizer():
    """Build the ERNIE tokenizer once and share it across this module's tests."""
    tokenizer = get_tokenizer(tokenizer_name=MODEL, trust_remote_code=True)
    return tokenizer
@pytest.fixture
def ernie45_tool_parser(ernie45_tokenizer):
    """Create a fresh Ernie45ToolParser for each test (function-scoped)."""
    parser = Ernie45ToolParser(ernie45_tokenizer)
    return parser
def assert_tool_calls(
    actual_tool_calls: "list[ToolCall]", expected_tool_calls: "list[ToolCall]"
):
    """Compare parsed tool calls against the expected ones, field by field."""
    assert len(actual_tool_calls) == len(expected_tool_calls)

    for actual, expected in zip(actual_tool_calls, expected_tool_calls):
        # Every parsed call must carry a non-empty string id of type "function".
        assert isinstance(actual.id, str)
        assert len(actual.id) > 0
        assert actual.type == "function"
        assert actual.function.name == expected.function.name
        # Decode both argument payloads so key order and whitespace differences
        # in the JSON text do not cause spurious failures.
        assert json.loads(actual.function.arguments) == json.loads(
            expected.function.arguments
        )
def test_extract_tool_calls_no_tools(ernie45_tool_parser):
    """Plain text with no tool markup must pass through as content, untouched."""
    model_output = "This is a test"
    result = ernie45_tool_parser.extract_tool_calls(
        model_output, request=None
    )  # type: ignore[arg-type]
    assert not result.tools_called
    assert result.tool_calls == []
    assert result.content == model_output
@pytest.mark.parametrize(
    ids=[
        "single_tool_call",
        "multiple_tool_calls",
        "tool_call_with_content_before",
    ],
    argnames=["model_output", "expected_tool_calls", "expected_content"],
    argvalues=[
        # A single <tool_call> block with no surrounding text.
        (
            """<tool_call>
{"name": "get_current_temperature", "arguments": {"location": "Beijing"}}
</tool_call>
""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_temperature",
                        arguments=json.dumps(
                            {
                                "location": "Beijing",
                            }
                        ),
                    )
                )
            ],
            None,
        ),
        # Two consecutive <tool_call> blocks.
        (
            """<tool_call>
{"name": "get_current_temperature", "arguments": {"location": "Beijing"}}
</tool_call>
<tool_call>
{"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}}
</tool_call>
""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_temperature",
                        arguments=json.dumps(
                            {
                                "location": "Beijing",
                            }
                        ),
                    )
                ),
                ToolCall(
                    function=FunctionCall(
                        name="get_temperature_unit",
                        arguments=json.dumps(
                            {
                                "location": "Guangzhou",
                                "unit": "c",
                            }
                        ),
                    )
                ),
            ],
            None,
        ),
        # Reasoning text ending in </think> precedes the tool calls; it is
        # expected back as the content field.
        (
            """I need to call two tools to handle these two issues separately.
</think>
<tool_call>
{"name": "get_current_temperature", "arguments": {"location": "Beijing"}}
</tool_call>
<tool_call>
{"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}}
</tool_call>
""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_temperature",
                        arguments=json.dumps(
                            {
                                "location": "Beijing",
                            }
                        ),
                    )
                ),
                ToolCall(
                    function=FunctionCall(
                        name="get_temperature_unit",
                        arguments=json.dumps(
                            {
                                "location": "Guangzhou",
                                "unit": "c",
                            }
                        ),
                    )
                ),
            ],
            "I need to call two tools to handle these two issues separately.\n</think>",
        ),
    ],
)
def test_extract_tool_calls(
    ernie45_tool_parser, model_output, expected_tool_calls, expected_content
):
    """Non-streaming extraction: full model output in, parsed tool calls out."""
    extracted_tool_calls = ernie45_tool_parser.extract_tool_calls(
        model_output, request=None
    )  # type: ignore[arg-type]
    assert extracted_tool_calls.tools_called
    assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls)
    assert extracted_tool_calls.content == expected_content
def stream_delta_message_generator(
    ernie45_tool_parser: Ernie45ToolParser,
    ernie45_tokenizer: AnyTokenizer,
    model_output: str,
    request: ChatCompletionRequest | None = None,
) -> Generator[DeltaMessage, None, None]:
    """Replay *model_output* one token at a time through the streaming parser.

    Encodes the full output, then feeds each token to
    ``extract_tool_calls_streaming`` together with the incrementally
    detokenized text, yielding every non-None DeltaMessage the parser emits.
    """
    all_token_ids = ernie45_tokenizer.encode(model_output, add_special_tokens=False)

    previous_text = ""
    previous_tokens = None
    prefix_offset = 0
    read_offset = 0
    for i, delta_token in enumerate(all_token_ids):
        delta_token_ids = [delta_token]
        previous_token_ids = all_token_ids[:i]
        current_token_ids = all_token_ids[: i + 1]

        # Incremental detokenization mirrors what the engine does at serve
        # time; prefix/read offsets are carried across iterations.
        (new_tokens, delta_text, new_prefix_offset, new_read_offset) = (
            detokenize_incrementally(
                tokenizer=ernie45_tokenizer,
                all_input_ids=current_token_ids,
                prev_tokens=previous_tokens,
                prefix_offset=prefix_offset,
                read_offset=read_offset,
                skip_special_tokens=False,
                spaces_between_special_tokens=True,
            )
        )

        current_text = previous_text + delta_text

        delta_message = ernie45_tool_parser.extract_tool_calls_streaming(
            previous_text,
            current_text,
            delta_text,
            previous_token_ids,
            current_token_ids,
            delta_token_ids,
            request=request,
        )
        if delta_message:
            yield delta_message

        # Roll the incremental-detokenization state forward for the next token.
        previous_text = current_text
        previous_tokens = (
            previous_tokens + new_tokens if previous_tokens else new_tokens
        )
        prefix_offset = new_prefix_offset
        read_offset = new_read_offset
@pytest.mark.parametrize(
    ids=[
        "single_tool_call",
        "multiple_tool_calls",
        "tool_call_with_content_before",
    ],
    argnames=["model_output", "expected_tool_calls", "expected_content"],
    argvalues=[
        # A single <tool_call> block with no surrounding text.
        (
            """<tool_call>
{"name": "get_current_temperature", "arguments": {"location": "Beijing"}}
</tool_call>
""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_temperature",
                        arguments=json.dumps(
                            {
                                "location": "Beijing",
                            }
                        ),
                    )
                )
            ],
            None,
        ),
        # Two consecutive <tool_call> blocks.
        (
            """<tool_call>
{"name": "get_current_temperature", "arguments": {"location": "Beijing"}}
</tool_call>
<tool_call>
{"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}}
</tool_call>
""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_temperature",
                        arguments=json.dumps(
                            {
                                "location": "Beijing",
                            }
                        ),
                    )
                ),
                ToolCall(
                    function=FunctionCall(
                        name="get_temperature_unit",
                        arguments=json.dumps(
                            {
                                "location": "Guangzhou",
                                "unit": "c",
                            }
                        ),
                    )
                ),
            ],
            None,
        ),
        # Reasoning text ending in </think> precedes the tool calls.
        (
            """I need to call two tools to handle these two issues separately.
</think>
<tool_call>
{"name": "get_current_temperature", "arguments": {"location": "Beijing"}}
</tool_call>
<tool_call>
{"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}}
</tool_call>
""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_temperature",
                        arguments=json.dumps(
                            {
                                "location": "Beijing",
                            }
                        ),
                    )
                ),
                ToolCall(
                    function=FunctionCall(
                        name="get_temperature_unit",
                        arguments=json.dumps(
                            {
                                "location": "Guangzhou",
                                "unit": "c",
                            }
                        ),
                    )
                ),
            ],
            "I need to call two tools to handle these two issues separately.\n</think>",
        ),
    ],
)
def test_extract_tool_calls_streaming_incremental(
    ernie45_tool_parser,
    ernie45_tokenizer,
    model_output,
    expected_tool_calls,
    expected_content,
):
    """Verify the Ernie45 Parser streaming behavior by verifying each chunk is as expected."""  # noqa: E501
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=[])

    # Reassemble the streamed chunks per tool-call index, then compare the
    # result against the expected complete tool calls.
    tool_calls_dict = {}
    for delta_message in stream_delta_message_generator(
        ernie45_tool_parser, ernie45_tokenizer, model_output, request
    ):
        # Skip deltas that carry no role, content, reasoning, or tool calls.
        if (
            delta_message.role is None
            and delta_message.content is None
            and delta_message.reasoning_content is None
            and len(delta_message.tool_calls) == 0
        ):
            continue

        tool_calls = delta_message.tool_calls
        for tool_call_chunk in tool_calls:
            index = tool_call_chunk.index
            if index not in tool_calls_dict:
                # First chunk for this index; normalize a missing arguments
                # field to "" so later chunks can be appended to it.
                if tool_call_chunk.function.arguments is None:
                    tool_call_chunk.function.arguments = ""
                tool_calls_dict[index] = tool_call_chunk
            else:
                tool_calls_dict[
                    index
                ].function.arguments += tool_call_chunk.function.arguments

    actual_tool_calls = list(tool_calls_dict.values())
    assert len(actual_tool_calls) > 0

    # check tool call format
    assert_tool_calls(actual_tool_calls, expected_tool_calls)