Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| | e17250f0d2 | fix precommit | 2025-06-18 21:17:43 -07:00 |
247 changed files with 4691 additions and 10028 deletions

View File

@ -16,7 +16,7 @@ Please download the visualization scripts in the post
- Download `nightly-benchmarks.zip`.
- In the same folder, run the following code:
```bash
```console
export HF_TOKEN=<your HF token>
apt update
apt install -y git

View File

@ -102,7 +102,6 @@ steps:
commands:
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest --progress plain --target vllm-openai -f docker/Dockerfile.cpu ."
- "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest"
- "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version)"
env:
DOCKER_BUILDKIT: "1"
@ -118,7 +117,6 @@ steps:
commands:
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --tag public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:latest --progress plain -f docker/Dockerfile.neuron ."
- "docker push public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:latest"
- "docker push public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:$(buildkite-agent meta-data get release-version)"
env:
DOCKER_BUILDKIT: "1"

View File

@ -54,11 +54,10 @@ docker run --rm -it --device=/dev/neuron0 --network bridge \
--name "${container_name}" \
${image_name} \
/bin/bash -c "
set -e; # Exit on first error
python3 /workspace/vllm/examples/offline_inference/neuron.py;
python3 -m pytest /workspace/vllm/tests/neuron/1_core/ -v --capture=tee-sys;
for f in /workspace/vllm/tests/neuron/2_core/*.py; do
echo \"Running test file: \$f\";
echo 'Running test file: '$f;
python3 -m pytest \$f -v --capture=tee-sys;
done
"

View File

@ -4,8 +4,8 @@ CONTAINER_NAME=vllm-tpu
# vllm config
MODEL=meta-llama/Llama-3.1-8B-Instruct
MAX_NUM_SEQS=256
MAX_NUM_BATCHED_TOKENS=1024
MAX_NUM_SEQS=512
MAX_NUM_BATCHED_TOKENS=512
TENSOR_PARALLEL_SIZE=1
MAX_MODEL_LEN=2048
DOWNLOAD_DIR=/mnt/disks/persist
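For context, values like these are typically forwarded to the server launch. A hedged sketch of what that invocation can look like (the launcher script itself is not part of this hunk, but the flag names are standard vLLM CLI options):

```bash
vllm serve "$MODEL" \
  --max-num-seqs "$MAX_NUM_SEQS" \
  --max-num-batched-tokens "$MAX_NUM_BATCHED_TOKENS" \
  --tensor-parallel-size "$TENSOR_PARALLEL_SIZE" \
  --max-model-len "$MAX_MODEL_LEN" \
  --download-dir "$DOWNLOAD_DIR"
```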

View File

@ -89,7 +89,7 @@ steps:
- VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest -v -s basic_correctness/test_preemption.py
- label: Chunked Prefill Test
mirror_hardwares: [amdexperimental, amdproduction]
mirror_hardwares: [amdexperimental]
source_file_dependencies:
- vllm/
- tests/basic_correctness/test_chunked_prefill
@ -271,15 +271,6 @@ steps:
commands:
- pytest -v -s prefix_caching
- label: Platform Tests (CUDA)
mirror_hardwares: [amdexperimental]
source_file_dependencies:
- vllm/
- tests/cuda
commands:
- pytest -v -s cuda/test_cuda_context.py
- label: Samplers Test # 36min
mirror_hardwares: [amdexperimental]
source_file_dependencies:

.github/mergify.yml
View File

@ -45,7 +45,6 @@ pull_request_rules:
- files~=^vllm/entrypoints/openai/tool_parsers/llama.*\.py
- files~=^vllm/model_executor/models/.*llama.*\.py
- files~=^vllm/transformers_utils/configs/.*llama.*\.py
- title~=(?i)llama
actions:
label:
add:
@ -66,19 +65,6 @@ pull_request_rules:
add:
- multi-modality
- name: label-performance
description: Automatically apply performance label
conditions:
- or:
- files~=^benchmarks/
- files~=^vllm/benchmarks/
- files~=^tests/benchmarks/
- files~=^\.buildkite/nightly-benchmarks/
actions:
label:
add:
- performance
- name: label-qwen
description: Automatically apply qwen label
conditions:
@ -88,6 +74,7 @@ pull_request_rules:
- files~=^vllm/model_executor/models/.*qwen.*\.py
- files~=^vllm/reasoning/.*qwen.*\.py
- title~=(?i)Qwen
- body~=(?i)Qwen
actions:
label:
add:

View File

@ -115,11 +115,6 @@ repos:
entry: python tools/check_spdx_header.py
language: python
types: [python]
- id: check-root-lazy-imports
name: Check root lazy imports
entry: python tools/check_init_lazy_imports.py
language: python
types: [python]
- id: check-filenames
name: Check for spaces in all filenames
entry: bash
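Aside: hooks defined in this file can be exercised one at a time with the pre-commit CLI; for example, using the `check-filenames` id shown in the hunk above:

```bash
# Run a single hook against the whole tree (hook id taken from .pre-commit-config.yaml)
pre-commit run check-filenames --all-files
```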

View File

@ -154,13 +154,11 @@ If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs
## Contact Us
<!-- --8<-- [start:contact-us] -->
- For technical questions and feature requests, please use GitHub [Issues](https://github.com/vllm-project/vllm/issues) or [Discussions](https://github.com/vllm-project/vllm/discussions)
- For discussing with fellow users, please use the [vLLM Forum](https://discuss.vllm.ai)
- For coordinating contributions and development, please use [Slack](https://slack.vllm.ai)
- For security disclosures, please use GitHub's [Security Advisories](https://github.com/vllm-project/vllm/security/advisories) feature
- For collaborations and partnerships, please contact us at [vllm-questions@lists.berkeley.edu](mailto:vllm-questions@lists.berkeley.edu)
<!-- --8<-- [end:contact-us] -->
## Media Kit

View File

@ -387,178 +387,3 @@ python3 vllm/benchmarks/benchmark_throughput.py \
--enable-lora \
--lora-path yard1/llama-2-7b-sql-lora-test
```
---
## Example - Structured Output Benchmark
Benchmark the performance of structured output generation (JSON, grammar, regex).
### Server Setup
```bash
vllm serve NousResearch/Hermes-3-Llama-3.1-8B --disable-log-requests
```
### JSON Schema Benchmark
```bash
python3 benchmarks/benchmark_serving_structured_output.py \
--backend vllm \
--model NousResearch/Hermes-3-Llama-3.1-8B \
--dataset json \
--structured-output-ratio 1.0 \
--request-rate 10 \
--num-prompts 1000
```
### Grammar-based Generation Benchmark
```bash
python3 benchmarks/benchmark_serving_structured_output.py \
--backend vllm \
--model NousResearch/Hermes-3-Llama-3.1-8B \
--dataset grammar \
--structure-type grammar \
--request-rate 10 \
--num-prompts 1000
```
### Regex-based Generation Benchmark
```bash
python3 benchmarks/benchmark_serving_structured_output.py \
--backend vllm \
--model NousResearch/Hermes-3-Llama-3.1-8B \
--dataset regex \
--request-rate 10 \
--num-prompts 1000
```
### Choice-based Generation Benchmark
```bash
python3 benchmarks/benchmark_serving_structured_output.py \
--backend vllm \
--model NousResearch/Hermes-3-Llama-3.1-8B \
--dataset choice \
--request-rate 10 \
--num-prompts 1000
```
### XGrammar Benchmark Dataset
```bash
python3 benchmarks/benchmark_serving_structured_output.py \
--backend vllm \
--model NousResearch/Hermes-3-Llama-3.1-8B \
--dataset xgrammar_bench \
--request-rate 10 \
--num-prompts 1000
```
---
## Example - Long Document QA Throughput Benchmark
Benchmark the performance of long document question-answering with prefix caching.
### Basic Long Document QA Test
```bash
python3 benchmarks/benchmark_long_document_qa_throughput.py \
--model meta-llama/Llama-2-7b-chat-hf \
--enable-prefix-caching \
--num-documents 16 \
--document-length 2000 \
--output-len 50 \
--repeat-count 5
```
### Different Repeat Modes
```bash
# Random mode (default) - shuffle prompts randomly
python3 benchmarks/benchmark_long_document_qa_throughput.py \
--model meta-llama/Llama-2-7b-chat-hf \
--enable-prefix-caching \
--num-documents 8 \
--document-length 3000 \
--repeat-count 3 \
--repeat-mode random
# Tile mode - repeat entire prompt list in sequence
python3 benchmarks/benchmark_long_document_qa_throughput.py \
--model meta-llama/Llama-2-7b-chat-hf \
--enable-prefix-caching \
--num-documents 8 \
--document-length 3000 \
--repeat-count 3 \
--repeat-mode tile
# Interleave mode - repeat each prompt consecutively
python3 benchmarks/benchmark_long_document_qa_throughput.py \
--model meta-llama/Llama-2-7b-chat-hf \
--enable-prefix-caching \
--num-documents 8 \
--document-length 3000 \
--repeat-count 3 \
--repeat-mode interleave
```
---
## Example - Prefix Caching Benchmark
Benchmark the efficiency of automatic prefix caching.
### Fixed Prompt with Prefix Caching
```bash
python3 benchmarks/benchmark_prefix_caching.py \
--model meta-llama/Llama-2-7b-chat-hf \
--enable-prefix-caching \
--num-prompts 1 \
--repeat-count 100 \
--input-length-range 128:256
```
### ShareGPT Dataset with Prefix Caching
```bash
# download dataset
# wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
python3 benchmarks/benchmark_prefix_caching.py \
--model meta-llama/Llama-2-7b-chat-hf \
--dataset-path /path/ShareGPT_V3_unfiltered_cleaned_split.json \
--enable-prefix-caching \
--num-prompts 20 \
--repeat-count 5 \
--input-length-range 128:256
```
---
## Example - Request Prioritization Benchmark
Benchmark the performance of request prioritization in vLLM.
### Basic Prioritization Test
```bash
python3 benchmarks/benchmark_prioritization.py \
--model meta-llama/Llama-2-7b-chat-hf \
--input-len 128 \
--output-len 64 \
--num-prompts 100 \
--scheduling-policy priority
```
### Multiple Sequences per Prompt
```bash
python3 benchmarks/benchmark_prioritization.py \
--model meta-llama/Llama-2-7b-chat-hf \
--input-len 128 \
--output-len 64 \
--num-prompts 100 \
--scheduling-policy priority \
--n 2
```

View File

@ -10,7 +10,6 @@
# 3. Set variables (ALL REQUIRED)
# BASE: your directory for vllm repo
# MODEL: the model served by vllm
# SYSTEM: the hardware, choice TPU or GPU, for other systems, "get best profile" might not support.
# TP: ways of tensor parallelism
# DOWNLOAD_DIR: directory to download and load model weights.
# INPUT_LEN: request input len
@ -35,7 +34,6 @@
TAG=$(date +"%Y_%m_%d_%H_%M")
BASE=""
MODEL="meta-llama/Llama-3.1-8B-Instruct"
SYSTEM="TPU"
TP=1
DOWNLOAD_DIR=""
INPUT_LEN=4000
@ -47,15 +45,12 @@ NUM_BATCHED_TOKENS_LIST="512 1024 2048 4096"
LOG_FOLDER="$BASE/auto-benchmark/$TAG"
RESULT="$LOG_FOLDER/result.txt"
PROFILE_PATH="$LOG_FOLDER/profile"
echo "result file: $RESULT"
echo "model: $MODEL"
rm -rf $LOG_FOLDER
rm -rf $PROFILE_PATH
mkdir -p $LOG_FOLDER
mkdir -p $PROFILE_PATH
cd "$BASE/vllm"
@ -75,11 +70,10 @@ start_server() {
local max_num_seqs=$2
local max_num_batched_tokens=$3
local vllm_log=$4
local profile_dir=$5
pkill -f vllm
VLLM_USE_V1=1 VLLM_SERVER_DEV_MODE=1 VLLM_TORCH_PROFILER_DIR=$profile_dir vllm serve $MODEL \
VLLM_USE_V1=1 VLLM_SERVER_DEV_MODE=1 vllm serve $MODEL \
--disable-log-requests \
--port 8004 \
--gpu-memory-utilization $gpu_memory_utilization \
@ -111,37 +105,19 @@ start_server() {
fi
}
update_best_profile() {
local profile_dir=$1
local profile_index=$2
sorted_paths=($(find "$profile_dir" -maxdepth 1 -not -path "$profile_dir" | sort))
selected_profile_file=
if [[ "$SYSTEM" == "TPU" ]]; then
selected_profile_file="${sorted_paths[$profile_index]}/*.xplane.pb"
fi
if [[ "$SYSTEM" == "GPU" ]]; then
selected_profile_file="${sorted_paths[$profile_index]}"
fi
rm -f $PROFILE_PATH/*
cp $selected_profile_file $PROFILE_PATH
}
run_benchmark() {
local max_num_seqs=$1
local max_num_batched_tokens=$2
local gpu_memory_utilization=$3
echo "max_num_seq: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens"
local vllm_log="$LOG_FOLDER/vllm_log_${max_num_seqs}_${max_num_batched_tokens}.txt"
local profile_dir="$LOG_FOLDER/profile_${max_num_seqs}_${max_num_batched_tokens}"
echo "vllm_log: $vllm_log"
echo
rm -f $vllm_log
mkdir -p $profile_dir
pkill -f vllm
local profile_index=0
echo "starting server..."
start_server $gpu_memory_utilization $max_num_seqs $max_num_batched_tokens $vllm_log $profile_dir
start_server $gpu_memory_utilization $max_num_seqs $max_num_batched_tokens $vllm_log
result=$?
if [[ "$result" -eq 1 ]]; then
echo "server failed to start. gpu_memory_utilization:$gpu_memory_utilization, max_num_seqs:$max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens"
@ -168,8 +144,7 @@ run_benchmark() {
--goodput e2el:$MAX_LATENCY_ALLOWED_MS \
--num-prompts 1000 \
--random-prefix-len $prefix_len \
--port 8004 \
--profile &> "$bm_log"
--port 8004 &> "$bm_log"
throughput=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')
e2el=$(grep "P99 E2EL (ms):" "$bm_log" | awk '{print $NF}')
goodput=$(grep "Request goodput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')
@ -183,7 +158,6 @@ run_benchmark() {
# start from request-rate as int(throughput) + 1
request_rate=$((${throughput%.*} + 1))
while ((request_rate > 0)); do
profile_index=$((profile_index+1))
# clear prefix cache
curl -X POST http://0.0.0.0:8004/reset_prefix_cache
sleep 5
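A quick illustration of the parameter expansion used here: `${throughput%.*}` strips everything from the last `.` onward, so the measured throughput is truncated to an integer before the first request rate is derived.

```bash
throughput="12.7"
request_rate=$((${throughput%.*} + 1))  # 12 + 1
echo "$request_rate"                    # prints 13
```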
@ -221,12 +195,6 @@ run_benchmark() {
best_max_num_seqs=$max_num_seqs
best_num_batched_tokens=$max_num_batched_tokens
best_goodput=$goodput
if [[ "$SYSTEM" == "TPU" ]]; then
update_best_profile "$profile_dir/plugins/profile" $profile_index
fi
if [[ "$SYSTEM" == "GPU" ]]; then
update_best_profile "$profile_dir" $profile_index
fi
fi
else
echo "max_num_seqs: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens does not meet latency requirement ${MAX_LATENCY_ALLOWED_MS}"
@ -271,6 +239,6 @@ for num_seqs in "${num_seqs_list[@]}"; do
done
done
echo "finish permutations"
echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput, profile saved in: $PROFILE_PATH"
echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput, profile saved in: $PROFILE_PATH" >> "$RESULT"
echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput"
echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput" >> "$RESULT"

View File

@ -404,14 +404,8 @@ async def async_request_openai_chat_completions(
chunk_bytes = chunk_bytes.strip()
if not chunk_bytes:
continue
chunk_bytes = chunk_bytes.decode("utf-8")
# NOTE: SSE comments (often used as pings) start with a colon.
# These are not JSON data payload and should be skipped.
if chunk_bytes.startswith(":"):
continue
chunk = chunk_bytes.removeprefix("data: ")
chunk = chunk_bytes.decode("utf-8").removeprefix("data: ")
if chunk != "[DONE]":
timestamp = time.perf_counter()
data = json.loads(chunk)
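For context on the colon check in this hunk: Server-Sent Events streams may interleave comment lines, which start with `:` and are often used as keep-alive pings, with `data:` payload lines. A minimal, self-contained sketch of the stream shape being parsed (the payloads are made up):

```python
stream = [
    b": ping",  # SSE comment line, not JSON -- must be skipped
    b'data: {"choices": [{"delta": {"content": "Hi"}}]}',
    b"data: [DONE]",
]
for chunk_bytes in stream:
    chunk = chunk_bytes.decode("utf-8")
    if chunk.startswith(":"):   # drop SSE comments/pings
        continue
    chunk = chunk.removeprefix("data: ")
    if chunk != "[DONE]":
        print(chunk)            # only real JSON payloads reach here
```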

View File

@ -353,7 +353,7 @@ class RandomDataset(BenchmarkDataset):
: input_lens[i]
]
prompt = tokenizer.decode(re_encoded_sequence)
total_input_len = len(re_encoded_sequence)
total_input_len = prefix_len + int(input_lens[i])
requests.append(
SampleRequest(
prompt=prompt,
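Aside on why the two computations of `total_input_len` can disagree: decoding a token slice and re-encoding the resulting text is not guaranteed to round-trip to the same number of tokens. A minimal illustration (the tokenizer name is only an example):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
ids = tok.encode("vLLM samples random token windows")[:4]  # pretend this is the sampled slice
re_encoded = tok.encode(tok.decode(ids))
# The re-encoded length may differ from len(ids) when the slice cuts across merges,
# which is exactly the gap between the two total_input_len variants above.
print(len(ids), len(re_encoded))
```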

View File

@ -97,7 +97,7 @@ def run_vllm(
assert lora_requests is None, "BeamSearch API does not support LoRA"
prompts = [request.prompt for request in requests]
# output_len should be the same for all requests.
output_len = requests[0].expected_output_len
output_len = requests[0][2]
for request in requests:
assert request.expected_output_len == output_len
start = time.perf_counter()
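A hypothetical sketch of the request shape this code implies: each entry exposes named fields (only the fields referenced in the hunk are real; everything else here is illustrative), so the output length is read by attribute rather than by positional index.

```python
from dataclasses import dataclass

@dataclass
class SampleRequest:            # illustrative subset of the fields used in the hunk
    prompt: str
    expected_output_len: int

requests = [SampleRequest("Hello", 64), SampleRequest("Hi there", 64)]

# output_len should be the same for all requests.
output_len = requests[0].expected_output_len
assert all(r.expected_output_len == output_len for r in requests)
prompts = [r.prompt for r in requests]
```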

View File

@ -22,16 +22,8 @@ from vllm.model_executor.layers.quantization.utils.marlin_utils import (
MARLIN_SUPPORTED_GROUP_SIZES,
query_marlin_supported_quant_types,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
FP4_MARLIN_SUPPORTED_GROUP_SIZES,
rand_marlin_weight_fp4_like,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp8 import (
marlin_quant_fp8_torch,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_test import (
MarlinWorkspace,
awq_marlin_quantize,
marlin_quantize,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_test_24 import (
@ -43,7 +35,7 @@ from vllm.model_executor.layers.quantization.utils.quant_utils import (
quantize_weights,
sort_weights,
)
from vllm.scalar_type import ScalarType, scalar_types
from vllm.scalar_type import ScalarType
from vllm.utils import FlexibleArgumentParser
DEFAULT_MODELS = ["meta-llama/Llama-2-7b-hf/TP1"]
@ -65,79 +57,36 @@ def bench_run(
size_n: int,
):
label = "Quant Matmul"
sub_label = "{}, act={} k_full={}, q={}, g={}, MKN=({}x{}x{})".format(
model, act_order, is_k_full, str(quant_type), group_size, size_m, size_k, size_n
)
print(f"Testing: {sub_label}")
a = torch.randn(size_m, size_k).to(torch.half).cuda()
b = torch.rand(size_k, size_n).to(torch.half).cuda()
has_zp = quant_type in [scalar_types.uint4, scalar_types.uint8]
if act_order and (group_size == -1 or group_size == size_k or has_zp):
return
if size_k % group_size != 0:
return
marlin_24_supported = (
quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES
and group_size in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES
)
repack_supported = (
quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES
and group_size in MARLIN_SUPPORTED_GROUP_SIZES
)
allspark_supported = (
quant_type in ALLSPARK_SUPPORTED_QUANT_TYPES
and group_size == -1
and not act_order
and is_k_full
)
a_tmp = torch.zeros(size_m, size_k).to(torch.half).cuda()
def gen_marlin_params():
# Marlin quant
marlin_g_idx = marlin_sort_indices = marlin_zp = marlin_s2 = None
if quant_type == scalar_types.float4_e2m1f:
if group_size != 16 or act_order:
return
marlin_w_ref, marlin_q_w, marlin_s, marlin_s2 = rand_marlin_weight_fp4_like(
b.T, group_size
)
elif quant_type == scalar_types.float8_e4m3fn:
if group_size not in [-1, 128] or act_order:
return
marlin_w_ref, marlin_q_w, marlin_s = marlin_quant_fp8_torch(b.T, group_size)
elif group_size == 16:
return
elif has_zp:
marlin_w_ref, marlin_q_w, marlin_s, marlin_zp = awq_marlin_quantize(
b, quant_type, group_size
)
else:
marlin_w_ref, marlin_q_w, marlin_s, marlin_g_idx, marlin_sort_indices, _ = (
marlin_quantize(b, quant_type, group_size, act_order)
)
return (
(
marlin_w_ref,
marlin_q_w,
marlin_s,
marlin_s2,
marlin_zp,
marlin_g_idx,
marlin_sort_indices,
)
marlin_rand_perm,
) = marlin_quantize(b, quant_type, group_size, act_order)
def gen_marlin_24_params():
marlin_24_w_ref = marlin_24_q_w_comp = marlin_24_meta = marlin_24_s = None
if marlin_24_supported:
# Marlin_24 quant
(marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s) = (
marlin_24_quantize(b, quant_type, group_size)
)
return (marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s)
def gen_repack_params():
q_w_gptq = None
repack_sort_indices = None
if repack_supported:
marlin_zp = torch.empty(0, dtype=torch.int, device=b.device)
# GPTQ quant
(w_ref, q_w, s, g_idx, rand_perm) = gptq_quantize_weights(
b, quant_type, group_size, act_order
)
@ -148,21 +97,33 @@ def bench_run(
repack_sort_indices = torch.empty(0, dtype=torch.int, device=b.device)
if act_order:
(q_w, g_idx, repack_sort_indices) = sort_weights(q_w, g_idx)
return q_w_gptq, repack_sort_indices
def gen_allspark_params():
qw_reorder = s_reorder = zp_reorder = sm_count = sm_version = (
CUBLAS_M_THRESHOLD
) = None
nonlocal allspark_supported
if allspark_supported:
# Prepare
marlin_workspace = MarlinWorkspace(
size_n, GPTQ_MARLIN_MIN_THREAD_N, GPTQ_MARLIN_MAX_PARALLEL
)
marlin_24_workspace = MarlinWorkspace(
size_n, GPTQ_MARLIN_24_MIN_THREAD_N, GPTQ_MARLIN_24_MAX_PARALLEL
)
marlin_zp = torch.zeros_like(marlin_s, dtype=torch.int)
# AllSpark W8A16 quant
as_supported_case = (
quant_type in ALLSPARK_SUPPORTED_QUANT_TYPES
and group_size == -1
and not act_order
and is_k_full
)
if as_supported_case:
properties = torch.cuda.get_device_properties(b.device.index)
sm_count = properties.multi_processor_count
sm_version = properties.major * 10 + properties.minor
supported_arch = sm_version >= 80 and sm_version < 90
allspark_supported = allspark_supported and supported_arch
as_supported_case = as_supported_case and supported_arch
if supported_arch:
has_zp = False
w_ref, qw, s, zp = quantize_weights(b, quant_type, group_size, has_zp)
qw = qw.to(torch.uint8)
@ -170,39 +131,6 @@ def bench_run(
qw, s, zp, has_zp
)
CUBLAS_M_THRESHOLD = ALLSPARK_AMPERE_M_CUBLAS_THRESHOLD
return (
qw_reorder,
s_reorder,
zp_reorder,
sm_count,
sm_version,
CUBLAS_M_THRESHOLD,
)
(
marlin_w_ref,
marlin_q_w,
marlin_s,
marlin_s2,
marlin_zp,
marlin_g_idx,
marlin_sort_indices,
) = gen_marlin_params()
marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s = (
gen_marlin_24_params()
)
q_w_gptq, repack_sort_indices = gen_repack_params()
qw_reorder, s_reorder, zp_reorder, sm_count, sm_version, CUBLAS_M_THRESHOLD = (
gen_allspark_params()
)
# Prepare
marlin_workspace = MarlinWorkspace(
size_n, GPTQ_MARLIN_MIN_THREAD_N, GPTQ_MARLIN_MAX_PARALLEL
)
marlin_24_workspace = MarlinWorkspace(
size_n, GPTQ_MARLIN_24_MIN_THREAD_N, GPTQ_MARLIN_24_MAX_PARALLEL
)
globals = {
# Gen params
@ -212,14 +140,15 @@ def bench_run(
"size_n": size_n,
"size_k": size_k,
"a": a,
"a_tmp": a_tmp,
# Marlin params
"marlin_w_ref": marlin_w_ref,
"marlin_q_w": marlin_q_w,
"marlin_s": marlin_s,
"marlin_s2": marlin_s2,
"marlin_zp": marlin_zp,
"marlin_g_idx": marlin_g_idx,
"marlin_sort_indices": marlin_sort_indices,
"marlin_rand_perm": marlin_rand_perm,
"marlin_workspace": marlin_workspace,
"is_k_full": is_k_full,
# Marlin_24 params
@ -232,12 +161,12 @@ def bench_run(
"q_w_gptq": q_w_gptq,
"repack_sort_indices": repack_sort_indices,
# AllSpark W8A16 params
"qw_reorder": qw_reorder,
"s_reorder": s_reorder,
"zp_reorder": zp_reorder,
"sm_count": sm_count,
"sm_version": sm_version,
"CUBLAS_M_THRESHOLD": CUBLAS_M_THRESHOLD,
"qw_reorder": qw_reorder if as_supported_case else None,
"s_reorder": s_reorder if as_supported_case else None,
"zp_reorder": zp_reorder if as_supported_case else None,
"sm_count": sm_count if as_supported_case else None,
"sm_version": sm_version if as_supported_case else None,
"CUBLAS_M_THRESHOLD": CUBLAS_M_THRESHOLD if as_supported_case else None,
# Kernels
"gptq_marlin_gemm": ops.gptq_marlin_gemm,
"gptq_marlin_24_gemm": ops.gptq_marlin_24_gemm,
@ -248,7 +177,7 @@ def bench_run(
min_run_time = 1
# Warmup pytorch
for _ in range(5):
for i in range(5):
torch.matmul(a, marlin_w_ref)
results.append(
@ -263,17 +192,17 @@ def bench_run(
results.append(
benchmark.Timer(
stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501
stmt="output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501
globals=globals,
label=label,
sub_label=sub_label,
description="gptq_marlin_gemm",
description="gptq_marlin_gemm_fp16",
).blocked_autorange(min_run_time=min_run_time)
)
results.append(
benchmark.Timer(
stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501
stmt="output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501
globals=globals,
label=label,
sub_label=sub_label,
@ -281,7 +210,10 @@ def bench_run(
).blocked_autorange(min_run_time=min_run_time)
)
if marlin_24_supported:
if (
quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES
and group_size in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES
):
results.append(
benchmark.Timer(
stmt="output = gptq_marlin_24_gemm(a, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s, marlin_24_workspace.scratch, quant_type, size_m, size_n, size_k)", # noqa: E501
@ -292,7 +224,6 @@ def bench_run(
).blocked_autorange(min_run_time=min_run_time)
)
if repack_supported:
results.append(
benchmark.Timer(
stmt="q_res = gptq_marlin_repack(q_w_gptq, repack_sort_indices, size_k, size_n, quant_type.size_bits)", # noqa: E501
@ -303,7 +234,7 @@ def bench_run(
).blocked_autorange(min_run_time=min_run_time)
)
if allspark_supported:
if as_supported_case:
results.append(
benchmark.Timer(
stmt="output = allspark_w8a16_gemm(a, qw_reorder, s_reorder, zp_reorder, size_n, group_size, sm_count, sm_version, CUBLAS_M_THRESHOLD, False, True)", # noqa: E501
@ -319,6 +250,7 @@ def main(args):
print("Benchmarking models:")
for i, model in enumerate(args.models):
print(f"[{i}] {model}")
results: list[benchmark.Measurement] = []
for model in args.models:
@ -346,17 +278,14 @@ def main(args):
):
continue
for quant_type in query_marlin_supported_quant_types():
for quant_type in query_marlin_supported_quant_types(False):
if (
len(args.limit_num_bits) > 0
and quant_type.size_bits not in args.limit_num_bits
):
continue
for group_size in (
MARLIN_SUPPORTED_GROUP_SIZES
+ FP4_MARLIN_SUPPORTED_GROUP_SIZES
):
for group_size in MARLIN_SUPPORTED_GROUP_SIZES:
if (
len(args.limit_group_size) > 0
and group_size not in args.limit_group_size

View File

@ -12,7 +12,7 @@ ARG PYTORCH_REPO="https://github.com/pytorch/pytorch.git"
ARG PYTORCH_VISION_REPO="https://github.com/pytorch/vision.git"
ARG FA_BRANCH="1a7f4dfa"
ARG FA_REPO="https://github.com/Dao-AILab/flash-attention.git"
ARG AITER_BRANCH="6487649"
ARG AITER_BRANCH="c1debd8"
ARG AITER_REPO="https://github.com/ROCm/aiter.git"
FROM ${BASE_IMAGE} AS base

View File

@ -91,7 +91,7 @@ source to unblock the update process.
### FlashInfer
Here is how to build and install it from source with torch2.7.0+cu128 in vLLM [Dockerfile](https://github.com/vllm-project/vllm/blob/27bebcd89792d5c4b08af7a65095759526f2f9e1/docker/Dockerfile#L259-L271):
```bash
```
export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0 10.0+PTX'
export FLASHINFER_ENABLE_SM90=1
uv pip install --system --no-build-isolation "git+https://github.com/flashinfer-ai/flashinfer@v0.2.6.post1"
@ -105,14 +105,14 @@ team if you want to get the package published there.
### xFormers
Similar to FlashInfer, here is how to build and install xFormers from source:
```bash
```
export TORCH_CUDA_ARCH_LIST='7.0 7.5 8.0 8.9 9.0 10.0+PTX'
MAX_JOBS=16 uv pip install --system --no-build-isolation "git+https://github.com/facebookresearch/xformers@v0.0.30"
```
### Mamba
```bash
```
uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@v2.2.4"
```

View File

@ -16,7 +16,7 @@ vllm {chat,complete,serve,bench,collect-env,run-batch}
Start the vLLM OpenAI Compatible API server.
??? Examples
Examples:
```bash
# Start with a model
@ -43,6 +43,8 @@ Start the vLLM OpenAI Compatible API server.
Generate chat completions via the running API server.
Examples:
```bash
# Directly connect to localhost API without arguments
vllm chat
@ -58,6 +60,8 @@ vllm chat --quick "hi"
Generate text completions based on the given prompt via the running API server.
Examples:
```bash
# Directly connect to localhost API without arguments
vllm complete
@ -69,8 +73,6 @@ vllm complete --url http://{vllm-serve-host}:{vllm-serve-port}/v1
vllm complete --quick "The future of AI is"
```
</details>
## bench
Run benchmark tests for latency, online serving throughput, and offline inference throughput.
@ -87,6 +89,8 @@ vllm bench {latency, serve, throughput}
Benchmark the latency of a single batch of requests.
Example:
```bash
vllm bench latency \
--model meta-llama/Llama-3.2-1B-Instruct \
@ -100,6 +104,8 @@ vllm bench latency \
Benchmark the online serving throughput.
Example:
```bash
vllm bench serve \
--model meta-llama/Llama-3.2-1B-Instruct \
@ -114,6 +120,8 @@ vllm bench serve \
Benchmark offline inference throughput.
Example:
```bash
vllm bench throughput \
--model meta-llama/Llama-3.2-1B-Instruct \
@ -135,8 +143,7 @@ vllm collect-env
Run batch prompts and write results to file.
<details>
<summary>Examples</summary>
Examples:
```bash
# Running with a local file
@ -152,8 +159,6 @@ vllm run-batch \
--model meta-llama/Meta-Llama-3-8B-Instruct
```
</details>
## More Help
For detailed options of any subcommand, use:

View File

@ -1,6 +0,0 @@
---
title: Contact Us
---
[](){ #contactus }
--8<-- "README.md:contact-us"

View File

@ -57,8 +57,6 @@ By default, we optimize model inference using CUDA graphs which take up extra me
You can adjust `compilation_config` to achieve a better balance between inference speed and memory usage:
??? Code
```python
from vllm import LLM
from vllm.config import CompilationConfig, CompilationLevel
@ -129,8 +127,6 @@ reduce the size of the processed multi-modal inputs, which in turn saves memory.
Here are some examples:
??? Code
```python
from vllm import LLM

View File

@ -7,8 +7,6 @@ vLLM uses the following environment variables to configure the system:
All environment variables used by vLLM are prefixed with `VLLM_`. **Special care should be taken for Kubernetes users**: please do not name the service as `vllm`, otherwise environment variables set by Kubernetes might conflict with vLLM's environment variables, because [Kubernetes sets environment variables for each service with the capitalized service name as the prefix](https://kubernetes.io/docs/concepts/services-networking/service/#environment-variables).
??? Code
```python
--8<-- "vllm/envs.py:env-vars-definition"
```
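To make the Kubernetes warning concrete, here is a hedged example of the variables Kubernetes injects into pods for a Service named `vllm` (the addresses and port are made up), which collide with vLLM's own `VLLM_`-prefixed settings such as `VLLM_PORT`:

```console
VLLM_PORT=tcp://10.43.0.1:8000
VLLM_PORT_8000_TCP=tcp://10.43.0.1:8000
VLLM_SERVICE_HOST=10.43.0.1
VLLM_SERVICE_PORT=8000
```

vLLM reads `VLLM_PORT` as a plain integer, so an injected `tcp://...` value conflicts with it; this is why the service should not be named `vllm`.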

View File

@ -93,8 +93,6 @@ For additional features and advanced configurations, refer to the official [MkDo
## Testing
??? note "Commands"
```bash
pip install -r requirements/dev.txt

View File

@ -27,8 +27,6 @@ All vLLM modules within the model must include a `prefix` argument in their cons
The initialization code should look like this:
??? Code
```python
from torch import nn
from vllm.config import VllmConfig

View File

@ -25,8 +25,6 @@ Further update the model as follows:
- Implement [get_multimodal_embeddings][vllm.model_executor.models.interfaces.SupportsMultiModal.get_multimodal_embeddings] that returns the embeddings from running the multimodal inputs through the multimodal tokenizer of the model. Below we provide a boilerplate of a typical implementation pattern, but feel free to adjust it to your own needs.
??? Code
```python
class YourModelForImage2Seq(nn.Module):
...
@ -55,8 +53,6 @@ Further update the model as follows:
- Implement [get_input_embeddings][vllm.model_executor.models.interfaces.SupportsMultiModal.get_input_embeddings] to merge `multimodal_embeddings` with text embeddings from the `input_ids`. If input processing for the model is implemented correctly (see sections below), then you can leverage the utility function we provide to easily merge the embeddings.
??? Code
```python
from .utils import merge_multimodal_embeddings
@ -139,8 +135,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
Looking at the code of HF's `LlavaForConditionalGeneration`:
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/llava/modeling_llava.py#L530-L544
n_image_tokens = (input_ids == self.config.image_token_index).sum().item()
@ -163,8 +157,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
The number of placeholder feature tokens per image is `image_features.shape[1]`.
`image_features` is calculated inside the `get_image_features` method:
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/llava/modeling_llava.py#L290-L300
image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)
@ -201,8 +193,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
To find the sequence length, we turn to the code of `CLIPVisionEmbeddings`:
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/clip/modeling_clip.py#L247-L257
target_dtype = self.patch_embedding.weight.dtype
@ -228,8 +218,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
Overall, the number of placeholder feature tokens for an image can be calculated as:
??? Code
```python
def get_num_image_tokens(
self,
@ -253,8 +241,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
Notice that the number of image tokens doesn't depend on the image width and height.
We can simply use a dummy `image_size` to calculate the multimodal profiling data:
??? Code
```python
# NOTE: In actuality, this is usually implemented as part of the
# model's subclass of `BaseProcessingInfo`, but we show it as is
@ -298,8 +284,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
Looking at the code of HF's `FuyuForCausalLM`:
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/modeling_fuyu.py#L311-L322
if image_patches is not None and past_key_values is None:
@ -328,8 +312,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
In `FuyuImageProcessor.preprocess`, the images are resized and padded to the target `FuyuImageProcessor.size`,
returning the dimensions after resizing (but before padding) as metadata.
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L541-L544
image_encoding = self.image_processor.preprocess(images, **output_kwargs["images_kwargs"])
@ -366,8 +348,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
In `FuyuImageProcessor.preprocess_with_tokenizer_info`, the images are split into patches based on this metadata:
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L417-L425
model_image_input = self.image_processor.preprocess_with_tokenizer_info(
@ -404,8 +384,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
The number of patches is in turn defined by `FuyuImageProcessor.get_num_patches`:
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/image_processing_fuyu.py#L552-L562
patch_size = patch_size if patch_size is not None else self.patch_size
@ -441,8 +419,6 @@ Assuming that the memory usage increases with the number of tokens, the dummy in
For the multimodal image profiling data, the logic is very similar to LLaVA:
??? Code
```python
def get_dummy_mm_data(
self,
@ -479,7 +455,6 @@ return a schema of the tensors outputted by the HF processor that are related to
The output of `CLIPImageProcessor` is a simple tensor with shape
`(num_images, num_channels, image_height, image_width)`:
```python
# https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/clip/image_processing_clip.py#L339-L345
images = [
@ -530,8 +505,6 @@ return a schema of the tensors outputted by the HF processor that are related to
In order to support the use of [MultiModalFieldConfig.batched][] like in LLaVA,
we remove the extra batch dimension by overriding [BaseMultiModalProcessor._call_hf_processor][]:
??? Code
```python
def _call_hf_processor(
self,
@ -600,8 +573,6 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies
It simply repeats each input `image_token` a number of times equal to the number of placeholder feature tokens (`num_image_tokens`).
Based on this, we override [_get_prompt_updates][vllm.multimodal.processing.BaseMultiModalProcessor._get_prompt_updates] as follows:
??? Code
```python
def _get_prompt_updates(
self,
@ -645,8 +616,6 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies
We define a helper function to return `ncols` and `nrows` directly:
??? Code
```python
def get_image_feature_grid_size(
self,
@ -675,8 +644,6 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies
Based on this, we can initially define our replacement tokens as:
??? Code
```python
def get_replacement(item_idx: int):
images = mm_items.get_items("image", ImageProcessorItems)
@ -695,8 +662,6 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies
However, this is not entirely correct. After `FuyuImageProcessor.preprocess_with_tokenizer_info` is called,
a BOS token (`<s>`) is also added to the prompt:
??? Code
```python
# https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L417-L435
model_image_input = self.image_processor.preprocess_with_tokenizer_info(
@ -722,8 +687,6 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies
To assign the vision embeddings to only the image tokens, instead of a string
you can return an instance of [PromptUpdateDetails][vllm.multimodal.processing.PromptUpdateDetails]:
??? Code
```python
hf_config = self.info.get_hf_config()
bos_token_id = hf_config.bos_token_id # `<s>`
@ -749,8 +712,6 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies
Finally, noticing that the HF processor removes the `|ENDOFTEXT|` token from the tokenized prompt,
we can search for it to conduct the replacement at the start of the string:
??? Code
```python
def _get_prompt_updates(
self,

View File

@ -30,21 +30,13 @@ Refer to <gh-file:examples/offline_inference/simple_profiling.py> for an example
#### OpenAI Server
```bash
VLLM_TORCH_PROFILER_DIR=./vllm_profile \
python -m vllm.entrypoints.openai.api_server \
--model meta-llama/Meta-Llama-3-70B
VLLM_TORCH_PROFILER_DIR=./vllm_profile python -m vllm.entrypoints.openai.api_server --model meta-llama/Meta-Llama-3-70B
```
benchmark_serving.py:
```bash
python benchmarks/benchmark_serving.py \
--backend vllm \
--model meta-llama/Meta-Llama-3-70B \
--dataset-name sharegpt \
--dataset-path sharegpt.json \
--profile \
--num-prompts 2
python benchmarks/benchmark_serving.py --backend vllm --model meta-llama/Meta-Llama-3-70B --dataset-name sharegpt --dataset-path sharegpt.json --profile --num-prompts 2
```
## Profile with NVIDIA Nsight Systems
@ -72,16 +64,7 @@ For basic usage, you can just append `nsys profile -o report.nsys-rep --trace-fo
The following is an example using the `benchmarks/benchmark_latency.py` script:
```bash
nsys profile -o report.nsys-rep \
--trace-fork-before-exec=true \
--cuda-graph-trace=node \
python benchmarks/benchmark_latency.py \
--model meta-llama/Llama-3.1-8B-Instruct \
--num-iters-warmup 5 \
--num-iters 1 \
--batch-size 16 \
--input-len 512 \
--output-len 8
nsys profile -o report.nsys-rep --trace-fork-before-exec=true --cuda-graph-trace=node python benchmarks/benchmark_latency.py --model meta-llama/Llama-3.1-8B-Instruct --num-iters-warmup 5 --num-iters 1 --batch-size 16 --input-len 512 --output-len 8
```
#### OpenAI Server
@ -90,21 +73,10 @@ To profile the server, you will want to prepend your `vllm serve` command with `
```bash
# server
nsys profile -o report.nsys-rep \
--trace-fork-before-exec=true \
--cuda-graph-trace=node \
--delay 30 \
--duration 60 \
vllm serve meta-llama/Llama-3.1-8B-Instruct
nsys profile -o report.nsys-rep --trace-fork-before-exec=true --cuda-graph-trace=node --delay 30 --duration 60 vllm serve meta-llama/Llama-3.1-8B-Instruct
# client
python benchmarks/benchmark_serving.py \
--backend vllm \
--model meta-llama/Llama-3.1-8B-Instruct \
--num-prompts 1 \
--dataset-name random \
--random-input 1024 \
--random-output 512
python benchmarks/benchmark_serving.py --backend vllm --model meta-llama/Llama-3.1-8B-Instruct --num-prompts 1 --dataset-name random --random-input 1024 --random-output 512
```
In practice, you should set the `--duration` argument to a large value. Whenever you want the server to stop profiling, run:
@ -125,7 +97,7 @@ to manually kill the profiler and generate your `nsys-rep` report.
You can view these profiles either as summaries in the CLI, using `nsys stats [profile-file]`, or in the GUI by installing Nsight [locally following the directions here](https://developer.nvidia.com/nsight-systems/get-started).
??? CLI example
CLI example:
```bash
nsys stats report1.nsys-rep

View File

@ -10,7 +10,7 @@ title: Using Docker
vLLM offers an official Docker image for deployment.
The image can be used to run OpenAI compatible server and is available on Docker Hub as [vllm/vllm-openai](https://hub.docker.com/r/vllm/vllm-openai/tags).
```bash
```console
docker run --runtime nvidia --gpus all \
-v ~/.cache/huggingface:/root/.cache/huggingface \
--env "HUGGING_FACE_HUB_TOKEN=<secret>" \
@ -22,7 +22,7 @@ docker run --runtime nvidia --gpus all \
This image can also be used with other container engines such as [Podman](https://podman.io/).
```bash
```console
podman run --gpus all \
-v ~/.cache/huggingface:/root/.cache/huggingface \
--env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \
@ -71,7 +71,7 @@ You can add any other [engine-args][engine-args] you need after the image tag (`
You can build and run vLLM from source via the provided <gh-file:docker/Dockerfile>. To build vLLM:
```bash
```console
# optionally specifies: --build-arg max_jobs=8 --build-arg nvcc_threads=2
DOCKER_BUILDKIT=1 docker build . \
--target vllm-openai \
@ -97,9 +97,7 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `--
flags to speed up build process. However, ensure your `max_jobs` is substantially larger than `nvcc_threads` to get the most benefits.
Keep an eye on memory usage with parallel jobs as it can be substantial (see example below).
??? Command
```bash
```console
# Example of building on Nvidia GH200 server. (Memory usage: ~15GB, Build time: ~1475s / ~25 min, Image size: 6.93GB)
python3 use_existing_torch.py
DOCKER_BUILDKIT=1 docker build . \
@ -118,7 +116,7 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `--
Run the following command on your host machine to register QEMU user static handlers:
```bash
```console
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
```
@ -128,7 +126,7 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `--
To run vLLM with the custom-built Docker image:
```bash
```console
docker run --runtime nvidia --gpus all \
-v ~/.cache/huggingface:/root/.cache/huggingface \
-p 8000:8000 \

View File

@ -15,7 +15,7 @@ It allows you to deploy a large language model (LLM) server with vLLM as the bac
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
vllm serve Qwen/Qwen1.5-32B-Chat-AWQ --max-model-len 4096
```

View File

@ -11,7 +11,7 @@ title: AutoGen
- Setup [AutoGen](https://microsoft.github.io/autogen/0.2/docs/installation/) environment
```bash
```console
pip install vllm
# Install AgentChat and OpenAI client from Extensions
@ -23,15 +23,13 @@ pip install -U "autogen-agentchat" "autogen-ext[openai]"
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
python -m vllm.entrypoints.openai.api_server \
--model mistralai/Mistral-7B-Instruct-v0.2
```
- Call it with AutoGen:
??? Code
```python
import asyncio
from autogen_core.models import UserMessage

View File

@ -11,14 +11,14 @@ vLLM can be run on a cloud based GPU machine with [Cerebrium](https://www.cerebr
To install the Cerebrium client, run:
```bash
```console
pip install cerebrium
cerebrium login
```
Next, create your Cerebrium project, run:
```bash
```console
cerebrium init vllm-project
```
@ -34,8 +34,6 @@ vllm = "latest"
Next, let us add our code to handle inference for the LLM of your choice (`mistralai/Mistral-7B-Instruct-v0.1` for this example). Add the following code to your `main.py`:
??? Code
```python
from vllm import LLM, SamplingParams
@ -58,14 +56,12 @@ Next, let us add our code to handle inference for the LLM of your choice (`mistr
Then, run the following code to deploy it to the cloud:
```bash
```console
cerebrium deploy
```
If successful, you should be returned a curl command that you can use to run inference against your deployment. Just remember to end the URL with the function name you are calling (in our case `/run`).
??? Command
```python
curl -X POST https://api.cortex.cerebrium.ai/v4/p-xxxxxx/vllm/run \
-H 'Content-Type: application/json' \
@ -82,8 +78,6 @@ If successful, you should be returned a CURL command that you can call inference
You should get a response like:
??? Response
```python
{
"run_id": "52911756-3066-9ae8-bcc9-d9129d1bd262",

View File

@ -15,7 +15,7 @@ It allows you to deploy a large language model (LLM) server with vLLM as the bac
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
vllm serve qwen/Qwen1.5-0.5B-Chat
```

View File

@ -18,13 +18,13 @@ This guide walks you through deploying Dify using a vLLM backend.
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
vllm serve Qwen/Qwen1.5-7B-Chat
```
- Start the Dify server with docker compose ([details](https://github.com/langgenius/dify?tab=readme-ov-file#quick-start)):
```bash
```console
git clone https://github.com/langgenius/dify.git
cd dify
cd docker

View File

@ -11,14 +11,14 @@ vLLM can be run on a cloud based GPU machine with [dstack](https://dstack.ai/),
To install dstack client, run:
```bash
```console
pip install "dstack[all]
dstack server
```
Next, to configure your dstack project, run:
```bash
```console
mkdir -p vllm-dstack
cd vllm-dstack
dstack init
@ -26,8 +26,6 @@ dstack init
Next, to provision a VM instance with LLM of your choice (`NousResearch/Llama-2-7b-chat-hf` for this example), create the following `serve.dstack.yml` file for the dstack `Service`:
??? Config
```yaml
type: service
@ -48,8 +46,6 @@ Next, to provision a VM instance with LLM of your choice (`NousResearch/Llama-2-
Then, run the following CLI for provisioning:
??? Command
```console
$ dstack run . -f serve.dstack.yml
@ -79,8 +75,6 @@ Then, run the following CLI for provisioning:
After the provisioning, you can interact with the model by using the OpenAI SDK:
??? Code
```python
from openai import OpenAI

View File

@ -13,7 +13,7 @@ It allows you to deploy a large language model (LLM) server with vLLM as the bac
- Setup vLLM and Haystack environment
```bash
```console
pip install vllm haystack-ai
```
@ -21,14 +21,12 @@ pip install vllm haystack-ai
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
vllm serve mistralai/Mistral-7B-Instruct-v0.1
```
- Use the `OpenAIGenerator` and `OpenAIChatGenerator` components in Haystack to query the vLLM server.
??? Code
```python
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.dataclasses import ChatMessage
@ -51,6 +49,8 @@ vllm serve mistralai/Mistral-7B-Instruct-v0.1
print("-"*30)
```
Output e.g.:
```console
------------------------------
{'replies': [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=[TextContent(text=' Of course! Where in Italy would you like to go and what type of trip are you looking to plan?')], _name=None, _meta={'model': 'mistralai/Mistral-7B-Instruct-v0.1', 'index': 0, 'finish_reason': 'stop', 'usage': {'completion_tokens': 23, 'prompt_tokens': 21, 'total_tokens': 44, 'completion_tokens_details': None, 'prompt_tokens_details': None}})]}

View File

@ -22,7 +22,7 @@ Before you begin, ensure that you have the following:
To install the chart with the release name `test-vllm`:
```bash
```console
helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f values.yaml --set secrets.s3endpoint=$ACCESS_POINT --set secrets.s3bucketname=$BUCKET --set secrets.s3accesskeyid=$ACCESS_KEY --set secrets.s3accesskey=$SECRET_KEY
```
@ -30,7 +30,7 @@ helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f val
To uninstall the `test-vllm` deployment:
```bash
```console
helm uninstall test-vllm --namespace=ns-vllm
```

View File

@ -18,7 +18,7 @@ And LiteLLM supports all models on VLLM.
- Setup vLLM and litellm environment
```bash
```console
pip install vllm litellm
```
@ -28,14 +28,12 @@ pip install vllm litellm
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
vllm serve qwen/Qwen1.5-0.5B-Chat
```
- Call it with litellm:
??? Code
```python
import litellm
@ -56,7 +54,7 @@ vllm serve qwen/Qwen1.5-0.5B-Chat
- Start the vLLM server with the supported embedding model, e.g.
```bash
```console
vllm serve BAAI/bge-base-en-v1.5
```

View File

@ -17,8 +17,6 @@ vLLM can be deployed with [LWS](https://github.com/kubernetes-sigs/lws) on Kuber
Deploy the following yaml file `lws.yaml`
??? Yaml
```yaml
apiVersion: leaderworkerset.x-k8s.io/v1
kind: LeaderWorkerSet
@ -177,8 +175,6 @@ curl http://localhost:8080/v1/completions \
The output should be similar to the following
??? Output
```text
{
"id": "cmpl-1bb34faba88b43f9862cfbfb2200949d",

View File

@ -7,13 +7,13 @@ title: Open WebUI
2. Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
vllm serve qwen/Qwen1.5-0.5B-Chat
```
1. Start the [Open WebUI](https://github.com/open-webui/open-webui) docker container (replace the vllm serve host and vllm serve port):
```bash
```console
docker run -d -p 3000:8080 \
--name open-webui \
-v open-webui:/app/backend/data \

View File

@ -15,7 +15,7 @@ Here are the integrations:
- Setup vLLM and langchain environment
```bash
```console
pip install -U vllm \
langchain_milvus langchain_openai \
langchain_community beautifulsoup4 \
@ -26,14 +26,14 @@ pip install -U vllm \
- Start the vLLM server with the supported embedding model, e.g.
```bash
```console
# Start embedding service (port 8000)
vllm serve ssmits/Qwen2-7B-Instruct-embed-base
```
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
# Start chat service (port 8001)
vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001
```
@ -52,7 +52,7 @@ python retrieval_augmented_generation_with_langchain.py
- Setup vLLM and llamaindex environment
```bash
```console
pip install vllm \
llama-index llama-index-readers-web \
llama-index-llms-openai-like \
@ -64,14 +64,14 @@ pip install vllm \
- Start the vLLM server with the supported embedding model, e.g.
```bash
```console
# Start embedding service (port 8000)
vllm serve ssmits/Qwen2-7B-Instruct-embed-base
```
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
# Start chat service (port 8001)
vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001
```

View File

@ -15,7 +15,7 @@ vLLM can be **run and scaled to multiple service replicas on clouds and Kubernet
- Check that you have installed SkyPilot ([docs](https://skypilot.readthedocs.io/en/latest/getting-started/installation.html)).
- Check that `sky check` shows clouds or Kubernetes are enabled.
```bash
```console
pip install skypilot-nightly
sky check
```
@ -24,8 +24,6 @@ sky check
See the vLLM SkyPilot YAML for serving, [serving.yaml](https://github.com/skypilot-org/skypilot/blob/master/llm/vllm/serve.yaml).
??? Yaml
```yaml
resources:
accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model.
@ -71,7 +69,7 @@ See the vLLM SkyPilot YAML for serving, [serving.yaml](https://github.com/skypil
Start serving the Llama-3 8B model on any of the candidate GPUs listed (L4, A10g, ...):
```bash
```console
HF_TOKEN="your-huggingface-token" sky launch serving.yaml --env HF_TOKEN
```
@ -83,7 +81,7 @@ Check the output of the command. There will be a shareable gradio link (like the
**Optional**: Serve the 70B model instead of the default 8B and use more GPU:
```bash
```console
HF_TOKEN="your-huggingface-token" \
sky launch serving.yaml \
--gpus A100:8 \
@ -95,8 +93,6 @@ HF_TOKEN="your-huggingface-token" \
SkyPilot can scale up the service to multiple service replicas with built-in autoscaling, load-balancing and fault-tolerance. You can do it by adding a services section to the YAML file.
??? Yaml
```yaml
service:
replicas: 2
@ -111,7 +107,8 @@ SkyPilot can scale up the service to multiple service replicas with built-in aut
max_completion_tokens: 1
```
??? Yaml
<details>
<summary>Click to see the full recipe YAML</summary>
```yaml
service:
@ -157,9 +154,11 @@ SkyPilot can scale up the service to multiple service replicas with built-in aut
2>&1 | tee api_server.log
```
</details>
Start serving the Llama-3 8B model on multiple replicas:
```bash
```console
HF_TOKEN="your-huggingface-token" \
sky serve up -n vllm serving.yaml \
--env HF_TOKEN
@ -167,11 +166,12 @@ HF_TOKEN="your-huggingface-token" \
Wait until the service is ready:
```bash
```console
watch -n10 sky serve status vllm
```
Example outputs:
<details>
<summary>Example outputs:</summary>
```console
Services
@ -184,11 +184,11 @@ vllm 1 1 xx.yy.zz.121 18 mins ago 1x GCP([Spot]{'L4': 1}) R
vllm 2 1 xx.yy.zz.245 18 mins ago 1x GCP([Spot]{'L4': 1}) READY us-east4
```
</details>
After the service is READY, you can find a single endpoint for the service and access the service with the endpoint:
??? Commands
```bash
```console
ENDPOINT=$(sky serve status --endpoint 8081 vllm)
curl -L http://$ENDPOINT/v1/chat/completions \
-H "Content-Type: application/json" \
@ -220,7 +220,8 @@ service:
This will scale the service up when the QPS exceeds 2 for each replica.
??? Yaml
<details>
<summary>Click to see the full recipe YAML</summary>
```yaml
service:
@ -269,15 +270,17 @@ This will scale the service up to when the QPS exceeds 2 for each replica.
2>&1 | tee api_server.log
```
</details>
To update the service with the new config:
```bash
```console
HF_TOKEN="your-huggingface-token" sky serve update vllm serving.yaml --env HF_TOKEN
```
To stop the service:
```bash
```console
sky serve down vllm
```
@ -285,7 +288,8 @@ sky serve down vllm
It is also possible to access the Llama-3 service with a separate GUI frontend, so that user requests sent to the GUI will be load-balanced across replicas.
??? Yaml
<details>
<summary>Click to see the full GUI YAML</summary>
```yaml
envs:
@ -315,9 +319,11 @@ It is also possible to access the Llama-3 service with a separate GUI frontend,
--stop-token-ids 128009,128001 | tee ~/gradio.log
```
</details>
1. Start the chat web UI:
```bash
```console
sky launch \
-c gui ./gui.yaml \
--env ENDPOINT=$(sky serve status --endpoint vllm)

View File

@ -15,13 +15,13 @@ It can be quickly integrated with vLLM as a backend API server, enabling powerfu
- Start the vLLM server with the supported chat completion model, e.g.
```bash
```console
vllm serve qwen/Qwen1.5-0.5B-Chat
```
- Install streamlit and openai:
```bash
```console
pip install streamlit openai
```
@ -29,7 +29,7 @@ pip install streamlit openai
- Start the streamlit web UI and start to chat:
```bash
```console
streamlit run streamlit_openai_chatbot_webserver.py
# or specify the VLLM_API_BASE or VLLM_API_KEY

View File

@ -7,7 +7,7 @@ vLLM is also available via [Llama Stack](https://github.com/meta-llama/llama-sta
To install Llama Stack, run
```bash
```console
pip install llama-stack -q
```

View File

@ -60,7 +60,7 @@ And then you can send out a query to the OpenAI-compatible API to check the avai
curl -o- http://localhost:30080/models
```
??? Output
Expected output:
```json
{
@ -89,7 +89,7 @@ curl -X POST http://localhost:30080/completions \
}'
```
??? Output
Expected output:
```json
{
@ -121,8 +121,6 @@ sudo helm uninstall vllm
The core vLLM production stack configuration is managed with YAML. Here is the example configuration used in the installation above:
??? Yaml
```yaml
servingEngineSpec:
runtimeClassName: ""

View File

@ -29,8 +29,6 @@ Alternatively, you can deploy vLLM to Kubernetes using any of the following:
First, create a Kubernetes PVC and Secret for downloading and storing Hugging Face model:
??? Config
```bash
cat <<EOF |kubectl apply -f -
apiVersion: v1
@ -57,8 +55,6 @@ First, create a Kubernetes PVC and Secret for downloading and storing Hugging Fa
Next, start the vLLM server as a Kubernetes Deployment and Service:
??? Config
```bash
cat <<EOF |kubectl apply -f -
apiVersion: apps/v1
@ -115,7 +111,7 @@ Next, start the vLLM server as a Kubernetes Deployment and Service:
We can verify that the vLLM server has started successfully via the logs (this might take a couple of minutes to download the model):
```bash
```console
kubectl logs -l app.kubernetes.io/name=vllm
...
INFO: Started server process [1]
@ -132,9 +128,6 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
The PVC is used to store the model cache and is optional; you can use hostPath or other storage options
<details>
<summary>Yaml</summary>
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
@ -151,8 +144,6 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
volumeMode: Filesystem
```
</details>
The Secret is optional and only required for accessing gated models; you can skip this step if you are not using gated models
```yaml
@ -172,9 +163,6 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
NVIDIA GPU:
<details>
<summary>Yaml</summary>
```yaml
apiVersion: apps/v1
kind: Deployment
@ -245,15 +233,10 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
periodSeconds: 5
```
</details>
AMD GPU:
You can refer to the `deployment.yaml` below if using AMD ROCm GPU like MI300X.
<details>
<summary>Yaml</summary>
```yaml
apiVersion: apps/v1
kind: Deployment
@ -322,17 +305,12 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
mountPath: /dev/shm
```
</details>
You can get the full example with steps and sample yaml files from <https://github.com/ROCm/k8s-device-plugin/tree/master/example/vllm-serve>.
2. Create a Kubernetes Service for vLLM
Next, create a Kubernetes Service file to expose the `mistral-7b` deployment:
<details>
<summary>Yaml</summary>
```yaml
apiVersion: v1
kind: Service
@ -352,20 +330,18 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
type: ClusterIP
```
</details>
3. Deploy and Test
Apply the deployment and service configurations using `kubectl apply -f <filename>`:
```bash
```console
kubectl apply -f deployment.yaml
kubectl apply -f service.yaml
```
To test the deployment, run the following `curl` command:
```bash
```console
curl http://mistral-7b.default.svc.cluster.local/v1/completions \
-H "Content-Type: application/json" \
-d '{

View File

@ -11,13 +11,13 @@ This document shows how to launch multiple vLLM serving containers and use Nginx
This guide assumes that you have just cloned the vLLM project and you're currently in the vllm root directory.
```bash
```console
export vllm_root=`pwd`
```
Create a file named `Dockerfile.nginx`:
```dockerfile
```console
FROM nginx:latest
RUN rm /etc/nginx/conf.d/default.conf
EXPOSE 80
@ -26,7 +26,7 @@ CMD ["nginx", "-g", "daemon off;"]
Build the container:
```bash
```console
docker build . -f Dockerfile.nginx --tag nginx-lb
```
@ -36,8 +36,6 @@ docker build . -f Dockerfile.nginx --tag nginx-lb
Create a file named `nginx_conf/nginx.conf`. Note that you can add as many servers as you'd like. In the below example we'll start with two. To add more, add another `server vllmN:8000 max_fails=3 fail_timeout=10000s;` entry to `upstream backend`.
??? Config
```console
upstream backend {
least_conn;
@ -60,14 +58,14 @@ Create a file named `nginx_conf/nginx.conf`. Note that you can add as many serve
## Build vLLM Container
```bash
```console
cd $vllm_root
docker build -f docker/Dockerfile . --tag vllm
```
If you are behind a proxy, you can pass the proxy settings to the docker build command as shown below:
```bash
```console
cd $vllm_root
docker build \
-f docker/Dockerfile . \
@ -80,7 +78,7 @@ docker build \
## Create Docker Network
```bash
```console
docker network create vllm_nginx
```
@ -95,8 +93,6 @@ Notes:
- The example below assumes a GPU backend is used. If you are using the CPU backend, remove `--gpus device=ID` and add the `VLLM_CPU_KVCACHE_SPACE` and `VLLM_CPU_OMP_THREADS_BIND` environment variables to the docker run command.
- Adjust the model name that you want to use in your vLLM servers if you don't want to use `Llama-2-7b-chat-hf`.
??? Commands
```console
mkdir -p ~/.cache/huggingface/hub/
hf_cache_dir=~/.cache/huggingface/
@ -129,7 +125,7 @@ Notes:
## Launch Nginx
```bash
```console
docker run \
-itd \
-p 8000:80 \
@ -142,7 +138,7 @@ docker run \
## Verify That vLLM Servers Are Ready
```bash
```console
docker logs vllm0 | grep Uvicorn
docker logs vllm1 | grep Uvicorn
```

View File

@ -22,8 +22,6 @@ server.
Here is a sample of `LLM` class usage:
??? Code
```python
from vllm import LLM, SamplingParams
@ -180,8 +178,6 @@ vision-language model.
To avoid accidentally passing incorrect arguments, the constructor is now keyword-only. This ensures that the constructor will raise an error if old configurations are passed. vLLM developers have already made this change for all models within vLLM. For out-of-tree registered models, developers need to update their models, for example by adding shim code to adapt the old constructor signature to the new one:
??? Code
```python
class MyOldModel(nn.Module):
def __init__(

View File

@ -448,8 +448,6 @@ elements of the entire head for all context tokens. However, overall,
all results for output have been calculated but are just stored in
different thread register memory.
??? Code
```cpp
float* out_smem = reinterpret_cast<float*>(shared_mem);
for (int i = NUM_WARPS; i > 1; i /= 2) {

View File

@ -13,8 +13,6 @@ Plugins are user-registered code that vLLM executes. Given vLLM's architecture (
vLLM's plugin system uses the standard Python `entry_points` mechanism. This mechanism allows developers to register functions in their Python packages for use by other packages. An example of a plugin:
??? Code
```python
# inside `setup.py` file
from setuptools import setup

View File

@ -61,8 +61,6 @@ To address the above issues, I have designed and developed a local Tensor memory
# Install vLLM
??? Commands
```shell
# Enter the home directory or your working directory.
cd /home
@ -106,8 +104,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Prefill1 (e.g. 10.0.1.2 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -128,8 +124,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Decode1 (e.g. 10.0.1.3 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -150,8 +144,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Decode2 (e.g. 10.0.1.4 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -172,8 +164,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Decode3 (e.g. 10.0.1.5 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -203,8 +193,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Prefill1 (e.g. 10.0.1.2 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -225,8 +213,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Prefill2 (e.g. 10.0.1.3 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -247,8 +233,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Prefill3 (e.g. 10.0.1.4 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -269,8 +253,6 @@ python3 disagg_prefill_proxy_xpyd.py &
### Decode1 (e.g. 10.0.1.5 or 10.0.1.1)
??? Command
```shell
VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \
--host 0.0.0.0 \
@ -304,8 +286,6 @@ curl -X POST -s http://10.0.1.1:10001/v1/completions \
# Benchmark
??? Command
```shell
python3 benchmark_serving.py \
--backend vllm \

View File

@ -28,9 +28,7 @@ A unique aspect of vLLM's `torch.compile` integration, is that we guarantee all
In the very verbose logs, we can see:
??? Logs
```text
```
DEBUG 03-07 03:06:52 [decorators.py:203] Start compiling function <code object forward at 0x7f08acf40c90, file "xxx/vllm/model_executor/models/llama.py", line 339>
DEBUG 03-07 03:06:54 [backends.py:370] Traced files (to be considered for compilation cache):
@ -101,17 +99,14 @@ This time, Inductor compilation is completely bypassed, and we will load from di
The above example just uses Inductor to compile for a general shape (i.e. symbolic shape). We can also use Inductor to compile for some of the specific shapes, for example:
```bash
vllm serve meta-llama/Llama-3.2-1B \
--compilation_config '{"compile_sizes": [1, 2, 4, 8]}'
```
vllm serve meta-llama/Llama-3.2-1B --compilation_config '{"compile_sizes": [1, 2, 4, 8]}'
```
Then it will also compile specific kernels just for batch sizes `1, 2, 4, 8`. At this point, all of the shapes in the computation graph are static and known, and we will turn on auto-tuning to tune for max performance. This can be slow the first time you run it, but the next time, we can directly bypass the tuning and run the tuned kernel.
When all the shapes are known, `torch.compile` can compare different configs, and often find some better configs to run the kernel. For example, we can see the following log:
??? Logs
```
AUTOTUNE mm(8x2048, 2048x3072)
triton_mm_4 0.0130 ms 100.0% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=32, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=2
@ -141,9 +136,8 @@ The cudagraphs are captured and managed by the compiler backend, and replayed wh
By default, vLLM will try to determine a set of sizes to capture cudagraph. You can also override it using the config `cudagraph_capture_sizes`:
```bash
vllm serve meta-llama/Llama-3.2-1B \
--compilation-config '{"cudagraph_capture_sizes": [1, 2, 4, 8]}'
```
vllm serve meta-llama/Llama-3.2-1B --compilation-config '{"cudagraph_capture_sizes": [1, 2, 4, 8]}'
```
Then it will only capture cudagraph for the specified sizes. It can be useful to have fine-grained control over the cudagraph capture.
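The same configuration can also be used offline. A minimal sketch, assuming the Python `LLM` entrypoint of this vLLM version accepts the same `compilation_config` dictionary as the CLI flag shown above:
```python
from vllm import LLM

# Hedged sketch: compile for a few fixed batch sizes and restrict cudagraph capture
# to the same sizes (assumes `compilation_config` is accepted by this vLLM version).
llm = LLM(
    model="meta-llama/Llama-3.2-1B",
    compilation_config={
        "compile_sizes": [1, 2, 4, 8],
        "cudagraph_capture_sizes": [1, 2, 4, 8],
    },
)
print(llm.generate("The future of AI is")[0].outputs[0].text)
```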

View File

@ -29,8 +29,6 @@ We can now submit the prompts and call `llm.generate` with the `lora_request` pa
of `LoRARequest` is a human identifiable name, the second parameter is a globally unique ID for the adapter and
the third parameter is the path to the LoRA adapter.
??? Code
```python
sampling_params = SamplingParams(
temperature=0,
@ -70,8 +68,6 @@ The server entrypoint accepts all other LoRA configuration parameters (`max_lora
etc.), which will apply to all forthcoming requests. Upon querying the `/models` endpoint, we should see our LoRA along
with its base model (if `jq` is not installed, you can follow [this guide](https://jqlang.org/download/) to install it.):
??? Command
```bash
curl localhost:8000/v1/models | jq .
{
@ -172,7 +168,7 @@ Alternatively, follow these example steps to implement your own plugin:
1. Implement the LoRAResolver interface.
??? Example of a simple S3 LoRAResolver implementation
Example of a simple S3 LoRAResolver implementation:
```python
import os
@ -238,8 +234,6 @@ The new format of `--lora-modules` is mainly to support the display of parent mo
- The `parent` field of LoRA model `sql-lora` now links to its base model `meta-llama/Llama-2-7b-hf`. This correctly reflects the hierarchical relationship between the base model and the LoRA adapter.
- The `root` field points to the artifact location of the lora adapter.
??? Command output
```bash
$ curl http://localhost:8000/v1/models

View File

@ -20,8 +20,6 @@ To input multi-modal data, follow this schema in [vllm.inputs.PromptType][]:
You can pass a single image to the `'image'` field of the multi-modal dictionary, as shown in the following examples:
??? Code
```python
from vllm import LLM
@ -68,8 +66,6 @@ Full example: <gh-file:examples/offline_inference/vision_language.py>
To substitute multiple images inside the same text prompt, you can pass in a list of images instead:
??? Code
```python
from vllm import LLM
@ -103,8 +99,6 @@ Full example: <gh-file:examples/offline_inference/vision_language_multi_image.py
Multi-image input can be extended to perform video captioning. We show this with [Qwen2-VL](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct) as it supports videos:
??? Code
```python
from vllm import LLM
@ -150,8 +144,6 @@ Full example: <gh-file:examples/offline_inference/audio_language.py>
To input pre-computed embeddings belonging to a data type (i.e. image, video, or audio) directly to the language model,
pass a tensor of shape `(num_items, feature_size, hidden_size of LM)` to the corresponding field of the multi-modal dictionary.
??? Code
```python
from vllm import LLM
@ -177,8 +169,6 @@ pass a tensor of shape `(num_items, feature_size, hidden_size of LM)` to the cor
For Qwen2-VL and MiniCPM-V, we accept additional parameters alongside the embeddings:
??? Code
```python
# Construct the prompt based on your model
prompt = ...
@ -245,8 +235,6 @@ vllm serve microsoft/Phi-3.5-vision-instruct --task generate \
Then, you can use the OpenAI client as follows:
??? Code
```python
from openai import OpenAI
@ -307,7 +295,7 @@ Full example: <gh-file:examples/online_serving/openai_chat_completion_client_for
By default, the timeout for fetching images through HTTP URL is `5` seconds.
You can override this by setting the environment variable:
```bash
```console
export VLLM_IMAGE_FETCH_TIMEOUT=<timeout>
```
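For reference, a minimal client-side sketch that sends an image URL to the server started above (the image URL is illustrative):
```python
from openai import OpenAI

# Assumes `vllm serve microsoft/Phi-3.5-vision-instruct --task generate ...` is running locally.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

chat = client.chat.completions.create(
    model="microsoft/Phi-3.5-vision-instruct",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/duck.jpg"}},  # illustrative URL
        ],
    }],
)
print(chat.choices[0].message.content)
```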
@ -323,8 +311,6 @@ vllm serve llava-hf/llava-onevision-qwen2-0.5b-ov-hf --task generate --max-model
Then, you can use the OpenAI client as follows:
??? Code
```python
from openai import OpenAI
@ -370,7 +356,7 @@ Full example: <gh-file:examples/online_serving/openai_chat_completion_client_for
By default, the timeout for fetching videos through HTTP URL is `30` seconds.
You can override this by setting the environment variable:
```bash
```console
export VLLM_VIDEO_FETCH_TIMEOUT=<timeout>
```
@ -387,8 +373,6 @@ vllm serve fixie-ai/ultravox-v0_5-llama-3_2-1b
Then, you can use the OpenAI client as follows:
??? Code
```python
import base64
import requests
@ -443,8 +427,6 @@ Then, you can use the OpenAI client as follows:
Alternatively, you can pass `audio_url`, which is the audio counterpart of `image_url` for image input:
??? Code
```python
chat_completion_from_url = client.chat.completions.create(
messages=[{
@ -476,7 +458,7 @@ Full example: <gh-file:examples/online_serving/openai_chat_completion_client_for
By default, the timeout for fetching audios through HTTP URL is `10` seconds.
You can override this by setting the environment variable:
```bash
```console
export VLLM_AUDIO_FETCH_TIMEOUT=<timeout>
```
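Similarly, a minimal client-side sketch using `audio_url` (the clip URL is illustrative):
```python
from openai import OpenAI

# Assumes an audio-capable model, e.g. `vllm serve fixie-ai/ultravox-v0_5-llama-3_2-1b`, is running locally.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

chat = client.chat.completions.create(
    model="fixie-ai/ultravox-v0_5-llama-3_2-1b",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Transcribe this clip."},
            {"type": "audio_url", "audio_url": {"url": "https://example.com/clip.wav"}},  # illustrative URL
        ],
    }],
)
print(chat.choices[0].message.content)
```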
@ -488,8 +470,6 @@ pass a tensor of shape to the corresponding field of the multi-modal dictionary.
For image embeddings, you can pass the base64-encoded tensor to the `image_embeds` field.
The following example demonstrates how to pass image embeddings to the OpenAI server:
??? Code
```python
image_embedding = torch.load(...)
grid_thw = torch.load(...) # Required by Qwen/Qwen2-VL-2B-Instruct

View File

@ -9,14 +9,12 @@ The main benefits are lower latency and memory usage.
You can quantize your own models by installing AutoAWQ or picking one of the [6500+ models on Huggingface](https://huggingface.co/models?search=awq).
```bash
```console
pip install autoawq
```
After installing AutoAWQ, you are ready to quantize a model. Please refer to the [AutoAWQ documentation](https://casper-hansen.github.io/AutoAWQ/examples/#basic-quantization) for further details. Here is an example of how to quantize `mistralai/Mistral-7B-Instruct-v0.2`:
??? Code
```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer
@ -43,7 +41,7 @@ After installing AutoAWQ, you are ready to quantize a model. Please refer to the
To run an AWQ model with vLLM, you can use [TheBloke/Llama-2-7b-Chat-AWQ](https://huggingface.co/TheBloke/Llama-2-7b-Chat-AWQ) with the following command:
```bash
```console
python examples/offline_inference/llm_engine_example.py \
--model TheBloke/Llama-2-7b-Chat-AWQ \
--quantization awq
@ -51,8 +49,6 @@ python examples/offline_inference/llm_engine_example.py \
AWQ models are also supported directly through the LLM entrypoint:
??? Code
```python
from vllm import LLM, SamplingParams
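# The page's full snippet is truncated in this diff; a minimal hedged sketch of the
# same idea (the prompt is illustrative, the model is the AWQ checkpoint above):
prompts = ["The capital of France is"]
sampling_params = SamplingParams(temperature=0.8, max_tokens=32)

llm = LLM(model="TheBloke/Llama-2-7b-Chat-AWQ", quantization="awq")
for output in llm.generate(prompts, sampling_params):
    print(output.outputs[0].text)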

View File

@ -12,7 +12,7 @@ vLLM now supports [BitBLAS](https://github.com/microsoft/BitBLAS) for more effic
Below are the steps to utilize BitBLAS with vLLM.
```bash
```console
pip install bitblas>=0.1.0
```
@ -43,8 +43,6 @@ llm = LLM(
## Read gptq format checkpoint
??? Code
```python
from vllm import LLM
import torch

View File

@ -9,7 +9,7 @@ Compared to other quantization methods, BitsAndBytes eliminates the need for cal
Below are the steps to utilize BitsAndBytes with vLLM.
```bash
```console
pip install bitsandbytes>=0.45.3
```
@ -54,6 +54,6 @@ llm = LLM(
Append the following to your model arguments for 4-bit in-flight quantization:
```bash
```console
--quantization bitsandbytes
```
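For offline use, the same flag can be passed to the `LLM` constructor. A minimal sketch (the model name is illustrative; depending on the vLLM version you may also need `load_format="bitsandbytes"`):
```python
from vllm import LLM

# In-flight 4-bit quantization of an unquantized checkpoint (illustrative model name).
llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct", quantization="bitsandbytes")
print(llm.generate("Hello, my name is")[0].outputs[0].text)
```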

View File

@ -23,7 +23,7 @@ The FP8 types typically supported in hardware have two distinct representations,
To produce performant FP8 quantized models with vLLM, you'll need to install the [llm-compressor](https://github.com/vllm-project/llm-compressor/) library:
```bash
```console
pip install llmcompressor
```
@ -58,8 +58,6 @@ For FP8 quantization, we can recover accuracy with simple RTN quantization. We r
Since simple RTN does not require data for weight quantization and the activations are quantized dynamically, we do not need any calibration data for this quantization flow.
??? Code
```python
from llmcompressor.transformers import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier
@ -81,7 +79,7 @@ Since simple RTN does not require data for weight quantization and the activatio
Install `vllm` and `lm-evaluation-harness` for evaluation:
```bash
```console
pip install vllm lm-eval==0.4.4
```
@ -99,9 +97,9 @@ Evaluate accuracy with `lm_eval` (for example on 250 samples of `gsm8k`):
!!! note
Quantized models can be sensitive to the presence of the `bos` token. `lm_eval` does not add a `bos` token by default, so make sure to include the `add_bos_token=True` argument when running your evaluations.
```bash
MODEL=$PWD/Meta-Llama-3-8B-Instruct-FP8-Dynamic
lm_eval \
```console
$ MODEL=$PWD/Meta-Llama-3-8B-Instruct-FP8-Dynamic
$ lm_eval \
--model vllm \
--model_args pretrained=$MODEL,add_bos_token=True \
--tasks gsm8k --num_fewshot 5 --batch_size auto --limit 250

View File

@ -11,7 +11,7 @@ title: GGUF
To run a GGUF model with vLLM, you can download and use the local GGUF model from [TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF) with the following command:
```bash
```console
wget https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf
# We recommend using the tokenizer from the base model to avoid a slow and potentially buggy tokenizer conversion.
vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \
@ -20,7 +20,7 @@ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \
You can also add `--tensor-parallel-size 2` to enable tensor parallelism inference with 2 GPUs:
```bash
```console
# We recommend using the tokenizer from the base model to avoid a slow and potentially buggy tokenizer conversion.
vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \
--tokenizer TinyLlama/TinyLlama-1.1B-Chat-v1.0 \
@ -32,7 +32,7 @@ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \
GGUF assumes that huggingface can convert the metadata to a config file. In case huggingface doesn't support your model, you can manually create a config and pass it as `hf-config-path`.
```bash
```console
# If your model is not supported by huggingface, you can manually provide a huggingface-compatible config path
vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \
--tokenizer TinyLlama/TinyLlama-1.1B-Chat-v1.0 \
@ -41,8 +41,6 @@ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \
You can also use the GGUF model directly through the LLM entrypoint:
??? Code
```python
from vllm import LLM, SamplingParams
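# Hedged sketch of the idea (the full snippet is truncated in this diff): point vLLM at
# the local GGUF file and reuse the base model's tokenizer, as recommended above.
llm = LLM(
    model="./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
    tokenizer="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
)
sampling_params = SamplingParams(temperature=0.8, max_tokens=32)
outputs = llm.chat(
    [{"role": "user", "content": "Tell me a joke."}],
    sampling_params,
)
print(outputs[0].outputs[0].text)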

View File

@ -21,7 +21,7 @@ for more details on this and other advanced features.
You can quantize your own models by installing [GPTQModel](https://github.com/ModelCloud/GPTQModel) or picking one of the [5000+ models on Huggingface](https://huggingface.co/models?search=gptq).
```bash
```console
pip install -U gptqmodel --no-build-isolation -v
```
@ -31,8 +31,6 @@ After installing GPTQModel, you are ready to quantize a model. Please refer to t
Here is an example of how to quantize `meta-llama/Llama-3.2-1B-Instruct`:
??? Code
```python
from datasets import load_dataset
from gptqmodel import GPTQModel, QuantizeConfig
@ -60,7 +58,7 @@ Here is an example of how to quantize `meta-llama/Llama-3.2-1B-Instruct`:
To run a GPTQModel-quantized model with vLLM, you can use [DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2](https://huggingface.co/ModelCloud/DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2) with the following command:
```bash
```console
python examples/offline_inference/llm_engine_example.py \
--model ModelCloud/DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2
```
@ -69,8 +67,6 @@ python examples/offline_inference/llm_engine_example.py \
GPTQModel quantized models are also supported directly through the LLM entrypoint:
??? Code
```python
from vllm import LLM, SamplingParams

View File

@ -14,13 +14,13 @@ Please visit the HF collection of [quantized INT4 checkpoints of popular LLMs re
To use INT4 quantization with vLLM, you'll need to install the [llm-compressor](https://github.com/vllm-project/llm-compressor/) library:
```bash
```console
pip install llmcompressor
```
Additionally, install `vllm` and `lm-evaluation-harness` for evaluation:
```bash
```console
pip install vllm lm-eval==0.4.4
```
@ -53,8 +53,6 @@ When quantizing weights to INT4, you need sample data to estimate the weight upd
It's best to use calibration data that closely matches your deployment data.
For a general-purpose instruction-tuned model, you can use a dataset like `ultrachat`:
??? Code
```python
from datasets import load_dataset
@ -78,8 +76,6 @@ For a general-purpose instruction-tuned model, you can use a dataset like `ultra
Now, apply the quantization algorithms:
??? Code
```python
from llmcompressor.transformers import oneshot
from llmcompressor.modifiers.quantization import GPTQModifier
@ -116,8 +112,8 @@ model = LLM("./Meta-Llama-3-8B-Instruct-W4A16-G128")
To evaluate accuracy, you can use `lm_eval`:
```bash
lm_eval --model vllm \
```console
$ lm_eval --model vllm \
--model_args pretrained="./Meta-Llama-3-8B-Instruct-W4A16-G128",add_bos_token=true \
--tasks gsm8k \
--num_fewshot 5 \
@ -141,8 +137,6 @@ lm_eval --model vllm \
The following is an example of an expanded quantization recipe you can tune to your own use case:
??? Code
```python
from compressed_tensors.quantization import (
QuantizationArgs,

View File

@ -15,13 +15,13 @@ Please visit the HF collection of [quantized INT8 checkpoints of popular LLMs re
To use INT8 quantization with vLLM, you'll need to install the [llm-compressor](https://github.com/vllm-project/llm-compressor/) library:
```bash
```console
pip install llmcompressor
```
Additionally, install `vllm` and `lm-evaluation-harness` for evaluation:
```bash
```console
pip install vllm lm-eval==0.4.4
```
@ -54,8 +54,6 @@ When quantizing activations to INT8, you need sample data to estimate the activa
It's best to use calibration data that closely matches your deployment data.
For a general-purpose instruction-tuned model, you can use a dataset like `ultrachat`:
??? Code
```python
from datasets import load_dataset
@ -75,14 +73,10 @@ For a general-purpose instruction-tuned model, you can use a dataset like `ultra
ds = ds.map(tokenize, remove_columns=ds.column_names)
```
</details>
### 3. Applying Quantization
Now, apply the quantization algorithms:
??? Code
```python
from llmcompressor.transformers import oneshot
from llmcompressor.modifiers.quantization import GPTQModifier
@ -122,8 +116,8 @@ model = LLM("./Meta-Llama-3-8B-Instruct-W8A8-Dynamic-Per-Token")
To evaluate accuracy, you can use `lm_eval`:
```bash
lm_eval --model vllm \
```console
$ lm_eval --model vllm \
--model_args pretrained="./Meta-Llama-3-8B-Instruct-W8A8-Dynamic-Per-Token",add_bos_token=true \
--tasks gsm8k \
--num_fewshot 5 \

View File

@ -4,7 +4,7 @@ The [NVIDIA TensorRT Model Optimizer](https://github.com/NVIDIA/TensorRT-Model-O
We recommend installing the library with:
```bash
```console
pip install nvidia-modelopt
```
@ -14,8 +14,6 @@ You can quantize HuggingFace models using the example scripts provided in the Te
Below is an example showing how to quantize a model using modelopt's PTQ API:
??? Code
```python
import modelopt.torch.quantization as mtq
from transformers import AutoModelForCausalLM
@ -50,8 +48,6 @@ with torch.inference_mode():
The quantized checkpoint can then be deployed with vLLM. As an example, the following code shows how to deploy `nvidia/Llama-3.1-8B-Instruct-FP8`, which is the FP8 quantized checkpoint derived from `meta-llama/Llama-3.1-8B-Instruct`, using vLLM:
??? Code
```python
from vllm import LLM, SamplingParams

View File

@ -35,8 +35,6 @@ Studies have shown that FP8 E4M3 quantization typically only minimally degrades
Here is an example of how to enable FP8 quantization:
??? Code
```python
# To calculate kv cache scales on the fly enable the calculate_kv_scales
# parameter
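# Hedged sketch (the page's full snippet is truncated in this diff; model name is illustrative):
from vllm import LLM

llm = LLM(
    model="meta-llama/Llama-3.1-8B-Instruct",
    kv_cache_dtype="fp8",
    calculate_kv_scales=True,  # compute KV cache scales on the fly, as described above
)
print(llm.generate("Explain FP8 KV cache in one sentence.")[0].outputs[0].text)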
@ -65,7 +63,7 @@ For optimal model quality when using FP8 KV Cache, we recommend using calibrated
First, install the required dependencies:
```bash
```console
pip install llmcompressor
```
@ -73,8 +71,6 @@ pip install llmcompressor
Here's a complete example using `meta-llama/Llama-3.1-8B-Instruct` (most models can use this same pattern):
??? Code
```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

View File

@ -13,7 +13,7 @@ AWQ, GPTQ, Rotation and SmoothQuant.
Before quantizing models, you need to install Quark. The latest release of Quark can be installed with pip:
```bash
```console
pip install amd-quark
```
@ -22,7 +22,7 @@ for more installation details.
Additionally, install `vllm` and `lm-evaluation-harness` for evaluation:
```bash
```console
pip install vllm lm-eval==0.4.4
```
@ -42,8 +42,6 @@ The Quark quantization process can be listed for 5 steps as below:
Quark uses [Transformers](https://huggingface.co/docs/transformers/en/index)
to fetch model and tokenizer.
??? Code
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
@ -65,8 +63,6 @@ Quark uses the [PyTorch Dataloader](https://pytorch.org/tutorials/beginner/basic
to load calibration data. For more details about how to use calibration datasets efficiently, please refer
to [Adding Calibration Datasets](https://quark.docs.amd.com/latest/pytorch/calibration_datasets.html).
??? Code
```python
from datasets import load_dataset
from torch.utils.data import DataLoader
@ -98,8 +94,6 @@ kv-cache and the quantization algorithm is AutoSmoothQuant.
AutoSmoothQuant config file for Llama is
`examples/torch/language_modeling/llm_ptq/models/llama/autosmoothquant_config.json`.
??? Code
```python
from quark.torch.quantization import (Config, QuantizationConfig,
FP8E4M3PerTensorSpec,
@ -145,8 +139,6 @@ HuggingFace `safetensors`, you can refer to
[HuggingFace format exporting](https://quark.docs.amd.com/latest/pytorch/export/quark_export_hf.html)
for more exporting format details.
??? Code
```python
import torch
from quark.torch import ModelQuantizer, ModelExporter
@ -176,8 +168,6 @@ for more exporting format details.
Now, you can load and run the Quark quantized model directly through the LLM entrypoint:
??? Code
```python
from vllm import LLM, SamplingParams
@ -209,8 +199,8 @@ Now, you can load and run the Quark quantized model directly through the LLM ent
Or, you can use `lm_eval` to evaluate accuracy:
```bash
lm_eval --model vllm \
```console
$ lm_eval --model vllm \
--model_args pretrained=Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant,kv_cache_dtype='fp8',quantization='quark' \
--tasks gsm8k
```
@ -222,7 +212,7 @@ to quantize large language models more conveniently. It supports quantizing mode
of different quantization schemes and optimization algorithms. It can export the quantized model
and run evaluation tasks on the fly. With the script, the example above can be:
```bash
```console
python3 quantize_quark.py --model_dir meta-llama/Llama-2-70b-chat-hf \
--output_dir /path/to/output \
--quant_scheme w_fp8_a_fp8 \

View File

@ -4,7 +4,7 @@ TorchAO is an architecture optimization library for PyTorch, it provides high pe
We recommend installing the latest torchao nightly with
```bash
```console
# Install the latest TorchAO nightly build
# Choose the CUDA version that matches your system (cu126, cu128, etc.)
pip install \
@ -15,8 +15,6 @@ pip install \
## Quantizing HuggingFace Models
You can quantize your own Hugging Face models with TorchAO, e.g. [transformers](https://huggingface.co/docs/transformers/main/en/quantization/torchao) and [diffusers](https://huggingface.co/docs/diffusers/en/quantization/torchao), and save the checkpoint to the Hugging Face Hub like [this](https://huggingface.co/jerryzh168/llama3-8b-int8wo) with the following example code:
??? Code
```Python
import torch
from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer
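# Hedged sketch (exact config names can differ across torchao/transformers versions):
# int8 weight-only quantization via TorchAoConfig, then pushing the checkpoint to the Hub.
model_name = "meta-llama/Llama-3.1-8B-Instruct"            # illustrative
quantization_config = TorchAoConfig("int8_weight_only")    # assumed quant_type string
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.push_to_hub("your-username/llama3-8b-int8wo")        # hypothetical repo id
tokenizer.push_to_hub("your-username/llama3-8b-int8wo")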

View File

@ -33,8 +33,6 @@ vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \
Next, make a request to the model that should return the reasoning content in the response.
??? Code
```python
from openai import OpenAI
@ -70,8 +68,6 @@ The `reasoning_content` field contains the reasoning steps that led to the final
Streaming chat completions are also supported for reasoning models. The `reasoning_content` field is available in the `delta` field in [chat completion response chunks](https://platform.openai.com/docs/api-reference/chat/streaming).
??? Json
```json
{
"id": "chatcmpl-123",
@ -95,8 +91,6 @@ Streaming chat completions are also supported for reasoning models. The `reasoni
The OpenAI Python client library does not officially support the `reasoning_content` attribute for streaming output, but the client does allow extra attributes in the response. You can use `hasattr` to check whether the `reasoning_content` attribute is present in the response. For example:
??? Code
```python
from openai import OpenAI
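# Hedged sketch (the full snippet is truncated in this diff): stream a completion and
# read `reasoning_content` from the delta only when the attribute is present.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")  # assumed local server

stream = client.chat.completions.create(
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    messages=[{"role": "user", "content": "What is 9.11 minus 9.8?"}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if hasattr(delta, "reasoning_content") and delta.reasoning_content:
        print(delta.reasoning_content, end="", flush=True)
    elif delta.content:
        print(delta.content, end="", flush=True)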
@ -152,8 +146,6 @@ Remember to check whether the `reasoning_content` exists in the response before
The reasoning content is also available when both tool calling and the reasoning parser are enabled. Additionally, tool calling only parses functions from the `content` field, not from the `reasoning_content`.
??? Code
```python
from openai import OpenAI
@ -200,8 +192,6 @@ For more examples, please refer to <gh-file:examples/online_serving/openai_chat_
You can add a new `ReasoningParser` similar to <gh-file:vllm/reasoning/deepseek_r1_reasoning_parser.py>.
??? Code
```python
# import the required packages
@ -258,8 +248,6 @@ You can add a new `ReasoningParser` similar to <gh-file:vllm/reasoning/deepseek_
Additionally, to enable structured output, you'll need to create a new `Reasoner` similar to the one in <gh-file:vllm/reasoning/deepseek_r1_reasoning_parser.py>.
??? Code
```python
@dataclass
class DeepSeekReasoner(Reasoner):

View File

@ -18,8 +18,6 @@ Speculative decoding is a technique which improves inter-token latency in memory
The following code configures vLLM in an offline mode to use speculative decoding with a draft model, speculating 5 tokens at a time.
??? Code
```python
from vllm import LLM, SamplingParams
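# Hedged sketch (the full snippet is truncated in this diff). Recent vLLM versions take a
# single `speculative_config` dict; older releases used separate arguments such as
# `speculative_model=` and `num_speculative_tokens=`. The model pairing is illustrative.
llm = LLM(
    model="facebook/opt-6.7b",
    speculative_config={
        "model": "facebook/opt-125m",
        "num_speculative_tokens": 5,
    },
)
outputs = llm.generate(
    ["The future of AI is"],
    SamplingParams(temperature=0.8, max_tokens=32),
)
print(outputs[0].outputs[0].text)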
@ -62,8 +60,6 @@ python -m vllm.entrypoints.openai.api_server \
Then use a client:
??? Code
```python
from openai import OpenAI
@ -103,8 +99,6 @@ Then use a client:
The following code configures vLLM to use speculative decoding where proposals are generated by
matching n-grams in the prompt. For more information read [this thread.](https://x.com/joao_gante/status/1747322413006643259)
??? Code
```python
from vllm import LLM, SamplingParams
@ -137,8 +131,6 @@ draft models that conditioning draft predictions on both context vectors and sam
For more information see [this blog](https://pytorch.org/blog/hitchhikers-guide-speculative-decoding/) or
[this technical report](https://arxiv.org/abs/2404.19124).
??? Code
```python
from vllm import LLM, SamplingParams
@ -185,8 +177,6 @@ A variety of speculative models of this type are available on HF hub:
The following code configures vLLM to use speculative decoding where proposals are generated by
an [EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency)](https://arxiv.org/pdf/2401.15077) based draft model. A more detailed example for offline mode, including how to extract request level acceptance rate, can be found [here](gh-file:examples/offline_inference/eagle.py).
??? Code
```python
from vllm import LLM, SamplingParams

View File

@ -33,8 +33,6 @@ text.
Now let's see an example for each of the cases, starting with the `guided_choice`, as it's the easiest one:
??? Code
```python
from openai import OpenAI
client = OpenAI(
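    # Assumed local-server defaults (not part of the original snippet):
    base_url="http://localhost:8000/v1",
    api_key="EMPTY",
)

# Hedged sketch: constrain the answer to one of two labels via vLLM's `guided_choice`
# extension to the OpenAI Chat Completions API.
model = client.models.list().data[0].id
completion = client.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"}],
    extra_body={"guided_choice": ["positive", "negative"]},
)
print(completion.choices[0].message.content)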
@ -55,8 +53,6 @@ Now let´s see an example for each of the cases, starting with the `guided_choic
The next example shows how to use the `guided_regex`. The idea is to generate an email address, given a simple regex template:
??? Code
```python
completion = client.chat.completions.create(
model=model,
@ -79,8 +75,6 @@ For this we can use the `guided_json` parameter in two different ways:
The next example shows how to use the `guided_json` parameter with a Pydantic model:
??? Code
```python
from pydantic import BaseModel
from enum import Enum
@ -127,8 +121,6 @@ difficult to use, but it´s really powerful. It allows us to define complete
languages like SQL queries. It works by using a context-free EBNF grammar.
As an example, we can use it to define a specific format of simplified SQL queries:
??? Code
```python
simplified_sql_grammar = """
root ::= select_statement
@ -169,8 +161,6 @@ vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-7B --reasoning-parser deepseek_r
Note that you can use reasoning with any provided structured outputs feature. The following uses one with JSON schema:
??? Code
```python
from pydantic import BaseModel
@ -212,8 +202,6 @@ For the following examples, vLLM was setup using `vllm serve meta-llama/Llama-3.
Here is a simple example demonstrating how to get structured output using Pydantic models:
??? Code
```python
from pydantic import BaseModel
from openai import OpenAI
@ -240,6 +228,8 @@ Here is a simple example demonstrating how to get structured output using Pydant
print("Age:", message.parsed.age)
```
Output:
```console
ParsedChatCompletionMessage[Testing](content='{"name": "Cameron", "age": 28}', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], parsed=Testing(name='Cameron', age=28))
Name: Cameron
@ -248,8 +238,6 @@ Age: 28
Here is a more complex example using nested Pydantic models to handle a step-by-step math solution:
??? Code
```python
from typing import List
from pydantic import BaseModel
@ -308,8 +296,6 @@ These parameters can be used in the same way as the parameters from the Online
Serving examples above. One example for the usage of the `choice` parameter is
shown below:
??? Code
```python
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams
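# Hedged sketch (the full snippet is truncated in this diff; model name is illustrative):
guided = GuidedDecodingParams(choice=["Positive", "Negative"])
sampling_params = SamplingParams(guided_decoding=guided, max_tokens=8)

llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct")
outputs = llm.generate("Classify this sentiment: vLLM is wonderful!", sampling_params)
print(outputs[0].outputs[0].text)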

View File

@ -15,8 +15,6 @@ vllm serve meta-llama/Llama-3.1-8B-Instruct \
Next, make a request to the model that should result in it using the available tools:
??? Code
```python
from openai import OpenAI
import json
@ -228,25 +226,6 @@ AI21's Jamba-1.5 models are supported.
Flags: `--tool-call-parser jamba`
### xLAM Models (`xlam`)
The xLAM tool parser is designed to support models that generate tool calls in various JSON formats. It detects function calls in several different output styles:
1. Direct JSON arrays: Output strings that are JSON arrays starting with `[` and ending with `]`
2. Thinking tags: Using `<think>...</think>` tags containing JSON arrays
3. Code blocks: JSON in code blocks (```json ...```)
4. Tool calls tags: Using `[TOOL_CALLS]` or `<tool_call>...</tool_call>` tags
Parallel function calls are supported, and the parser can effectively separate text content from tool calls.
Supported models:
* Salesforce Llama-xLAM models: `Salesforce/Llama-xLAM-2-8B-fc-r`, `Salesforce/Llama-xLAM-2-70B-fc-r`
* Qwen-xLAM models: `Salesforce/xLAM-1B-fc-r`, `Salesforce/xLAM-3B-fc-r`, `Salesforce/Qwen-xLAM-32B-fc-r`
Flags:
* For Llama-based xLAM models: `--tool-call-parser xlam --chat-template examples/tool_chat_template_xlam_llama.jinja`
* For Qwen-based xLAM models: `--tool-call-parser xlam --chat-template examples/tool_chat_template_xlam_qwen.jinja`
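Regardless of which parser the server is launched with, the client side of a tool call looks the same. A minimal sketch (the function schema is illustrative):
```python
from openai import OpenAI

# Assumes a tool-calling-enabled server is running locally with --enable-auto-tool-choice.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative function
        "description": "Get the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

resp = client.chat.completions.create(
    model=client.models.list().data[0].id,
    messages=[{"role": "user", "content": "What's the weather in Dallas?"}],
    tools=tools,
    tool_choice="auto",
)
print(resp.choices[0].message.tool_calls)
```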
### Qwen Models
For Qwen2.5, the chat template in tokenizer_config.json has already included support for the Hermes-style tool use. Therefore, you can use the `hermes` parser to enable tool calls for Qwen models. For more detailed information, please refer to the official [Qwen documentation](https://qwen.readthedocs.io/en/latest/framework/function_call.html#vllm)
@ -303,8 +282,6 @@ A tool parser plugin is a Python file containing one or more ToolParser implemen
Here is a summary of a plugin file:
??? Code
```python
# import the required packages
@ -351,7 +328,7 @@ Here is a summary of a plugin file:
Then you can use this plugin in the command line like this.
```bash
```console
--enable-auto-tool-choice \
--tool-parser-plugin <absolute path of the plugin file>
--tool-call-parser example \

View File

@ -26,7 +26,7 @@ The easiest way to launch a Trainium or Inferentia instance with pre-installed N
- After launching the instance, follow the instructions in [Connect to your instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) to connect to the instance
- Once inside your instance, activate the pre-installed virtual environment for inference by running
```bash
```console
source /opt/aws_neuronx_venv_pytorch_2_6_nxd_inference/bin/activate
```
@ -47,7 +47,7 @@ Currently, there are no pre-built Neuron wheels.
To build and install vLLM from source, run:
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
pip install -U -r requirements/neuron.txt
@ -66,7 +66,7 @@ Refer to [vLLM User Guide for NxD Inference](https://awsdocs-neuron.readthedocs-
To install the AWS Neuron fork, run the following:
```bash
```console
git clone -b neuron-2.23-vllm-v0.7.2 https://github.com/aws-neuron/upstreaming-to-vllm.git
cd upstreaming-to-vllm
pip install -r requirements/neuron.txt
@ -100,7 +100,7 @@ to perform most of the heavy lifting which includes PyTorch model initialization
To configure NxD Inference features through the vLLM entrypoint, use the `override_neuron_config` setting. Provide the configs you want to override
as a dictionary (or JSON object when starting vLLM from the CLI). For example, to disable auto bucketing, include
```python
```console
override_neuron_config={
"enable_bucketing":False,
}
@ -108,7 +108,7 @@ override_neuron_config={
or when launching vLLM from the CLI, pass
```bash
```console
--override-neuron-config "{\"enable_bucketing\":false}"
```

View File

@ -76,15 +76,11 @@ Currently, there are no pre-built CPU wheels.
### Build image from source
??? Commands
```bash
docker build -f docker/Dockerfile.cpu \
--tag vllm-cpu-env \
--target vllm-openai .
```console
$ docker build -f docker/Dockerfile.cpu --tag vllm-cpu-env --target vllm-openai .
# Launching OpenAI server
docker run --rm \
$ docker run --rm \
--privileged=true \
--shm-size=4g \
-p 8000:8000 \
@ -123,7 +119,7 @@ vLLM CPU backend supports the following vLLM features:
- We highly recommend using TCMalloc for high-performance memory allocation and better cache locality. For example, on Ubuntu 22.04, you can run:
```bash
```console
sudo apt-get install libtcmalloc-minimal4 # install TCMalloc library
find / -name *libtcmalloc* # find the dynamic link library path
export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:$LD_PRELOAD # prepend the library to LD_PRELOAD
@ -132,7 +128,7 @@ python examples/offline_inference/basic/basic.py # run vLLM
- When using online serving, it is recommended to reserve 1-2 CPU cores for the serving framework to avoid CPU oversubscription. For example, on a platform with 32 physical CPU cores, reserve CPUs 30 and 31 for the framework and use CPUs 0-29 for OpenMP:
```bash
```console
export VLLM_CPU_KVCACHE_SPACE=40
export VLLM_CPU_OMP_THREADS_BIND=0-29
vllm serve facebook/opt-125m
@ -140,7 +136,7 @@ vllm serve facebook/opt-125m
or using default auto thread binding:
```bash
```console
export VLLM_CPU_KVCACHE_SPACE=40
export VLLM_CPU_NUM_OF_RESERVED_CPU=2
vllm serve facebook/opt-125m
@ -148,8 +144,6 @@ vllm serve facebook/opt-125m
- If using the vLLM CPU backend on a machine with hyper-threading, it is recommended to bind only one OpenMP thread to each physical CPU core via `VLLM_CPU_OMP_THREADS_BIND`, or to rely on the default auto thread binding feature. On a hyper-threading-enabled platform with 16 logical CPU cores / 8 physical CPU cores:
??? Commands
```console
$ lscpu -e # check the mapping between logical CPU cores and physical CPU cores
@ -189,20 +183,14 @@ vllm serve facebook/opt-125m
- Tensor parallelism is supported for serving and offline inference. In general, each NUMA node is treated as one GPU card. Below is an example command to enable tensor parallelism of 2 for serving:
```bash
VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-31|32-63" \
vllm serve meta-llama/Llama-2-7b-chat-hf \
-tp=2 \
--distributed-executor-backend mp
```console
VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-31|32-63" vllm serve meta-llama/Llama-2-7b-chat-hf -tp=2 --distributed-executor-backend mp
```
or using default auto thread binding:
```bash
VLLM_CPU_KVCACHE_SPACE=40 \
vllm serve meta-llama/Llama-2-7b-chat-hf \
-tp=2 \
--distributed-executor-backend mp
```console
VLLM_CPU_KVCACHE_SPACE=40 vllm serve meta-llama/Llama-2-7b-chat-hf -tp=2 --distributed-executor-backend mp
```
- For each thread ID list in `VLLM_CPU_OMP_THREADS_BIND`, users should ensure that the threads in the list belong to the same NUMA node.

View File

@ -25,7 +25,7 @@ Currently the CPU implementation for macOS supports FP32 and FP16 datatypes.
After installation of XCode and the Command Line Tools, which include Apple Clang, execute the following commands to build and install vLLM from the source.
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
pip install -r requirements/cpu.txt

View File

@ -23,7 +23,7 @@ ARM CPU backend currently supports Float32, FP16 and BFloat16 datatypes.
# --8<-- [end:pre-built-wheels]
# --8<-- [start:build-wheel-from-source]
--8<-- "docs/getting_started/installation/cpu/build.inc.md"
--8<-- "docs/getting_started/installation/cpu/cpu/build.inc.md"
Testing has been conducted on AWS Graviton3 instances for compatibility.

View File

@ -1,6 +1,6 @@
First, install the recommended compiler. We recommend using `gcc/g++ >= 12.3.0` as the default compiler to avoid potential problems. For example, on Ubuntu 22.04, you can run:
```bash
```console
sudo apt-get update -y
sudo apt-get install -y gcc-12 g++-12 libnuma-dev python3-dev
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12
@ -8,14 +8,14 @@ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /
Second, clone vLLM project:
```bash
```console
git clone https://github.com/vllm-project/vllm.git vllm_source
cd vllm_source
```
Third, install Python packages for vLLM CPU backend building:
```bash
```console
pip install --upgrade pip
pip install "cmake>=3.26.1" wheel packaging ninja "setuptools-scm>=8" numpy
pip install -v -r requirements/cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu
@ -23,13 +23,13 @@ pip install -v -r requirements/cpu.txt --extra-index-url https://download.pytorc
Finally, build and install vLLM CPU backend:
```bash
```console
VLLM_TARGET_DEVICE=cpu python setup.py install
```
If you want to develop vllm, install it in editable mode instead.
```bash
```console
VLLM_TARGET_DEVICE=cpu python setup.py develop
```

View File

@ -26,7 +26,7 @@ Currently the CPU implementation for s390x architecture supports FP32 datatype o
Install the following packages from the package manager before building vLLM. For example, on RHEL 9.4:
```bash
```console
dnf install -y \
which procps findutils tar vim git gcc g++ make patch make cython zlib-devel \
libjpeg-turbo-devel libtiff-devel libpng-devel libwebp-devel freetype-devel harfbuzz-devel \
@ -35,7 +35,7 @@ dnf install -y \
Install Rust >= 1.80, which is needed to install the `outlines-core` and `uvloop` Python packages.
```bash
```console
curl https://sh.rustup.rs -sSf | sh -s -- -y && \
. "$HOME/.cargo/env"
```
@ -45,7 +45,7 @@ Execute the following commands to build and install vLLM from the source.
!!! tip
Please build the following dependencies from source before building vLLM: `torchvision` and `pyarrow`.
```bash
```console
sed -i '/^torch/d' requirements-build.txt # remove torch from requirements-build.txt since we use nightly builds
pip install -v \
--extra-index-url https://download.pytorch.org/whl/nightly/cpu \

View File

@ -24,7 +24,7 @@ vLLM initially supports basic model inferencing and serving on x86 CPU platform,
# --8<-- [end:pre-built-wheels]
# --8<-- [start:build-wheel-from-source]
--8<-- "docs/getting_started/installation/cpu/build.inc.md"
--8<-- "docs/getting_started/installation/cpu/cpu/build.inc.md"
!!! note
- AVX512_BF16 is an ISA extension that provides native BF16 data type conversion and vector product instructions, which brings some performance improvement compared with pure AVX512. The CPU backend build script checks the host CPU flags to determine whether to enable AVX512_BF16.

View File

@ -68,7 +68,7 @@ For more information about using TPUs with GKE, see:
Create a TPU v5e with 4 TPU chips:
```bash
```console
gcloud alpha compute tpus queued-resources create QUEUED_RESOURCE_ID \
--node-id TPU_NAME \
--project PROJECT_ID \
@ -156,13 +156,13 @@ See [deployment-docker-pre-built-image][deployment-docker-pre-built-image] for i
You can use <gh-file:docker/Dockerfile.tpu> to build a Docker image with TPU support.
```bash
```console
docker build -f docker/Dockerfile.tpu -t vllm-tpu .
```
Run the Docker image with the following command:
```bash
```console
# Make sure to add `--privileged --net host --shm-size=16G`.
docker run --privileged --net host --shm-size=16G -it vllm-tpu
```
@ -185,6 +185,6 @@ docker run --privileged --net host --shm-size=16G -it vllm-tpu
Install OpenBLAS with the following command:
```bash
```console
sudo apt-get install --no-install-recommends --yes libopenblas-base libopenmpi-dev libomp-dev
```

View File

@ -22,7 +22,7 @@ Therefore, it is recommended to install vLLM with a **fresh new** environment. I
You can install vLLM using either `pip` or `uv pip`:
```bash
```console
# Install vLLM with CUDA 12.8.
# If you are using pip.
pip install vllm --extra-index-url https://download.pytorch.org/whl/cu128
@ -37,7 +37,7 @@ We recommend leveraging `uv` to [automatically select the appropriate PyTorch in
As of now, vLLM's binaries are compiled with CUDA 12.8 and public PyTorch release versions by default. We also provide vLLM binaries compiled with CUDA 12.6, 11.8, and public PyTorch release versions:
```bash
```console
# Install vLLM with CUDA 11.8.
export VLLM_VERSION=0.6.1.post1
export PYTHON_VERSION=312
@ -52,7 +52,7 @@ LLM inference is a fast-evolving field, and the latest code may contain bug fixe
##### Install the latest code using `pip`
```bash
```console
pip install -U vllm \
--pre \
--extra-index-url https://wheels.vllm.ai/nightly
@ -62,7 +62,7 @@ pip install -U vllm \
Another way to install the latest code is to use `uv`:
```bash
```console
uv pip install -U vllm \
--torch-backend=auto \
--extra-index-url https://wheels.vllm.ai/nightly
@ -72,7 +72,7 @@ uv pip install -U vllm \
If you want to access the wheels for previous commits (e.g. to bisect the behavior change, performance regression), due to the limitation of `pip`, you have to specify the full URL of the wheel file by embedding the commit hash in the URL:
```bash
```console
export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch
pip install https://wheels.vllm.ai/${VLLM_COMMIT}/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl
```
@ -83,7 +83,7 @@ Note that the wheels are built with Python 3.8 ABI (see [PEP 425](https://peps.p
If you want to access the wheels for previous commits (e.g. to bisect the behavior change, performance regression), you can specify the commit hash in the URL:
```bash
```console
export VLLM_COMMIT=72d9c316d3f6ede485146fe5aabd4e61dbc59069 # use full commit hash from the main branch
uv pip install vllm \
--torch-backend=auto \
@ -99,7 +99,7 @@ The `uv` approach works for vLLM `v0.6.6` and later and offers an easy-to-rememb
If you only need to change Python code, you can build and install vLLM without compilation. Using `pip`'s [`--editable` flag](https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs), changes you make to the code will be reflected when you run vLLM:
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
VLLM_USE_PRECOMPILED=1 pip install --editable .
@ -118,7 +118,7 @@ This command will do the following:
If you see a wheel-not-found error when running the above command, it might be because the main-branch commit you are based on was just merged and its wheel is still being built. In this case, wait for around an hour and try again, or manually select a previous commit for the installation using the `VLLM_PRECOMPILED_WHEEL_LOCATION` environment variable.
```bash
```console
export VLLM_COMMIT=72d9c316d3f6ede485146fe5aabd4e61dbc59069 # use full commit hash from the main branch
export VLLM_PRECOMPILED_WHEEL_LOCATION=https://wheels.vllm.ai/${VLLM_COMMIT}/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl
pip install --editable .
@ -134,7 +134,7 @@ You can find more information about vLLM's wheels in [install-the-latest-code][i
If you want to modify C++ or CUDA code, you'll need to build vLLM from source. This can take several minutes:
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
pip install -e .
@ -160,7 +160,7 @@ There are scenarios where the PyTorch dependency cannot be easily installed via
To build vLLM using an existing PyTorch installation:
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
python use_existing_torch.py
@ -173,7 +173,7 @@ pip install --no-build-isolation -e .
Currently, before starting the build process, vLLM fetches cutlass code from GitHub. However, there may be scenarios where you want to use a local version of cutlass instead.
To achieve this, you can set the environment variable VLLM_CUTLASS_SRC_DIR to point to your local cutlass directory.
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
VLLM_CUTLASS_SRC_DIR=/path/to/cutlass pip install -e .
@ -184,7 +184,7 @@ VLLM_CUTLASS_SRC_DIR=/path/to/cutlass pip install -e .
To avoid your system being overloaded, you can limit the number of compilation jobs
to be run simultaneously, via the environment variable `MAX_JOBS`. For example:
```bash
```console
export MAX_JOBS=6
pip install -e .
```
@ -194,7 +194,7 @@ A side effect is a much slower build process.
Additionally, if you have trouble building vLLM, we recommend using the NVIDIA PyTorch Docker image.
```bash
```console
# Use `--ipc=host` to make sure the shared memory is large enough.
docker run \
--gpus all \
@ -205,14 +205,14 @@ docker run \
If you don't want to use docker, it is recommended to have a full installation of CUDA Toolkit. You can download and install it from [the official website](https://developer.nvidia.com/cuda-toolkit-archive). After installation, set the environment variable `CUDA_HOME` to the installation path of CUDA Toolkit, and make sure that the `nvcc` compiler is in your `PATH`, e.g.:
```bash
```console
export CUDA_HOME=/usr/local/cuda
export PATH="${CUDA_HOME}/bin:$PATH"
```
Here is a sanity check to verify that the CUDA Toolkit is correctly installed:
```bash
```console
nvcc --version # verify that nvcc is in your PATH
${CUDA_HOME}/bin/nvcc --version # verify that nvcc is in your CUDA_HOME
```
@ -223,7 +223,7 @@ vLLM can fully run only on Linux but for development purposes, you can still bui
Simply disable the `VLLM_TARGET_DEVICE` environment variable before installing:
```bash
```console
export VLLM_TARGET_DEVICE=empty
pip install -e .
```
@ -238,7 +238,7 @@ See [deployment-docker-pre-built-image][deployment-docker-pre-built-image] for i
Another way to access the latest code is to use the docker images:
```bash
```console
export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch
docker pull public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:${VLLM_COMMIT}
```

View File

@ -31,17 +31,17 @@ Currently, there are no pre-built ROCm wheels.
Alternatively, you can install PyTorch using PyTorch wheels. You can check PyTorch installation guide in PyTorch [Getting Started](https://pytorch.org/get-started/locally/). Example:
```bash
```console
# Install PyTorch
pip uninstall torch -y
pip install --no-cache-dir --pre torch --index-url https://download.pytorch.org/whl/nightly/rocm6.3
$ pip uninstall torch -y
$ pip install --no-cache-dir --pre torch --index-url https://download.pytorch.org/whl/nightly/rocm6.3
```
1. Install [Triton flash attention for ROCm](https://github.com/ROCm/triton)
Install ROCm's Triton flash attention (the default triton-mlir branch) following the instructions from [ROCm/triton](https://github.com/ROCm/triton/blob/triton-mlir/README.md)
```bash
```console
python3 -m pip install ninja cmake wheel pybind11
pip uninstall -y triton
git clone https://github.com/OpenAI/triton.git
@ -62,7 +62,7 @@ Currently, there are no pre-built ROCm wheels.
For example, for ROCm 6.3, suppose your gfx arch is `gfx90a`. To get your gfx architecture, run `rocminfo |grep gfx`.
```bash
```console
git clone https://github.com/ROCm/flash-attention.git
cd flash-attention
git checkout b7d29fb
@ -76,7 +76,7 @@ Currently, there are no pre-built ROCm wheels.
3. If you choose to build AITER yourself to use a certain branch or commit, you can build AITER using the following steps:
```bash
```console
python3 -m pip uninstall -y aiter
git clone --recursive https://github.com/ROCm/aiter.git
cd aiter
@ -90,8 +90,6 @@ Currently, there are no pre-built ROCm wheels.
4. Build vLLM. For example, vLLM on ROCM 6.3 can be built with the following steps:
??? Commands
```bash
pip install --upgrade pip
@ -148,7 +146,7 @@ If you choose to build this rocm_base image yourself, the steps are as follows.
It is important to kick off the docker build using BuildKit. Either set `DOCKER_BUILDKIT=1` as an environment variable when calling the docker build command, or set up BuildKit in the docker daemon configuration `/etc/docker/daemon.json` as follows and restart the daemon:
```json
```console
{
"features": {
"buildkit": true
@ -158,7 +156,7 @@ It is important that the user kicks off the docker build using buildkit. Either
To build vllm on ROCm 6.3 for MI200 and MI300 series, you can use the default:
```bash
```console
DOCKER_BUILDKIT=1 docker build \
-f docker/Dockerfile.rocm_base \
-t rocm/vllm-dev:base .
@ -169,7 +167,7 @@ DOCKER_BUILDKIT=1 docker build \
First, build a docker image from <gh-file:docker/Dockerfile.rocm> and launch a docker container from the image.
It is important to kick off the docker build using BuildKit. Either set `DOCKER_BUILDKIT=1` as an environment variable when calling the docker build command, or set up BuildKit in the docker daemon configuration `/etc/docker/daemon.json` as follows and restart the daemon:
```bash
```console
{
"features": {
"buildkit": true
@ -187,13 +185,13 @@ Their values can be passed in when running `docker build` with `--build-arg` opt
To build vllm on ROCm 6.3 for MI200 and MI300 series, you can use the default:
```bash
```console
DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile.rocm -t vllm-rocm .
```
To build vllm on ROCm 6.3 for Radeon RX7900 series (gfx1100), you should pick the alternative base image:
```bash
```console
DOCKER_BUILDKIT=1 docker build \
--build-arg BASE_IMAGE="rocm/vllm-dev:navi_base" \
-f docker/Dockerfile.rocm \
@ -203,9 +201,7 @@ DOCKER_BUILDKIT=1 docker build \
To run the above docker image `vllm-rocm`, use the below command:
??? Command
```bash
```console
docker run -it \
--network=host \
--group-add=video \

View File

@ -25,7 +25,7 @@ Currently, there are no pre-built XPU wheels.
- First, install the required driver and Intel OneAPI 2025.0 or later.
- Second, install the Python packages needed to build the vLLM XPU backend:
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
pip install --upgrade pip
@ -34,7 +34,7 @@ pip install -v -r requirements/xpu.txt
- Then, build and install vLLM XPU backend:
```bash
```console
VLLM_TARGET_DEVICE=xpu python setup.py install
```
@ -53,9 +53,9 @@ Currently, there are no pre-built XPU images.
# --8<-- [end:pre-built-images]
# --8<-- [start:build-image-from-source]
```bash
docker build -f docker/Dockerfile.xpu -t vllm-xpu-env --shm-size=4g .
docker run -it \
```console
$ docker build -f docker/Dockerfile.xpu -t vllm-xpu-env --shm-size=4g .
$ docker run -it \
--rm \
--network=host \
--device /dev/dri \
@ -68,7 +68,7 @@ docker run -it \
The XPU platform supports **tensor parallel** inference/serving and also supports **pipeline parallel** as a beta feature for online serving. We require Ray as the distributed runtime backend. For example, a reference execution looks like the following:
```bash
```console
python -m vllm.entrypoints.openai.api_server \
--model=facebook/opt-13b \
--dtype=bfloat16 \

View File

@ -24,7 +24,7 @@ please follow the methods outlined in the
To verify that the Intel Gaudi software was correctly installed, run:
```bash
```console
hl-smi # verify that hl-smi is in your PATH and each Gaudi accelerator is visible
apt list --installed | grep habana # verify that habanalabs-firmware-tools, habanalabs-graph, habanalabs-rdma-core, habanalabs-thunk and habanalabs-container-runtime are installed
pip list | grep habana # verify that habana-torch-plugin, habana-torch-dataloader, habana-pyhlml and habana-media-loader are installed
@ -42,7 +42,7 @@ for more details.
Use the following commands to run a Docker image:
```bash
```console
docker pull vault.habana.ai/gaudi-docker/1.18.0/ubuntu22.04/habanalabs/pytorch-installer-2.4.0:latest
docker run \
-it \
@ -65,7 +65,7 @@ Currently, there are no pre-built Intel Gaudi wheels.
To build and install vLLM from source, run:
```bash
```console
git clone https://github.com/vllm-project/vllm.git
cd vllm
pip install -r requirements/hpu.txt
@ -74,7 +74,7 @@ python setup.py develop
Currently, the latest features and performance optimizations are developed in Gaudi's [vLLM-fork](https://github.com/HabanaAI/vllm-fork) and we periodically upstream them to vLLM main repo. To install latest [HabanaAI/vLLM-fork](https://github.com/HabanaAI/vllm-fork), run the following:
```bash
```console
git clone https://github.com/HabanaAI/vllm-fork.git
cd vllm-fork
git checkout habana_main
@ -90,7 +90,7 @@ Currently, there are no pre-built Intel Gaudi images.
### Build image from source
```bash
```console
docker build -f docker/Dockerfile.hpu -t vllm-hpu-env .
docker run \
-it \
@ -200,7 +200,7 @@ INFO 08-01 21:37:59 hpu_model_runner.py:509] Generated 48 decode buckets: [(1, 1
`min` determines the lowest value of the bucket. `step` determines the interval between buckets, and `max` determines the upper bound of the bucket. Furthermore, the interval between `min` and `step` has special handling: `min` is multiplied by consecutive powers of two until `step` is reached. We call this the ramp-up phase; it handles lower batch sizes with minimal wastage, while allowing larger padding on larger batch sizes.
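As a rough illustration of this rule, here is a small Python sketch (a hypothetical helper, not the actual vLLM implementation); the two worked examples below follow the same logic:

```python
def warmup_buckets(min_value: int, step: int, max_value: int) -> list[int]:
    """Sketch of the bucket generation rule described above."""
    buckets = set()
    # Ramp-up phase: multiply `min` by consecutive powers of two until `step` is reached.
    value = min_value
    while value < step:
        buckets.add(value)
        value *= 2
    # Stable phase: multiples of `step` from `step` (or `min`, if larger) up to `max`.
    value = max(step, min_value)
    while value <= max_value:
        buckets.add(value)
        value += step
    return sorted(buckets)

print(warmup_buckets(2, 32, 64))      # [2, 4, 8, 16, 32, 64]
print(warmup_buckets(128, 128, 512))  # [128, 256, 384, 512]
```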
Example (with ramp-up):
Example (with ramp-up)
```text
min = 2, step = 32, max = 64
@ -209,7 +209,7 @@ min = 2, step = 32, max = 64
=> buckets = ramp_up + stable => (2, 4, 8, 16, 32, 64)
```
Example (without ramp-up):
Example (without ramp-up)
```text
min = 128, step = 128, max = 512
@ -232,8 +232,6 @@ As an example, if a request of 3 sequences, with max sequence length of 412 come
Warmup is an optional, but highly recommended, step that occurs before the vLLM server starts listening. It executes a forward pass for each bucket with dummy data. The goal is to pre-compile all graphs and avoid incurring any graph compilation overhead within bucket boundaries during server runtime. Each warmup step is logged during vLLM startup:
??? Logs
```text
INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][1/24] batch_size:4 seq_len:1024 free_mem:79.16 GiB
INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][2/24] batch_size:4 seq_len:896 free_mem:55.43 GiB
@ -281,8 +279,6 @@ When there's large amount of requests pending, vLLM scheduler will attempt to fi
Each described step is logged by vLLM server, as follows (negative values correspond to memory being released):
??? Logs
```text
INFO 08-02 17:37:44 hpu_model_runner.py:493] Prompt bucket config (min, step, max_warmup) bs:[1, 32, 4], seq:[128, 128, 1024]
INFO 08-02 17:37:44 hpu_model_runner.py:499] Generated 24 prompt buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024)]

View File

@ -1,6 +1,6 @@
It's recommended to use [uv](https://docs.astral.sh/uv/), a very fast Python environment manager, to create and manage Python environments. Please follow the [documentation](https://docs.astral.sh/uv/#getting-started) to install `uv`. After installing `uv`, you can create a new Python environment and install vLLM using the following commands:
```bash
```console
uv venv --python 3.12 --seed
source .venv/bin/activate
```

View File

@ -19,7 +19,7 @@ If you are using NVIDIA GPUs, you can install vLLM using [pip](https://pypi.org/
It's recommended to use [uv](https://docs.astral.sh/uv/), a very fast Python environment manager, to create and manage Python environments. Please follow the [documentation](https://docs.astral.sh/uv/#getting-started) to install `uv`. After installing `uv`, you can create a new Python environment and install vLLM using the following commands:
```bash
```console
uv venv --python 3.12 --seed
source .venv/bin/activate
uv pip install vllm --torch-backend=auto
@ -29,13 +29,13 @@ uv pip install vllm --torch-backend=auto
Another delightful way is to use `uv run` with the `--with [dependency]` option, which allows you to run commands such as `vllm serve` without creating any permanent environment:
```bash
```console
uv run --with vllm vllm --help
```
You can also use [conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) to create and manage Python environments. You can install `uv` to the conda environment through `pip` if you want to manage it within the environment.
```bash
```console
conda create -n myenv python=3.12 -y
conda activate myenv
pip install --upgrade uv
@ -110,7 +110,7 @@ By default, it starts the server at `http://localhost:8000`. You can specify the
Run the following command to start the vLLM server with the [Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) model:
```bash
```console
vllm serve Qwen/Qwen2.5-1.5B-Instruct
```
@ -124,7 +124,7 @@ vllm serve Qwen/Qwen2.5-1.5B-Instruct
This server can be queried in the same format as OpenAI API. For example, to list the models:
```bash
```console
curl http://localhost:8000/v1/models
```
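The same query can also be made from Python with the `openai` client (a minimal sketch, assuming the server above is running locally without an API key):

```python
from openai import OpenAI

# Point the client at the local vLLM server; the API key is unused by default.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

for model in client.models.list():
    print(model.id)
```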
@ -134,7 +134,7 @@ You can pass in the argument `--api-key` or environment variable `VLLM_API_KEY`
Once your server is started, you can query the model with input prompts:
```bash
```console
curl http://localhost:8000/v1/completions \
-H "Content-Type: application/json" \
-d '{
@ -147,8 +147,6 @@ curl http://localhost:8000/v1/completions \
Since this server is compatible with OpenAI API, you can use it as a drop-in replacement for any applications using OpenAI API. For example, another way to query the server is via the `openai` Python package:
??? Code
```python
from openai import OpenAI
@ -172,7 +170,7 @@ vLLM is designed to also support the OpenAI Chat Completions API. The chat inter
You can use the [create chat completion](https://platform.openai.com/docs/api-reference/chat/completions/create) endpoint to interact with the model:
```bash
```console
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
@ -186,8 +184,6 @@ curl http://localhost:8000/v1/chat/completions \
Alternatively, you can use the `openai` Python package:
??? Code
```python
from openai import OpenAI
# Set OpenAI's API key and API base to use vLLM's API server.

View File

@ -9,27 +9,27 @@ Further reading can be found in [Run:ai Model Streamer Documentation](https://gi
vLLM supports loading weights in Safetensors format using the Run:ai Model Streamer.
You first need to install the vLLM RunAI optional dependency:
```bash
```console
pip3 install vllm[runai]
```
To run it as an OpenAI-compatible server, add the `--load-format runai_streamer` flag:
```bash
```console
vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \
--load-format runai_streamer
```
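The same load format can also be used for offline inference. Here is a minimal sketch (assuming the `LLM` entry point accepts the same engine arguments as `vllm serve`):

```python
from vllm import LLM

# Load weights with the Run:ai Model Streamer instead of the default loader
# (assumes the offline LLM class forwards load_format to the engine).
llm = LLM(
    model="/home/meta-llama/Llama-3.2-3B-Instruct",
    load_format="runai_streamer",
)
print(llm.generate("Hello, my name is")[0].outputs[0].text)
```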
To run a model from an AWS S3 object store, run:
```bash
```console
vllm serve s3://core-llm/Llama-3-8b \
--load-format runai_streamer
```
To run a model from an S3-compatible object store, run:
```bash
```console
RUNAI_STREAMER_S3_USE_VIRTUAL_ADDRESSING=0 \
AWS_EC2_METADATA_DISABLED=true \
AWS_ENDPOINT_URL=https://storage.googleapis.com \
@ -44,7 +44,7 @@ You can tune parameters using `--model-loader-extra-config`:
You can tune `concurrency`, which controls the level of concurrency and the number of OS threads reading tensors from the file into the CPU buffer.
When reading from S3, it is the number of client instances the host opens to the S3 server.
```bash
```console
vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \
--load-format runai_streamer \
--model-loader-extra-config '{"concurrency":16}'
@ -53,7 +53,7 @@ vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \
You can control and limit the size of the CPU memory buffer into which tensors are read from the file.
You can read further about CPU buffer memory limiting [here](https://github.com/run-ai/runai-model-streamer/blob/master/docs/src/env-vars.md#runai_streamer_memory_limit).
```bash
```console
vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \
--load-format runai_streamer \
--model-loader-extra-config '{"memory_limit":5368709120}'
@ -66,13 +66,13 @@ vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \
vLLM also supports loading sharded models using Run:ai Model Streamer. This is particularly useful for large models that are split across multiple files. To use this feature, use the `--load-format runai_streamer_sharded` flag:
```bash
```console
vllm serve /path/to/sharded/model --load-format runai_streamer_sharded
```
The sharded loader expects model files to follow the same naming pattern as the regular sharded state loader: `model-rank-{rank}-part-{part}.safetensors`. You can customize this pattern using the `pattern` parameter in `--model-loader-extra-config`:
```bash
```console
vllm serve /path/to/sharded/model \
--load-format runai_streamer_sharded \
--model-loader-extra-config '{"pattern":"custom-model-rank-{rank}-part-{part}.safetensors"}'
@ -82,7 +82,7 @@ To create sharded model files, you can use the script provided in <gh-file:examp
The sharded loader supports all the same tunable parameters as the regular Run:ai Model Streamer, including `concurrency` and `memory_limit`. These can be configured in the same way:
```bash
```console
vllm serve /path/to/sharded/model \
--load-format runai_streamer_sharded \
--model-loader-extra-config '{"concurrency":16, "memory_limit":5368709120}'

View File

@ -85,8 +85,6 @@ and automatically applies the model's [chat template](https://huggingface.co/doc
In general, only instruction-tuned models have a chat template.
Base models may perform poorly as they are not trained to respond to chat conversations.
??? Code
```python
from vllm import LLM

View File

@ -70,10 +70,7 @@ To make your model compatible with the Transformers backend, it needs:
2. `MyAttention` must use `ALL_ATTENTION_FUNCTIONS` to call attention.
3. `MyModel` must contain `_supports_attention_backend = True`.
<details>
<summary>modeling_my_model.py</summary>
```python
```python title="modeling_my_model.py"
from transformers import PreTrainedModel
from torch import nn
@ -96,8 +93,6 @@ class MyModel(PreTrainedModel):
_supports_attention_backend = True
```
</details>
Here is what happens in the background when this model is loaded:
1. The config is loaded.
@ -108,10 +103,7 @@ That's it!
For your model to be compatible with vLLM's tensor parallel and/or pipeline parallel features, you must add `base_model_tp_plan` and/or `base_model_pp_plan` to your model's config class:
<details>
<summary>configuration_my_model.py</summary>
```python
```python title="configuration_my_model.py"
from transformers import PretrainedConfig
@ -131,8 +123,6 @@ class MyConfig(PretrainedConfig):
}
```
</details>
- `base_model_tp_plan` is a `dict` that maps fully qualified layer name patterns to tensor parallel styles (currently only `"colwise"` and `"rowwise"` are supported).
- `base_model_pp_plan` is a `dict` that maps direct child layer names to `tuple`s of `list`s of `str`s:
* You only need to do this for layers which are not present on all pipeline stages
@ -178,7 +168,7 @@ Alternatively, you can [open an issue on GitHub](https://github.com/vllm-project
If you prefer, you can use the Hugging Face CLI to [download a model](https://huggingface.co/docs/huggingface_hub/guides/cli#huggingface-cli-download) or specific files from a model repository:
```bash
```console
# Download a model
huggingface-cli download HuggingFaceH4/zephyr-7b-beta
@ -193,7 +183,7 @@ huggingface-cli download HuggingFaceH4/zephyr-7b-beta eval_results.json
Use the Hugging Face CLI to [manage models](https://huggingface.co/docs/huggingface_hub/guides/manage-cache#scan-your-cache) stored in local cache:
```bash
```console
# List cached models
huggingface-cli scan-cache
@ -208,9 +198,6 @@ huggingface-cli scan-cache --dir ~/.cache/huggingface/hub
Use the Hugging Face CLI to interactively [delete downloaded model](https://huggingface.co/docs/huggingface_hub/guides/manage-cache#clean-your-cache) from the cache:
<details>
<summary>Commands</summary>
```console
# The `delete-cache` command requires extra dependencies to work with the TUI.
# Please run `pip install huggingface_hub[cli]` to install them.
@ -237,8 +224,6 @@ Start deletion.
Done. Deleted 1 repo(s) and 0 revision(s) for a total of 438.9M.
```
</details>
#### Using a proxy
Here are some tips for loading/downloading models from Hugging Face using a proxy:
@ -407,15 +392,15 @@ Specified using `--task embed`.
| Architecture | Models | Example HF Models | [LoRA][lora-adapter] | [PP][distributed-serving] | [V1](gh-issue:8779) |
|--------------------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------------------------|----------------------|---------------------------|-----------------------|
| `BertModel` | BERT-based | `BAAI/bge-base-en-v1.5`, `Snowflake/snowflake-arctic-embed-xs`, etc. | | | |
| `Gemma2Model` | Gemma 2-based | `BAAI/bge-multilingual-gemma2`, etc. | ✅︎ | | ✅︎ |
| `Gemma2Model` | Gemma 2-based | `BAAI/bge-multilingual-gemma2`, etc. | ✅︎ | | |
| `GritLM` | GritLM | `parasail-ai/GritLM-7B-vllm`. | ✅︎ | ✅︎ | |
| `GteModel` | Arctic-Embed-2.0-M | `Snowflake/snowflake-arctic-embed-m-v2.0`. | | | |
| `GteNewModel` | mGTE-TRM (see note) | `Alibaba-NLP/gte-multilingual-base`, etc. | | | |
| `ModernBertModel` | ModernBERT-based | `Alibaba-NLP/gte-modernbert-base`, etc. | | | |
| `NomicBertModel` | Nomic BERT | `nomic-ai/nomic-embed-text-v1`, `nomic-ai/nomic-embed-text-v2-moe`, `Snowflake/snowflake-arctic-embed-m-long`, etc. | | | |
| `LlamaModel`, `LlamaForCausalLM`, `MistralModel`, etc. | Llama-based | `intfloat/e5-mistral-7b-instruct`, etc. | ✅︎ | ✅︎ | ✅︎ |
| `Qwen2Model`, `Qwen2ForCausalLM` | Qwen2-based | `ssmits/Qwen2-7B-Instruct-embed-base` (see note), `Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. | ✅︎ | ✅︎ | ✅︎ |
| `Qwen3Model`, `Qwen3ForCausalLM` | Qwen3-based | `Qwen/Qwen3-Embedding-0.6B`, etc. | ✅︎ | ✅︎ | ✅︎ |
| `LlamaModel`, `LlamaForCausalLM`, `MistralModel`, etc. | Llama-based | `intfloat/e5-mistral-7b-instruct`, etc. | ✅︎ | ✅︎ | |
| `Qwen2Model`, `Qwen2ForCausalLM` | Qwen2-based | `ssmits/Qwen2-7B-Instruct-embed-base` (see note), `Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. | ✅︎ | ✅︎ | |
| `Qwen3Model`, `Qwen3ForCausalLM` | Qwen3-based | `Qwen/Qwen3-Embedding-0.6B`, etc. | ✅︎ | ✅︎ | |
| `RobertaModel`, `RobertaForMaskedLM` | RoBERTa-based | `sentence-transformers/all-roberta-large-v1`, etc. | | | |
!!! note
@ -442,10 +427,9 @@ Specified using `--task reward`.
| Architecture | Models | Example HF Models | [LoRA][lora-adapter] | [PP][distributed-serving] | [V1](gh-issue:8779) |
|---------------------------|-----------------|------------------------------------------------------------------------|------------------------|-----------------------------|-----------------------|
| `InternLM2ForRewardModel` | InternLM2-based | `internlm/internlm2-1_8b-reward`, `internlm/internlm2-7b-reward`, etc. | ✅︎ | ✅︎ | ✅︎ |
| `LlamaForCausalLM` | Llama-based | `peiyi9979/math-shepherd-mistral-7b-prm`, etc. | ✅︎ | ✅︎ | ✅︎ |
| `Qwen2ForRewardModel` | Qwen2-based | `Qwen/Qwen2.5-Math-RM-72B`, etc. | ✅︎ | ✅︎ | ✅︎ |
| `Qwen2ForProcessRewardModel` | Qwen2-based | `Qwen/Qwen2.5-Math-PRM-7B`, etc. | ✅︎ | ✅︎ | ✅︎ |
| `InternLM2ForRewardModel` | InternLM2-based | `internlm/internlm2-1_8b-reward`, `internlm/internlm2-7b-reward`, etc. | ✅︎ | ✅︎ | |
| `LlamaForCausalLM` | Llama-based | `peiyi9979/math-shepherd-mistral-7b-prm`, etc. | ✅︎ | ✅︎ | |
| `Qwen2ForRewardModel` | Qwen2-based | `Qwen/Qwen2.5-Math-RM-72B`, etc. | ✅︎ | ✅︎ | |
If your model is not in the above list, we will try to automatically convert the model using
[as_reward_model][vllm.model_executor.models.adapters.as_reward_model]. By default, we return the hidden states of each token directly.
@ -461,7 +445,7 @@ Specified using `--task classify`.
| Architecture | Models | Example HF Models | [LoRA][lora-adapter] | [PP][distributed-serving] | [V1](gh-issue:8779) |
|----------------------------------|----------|----------------------------------------|------------------------|-----------------------------|-----------------------|
| `JambaForSequenceClassification` | Jamba | `ai21labs/Jamba-tiny-reward-dev`, etc. | ✅︎ | ✅︎ | |
| `GPT2ForSequenceClassification` | GPT2 | `nie3e/sentiment-polish-gpt2-small` | | | ✅︎ |
If your model is not in the above list, we will try to automatically convert the model using
[as_classification_model][vllm.model_executor.models.adapters.as_classification_model]. By default, the class probabilities are extracted from the softmaxed hidden state corresponding to the last token.
@ -472,7 +456,7 @@ Specified using `--task score`.
| Architecture | Models | Example HF Models | [V1](gh-issue:8779) |
|---------------------------------------|-------------------|--------------------------------------------------------------------------------------|-----------------------|
| `BertForSequenceClassification` | BERT-based | `cross-encoder/ms-marco-MiniLM-L-6-v2`, etc. | |
| `Qwen3ForSequenceClassification` | Qwen3-based | `tomaarsen/Qwen3-Reranker-0.6B-seq-cls`, `Qwen/Qwen3-Reranker-0.6B` (see note), etc. | ✅︎ |
| `Qwen3ForSequenceClassification` | Qwen3-based | `tomaarsen/Qwen3-Reranker-0.6B-seq-cls`, `Qwen/Qwen3-Reranker-0.6B` (see note), etc. | |
| `RobertaForSequenceClassification` | RoBERTa-based | `cross-encoder/quora-roberta-base`, etc. | |
| `XLMRobertaForSequenceClassification` | XLM-RoBERTa-based | `BAAI/bge-reranker-v2-m3`, etc. | |
@ -578,7 +562,6 @@ Specified using `--task generate`.
| `SkyworkR1VChatModel` | Skywork-R1V-38B | T + I | `Skywork/Skywork-R1V-38B` | | ✅︎ | ✅︎ |
| `SmolVLMForConditionalGeneration` | SmolVLM2 | T + I | `SmolVLM2-2.2B-Instruct` | ✅︎ | | ✅︎ |
| `TarsierForConditionalGeneration` | Tarsier | T + I<sup>E+</sup> | `omni-search/Tarsier-7b`,`omni-search/Tarsier-34b` | | ✅︎ | ✅︎ |
| `Tarsier2ForConditionalGeneration`<sup>^</sup> | Tarsier2 | T + I<sup>E+</sup> + V<sup>E+</sup> | `omni-research/Tarsier2-Recap-7b`,`omni-research/Tarsier2-7b-0115` | | ✅︎ | ✅︎ |
<sup>^</sup> You need to set the architecture name via `--hf-overrides` to match the one in vLLM.
&nbsp;&nbsp;&nbsp;&nbsp;• For example, to use DeepSeek-VL2 series models:
@ -617,8 +600,6 @@ Specified using `--task generate`.
For the best results, we recommend using the following dependency versions (tested on A10 and L40):
??? Dependency versions
```text
# Core vLLM-compatible dependencies with Molmo accuracy setup (tested on L40)
torch==2.5.1

View File

@ -34,14 +34,14 @@ output = llm.generate("San Francisco is a")
To run multi-GPU serving, pass in the `--tensor-parallel-size` argument when starting the server. For example, to run the API server on 4 GPUs:
```bash
```console
vllm serve facebook/opt-13b \
--tensor-parallel-size 4
```
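For reference, the offline API accepts the equivalent `tensor_parallel_size` argument; a minimal sketch (assuming 4 visible GPUs):

```python
from vllm import LLM

# Shard the model across 4 GPUs with tensor parallelism.
llm = LLM(model="facebook/opt-13b", tensor_parallel_size=4)

output = llm.generate("San Francisco is a")
print(output[0].outputs[0].text)
```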
You can additionally specify `--pipeline-parallel-size` to enable pipeline parallelism. For example, to run the API server on 8 GPUs with pipeline parallelism and tensor parallelism:
```bash
```console
vllm serve gpt2 \
--tensor-parallel-size 4 \
--pipeline-parallel-size 2
@ -55,7 +55,7 @@ The first step, is to start containers and organize them into a cluster. We have
Pick a node as the head node, and run the following command:
```bash
```console
bash run_cluster.sh \
vllm/vllm-openai \
ip_of_head_node \
@ -66,7 +66,7 @@ bash run_cluster.sh \
On the rest of the worker nodes, run the following command:
```bash
```console
bash run_cluster.sh \
vllm/vllm-openai \
ip_of_head_node \
@ -87,7 +87,7 @@ Then, on any node, use `docker exec -it node /bin/bash` to enter the container,
After that, on any node, use `docker exec -it node /bin/bash` to enter the container again. **In the container**, you can use vLLM as usual, just as if you had all the GPUs on one node. vLLM can leverage the GPU resources of all nodes in the Ray cluster, so only run the `vllm` command on this node, not on the other nodes. The common practice is to set the tensor parallel size to the number of GPUs in each node, and the pipeline parallel size to the number of nodes. For example, if you have 16 GPUs in 2 nodes (8 GPUs per node), you can set the tensor parallel size to 8 and the pipeline parallel size to 2:
```bash
```console
vllm serve /path/to/the/model/in/the/container \
--tensor-parallel-size 8 \
--pipeline-parallel-size 2
@ -95,7 +95,7 @@ After that, on any node, use `docker exec -it node /bin/bash` to enter the conta
You can also use tensor parallelism without pipeline parallelism; just set the tensor parallel size to the number of GPUs in the cluster. For example, if you have 16 GPUs in 2 nodes (8 GPUs per node), you can set the tensor parallel size to 16:
```bash
```console
vllm serve /path/to/the/model/in/the/container \
--tensor-parallel-size 16
```

View File

@ -7,14 +7,12 @@ vLLM is also available via [LangChain](https://github.com/langchain-ai/langchain
To install LangChain, run
```bash
```console
pip install langchain langchain_community -q
```
To run inference on a single or multiple GPUs, use the `VLLM` class from `langchain`.
??? Code
```python
from langchain_community.llms import VLLM

View File

@ -7,7 +7,7 @@ vLLM is also available via [LlamaIndex](https://github.com/run-llama/llama_index
To install LlamaIndex, run
```bash
```console
pip install llama-index-llms-vllm -q
```
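A minimal usage sketch follows (the class and argument names are assumed from the `llama-index-llms-vllm` package; adjust to your installed version):

```python
from llama_index.llms.vllm import Vllm

# Run local offline inference through LlamaIndex's vLLM wrapper
# (class and argument names are assumptions, not verified against every release).
llm = Vllm(model="Qwen/Qwen2.5-1.5B-Instruct")
print(llm.complete("San Francisco is a"))
```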

View File

@ -15,8 +15,6 @@ vllm serve NousResearch/Meta-Llama-3-8B-Instruct \
To call the server, in your preferred text editor, create a script that uses an HTTP client. Include any messages that you want to send to the model. Then run that script. Below is an example script using the [official OpenAI Python client](https://github.com/openai/openai-python).
??? Code
```python
from openai import OpenAI
client = OpenAI(
@ -149,8 +147,6 @@ with `--enable-request-id-headers`.
> rather than within the vLLM layer for this reason.
> See [this PR](https://github.com/vllm-project/vllm/pull/11529) for more details.
??? Code
```python
completion = client.chat.completions.create(
model="NousResearch/Meta-Llama-3-8B-Instruct",
@ -188,16 +184,12 @@ Code example: <gh-file:examples/online_serving/openai_completion_client.py>
The following [sampling parameters][sampling-params] are supported.
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:completion-sampling-params"
```
The following extra parameters are supported:
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:completion-extra-params"
```
@ -220,16 +212,12 @@ Code example: <gh-file:examples/online_serving/openai_chat_completion_client.py>
The following [sampling parameters][sampling-params] are supported.
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-sampling-params"
```
The following extra parameters are supported:
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-extra-params"
```
@ -271,8 +259,6 @@ and passing a list of `messages` in the request. Refer to the examples below for
Since the request schema is not defined by OpenAI client, we post a request to the server using the lower-level `requests` library:
??? Code
```python
import requests
@ -330,16 +316,12 @@ The following [pooling parameters][pooling-params] are supported.
The following extra parameters are supported by default:
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:embedding-extra-params"
```
For chat-like input (i.e. if `messages` is passed), these extra parameters are supported instead:
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:chat-embedding-extra-params"
```
@ -361,16 +343,12 @@ Code example: <gh-file:examples/online_serving/openai_transcription_client.py>
The following [sampling parameters][sampling-params] are supported.
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:transcription-sampling-params"
```
The following extra parameters are supported:
??? Code
```python
--8<-- "vllm/entrypoints/openai/protocol.py:transcription-extra-params"
```
@ -409,6 +387,8 @@ Code example: <gh-file:examples/online_serving/openai_classification_client.py>
You can classify multiple texts by passing an array of strings:
Request:
```bash
curl -v "http://127.0.0.1:8000/classify" \
-H "Content-Type: application/json" \
@ -421,7 +401,7 @@ curl -v "http://127.0.0.1:8000/classify" \
}'
```
??? Response
Response:
```bash
{
@ -460,6 +440,8 @@ curl -v "http://127.0.0.1:8000/classify" \
You can also pass a string directly to the `input` field:
Request:
```bash
curl -v "http://127.0.0.1:8000/classify" \
-H "Content-Type: application/json" \
@ -469,7 +451,7 @@ curl -v "http://127.0.0.1:8000/classify" \
}'
```
??? Response
Response:
```bash
{
@ -526,6 +508,8 @@ Code example: <gh-file:examples/online_serving/openai_cross_encoder_score.py>
You can pass a string to both `text_1` and `text_2`, forming a single sentence pair.
Request:
```bash
curl -X 'POST' \
'http://127.0.0.1:8000/score' \
@ -539,7 +523,7 @@ curl -X 'POST' \
}'
```
??? Response
Response:
```bash
{
@ -564,7 +548,7 @@ You can pass a string to `text_1` and a list to `text_2`, forming multiple sente
where each pair is built from `text_1` and a string in `text_2`.
The total number of pairs is `len(text_2)`.
??? Request
Request:
```bash
curl -X 'POST' \
@ -581,7 +565,7 @@ The total number of pairs is `len(text_2)`.
}'
```
??? Response
Response:
```bash
{
@ -609,7 +593,7 @@ You can pass a list to both `text_1` and `text_2`, forming multiple sentence pai
where each pair is built from a string in `text_1` and the corresponding string in `text_2` (similar to `zip()`).
The total number of pairs is `len(text_2)`.
??? Request
Request:
```bash
curl -X 'POST' \
@ -630,7 +614,7 @@ The total number of pairs is `len(text_2)`.
}'
```
??? Response
Response:
```bash
{
@ -691,7 +675,7 @@ Code example: <gh-file:examples/online_serving/jinaai_rerank_client.py>
Note that the `top_n` request parameter is optional and will default to the length of the `documents` field.
Result documents will be sorted by relevance, and the `index` property can be used to determine original order.
??? Request
Request:
```bash
curl -X 'POST' \
@ -709,7 +693,7 @@ Result documents will be sorted by relevance, and the `index` property can be us
}'
```
??? Response
Response:
```bash
{

View File

@ -6,14 +6,12 @@ OpenAI compatible API server.
You can start the server using Python, or using [Docker][deployment-docker]:
```bash
```console
vllm serve unsloth/Llama-3.2-1B-Instruct
```
Then query the endpoint to get the latest metrics from the server:
??? Output
```console
$ curl http://0.0.0.0:8000/metrics
@ -33,8 +31,6 @@ Then query the endpoint to get the latest metrics from the server:
The following metrics are exposed:
??? Code
```python
--8<-- "vllm/engine/metrics.py:metrics-definitions"
```

View File

@ -60,8 +60,6 @@ To identify the particular CUDA operation that causes the error, you can add `--
If GPU/CPU communication cannot be established, you can use the following Python script and follow the instructions below to confirm whether the GPU/CPU communication is working correctly.
??? Code
```python
# Test PyTorch NCCL
import torch
@ -127,17 +125,14 @@ If GPU/CPU communication cannot be established, you can use the following Python
If you are testing with a single node, adjust `--nproc-per-node` to the number of GPUs you want to use:
```bash
```console
NCCL_DEBUG=TRACE torchrun --nproc-per-node=<number-of-GPUs> test.py
```
If you are testing with multiple nodes, adjust `--nproc-per-node` and `--nnodes` according to your setup and set `MASTER_ADDR` to the correct IP address of the master node, reachable from all nodes. Then, run:
```bash
NCCL_DEBUG=TRACE torchrun --nnodes 2 \
--nproc-per-node=2 \
--rdzv_backend=c10d \
--rdzv_endpoint=$MASTER_ADDR test.py
```console
NCCL_DEBUG=TRACE torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=$MASTER_ADDR test.py
```
If the script runs successfully, you should see the message `sanity check is successful!`.
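For reference, a condensed version of such a sanity check might look like the following sketch (not the full test script referenced above):

```python
# Save as test.py and launch with the torchrun commands shown above.
import torch
import torch.distributed as dist

dist.init_process_group(backend="nccl")
local_rank = dist.get_rank() % torch.cuda.device_count()
torch.cuda.set_device(local_rank)

# All-reduce a tensor of ones; the result should equal the world size on every rank.
data = torch.ones(1, device=f"cuda:{local_rank}")
dist.all_reduce(data, op=dist.ReduceOp.SUM)
assert data.item() == dist.get_world_size()

if dist.get_rank() == 0:
    print("sanity check is successful!")
dist.destroy_process_group()
```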
@ -170,8 +165,6 @@ WARNING 12-11 14:50:37 multiproc_worker_utils.py:281] CUDA was previously
or an error from Python that looks like this:
??? Logs
```console
RuntimeError:
An attempt has been made to start a new process before the
@ -214,8 +207,6 @@ if __name__ == '__main__':
vLLM heavily depends on `torch.compile` to optimize the model for better performance, which introduces the dependency on the `torch.compile` functionality and the `triton` library. By default, we use `torch.compile` to [optimize some functions](https://github.com/vllm-project/vllm/pull/10406) in the model. Before running vLLM, you can check if `torch.compile` is working as expected by running the following script:
??? Code
```python
import torch

View File

@ -10,8 +10,6 @@ The list of data collected by the latest version of vLLM can be found here: <gh-
Here is an example as of v0.4.0:
??? Output
```json
{
"uuid": "fbe880e9-084d-4cab-a395-8984c50f1109",

View File

@ -45,18 +45,6 @@ For each item, our progress towards V1 support falls into one of the following s
- **🟠 Delayed**: Temporarily dropped in V1 but planned to be re-introduced later.
- **🔴 Deprecated**: Not planned for V1 unless there is strong demand.
!!! note
vLLM V1's unified scheduler treats both prompt and output tokens the same
way by using a simple dictionary (e.g., `{request_id: num_tokens}`) to dynamically
allocate a fixed token budget per request, enabling features like chunked prefills,
prefix caching, and speculative decoding without a strict separation between prefill
and decode phases.
The V1 scheduler supports multiple scheduling policies, including First-Come,
First-Served (FCFS) and priority-based scheduling (where requests are processed
based on assigned priority, with FCFS as a tie-breaker), configurable via the
`--scheduling-policy` argument.
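As a purely illustrative toy example of the token-budget idea above (not vLLM's actual scheduler code):

```python
# Toy sketch: allocate a fixed token budget across requests in FCFS order.
token_budget = 8192
requests = {"req-0": 5000, "req-1": 3000, "req-2": 4000}  # request_id -> tokens still needed

scheduled: dict[str, int] = {}
remaining = token_budget
for request_id, num_tokens in requests.items():
    if remaining == 0:
        break
    scheduled[request_id] = min(num_tokens, remaining)  # chunk the request if needed
    remaining -= scheduled[request_id]

print(scheduled)  # {'req-0': 5000, 'req-1': 3000, 'req-2': 192}
```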
### Hardware
| Hardware | Status |
@ -82,7 +70,7 @@ based on assigned priority, with FCFS as a tie-breaker), configurable via the
|-----------------------------|------------------------------------------------------------------------------------|
| **Decoder-only Models** | <nobr>🚀 Optimized</nobr> |
| **Encoder-Decoder Models** | <nobr>🟠 Delayed</nobr> |
| **Embedding Models** | <nobr>🟢 Functional</nobr> |
| **Embedding Models** | <nobr>🚧 WIP ([PR #16188](https://github.com/vllm-project/vllm/pull/16188))</nobr> |
| **Mamba Models** | <nobr>🚧 WIP ([PR #19327](https://github.com/vllm-project/vllm/pull/19327))</nobr> |
| **Multimodal Models** | <nobr>🟢 Functional</nobr> |
@ -92,11 +80,11 @@ vLLM V1 currently excludes model architectures with the `SupportsV0Only` protoco
This corresponds to the V1 column in our [list of supported models][supported-models].
See below for the status of models that are not yet supported or have more features planned in V1.
See below for the status of models that are not yet supported in V1.
#### Embedding Models
The initial basic support is now functional.
The initial support will be provided by [PR #16188](https://github.com/vllm-project/vllm/pull/16188).
Later, we will consider using [hidden states processor](https://github.com/vllm-project/vllm/issues/12249),
which is based on [global logits processor](https://github.com/vllm-project/vllm/pull/13360)

View File

@ -12,10 +12,7 @@ def parse_args():
parser = EngineArgs.add_cli_args(parser)
# Set example specific arguments
parser.set_defaults(
model="intfloat/e5-mistral-7b-instruct",
task="embed",
enforce_eager=True,
max_model_len=1024,
model="intfloat/e5-mistral-7b-instruct", task="embed", enforce_eager=True
)
return parser.parse_args()

View File

@ -29,14 +29,14 @@ We currently support `/v1/chat/completions`, `/v1/embeddings`, and `/v1/score` e
To follow along with this example, you can download the example batch, or create your own batch file in your working directory.
```bash
```console
wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl
```
Once you've created your batch file, it should look like this:
```bash
cat offline_inference/openai_batch/openai_example_batch.jsonl
```console
$ cat offline_inference/openai_batch/openai_example_batch.jsonl
{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}}
```
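If you prefer to generate such a batch file programmatically, here is a small sketch (the file name and prompts are just examples):

```python
import json

# Write a two-request batch file in the format shown above.
requests = [
    {
        "custom_id": f"request-{i}",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "meta-llama/Meta-Llama-3-8B-Instruct",
            "messages": [{"role": "user", "content": prompt}],
            "max_completion_tokens": 1000,
        },
    }
    for i, prompt in enumerate(["Hello world!", "What is 2 + 2?"], start=1)
]

with open("openai_example_batch.jsonl", "w") as f:
    for request in requests:
        f.write(json.dumps(request) + "\n")
```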
@ -47,7 +47,7 @@ The batch running tool is designed to be used from the command line.
You can run the batch with the following command, which will write its results to a file called `results.jsonl`
```bash
```console
python -m vllm.entrypoints.openai.run_batch \
-i offline_inference/openai_batch/openai_example_batch.jsonl \
-o results.jsonl \
@ -56,7 +56,7 @@ python -m vllm.entrypoints.openai.run_batch \
or use command-line:
```bash
```console
vllm run-batch \
-i offline_inference/openai_batch/openai_example_batch.jsonl \
-o results.jsonl \
@ -67,8 +67,8 @@ vllm run-batch \
You should now have your results at `results.jsonl`. You can check your results by running `cat results.jsonl`
```bash
cat results.jsonl
```console
$ cat results.jsonl
{"id":"vllm-383d1c59835645aeb2e07d004d62a826","custom_id":"request-1","response":{"id":"cmpl-61c020e54b964d5a98fa7527bfcdd378","object":"chat.completion","created":1715633336,"model":"meta-llama/Meta-Llama-3-8B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"Hello! It's great to meet you! I'm here to help with any questions or tasks you may have. What's on your mind today?"},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":25,"total_tokens":56,"completion_tokens":31}},"error":null}
{"id":"vllm-42e3d09b14b04568afa3f1797751a267","custom_id":"request-2","response":{"id":"cmpl-f44d049f6b3a42d4b2d7850bb1e31bcc","object":"chat.completion","created":1715633336,"model":"meta-llama/Meta-Llama-3-8B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"*silence*"},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":27,"total_tokens":32,"completion_tokens":5}},"error":null}
```
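To post-process the results from Python, a small sketch that prints each completion (field names follow the output shown above):

```python
import json

# Print the custom_id and generated message for each result line.
with open("results.jsonl") as f:
    for line in f:
        result = json.loads(line)
        if result["error"] is not None:
            print(result["custom_id"], "failed:", result["error"])
            continue
        content = result["response"]["choices"][0]["message"]["content"]
        print(result["custom_id"], "->", content)
```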
@ -79,7 +79,7 @@ The batch runner supports remote input and output urls that are accessible via h
For example, to run against our example input file located at `https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl`, you can run
```bash
```console
python -m vllm.entrypoints.openai.run_batch \
-i https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl \
-o results.jsonl \
@ -88,7 +88,7 @@ python -m vllm.entrypoints.openai.run_batch \
or use command-line:
```bash
```console
vllm run-batch \
-i https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl \
-o results.jsonl \
@ -112,21 +112,21 @@ To integrate with cloud blob storage, we recommend using presigned urls.
To follow along with this example, you can download the example batch, or create your own batch file in your working directory.
```bash
```console
wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl
```
Once you've created your batch file, it should look like this:
```bash
cat offline_inference/openai_batch/openai_example_batch.jsonl
```console
$ cat offline_inference/openai_batch/openai_example_batch.jsonl
{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}}
```
Now upload your batch file to your S3 bucket.
```bash
```console
aws s3 cp offline_inference/openai_batch/openai_example_batch.jsonl s3://MY_BUCKET/MY_INPUT_FILE.jsonl
```
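The presigned URLs used below can be generated, for example, with `boto3` (a sketch; bucket and object names are placeholders):

```python
import boto3

s3 = boto3.client("s3")

# Presigned GET URL for the input file and presigned PUT URL for the output file.
input_url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "MY_BUCKET", "Key": "MY_INPUT_FILE.jsonl"},
    ExpiresIn=3600,
)
output_url = s3.generate_presigned_url(
    "put_object",
    Params={"Bucket": "MY_BUCKET", "Key": "MY_OUTPUT_FILE.jsonl"},
    ExpiresIn=3600,
)
print(f"input_url='{input_url}'")
print(f"output_url='{output_url}'")
```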
@ -181,7 +181,7 @@ output_url='https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_OUTPUT_FILE.jsonl?AW
You can now run the batch runner, using the URLs generated in the previous section.
```bash
```console
python -m vllm.entrypoints.openai.run_batch \
-i "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_INPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \
-o "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_OUTPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \
@ -190,7 +190,7 @@ python -m vllm.entrypoints.openai.run_batch \
or use command-line:
```bash
```console
vllm run-batch \
-i "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_INPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \
-o "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_OUTPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \
@ -201,7 +201,7 @@ vllm run-batch \
Your results are now on S3. You can view them in your terminal by running
```bash
```console
aws s3 cp s3://MY_BUCKET/MY_OUTPUT_FILE.jsonl -
```
@ -230,8 +230,8 @@ You can run the batch using the same command as in earlier examples.
You can check your results by running `cat results.jsonl`
```bash
cat results.jsonl
```console
$ cat results.jsonl
{"id":"vllm-db0f71f7dec244e6bce530e0b4ef908b","custom_id":"request-1","response":{"status_code":200,"request_id":"vllm-batch-3580bf4d4ae54d52b67eee266a6eab20","body":{"id":"embd-33ac2efa7996430184461f2e38529746","object":"list","created":444647,"model":"intfloat/e5-mistral-7b-instruct","data":[{"index":0,"object":"embedding","embedding":[0.016204833984375,0.0092010498046875,0.0018358230590820312,-0.0028228759765625,0.001422882080078125,-0.0031147003173828125,...]}],"usage":{"prompt_tokens":8,"total_tokens":8,"completion_tokens":0}}},"error":null}
...
```
@ -261,8 +261,8 @@ You can run the batch using the same command as in earlier examples.
You can check your results by running `cat results.jsonl`
```bash
cat results.jsonl
```console
$ cat results.jsonl
{"id":"vllm-f87c5c4539184f618e555744a2965987","custom_id":"request-1","response":{"status_code":200,"request_id":"vllm-batch-806ab64512e44071b37d3f7ccd291413","body":{"id":"score-4ee45236897b4d29907d49b01298cdb1","object":"list","created":1737847944,"model":"BAAI/bge-reranker-v2-m3","data":[{"index":0,"object":"score","score":0.0010900497436523438},{"index":1,"object":"score","score":1.0}],"usage":{"prompt_tokens":37,"total_tokens":37,"completion_tokens":0,"prompt_tokens_details":null}}},"error":null}
{"id":"vllm-41990c51a26d4fac8419077f12871099","custom_id":"request-2","response":{"status_code":200,"request_id":"vllm-batch-73ce66379026482699f81974e14e1e99","body":{"id":"score-13f2ffe6ba40460fbf9f7f00ad667d75","object":"list","created":1737847944,"model":"BAAI/bge-reranker-v2-m3","data":[{"index":0,"object":"score","score":0.001094818115234375},{"index":1,"object":"score","score":1.0}],"usage":{"prompt_tokens":37,"total_tokens":37,"completion_tokens":0,"prompt_tokens_details":null}}},"error":null}
```

View File

@ -22,10 +22,7 @@ model_name = "Qwen/Qwen3-Reranker-0.6B"
# If you want to load the official original version, the init parameters are
# as follows.
def get_model() -> LLM:
"""Initializes and returns the LLM model for Qwen3-Reranker."""
return LLM(
model = LLM(
model=model_name,
task="score",
hf_overrides={
@ -35,7 +32,6 @@ def get_model() -> LLM:
},
)
# Why do we need hf_overrides for the official original version:
# vllm converts it to Qwen3ForSequenceClassification when loaded for
# better performance.
@ -55,8 +51,7 @@ suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
query_template = "{prefix}<Instruct>: {instruction}\n<Query>: {query}\n"
document_template = "<Document>: {doc}{suffix}"
def main() -> None:
if __name__ == "__main__":
instruction = (
"Given a web search query, retrieve relevant passages that answer the query"
)
@ -77,13 +72,6 @@ def main() -> None:
]
documents = [document_template.format(doc=doc, suffix=suffix) for doc in documents]
model = get_model()
outputs = model.score(queries, documents)
print("-" * 30)
print([output.outputs.score for output in outputs])
print("-" * 30)
if __name__ == "__main__":
main()

View File

@ -1040,37 +1040,6 @@ def run_qwen2_5_omni(questions: list[str], modality: str):
)
def run_tarsier2(questions: list[str], modality: str) -> ModelRequestData:
model_name = "omni-research/Tarsier2-Recap-7b"
engine_args = EngineArgs(
model=model_name,
max_model_len=4096,
hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]},
limit_mm_per_prompt={modality: 1},
)
if modality == "image":
placeholder = "<|image_pad|>"
elif modality == "video":
placeholder = "<|video_pad|>"
prompts = [
(
"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
f"<|im_start|>user\n<|vision_start|>{placeholder}<|vision_end|>"
f"{question}<|im_end|>\n"
"<|im_start|>assistant\n"
)
for question in questions
]
return ModelRequestData(
engine_args=engine_args,
prompts=prompts,
)
# SkyworkR1V
def run_skyworkr1v(questions: list[str], modality: str) -> ModelRequestData:
assert modality == "image"
@ -1143,7 +1112,6 @@ model_example_map = {
"skywork_chat": run_skyworkr1v,
"smolvlm": run_smolvlm,
"tarsier": run_tarsier,
"tarsier2": run_tarsier2,
}

View File

@ -94,7 +94,6 @@ def run_vlm2vec(query: Query) -> ModelRequestData:
engine_args = EngineArgs(
model="TIGER-Lab/VLM2Vec-Full",
task="embed",
max_model_len=4096,
trust_remote_code=True,
mm_processor_kwargs={"num_crops": 4},
limit_mm_per_prompt={"image": 1},

View File

@ -828,32 +828,6 @@ def load_tarsier(question: str, image_urls: list[str]) -> ModelRequestData:
)
def load_tarsier2(question: str, image_urls: list[str]) -> ModelRequestData:
model_name = "omni-research/Tarsier2-Recap-7b"
engine_args = EngineArgs(
model=model_name,
trust_remote_code=True,
max_model_len=32768,
limit_mm_per_prompt={"image": len(image_urls)},
hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]},
)
prompt = (
"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
f"<|im_start|>user\n<|vision_start|>{'<|image_pad|>' * len(image_urls)}"
f"<|vision_end|>{question}<|im_end|>\n"
"<|im_start|>assistant\n"
)
image_data = [fetch_image(url) for url in image_urls]
return ModelRequestData(
engine_args=engine_args,
prompt=prompt,
image_data=image_data,
)
model_example_map = {
"aria": load_aria,
"aya_vision": load_aya_vision,
@ -879,7 +853,6 @@ model_example_map = {
"qwen2_5_vl": load_qwen2_5_vl,
"smolvlm": load_smolvlm,
"tarsier": load_tarsier,
"tarsier2": load_tarsier2,
}

View File

@ -1,244 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# ruff: noqa: E501
"""
Set up this example by starting a vLLM OpenAI-compatible server with tool call
options enabled for xLAM-2 models:
vllm serve --model Salesforce/Llama-xLAM-2-8b-fc-r --enable-auto-tool-choice --tool-call-parser xlam
OR
vllm serve --model Salesforce/xLAM-2-3b-fc-r --enable-auto-tool-choice --tool-call-parser xlam
"""
import json
import time
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "empty"
openai_api_base = "http://localhost:8000/v1"
# Define tool functions
def get_weather(location: str, unit: str):
return f"Weather in {location} is 22 degrees {unit}."
def calculate_expression(expression: str):
try:
result = eval(expression)
return f"The result of {expression} is {result}"
except Exception as e:
return f"Could not calculate {expression}: {e}"
def translate_text(text: str, target_language: str):
return f"Translation of '{text}' to {target_language}: [translated content]"
# Define tools
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and state, e.g., 'San Francisco, CA'",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location", "unit"],
},
},
},
{
"type": "function",
"function": {
"name": "calculate_expression",
"description": "Calculate a mathematical expression",
"parameters": {
"type": "object",
"properties": {
"expression": {
"type": "string",
"description": "Mathematical expression to evaluate, needs to be a valid python expression",
}
},
"required": ["expression"],
},
},
},
{
"type": "function",
"function": {
"name": "translate_text",
"description": "Translate text to another language",
"parameters": {
"type": "object",
"properties": {
"text": {"type": "string", "description": "Text to translate"},
"target_language": {
"type": "string",
"description": "Target language for translation",
},
},
"required": ["text", "target_language"],
},
},
},
]
# Map of function names to implementations
tool_functions = {
"get_weather": get_weather,
"calculate_expression": calculate_expression,
"translate_text": translate_text,
}
def process_response(response, tool_functions, original_query):
"""Process a non-streaming response with possible tool calls"""
print("\n--- Response Output ---")
# Check if the response has content
if response.choices[0].message.content:
print(f"Content: {response.choices[0].message.content}")
# Check if the response has tool calls
if response.choices[0].message.tool_calls:
print("--------------------------------")
print(f"Tool calls: {response.choices[0].message.tool_calls}")
print("--------------------------------")
# Collect all tool calls and results before making follow-up request
tool_results = []
assistant_message = {"role": "assistant"}
if response.choices[0].message.content:
assistant_message["content"] = response.choices[0].message.content
assistant_tool_calls = []
# Process each tool call
for tool_call in response.choices[0].message.tool_calls:
function_name = tool_call.function.name
function_args = tool_call.function.arguments
function_id = tool_call.id
print(f"Function called: {function_name}")
print(f"Arguments: {function_args}")
print(f"Function ID: {function_id}")
# Execute the function
try:
# Parse the JSON arguments
args = json.loads(function_args)
# Call the function with the arguments
function_result = tool_functions[function_name](**args)
print(f"\n--- Function Result ---\n{function_result}\n")
# Add tool call to assistant message
assistant_tool_calls.append(
{
"id": function_id,
"type": "function",
"function": {"name": function_name, "arguments": function_args},
}
)
# Add tool result to tool_results
tool_results.append(
{
"role": "tool",
"tool_call_id": function_id,
"content": function_result,
}
)
except Exception as e:
print(f"Error executing function: {e}")
# Add tool_calls to assistant message
assistant_message["tool_calls"] = assistant_tool_calls
# Create a follow-up message with all function results
follow_up_messages = [
{"role": "user", "content": original_query},
assistant_message,
]
# Add all tool results to the messages
follow_up_messages.extend(tool_results)
# Get completion with all tool results in a single follow-up
follow_up_response = client.chat.completions.create(
model=client.models.list().data[0].id,
messages=follow_up_messages,
stream=False,
)
print("\n--- Follow-up Response ---")
print(follow_up_response.choices[0].message.content)
print("--- End Follow-up ---\n")
print("--- End Response ---\n")
def run_test_case(query, test_name):
"""Run a single test case with the given query"""
print(f"\n{'=' * 50}\nTEST CASE: {test_name}\n{'=' * 50}")
print(f"Query: '{query}'")
start_time = time.time()
# Create non-streaming chat completion request
response = client.chat.completions.create(
model=client.models.list().data[0].id,
messages=[{"role": "user", "content": query}],
tools=tools,
tool_choice="auto",
stream=False,
)
# Process the non-streaming response, passing the original query
process_response(response, tool_functions, query)
end_time = time.time()
print(f"Test completed in {end_time - start_time:.2f} seconds")
def main():
# Initialize OpenAI client
global client
client = OpenAI(
api_key=openai_api_key,
base_url=openai_api_base,
)
# Run test cases
test_cases = [
("I want to know the weather in San Francisco", "Weather Information"),
("Calculate 25 * 17 + 31", "Math Calculation"),
("Translate 'Hello world' to Spanish", "Text Translation"),
("What is the weather in Tokyo and New York in celsius", "Multiple Tool Usage"),
]
# Execute all test cases
for query, test_name in test_cases:
run_test_case(query, test_name)
time.sleep(1) # Small delay between tests
print("\nAll tests completed.")
if __name__ == "__main__":
main()

View File

@ -1,272 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# ruff: noqa: E501
"""
Set up this example by starting a vLLM OpenAI-compatible server with tool call
options enabled for xLAM-2 models:
vllm serve --model Salesforce/Llama-xLAM-2-8b-fc-r --enable-auto-tool-choice --tool-call-parser xlam
OR
vllm serve --model Salesforce/xLAM-2-3b-fc-r --enable-auto-tool-choice --tool-call-parser xlam
This example demonstrates streaming tool calls with xLAM models.
"""
import json
import time
from openai import OpenAI
# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "empty"
openai_api_base = "http://localhost:8000/v1"
# Define tool functions
def get_weather(location: str, unit: str):
return f"Weather in {location} is 22 degrees {unit}."
def calculate_expression(expression: str):
try:
result = eval(expression)
return f"The result of {expression} is {result}"
except Exception as e:
return f"Could not calculate {expression}: {e}"
def translate_text(text: str, target_language: str):
return f"Translation of '{text}' to {target_language}: [translated content]"
# Define tools
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and state, e.g., 'San Francisco, CA'",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location", "unit"],
},
},
},
{
"type": "function",
"function": {
"name": "calculate_expression",
"description": "Calculate a mathematical expression",
"parameters": {
"type": "object",
"properties": {
"expression": {
"type": "string",
"description": "Mathematical expression to evaluate, needs to be a valid Python expression",
}
},
"required": ["expression"],
},
},
},
{
"type": "function",
"function": {
"name": "translate_text",
"description": "Translate text to another language",
"parameters": {
"type": "object",
"properties": {
"text": {"type": "string", "description": "Text to translate"},
"target_language": {
"type": "string",
"description": "Target language for translation",
},
},
"required": ["text", "target_language"],
},
},
},
]
# Map of function names to implementations
tool_functions = {
"get_weather": get_weather,
"calculate_expression": calculate_expression,
"translate_text": translate_text,
}
def process_stream(response, tool_functions, original_query):
"""Process a streaming response with possible tool calls"""
# Track multiple tool calls
tool_calls = {} # Dictionary to store tool calls by ID
current_id = None
print("\n--- Stream Output ---")
for chunk in response:
# Handle tool calls in the stream
if chunk.choices[0].delta.tool_calls:
for tool_call_chunk in chunk.choices[0].delta.tool_calls:
# Get the tool call ID
if hasattr(tool_call_chunk, "id") and tool_call_chunk.id:
current_id = tool_call_chunk.id
if current_id not in tool_calls:
tool_calls[current_id] = {
"function_name": None,
"function_args": "",
"function_id": current_id,
}
# Extract function information as it comes in chunks
if (
hasattr(tool_call_chunk, "function")
and current_id
and current_id in tool_calls
):
if (
hasattr(tool_call_chunk.function, "name")
and tool_call_chunk.function.name
):
tool_calls[current_id]["function_name"] = (
tool_call_chunk.function.name
)
print(f"Function called: {tool_call_chunk.function.name}")
if (
hasattr(tool_call_chunk.function, "arguments")
and tool_call_chunk.function.arguments
):
tool_calls[current_id]["function_args"] += (
tool_call_chunk.function.arguments
)
print(f"Arguments chunk: {tool_call_chunk.function.arguments}")
# Handle regular content in the stream
elif chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="")
print("\n--- End Stream ---\n")
# Execute each function call and build messages for follow-up
follow_up_messages = [{"role": "user", "content": original_query}]
for tool_id, tool_data in tool_calls.items():
function_name = tool_data["function_name"]
function_args = tool_data["function_args"]
function_id = tool_data["function_id"]
if function_name and function_args:
try:
# Parse the JSON arguments
args = json.loads(function_args)
# Call the function with the arguments
function_result = tool_functions[function_name](**args)
print(
f"\n--- Function Result ({function_name}) ---\n{function_result}\n"
)
# Add the assistant message with tool call
follow_up_messages.append(
{
"role": "assistant",
"tool_calls": [
{
"id": function_id,
"type": "function",
"function": {
"name": function_name,
"arguments": function_args,
},
}
],
}
)
# Add the tool message with function result
follow_up_messages.append(
{
"role": "tool",
"tool_call_id": function_id,
"content": function_result,
}
)
except Exception as e:
print(f"Error executing function: {e}")
# Only send follow-up if we have results to process
if len(follow_up_messages) > 1:
# Create a follow-up message with all the function results
follow_up_response = client.chat.completions.create(
model=client.models.list().data[0].id,
messages=follow_up_messages,
stream=True,
)
print("\n--- Follow-up Response ---")
for chunk in follow_up_response:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="")
print("\n--- End Follow-up ---\n")
def run_test_case(query, test_name):
"""Run a single test case with the given query"""
print(f"\n{'=' * 50}\nTEST CASE: {test_name}\n{'=' * 50}")
print(f"Query: '{query}'")
start_time = time.time()
# Create streaming chat completion request
response = client.chat.completions.create(
model=client.models.list().data[0].id,
messages=[{"role": "user", "content": query}],
tools=tools,
tool_choice="auto",
stream=True,
)
# Process the streaming response
process_stream(response, tool_functions, query)
end_time = time.time()
print(f"Test completed in {end_time - start_time:.2f} seconds")
def main():
# Initialize OpenAI client
global client
client = OpenAI(
api_key=openai_api_key,
base_url=openai_api_base,
)
# Run test cases
test_cases = [
("I want to know the weather in San Francisco", "Weather Information"),
("Calculate 25 * 17 + 31", "Math Calculation"),
("Translate 'Hello world' to Spanish", "Text Translation"),
("What is the weather in Tokyo and New York in celsius", "Multiple Tool Usage"),
]
# Execute all test cases
for query, test_name in test_cases:
run_test_case(query, test_name)
time.sleep(1) # Small delay between tests
print("\nAll tests completed.")
if __name__ == "__main__":
main()

View File

@ -1,23 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This script demonstrates how to use the vLLM API server to perform audio
transcription with the `openai/whisper-large-v3` model.
Before running this script, you must start the vLLM server with the following command:
vllm serve openai/whisper-large-v3
Requirements:
- vLLM with audio support
- openai Python SDK
- httpx for streaming support
The script performs:
1. Synchronous transcription using OpenAI-compatible API.
2. Streaming transcription using raw HTTP request to the vLLM server.
"""
import asyncio
import json
@ -39,9 +21,6 @@ client = OpenAI(
def sync_openai():
"""
Perform synchronous transcription using OpenAI-compatible API.
"""
with open(str(mary_had_lamb), "rb") as f:
transcription = client.audio.transcriptions.create(
file=f,
@ -58,11 +37,11 @@ def sync_openai():
print("transcription result:", transcription.text)
sync_openai()
# OpenAI Transcription API client does not support streaming.
async def stream_openai_response():
"""
Perform streaming transcription using vLLM's raw HTTP streaming API.
"""
data = {
"language": "en",
"stream": True,
@ -89,15 +68,7 @@ async def stream_openai_response():
# Extract and print the content
content = chunk["choices"][0].get("delta", {}).get("content")
print(content, end="")
print() # Final newline after stream ends
def main():
sync_openai()
# Run the asynchronous function
asyncio.run(stream_openai_response())
if __name__ == "__main__":
main()

Some files were not shown because too many files have changed in this diff.