Compare commits
129 commits: fp8_ep_dp...mergify/ho
| Author | SHA1 | Date |
|---|---|---|
| | ca15f0afe6 | |
| | 12e5829221 | |
| | 3a4d417707 | |
| | 8335667c22 | |
| | e1c4380d4c | |
| | e31ae3de36 | |
| | 2ffb9b6e07 | |
| | cda10fa3e2 | |
| | c123bc33f9 | |
| | b9a1791e2c | |
| | 989dcee981 | |
| | 3d64d366e0 | |
| | eaa2e51088 | |
| | d77f7fb871 | |
| | 2d8476e465 | |
| | 88be823d57 | |
| | 4e4f63ad45 | |
| | d2f0e7e615 | |
| | 122cdca5f6 | |
| | cf02f9b283 | |
| | c4296b1a27 | |
| | 66c508b137 | |
| | 84166fee97 | |
| | 6e0cd10f72 | |
| | e010688f50 | |
| | 441b65d8c7 | |
| | 46ecc57973 | |
| | b6a3a9f76d | |
| | ca27f0f9c1 | |
| | aad30bd306 | |
| | 94ecee6282 | |
| | 8267f9916f | |
| | 7353492a47 | |
| | 7661e92ef8 | |
| | f168b85725 | |
| | da511d54d8 | |
| | 65c69444b1 | |
| | 94870359cd | |
| | 0d49483ea9 | |
| | 90b78ec5f9 | |
| | 91a2ef98ea | |
| | 3da2313d78 | |
| | b61dc5f972 | |
| | f8a1a2d108 | |
| | 3465b87ef8 | |
| | c8134bea15 | |
| | cb6d572e85 | |
| | 87360308b7 | |
| | aa49f14832 | |
| | 9ef9173cfa | |
| | 85e2b7bb13 | |
| | 61059bee40 | |
| | ec89524f50 | |
| | f20f9f063b | |
| | 9bc8bb07cf | |
| | 1aeb925f34 | |
| | 188a4590d8 | |
| | 18093084be | |
| | da40380214 | |
| | 8fc57501d3 | |
| | af7fc84fd2 | |
| | 0678b52251 | |
| | 25b918eee6 | |
| | a408820f2f | |
| | c56ed8bb0e | |
| | 78dcf56cb3 | |
| | b2fac67130 | |
| | 23027e2daf | |
| | c3fd4d669a | |
| | ef3f98b59f | |
| | 7ee2590478 | |
| | 53a5a0ce30 | |
| | d459fae0a2 | |
| | c8dcc15921 | |
| | 8f4ffbd373 | |
| | 5f2cd251d2 | |
| | 02658c2dfe | |
| | 01dc9a76db | |
| | 35cf32df30 | |
| | 8711bc5e68 | |
| | 2669a0d7b5 | |
| | 8e972d9c44 | |
| | 3336c8cfbe | |
| | b124e1085b | |
| | 41aa578428 | |
| | 8d646c2e53 | |
| | 5d6d1adf15 | |
| | 1409ef9134 | |
| | 4555143ea7 | |
| | 52dceb172d | |
| | abd7df2fca | |
| | b712be98c7 | |
| | a8da78eac9 | |
| | 5d96533e22 | |
| | 4de790fcad | |
| | b5fd9506c1 | |
| | 135cf55cd1 | |
| | 6cac54f4d1 | |
| | 6865fe0074 | |
| | e31446b6c8 | |
| | bdf13965ab | |
| | fa98d77773 | |
| | 01eee40536 | |
| | 19bdaf32b1 | |
| | 02f0c7b220 | |
| | d054da1992 | |
| | 4b7817c119 | |
| | d00dd65cd4 | |
| | d81edded69 | |
| | 476844d44c | |
| | 4e68ae5e59 | |
| | 4e88723f32 | |
| | 118ff92111 | |
| | ec2dcd80bc | |
| | 42243fbda0 | |
| | 6d18ed2a2e | |
| | f32fcd9444 | |
| | d32aa2e670 | |
| | cc977286e7 | |
| | 17430e3653 | |
| | 1282bd812e | |
| | bdce64f236 | |
| | 9e6f61e8c3 | |
| | 8655f47f37 | |
| | 4ce42f9204 | |
| | 8a57872b2a | |
| | 5bc1ad6cee | |
| | 9112b443a0 | |
| | c57d577e8d | |
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
 import os
 import sys
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
 import argparse
 import os
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 from pathlib import Path
 
 import pytest
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """
 LM eval harness on model to compare vs HF baseline computed offline.
 Configs are found in configs/$MODEL.yaml
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
 import json
 import os
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
 import argparse
 
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
 import argparse
 import json
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
 from lmdeploy.serve.openai.api_client import APIClient
 
```
```diff
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
 import datetime
 import json
```
```diff
@@ -1,5 +1,6 @@
 steps:
 - label: "Build wheel - CUDA 12.8"
+  id: build-wheel-cuda-12-8
   agents:
     queue: cpu_queue_postmerge
   commands:
```
```diff
@@ -11,6 +12,7 @@ steps:
       DOCKER_BUILDKIT: "1"
 
 - label: "Build wheel - CUDA 12.6"
+  id: build-wheel-cuda-12-6
   agents:
     queue: cpu_queue_postmerge
   commands:
```
```diff
@@ -28,6 +30,7 @@ steps:
 
 - label: "Build wheel - CUDA 11.8"
   # depends_on: block-build-cu118-wheel
+  id: build-wheel-cuda-11-8
   agents:
     queue: cpu_queue_postmerge
   commands:
```
```diff
@@ -44,6 +47,7 @@ steps:
 
 - label: "Build release image"
   depends_on: block-release-image-build
+  id: build-release-image
   agents:
     queue: cpu_queue_postmerge
   commands:
```
```diff
@@ -51,6 +55,18 @@ steps:
     - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.8.1 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT --target vllm-openai --progress plain -f docker/Dockerfile ."
     - "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT"
 
+- label: "Annotate release workflow"
+  depends_on:
+    - build-release-image
+    - build-wheel-cuda-12-8
+    - build-wheel-cuda-12-6
+    - build-wheel-cuda-11-8
+  id: annotate-release-workflow
+  agents:
+    queue: cpu_queue_postmerge
+  commands:
+    - "bash .buildkite/scripts/annotate-release.sh"
+
 - label: "Build and publish TPU release image"
   depends_on: ~
   if: build.env("NIGHTLY") == "1"
```
```diff
@@ -70,9 +86,10 @@ steps:
       DOCKER_BUILDKIT: "1"
 
 - input: "Provide Release version here"
+  id: input-release-version
   fields:
     - text: "What is the release version?"
-      key: "release-version"
+      key: release-version
 
 - block: "Build CPU release image"
   key: block-cpu-release-image-build
```
.buildkite/scripts/annotate-release.sh (new executable file, 31 lines)
```diff
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -ex
+
+# Get release version and strip leading 'v' if present
+RELEASE_VERSION=$(buildkite-agent meta-data get release-version | sed 's/^v//')
+
+if [ -z "$RELEASE_VERSION" ]; then
+  echo "Error: RELEASE_VERSION is empty. 'release-version' metadata might not be set or is invalid."
+  exit 1
+fi
+
+buildkite-agent annotate --style 'info' --context 'release-workflow' << EOF
+To download the wheel:
+\`\`\`
+aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux1_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}+cu126/vllm-${RELEASE_VERSION}+cu126-cp38-abi3-manylinux1_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}+cu118/vllm-${RELEASE_VERSION}+cu118-cp38-abi3-manylinux1_x86_64.whl .
+\`\`\`
+
+To download and upload the image:
+
+\`\`\`
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT} vllm/vllm-openai
+docker tag vllm/vllm-openai vllm/vllm-openai:latest
+docker tag vllm/vllm-openai vllm/vllm-openai:v${RELEASE_VERSION}
+docker push vllm/vllm-openai:latest
+docker push vllm/vllm-openai:v${RELEASE_VERSION}
+\`\`\`
+EOF
```
.buildkite/scripts/ci-clean-log.sh (new file, 17 lines)
```diff
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Usage: ./ci_clean_log.sh ci.log
+# This script strips timestamps and color codes from CI log files.
+
+# Check if argument is given
+if [ $# -lt 1 ]; then
+    echo "Usage: $0 ci.log"
+    exit 1
+fi
+
+INPUT_FILE="$1"
+
+# Strip timestamps
+sed -i 's/^\[[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}T[0-9]\{2\}:[0-9]\{2\}:[0-9]\{2\}Z\] //' "$INPUT_FILE"
+
+# Strip colorization
+sed -i -r 's/\x1B\[[0-9;]*[mK]//g' "$INPUT_FILE"
```
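As a quick check of what the two `sed` passes do, here is a hedged sketch on a fabricated log line (the timestamp matches the `[YYYY-MM-DDTHH:MM:SSZ]` pattern the first expression targets; file name is made up):

```bash
printf '[2025-05-27T10:15:42Z] \033[32mPASSED\033[0m tests/foo.py\n' > sample.log
# Pass 1: delete the leading bracketed UTC timestamp.
sed -i 's/^\[[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}T[0-9]\{2\}:[0-9]\{2\}:[0-9]\{2\}Z\] //' sample.log
# Pass 2: delete ANSI color/erase escape sequences (ESC [ ... m or K).
sed -i -r 's/\x1B\[[0-9;]*[mK]//g' sample.log
cat sample.log   # -> PASSED tests/foo.py
```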
```diff
@@ -94,6 +94,10 @@ if [[ $commands == *"pytest -v -s compile/test_basic_correctness.py"* ]]; then
   commands=${commands//"pytest -v -s compile/test_basic_correctness.py"/"VLLM_USE_TRITON_FLASH_ATTN=0 pytest -v -s compile/test_basic_correctness.py"}
 fi
 
+if [[ $commands == *"pytest -v -s lora"* ]]; then
+  commands=${commands//"pytest -v -s lora"/"VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora"}
+fi
+
 #ignore certain kernels tests
 if [[ $commands == *" kernels/core"* ]]; then
   commands="${commands} \
```
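For readers unfamiliar with the `${var//pattern/replacement}` form used above, a minimal standalone sketch of the same bash feature (the command string here is hypothetical, not from the CI script):

```bash
#!/bin/bash
# ${var//pattern/replacement} replaces ALL occurrences of pattern in var.
commands="pytest -v -s lora && pytest -v -s lora/extra"
commands=${commands//"pytest -v -s lora"/"VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora"}
echo "$commands"
# -> VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora && VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora/extra
```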
```diff
@@ -7,6 +7,7 @@ set -ex
 # Setup cleanup
 remove_docker_container() {
   if [[ -n "$container_id" ]]; then
+    podman stop --all -t0
     podman rm -f "$container_id" || true
   fi
   podman system prune -f
```
```diff
@@ -37,7 +38,7 @@ function cpu_tests() {
     pytest -v -s tests/models/language/generation/test_common.py::test_models[False-5-32-facebook/opt-125m]
     pytest -v -s tests/models/language/generation/test_common.py::test_models[False-5-32-google/gemma-1.1-2b-it]
     pytest -v -s tests/models/language/pooling/test_classification.py::test_models[float-jason9693/Qwen2.5-1.5B-apeach]
-    pytest -v -s tests/models/language/pooling/test_embedding.py::test_models[half-BAAI/bge-base-en-v1.5]"
+    pytest -v -s tests/models/language/pooling/test_embedding.py -m cpu_model"
 }
 
 # All of CPU tests are expected to be finished less than 40 mins.
```
```diff
@@ -6,72 +6,67 @@ set -ex
 
 # allow to bind to different cores
 CORE_RANGE=${CORE_RANGE:-48-95}
+OMP_CORE_RANGE=${OMP_CORE_RANGE:-48-95}
 NUMA_NODE=${NUMA_NODE:-1}
 
+export CMAKE_BUILD_PARALLEL_LEVEL=32
+
 # Setup cleanup
 remove_docker_container() {
   set -e;
-  docker rm -f cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" || true;
-  docker image rm cpu-test-"$BUILDKITE_BUILD_NUMBER" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2 || true;
+  docker rm -f cpu-test-"$NUMA_NODE" cpu-test-"$NUMA_NODE"-avx2 || true;
 }
 trap remove_docker_container EXIT
 remove_docker_container
 
 # Try building the docker image
-numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --tag cpu-test-"$BUILDKITE_BUILD_NUMBER" --target vllm-test -f docker/Dockerfile.cpu .
-numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" --tag cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2 --target vllm-test -f docker/Dockerfile.cpu .
+numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --tag cpu-test-"$NUMA_NODE" --target vllm-test -f docker/Dockerfile.cpu .
+numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" --tag cpu-test-"$NUMA_NODE"-avx2 --target vllm-test -f docker/Dockerfile.cpu .
 
 # Run the image, setting --shm-size=4g for tensor parallel.
-docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \
-  --cpuset-mems="$NUMA_NODE" --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"
-docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \
-  --cpuset-mems="$NUMA_NODE" --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2
+docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --env VLLM_CPU_OMP_THREADS_BIND="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test-"$NUMA_NODE" cpu-test-"$NUMA_NODE"
+docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --env VLLM_CPU_OMP_THREADS_BIND="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test-"$NUMA_NODE"-avx2 cpu-test-"$NUMA_NODE"-avx2
 
 function cpu_tests() {
   set -e
   export NUMA_NODE=$2
-  export BUILDKITE_BUILD_NUMBER=$3
 
   # offline inference
-  docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" bash -c "
+  docker exec cpu-test-"$NUMA_NODE"-avx2 bash -c "
    set -e
    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"
 
   # Run basic model test
-  docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
+  docker exec cpu-test-"$NUMA_NODE" bash -c "
    set -e
-   pytest -v -s tests/kernels/test_cache.py -m cpu_model
-   pytest -v -s tests/kernels/test_mla_decode_cpu.py -m cpu_model
-   pytest -v -s tests/models/decoder_only/language -m cpu_model
-   pytest -v -s tests/models/embedding/language -m cpu_model
-   pytest -v -s tests/models/encoder_decoder/language -m cpu_model
-   pytest -v -s tests/models/decoder_only/audio_language -m cpu_model
-   pytest -v -s tests/models/decoder_only/vision_language -m cpu_model"
+   pytest -v -s tests/kernels/attention/test_cache.py -m cpu_model
+   pytest -v -s tests/kernels/attention/test_mla_decode_cpu.py -m cpu_model
+   pytest -v -s tests/models/language/generation -m cpu_model
+   pytest -v -s tests/models/language/pooling -m cpu_model
+   pytest -v -s tests/models/multimodal/generation --ignore=tests/models/multimodal/generation/test_mllama.py -m cpu_model"
 
   # Run compressed-tensor test
-  docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
+  docker exec cpu-test-"$NUMA_NODE" bash -c "
    set -e
    pytest -s -v \
    tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_static_setup \
    tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_dynamic_per_token"
 
   # Run AWQ test
-  docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
+  docker exec cpu-test-"$NUMA_NODE" bash -c "
    set -e
-   pytest -s -v \
+   VLLM_USE_V1=0 pytest -s -v \
    tests/quantization/test_ipex_quant.py"
 
   # Run chunked-prefill and prefix-cache test
-  docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
+  docker exec cpu-test-"$NUMA_NODE" bash -c "
    set -e
    pytest -s -v -k cpu_model \
    tests/basic_correctness/test_chunked_prefill.py"
 
   # online serving
-  docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
+  docker exec cpu-test-"$NUMA_NODE" bash -c "
    set -e
-   export VLLM_CPU_KVCACHE_SPACE=10
-   export VLLM_CPU_OMP_THREADS_BIND=$1
    python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m --dtype half &
    timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
    python3 benchmarks/benchmark_serving.py \
```
```diff
@@ -83,7 +78,7 @@ function cpu_tests() {
    --tokenizer facebook/opt-125m"
 
   # Run multi-lora tests
-  docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
+  docker exec cpu-test-"$NUMA_NODE" bash -c "
    set -e
    pytest -s -v \
    tests/lora/test_qwen2vl.py"
```
```diff
@@ -91,4 +86,4 @@
 
 # All of CPU tests are expected to be finished less than 40 mins.
 export -f cpu_tests
-timeout 40m bash -c "cpu_tests $CORE_RANGE $NUMA_NODE $BUILDKITE_BUILD_NUMBER"
+timeout 1h bash -c "cpu_tests $CORE_RANGE $NUMA_NODE"
```
```diff
@@ -150,11 +150,15 @@ run_and_track_test 9 "test_multimodal.py" \
 run_and_track_test 10 "test_pallas.py" \
   "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_pallas.py"
 run_and_track_test 11 "test_struct_output_generate.py" \
-  "python3 -m pytest -s -v /workspace/vllm/tests/v1/entrypoints/llm/test_struct_output_generate.py"
+  "python3 -m pytest -s -v /workspace/vllm/tests/v1/entrypoints/llm/test_struct_output_generate.py -k \"not test_structured_output_with_reasoning_matrices\""
 run_and_track_test 12 "test_moe_pallas.py" \
   "python3 -m pytest -s -v /workspace/vllm/tests/tpu/test_moe_pallas.py"
 run_and_track_test 13 "test_lora.py" \
   "VLLM_XLA_CHECK_RECOMPILATION=0 python3 -m pytest -s -v /workspace/vllm/tests/tpu/lora/test_lora.py"
+run_and_track_test 14 "test_tpu_qkv_linear.py" \
+  "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_tpu_qkv_linear.py"
+run_and_track_test 15 "test_spmd_model_weight_loading.py" \
+  "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_spmd_model_weight_loading.py"
 
 # After all tests have been attempted, exit with the overall status.
 if [ "$overall_script_exit_code" -ne 0 ]; then
```
.buildkite/scripts/rerun-test.sh (new file, 18 lines)
```diff
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Usage: ./rerun_test.sh path/to/test.py::test_name
+
+# Check if argument is given
+if [ $# -lt 1 ]; then
+    echo "Usage: $0 path/to/test.py::test_name"
+    echo "Example: $0 tests/v1/engine/test_engine_core_client.py::test_kv_cache_events[True-tcp]"
+    exit 1
+fi
+
+TEST=$1
+COUNT=1
+
+while pytest -sv "$TEST"; do
+    COUNT=$((COUNT + 1))
+    echo "RUN NUMBER ${COUNT}"
+done
```
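The `while` loop runs until `pytest` first returns non-zero, so the last printed `RUN NUMBER` tells you how many attempts a flaky test survived. A hedged usage sketch (the test ID is taken from the script's own example; the path assumes you invoke it from the repo root):

```bash
# Quote the test ID so the [True-tcp] parametrization is not glob-expanded by the shell.
./.buildkite/scripts/rerun-test.sh "tests/v1/engine/test_engine_core_client.py::test_kv_cache_events[True-tcp]"
```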
.buildkite/scripts/tpu/cleanup_docker.sh (new executable file, 24 lines)
```diff
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -euo pipefail
+
+docker_root=$(docker info -f '{{.DockerRootDir}}')
+if [ -z "$docker_root" ]; then
+  echo "Failed to determine Docker root directory."
+  exit 1
+fi
+echo "Docker root directory: $docker_root"
+# Check disk usage of the filesystem where Docker's root directory is located
+disk_usage=$(df "$docker_root" | tail -1 | awk '{print $5}' | sed 's/%//')
+# Define the threshold
+threshold=70
+if [ "$disk_usage" -gt "$threshold" ]; then
+  echo "Disk usage is above $threshold%. Cleaning up Docker images and volumes..."
+  # Remove dangling images (those that are not tagged and not used by any container)
+  docker image prune -f
+  # Remove unused volumes / force the system prune for old images as well.
+  docker volume prune -f && docker system prune --force --filter "until=72h" --all
+  echo "Docker images and volumes cleanup completed."
+else
+  echo "Disk usage is below $threshold%. No cleanup needed."
+fi
```
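To see how the `disk_usage` pipeline reduces `df` output to a bare integer, a hedged walkthrough on made-up output (the path and numbers are hypothetical):

```bash
# Hypothetical `df /var/lib/docker` output:
#   Filesystem     1K-blocks      Used Available Use% Mounted on
#   /dev/sda1      515928320 412742656 103185664  80% /
# tail -1 keeps the data row, awk '{print $5}' picks "80%", and
# sed 's/%//' strips the sign so [ "$disk_usage" -gt 70 ] compares integers.
df /var/lib/docker | tail -1 | awk '{print $5}' | sed 's/%//'   # -> 80
```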
.buildkite/scripts/tpu/config_v6e_1.env (new file, 14 lines)
```diff
@@ -0,0 +1,14 @@
+# Environment config
+TEST_NAME=llama8b
+CONTAINER_NAME=vllm-tpu
+
+# vllm config
+MODEL=meta-llama/Llama-3.1-8B-Instruct
+MAX_NUM_SEQS=512
+MAX_NUM_BATCHED_TOKENS=512
+TENSOR_PARALLEL_SIZE=1
+MAX_MODEL_LEN=2048
+DOWNLOAD_DIR=/mnt/disks/persist
+EXPECTED_THROUGHPUT=8.0
+INPUT_LEN=1800
+OUTPUT_LEN=128
```
.buildkite/scripts/tpu/docker_run_bm.sh (new executable file, 102 lines)
```diff
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+if [ ! -f "$1" ]; then
+  echo "Error: The env file '$1' does not exist."
+  exit 1 # Exit the script with a non-zero status to indicate an error
+fi
+
+ENV_FILE=$1
+
+# For testing on local vm, use `set -a` to export all variables
+source /etc/environment
+source $ENV_FILE
+
+remove_docker_container() {
+  docker rm -f tpu-test || true;
+  docker rm -f vllm-tpu || true;
+  docker rm -f $CONTAINER_NAME || true;
+}
+
+trap remove_docker_container EXIT
+
+# Remove the container that might not be cleaned up in the previous run.
+remove_docker_container
+
+# Build docker image.
+# TODO: build the image outside the script and share the image with other
+# tpu test if building time is too long.
+DOCKER_BUILDKIT=1 docker build \
+  --build-arg max_jobs=16 \
+  --build-arg USE_SCCACHE=1 \
+  --build-arg GIT_REPO_CHECK=0 \
+  --tag vllm/vllm-tpu-bm \
+  --progress plain -f docker/Dockerfile.tpu .
+
+LOG_ROOT=$(mktemp -d)
+# If mktemp fails, set -e will cause the script to exit.
+echo "Results will be stored in: $LOG_ROOT"
+
+if [ -z "$HF_TOKEN" ]; then
+  echo "Error: HF_TOKEN is not set or is empty."
+  exit 1
+fi
+
+# Make sure mounted disk or dir exists
+if [ ! -d "$DOWNLOAD_DIR" ]; then
+  echo "Error: Folder $DOWNLOAD_DIR does not exist. This is usually a mounted drive. If no mounted drive, just create a folder."
+  exit 1
+fi
+
+echo "Run model $MODEL"
+echo
+
+echo "starting docker...$CONTAINER_NAME"
+echo
+docker run \
+  -v $DOWNLOAD_DIR:$DOWNLOAD_DIR \
+  --env-file $ENV_FILE \
+  -e HF_TOKEN="$HF_TOKEN" \
+  -e TARGET_COMMIT=$BUILDKITE_COMMIT \
+  -e MODEL=$MODEL \
+  -e WORKSPACE=/workspace \
+  --name $CONTAINER_NAME \
+  -d \
+  --privileged \
+  --network host \
+  -v /dev/shm:/dev/shm \
+  vllm/vllm-tpu-bm tail -f /dev/null
+
+echo "run script..."
+echo
+docker exec "$CONTAINER_NAME" /bin/bash -c ".buildkite/scripts/hardware_ci/run_bm.sh"
+
+echo "copy result back..."
+VLLM_LOG="$LOG_ROOT/$TEST_NAME"_vllm_log.txt
+BM_LOG="$LOG_ROOT/$TEST_NAME"_bm_log.txt
+docker cp "$CONTAINER_NAME:/workspace/vllm_log.txt" "$VLLM_LOG"
+docker cp "$CONTAINER_NAME:/workspace/bm_log.txt" "$BM_LOG"
+
+throughput=$(grep "Request throughput (req/s):" "$BM_LOG" | sed 's/[^0-9.]//g')
+echo "throughput for $TEST_NAME at $BUILDKITE_COMMIT: $throughput"
+
+if [ "$BUILDKITE" = "true" ]; then
+  echo "Running inside Buildkite"
+  buildkite-agent artifact upload "$VLLM_LOG"
+  buildkite-agent artifact upload "$BM_LOG"
+else
+  echo "Not running inside Buildkite"
+fi
+
+#
+# compare the throughput with EXPECTED_THROUGHPUT
+# and assert meeting the expectation
+#
+if [[ -z "$throughput" || ! "$throughput" =~ ^[0-9]+([.][0-9]+)?$ ]]; then
+  echo "Failed to get the throughput"
+  exit 1
+fi
+
+if (( $(echo "$throughput < $EXPECTED_THROUGHPUT" | bc -l) )); then
+  echo "Error: throughput($throughput) is less than expected($EXPECTED_THROUGHPUT)"
+  exit 1
+fi
```
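Bash's built-in arithmetic is integer-only, which is why the final check pipes the comparison through `bc -l`. A minimal sketch of the same pattern with hypothetical numbers:

```bash
throughput=7.85
EXPECTED_THROUGHPUT=8.0
# bc prints 1 when the relation holds and 0 otherwise; (( ... )) treats 1 as true.
if (( $(echo "$throughput < $EXPECTED_THROUGHPUT" | bc -l) )); then
    echo "below target"   # taken here, since 7.85 < 8.0
fi
```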
.buildkite/scripts/tpu/run_bm.sh (new executable file, 94 lines)
```diff
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+set -euo pipefail
+
+VLLM_LOG="$WORKSPACE/vllm_log.txt"
+BM_LOG="$WORKSPACE/bm_log.txt"
+
+if [ -n "$TARGET_COMMIT" ]; then
+  head_hash=$(git rev-parse HEAD)
+  if [ "$TARGET_COMMIT" != "$head_hash" ]; then
+    echo "Error: target commit $TARGET_COMMIT does not match HEAD: $head_hash"
+    exit 1
+  fi
+fi
+
+echo "model: $MODEL"
+echo
+
+#
+# create a log folder
+#
+mkdir "$WORKSPACE/log"
+
+# TODO: Move to image building.
+pip install pandas
+pip install datasets
+
+#
+# create sonnet_4x
+#
+echo "Create sonnet_4x.txt"
+echo "" > benchmarks/sonnet_4x.txt
+for _ in {1..4}
+do
+  cat benchmarks/sonnet.txt >> benchmarks/sonnet_4x.txt
+done
+
+#
+# start vllm service in backend
+#
+echo "launching vllm..."
+echo "logging to $VLLM_LOG"
+echo
+
+VLLM_USE_V1=1 vllm serve $MODEL \
+  --seed 42 \
+  --disable-log-requests \
+  --max-num-seqs $MAX_NUM_SEQS \
+  --max-num-batched-tokens $MAX_NUM_BATCHED_TOKENS \
+  --tensor-parallel-size $TENSOR_PARALLEL_SIZE \
+  --no-enable-prefix-caching \
+  --download_dir $DOWNLOAD_DIR \
+  --max-model-len $MAX_MODEL_LEN > "$VLLM_LOG" 2>&1 &
+
+
+echo "wait for 20 minutes.."
+echo
+# sleep 1200
+# wait for 10 minutes...
+for i in {1..120}; do
+  # TODO: detect other type of errors.
+  if grep -Fq "raise RuntimeError" "$VLLM_LOG"; then
+    echo "Detected RuntimeError, exiting."
+    exit 1
+  elif grep -Fq "Application startup complete" "$VLLM_LOG"; then
+    echo "Application started"
+    break
+  else
+    echo "wait for 10 seconds..."
+    sleep 10
+  fi
+done
+
+#
+# run test
+#
+echo "run benchmark test..."
+echo "logging to $BM_LOG"
+echo
+python benchmarks/benchmark_serving.py \
+  --backend vllm \
+  --model $MODEL \
+  --dataset-name sonnet \
+  --dataset-path benchmarks/sonnet_4x.txt \
+  --sonnet-input-len $INPUT_LEN \
+  --sonnet-output-len $OUTPUT_LEN \
+  --ignore-eos > "$BM_LOG"
+
+echo "completed..."
+echo
+
+throughput=$(grep "Request throughput (req/s):" "$BM_LOG" | sed 's/[^0-9.]//g')
+echo "throughput: $throughput"
+echo
```
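The throughput extraction relies on the benchmark log containing exactly one matching line. A hedged demo on a fabricated log line:

```bash
echo "Request throughput (req/s): 8.42" > bm_log.txt
grep "Request throughput (req/s):" bm_log.txt | sed 's/[^0-9.]//g'
# -> 8.42  (every character except digits and dots is deleted; the label
#     "Request throughput (req/s):" contains no digits or dots, so only
#     the number survives)
```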
```diff
@@ -145,6 +145,7 @@ steps:
   - examples/offline_inference/rlhf_colocate.py
   - tests/examples/offline_inference/data_parallel.py
   - tests/v1/test_async_llm_dp.py
+  - tests/v1/engine/test_engine_core_client.py
   commands:
   # test with tp=2 and external_dp=2
   - VLLM_USE_V1=0 torchrun --nproc-per-node=4 distributed/test_torchrun_example.py
```
```diff
@@ -154,6 +155,7 @@ steps:
   # test with internal dp
   - python3 ../examples/offline_inference/data_parallel.py
   - TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/test_async_llm_dp.py
+  - pytest -v -s v1/engine/test_engine_core_client.py::test_kv_cache_events_dp
   - pytest -v -s distributed/test_utils.py
   - pytest -v -s compile/test_basic_correctness.py
   - pytest -v -s distributed/test_pynccl.py
```
```diff
@@ -287,7 +289,7 @@ steps:
   - pytest -v -s spec_decode/e2e/test_eagle_correctness.py
 
 - label: LoRA Test %N # 15min each
-  mirror_hardwares: [amdexperimental]
+  mirror_hardwares: [amdexperimental, amdproduction]
   source_file_dependencies:
   - vllm/lora
   - tests/lora
```
```diff
@@ -318,6 +320,7 @@ steps:
   # these tests need to be separated, cannot combine
   - pytest -v -s compile/piecewise/test_simple.py
   - pytest -v -s compile/piecewise/test_toy_llama.py
+  - pytest -v -s compile/piecewise/test_full_cudagraph.py
 
 - label: PyTorch Fullgraph Test # 18min
   mirror_hardwares: [amdexperimental, amdproduction]
```
```diff
@@ -421,6 +424,9 @@ steps:
   - vllm/model_executor/layers/quantization
   - tests/quantization
   commands:
+  # temporary install here since we need nightly, will move to requirements/test.in
+  # after torchao 0.12 release
+  - pip install --pre torchao --index-url https://download.pytorch.org/whl/nightly/cu126
   - VLLM_TEST_FORCE_LOAD_FORMAT=auto pytest -v -s quantization
 
 - label: LM Eval Small Models # 53min
```
.github/CODEOWNERS (vendored, 16 changed lines)
```diff
@@ -10,15 +10,17 @@
 /vllm/worker/worker.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
 /vllm/model_executor/layers/sampler.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
 /vllm/model_executor/layers/quantization @mgoin @robertgshaw2-redhat @tlrmchlsmth
-/vllm/model_executor/guided_decoding @mgoin @russellb
+/vllm/model_executor/guided_decoding @mgoin @russellb @aarnphm
 /vllm/multimodal @DarkLight1337 @ywang96
 /vllm/vllm_flash_attn @LucasWilkinson
 /vllm/lora @jeejeelee
+/vllm/reasoning @aarnphm
+/vllm/entrypoints @aarnphm
 CMakeLists.txt @tlrmchlsmth
 
 # vLLM V1
 /vllm/v1 @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @comaniac @alexm-redhat
-/vllm/v1/structured_output @mgoin @russellb
+/vllm/v1/structured_output @mgoin @russellb @aarnphm
 
 # Test ownership
 /.buildkite/lm-eval-harness @mgoin @simon-mo
```
```diff
@@ -27,8 +29,8 @@ CMakeLists.txt @tlrmchlsmth
 /tests/distributed/test_multi_node_assignment.py @youkaichao
 /tests/distributed/test_pipeline_parallel.py @youkaichao
 /tests/distributed/test_same_node.py @youkaichao
-/tests/entrypoints @DarkLight1337 @robertgshaw2-redhat @simon-mo
-/tests/entrypoints/llm/test_guided_generate.py @mgoin @russellb
+/tests/entrypoints @DarkLight1337 @robertgshaw2-redhat @simon-mo @aarnphm
+/tests/entrypoints/llm/test_guided_generate.py @mgoin @russellb @aarnphm
 /tests/kernels @tlrmchlsmth @WoosukKwon
 /tests/model_executor/test_guided_processors.py @mgoin @russellb
 /tests/models @DarkLight1337 @ywang96
```
```diff
@@ -38,11 +40,11 @@ CMakeLists.txt @tlrmchlsmth
 /tests/quantization @mgoin @robertgshaw2-redhat
 /tests/spec_decode @njhill @LiuXiaoxuanPKU
 /tests/test_inputs.py @DarkLight1337 @ywang96
-/tests/v1/entrypoints/llm/test_struct_output_generate.py @mgoin @russellb
-/tests/v1/structured_output @mgoin @russellb
+/tests/v1/entrypoints/llm/test_struct_output_generate.py @mgoin @russellb @aarnphm
+/tests/v1/structured_output @mgoin @russellb @aarnphm
 /tests/weight_loading @mgoin @youkaichao
 /tests/lora @jeejeelee
 
 # Docs
 /docs @hmellor
 mkdocs.yaml @hmellor
```
.github/PULL_REQUEST_TEMPLATE.md (vendored, 16 changed lines)
```diff
@@ -1,6 +1,18 @@
-FILL IN THE PR DESCRIPTION HERE
+## Essential Elements of an Effective PR Description Checklist
+- [ ] The purpose of the PR, such as "Fix some issue (link existing issues this PR will resolve)".
+- [ ] The test plan, such as providing test command.
+- [ ] The test results, such as pasting the results comparison before and after, or e2e results
+- [ ] (Optional) The necessary documentation update, such as updating `supported_models.md` and `examples` for a new model.
 
-FIX #xxxx (*link existing issues this PR will resolve*)
+PLEASE FILL IN THE PR DESCRIPTION HERE ENSURING ALL CHECKLIST ITEMS ABOVE HAVE BEEN CONSIDERED.
+
+## Purpose
+
+## Test Plan
+
+## Test Result
+
+## (Optional) Documentation Update
 
 <!--- pyml disable-next-line no-emphasis-as-heading -->
 **BEFORE SUBMITTING, PLEASE READ <https://docs.vllm.ai/en/latest/contributing>** (anything written below this line will be removed by GitHub Actions)
```
.github/mergify.yml (vendored, 343 changed lines)
```diff
@@ -1,182 +1,181 @@
 pull_request_rules:
+- name: label-llama
+  description: Automatically apply llama label
+  conditions:
+    - or:
+      - files~=^examples/.*llama.*\.py
+      - files~=^tests/.*llama.*\.py
+      - files~=^vllm/entrypoints/openai/tool_parsers/llama.*\.py
+      - files~=^vllm/model_executor/models/.*llama.*\.py
+      - files~=^vllm/transformers_utils/configs/.*llama.*\.py
+  actions:
+    label:
+      add:
+        - llama
 - name: label-documentation
   description: Automatically apply documentation label
   conditions:
     - or:
       - files~=^[^/]+\.md$
       - files~=^docs/
       - files~=^examples/
   actions:
     label:
       add:
         - documentation
-
 - name: label-ci-build
   description: Automatically apply ci/build label
   conditions:
     - or:
       - files~=^\.github/
       - files~=\.buildkite/
       - files~=^cmake/
       - files=CMakeLists.txt
       - files~=^docker/Dockerfile
       - files~=^requirements.*\.txt
       - files=setup.py
   actions:
     label:
      add:
         - ci/build
-
 - name: label-frontend
   description: Automatically apply frontend label
   conditions:
     - files~=^vllm/entrypoints/
   actions:
     label:
       add:
         - frontend
-
 - name: label-multi-modality
   description: Automatically apply multi-modality label
   conditions:
     - or:
       - files~=^vllm/multimodal/
       - files~=^tests/multimodal/
       - files~=^tests/models/multimodal/
       - files~=^tests/models/*/audio_language/
       - files~=^tests/models/*/vision_language/
       - files=tests/models/test_vision.py
   actions:
     label:
       add:
         - multi-modality
-
 - name: label-structured-output
   description: Automatically apply structured-output label
   conditions:
     - or:
       - files~=^benchmarks/structured_schemas/
       - files=benchmarks/benchmark_serving_structured_output.py
       - files=benchmarks/run_structured_output_benchmark.sh
       - files=docs/features/structured_outputs.md
       - files=examples/offline_inference/structured_outputs.py
       - files=examples/online_serving/openai_chat_completion_structured_outputs.py
       - files=examples/online_serving/openai_chat_completion_structured_outputs_with_reasoning.py
       - files~=^vllm/model_executor/guided_decoding/
       - files=tests/model_executor/test_guided_processors.py
       - files=tests/entrypoints/llm/test_guided_generate.py
       - files~=^tests/v1/structured_output/
       - files=tests/v1/entrypoints/llm/test_guided_generate.py
       - files~=^vllm/v1/structured_output/
   actions:
     label:
       add:
         - structured-output
-
 - name: label-speculative-decoding
   description: Automatically apply speculative-decoding label
   conditions:
     - or:
       - files~=^vllm/spec_decode/
       - files=vllm/model_executor/layers/spec_decode_base_sampler.py
       - files~=^tests/spec_decode/
   actions:
     label:
       add:
         - speculative-decoding
-
 - name: label-v1
   description: Automatically apply v1 label
   conditions:
     - or:
       - files~=^vllm/v1/
       - files~=^tests/v1/
   actions:
     label:
       add:
         - v1
-
 - name: label-tpu
   description: Automatically apply tpu label
-  # Keep this list in sync with `label-tpu-remove` conditions
   conditions:
     - or:
       - files~=tpu.py
       - files~=_tpu
       - files~=tpu_
       - files~=/tpu/
       - files~=pallas
   actions:
     label:
       add:
         - tpu
-
 - name: label-tpu-remove
   description: Automatically remove tpu label
-  # Keep this list in sync with `label-tpu` conditions
   conditions:
     - and:
       - -files~=tpu.py
       - -files~=_tpu
       - -files~=tpu_
       - -files~=/tpu/
       - -files~=pallas
   actions:
     label:
       remove:
         - tpu
-
 - name: label-tool-calling
   description: Automatically add tool-calling label
   conditions:
     - or:
       - files~=^tests/tool_use/
       - files~=^tests/mistral_tool_use/
       - files~=^tests/entrypoints/openai/tool_parsers/
       - files=tests/entrypoints/openai/test_chat_with_tool_reasoning.py
       - files~=^vllm/entrypoints/openai/tool_parsers/
       - files=docs/features/tool_calling.md
       - files~=^examples/tool_chat_*
       - files=examples/offline_inference/chat_with_tools.py
       - files=examples/online_serving/openai_chat_completion_client_with_tools_required.py
       - files=examples/online_serving/openai_chat_completion_tool_calls_with_reasoning.py
       - files=examples/online_serving/openai_chat_completion_client_with_tools.py
   actions:
     label:
       add:
         - tool-calling
-
 - name: ping author on conflicts and add 'needs-rebase' label
   conditions:
     - conflict
     - -closed
   actions:
     label:
       add:
         - needs-rebase
     comment:
       message: |
        This pull request has merge conflicts that must be resolved before it can be
        merged. Please rebase the PR, @{{author}}.
 
        https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork
-
 - name: assign reviewer for tensorizer changes
   conditions:
     - files~=^vllm/model_executor/model_loader/tensorizer.py
     - files~=^vllm/model_executor/model_loader/tensorizer_loader.py
     - files~=^tests/entrypoints/openai/test_tensorizer_entrypoint.py
     - files~=^tests/tensorizer_loader/
   actions:
     assign:
       users:
-        - "sangstar"
+        - sangstar
-
 - name: remove 'needs-rebase' label when conflict is resolved
   conditions:
     - -conflict
     - -closed
   actions:
     label:
       remove:
         - needs-rebase
```
```diff
@@ -11,6 +11,8 @@ repos:
   hooks:
   - id: yapf
     args: [--in-place, --verbose]
+    # Keep the same list from yapfignore here to avoid yapf failing without any inputs
+    exclude: '(.buildkite|benchmarks|build|examples)/.*'
 - repo: https://github.com/astral-sh/ruff-pre-commit
   rev: v0.11.7
   hooks:
```
```diff
@@ -182,9 +182,6 @@ include(FetchContent)
 file(MAKE_DIRECTORY ${FETCHCONTENT_BASE_DIR}) # Ensure the directory exists
 message(STATUS "FetchContent base directory: ${FETCHCONTENT_BASE_DIR}")
 
-#
-# Set rocm version dev int.
-#
 if(VLLM_GPU_LANG STREQUAL "HIP")
   #
   # Overriding the default -O set up by cmake, adding ggdb3 for the most verbose debug info
```
```diff
@@ -192,7 +189,6 @@ if(VLLM_GPU_LANG STREQUAL "HIP")
   set(CMAKE_${VLLM_GPU_LANG}_FLAGS_DEBUG "${CMAKE_${VLLM_GPU_LANG}_FLAGS_DEBUG} -O0 -ggdb3")
   set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -ggdb3")
 
-
   #
   # Certain HIP functions are marked as [[nodiscard]], yet vllm ignores the result which generates
   # a lot of warnings that always mask real issues. Suppressing until this is properly addressed.
```
```diff
@@ -246,6 +242,7 @@ set(VLLM_EXT_SRC
   "csrc/activation_kernels.cu"
   "csrc/layernorm_kernels.cu"
   "csrc/layernorm_quant_kernels.cu"
+  "csrc/sampler.cu"
   "csrc/cuda_view.cu"
   "csrc/quantization/gptq/q_gemm.cu"
   "csrc/quantization/compressed_tensors/int8_quant_kernels.cu"
```
```diff
@@ -546,8 +543,8 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
   # CUTLASS MoE kernels
 
   # The MoE kernel cutlass_moe_mm requires CUDA 12.3 or later (and only works
-  # on Hopper). get_cutlass_moe_mm_data should only be compiled if it's possible
-  # to compile MoE kernels that use its output.
+  # on Hopper). get_cutlass_(pplx_)moe_mm_data should only be compiled
+  # if it's possible to compile MoE kernels that use its output.
   cuda_archs_loose_intersection(SCALED_MM_ARCHS "9.0a;10.0a" "${CUDA_ARCHS}")
   if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.3 AND SCALED_MM_ARCHS)
     set(SRCS "csrc/quantization/cutlass_w8a8/moe/grouped_mm_c3x.cu"
```
README.md (10 changed lines)
```diff
@@ -58,8 +58,8 @@ vLLM is fast with:
 - Efficient management of attention key and value memory with [**PagedAttention**](https://blog.vllm.ai/2023/06/20/vllm.html)
 - Continuous batching of incoming requests
 - Fast model execution with CUDA/HIP graph
-- Quantizations: [GPTQ](https://arxiv.org/abs/2210.17323), [AWQ](https://arxiv.org/abs/2306.00978), [AutoRound](https://arxiv.org/abs/2309.05516),INT4, INT8, and FP8.
-- Optimized CUDA kernels, including integration with FlashAttention and FlashInfer.
+- Quantizations: [GPTQ](https://arxiv.org/abs/2210.17323), [AWQ](https://arxiv.org/abs/2306.00978), [AutoRound](https://arxiv.org/abs/2309.05516), INT4, INT8, and FP8
+- Optimized CUDA kernels, including integration with FlashAttention and FlashInfer
 - Speculative decoding
 - Chunked prefill
 
```
```diff
@@ -72,14 +72,14 @@ vLLM is flexible and easy to use with:
 - Tensor parallelism and pipeline parallelism support for distributed inference
 - Streaming outputs
 - OpenAI-compatible API server
-- Support NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, TPU, and AWS Neuron.
+- Support NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, TPU, and AWS Neuron
 - Prefix caching support
 - Multi-LoRA support
 
 vLLM seamlessly supports most popular open-source models on HuggingFace, including:
 - Transformer-like LLMs (e.g., Llama)
 - Mixture-of-Expert LLMs (e.g., Mixtral, Deepseek-V2 and V3)
-- Embedding Models (e.g. E5-Mistral)
+- Embedding Models (e.g., E5-Mistral)
 - Multi-modal LLMs (e.g., LLaVA)
 
 Find the full list of supported models [here](https://docs.vllm.ai/en/latest/models/supported_models.html).
```
@ -162,4 +162,4 @@ If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs
|
|||||||
|
|
||||||
## Media Kit
|
## Media Kit
|
||||||
|
|
||||||
- If you wish to use vLLM's logo, please refer to [our media kit repo](https://github.com/vllm-project/media-kit).
|
- If you wish to use vLLM's logo, please refer to [our media kit repo](https://github.com/vllm-project/media-kit)
|
||||||
---
@@ -10,11 +10,15 @@
 # 3. Set variables (ALL REQUIRED)
 # BASE: your directory for vllm repo
 # MODEL: the model served by vllm
+# TP: ways of tensor parallelism
 # DOWNLOAD_DIR: directory to download and load model weights.
 # INPUT_LEN: request input len
 # OUTPUT_LEN: request output len
 # MIN_CACHE_HIT_PCT: prefix cache rate
 # MAX_LATENCY_ALLOWED_MS: (e2e) latency requirement. If there's no latency requirement, set it to a large number like 1000000000
+# NUM_SEQS_LIST: a list of `max-num-seqs` you want to loop with.
+# NUM_BATCHED_TOKENS_LIST: a list of `max-num-batched-tokens` you want to loop with.
+# Note that the default NUM_SEQS_LIST and NUM_BATCHED_TOKENS_LIST are set for medium size input/output len, for extra short context (such as 20:20), you might need to include larger numbers in NUM_SEQS_LIST.
 # 4. Run the script, it might take a long time, you can use tmux to avoid the script stop if disconnection happens.
 # 5. The final result will be saved in RESULT file.

@@ -30,31 +34,27 @@
 TAG=$(date +"%Y_%m_%d_%H_%M")
 BASE=""
 MODEL="meta-llama/Llama-3.1-8B-Instruct"
+TP=1
 DOWNLOAD_DIR=""
 INPUT_LEN=4000
 OUTPUT_LEN=16
-MIN_CACHE_HIT_PCT_PCT=0
+MIN_CACHE_HIT_PCT=0
 MAX_LATENCY_ALLOWED_MS=100000000000
+NUM_SEQS_LIST="128 256"
+NUM_BATCHED_TOKENS_LIST="512 1024 2048 4096"

 LOG_FOLDER="$BASE/auto-benchmark/$TAG"
 RESULT="$LOG_FOLDER/result.txt"

-echo "result file$ $RESULT"
+echo "result file: $RESULT"
 echo "model: $MODEL"
-echo

 rm -rf $LOG_FOLDER
 mkdir -p $LOG_FOLDER

 cd "$BASE/vllm"
-# create sonnet-4x.txt so that we can sample 2048 tokens for input
-echo "" > benchmarks/sonnet_4x.txt
-for _ in {1..4}
-do
-cat benchmarks/sonnet.txt >> benchmarks/sonnet_4x.txt
-done

-pip install datasets
+pip install -q datasets

 current_hash=$(git rev-parse HEAD)
 echo "hash:$current_hash" >> "$RESULT"
@@ -64,53 +64,69 @@ best_throughput=0
 best_max_num_seqs=0
 best_num_batched_tokens=0
 best_goodput=0

+start_server() {
+  local gpu_memory_utilization=$1
+  local max_num_seqs=$2
+  local max_num_batched_tokens=$3
+  local vllm_log=$4
+
+  pkill -f vllm
+
+  VLLM_USE_V1=1 VLLM_SERVER_DEV_MODE=1 vllm serve $MODEL \
+    --disable-log-requests \
+    --port 8004 \
+    --gpu-memory-utilization $gpu_memory_utilization \
+    --max-num-seqs $max_num_seqs \
+    --max-num-batched-tokens $max_num_batched_tokens \
+    --tensor-parallel-size $TP \
+    --enable-prefix-caching \
+    --load-format dummy \
+    --download-dir "$DOWNLOAD_DIR" \
+    --max-model-len $(( INPUT_LEN+OUTPUT_LEN )) > "$vllm_log" 2>&1 &
+
+  # wait for 10 minutes...
+  server_started=0
+  for i in {1..60}; do
+    RESPONSE=$(curl -s -X GET "http://0.0.0.0:8004/health" -w "%{http_code}" -o /dev/stdout)
+    STATUS_CODE=$(echo "$RESPONSE" | tail -n 1)
+    if [[ "$STATUS_CODE" -eq 200 ]]; then
+      server_started=1
+      break
+    else
+      sleep 10
+    fi
+  done
+  if (( ! server_started )); then
+    echo "server did not start within 10 minutes. Please check server log at $vllm_log".
+    return 1
+  else
+    return 0
+  fi
+}
+
 run_benchmark() {
   local max_num_seqs=$1
   local max_num_batched_tokens=$2
+  local gpu_memory_utilization=$3
   echo "max_num_seq: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens"
   local vllm_log="$LOG_FOLDER/vllm_log_${max_num_seqs}_${max_num_batched_tokens}.txt"
   echo "vllm_log: $vllm_log"
   echo
   rm -f $vllm_log
+  pkill -f vllm

-  # start the server
-  VLLM_USE_V1=1 VLLM_SERVER_DEV_MODE=1 vllm serve $MODEL \
-    --disable-log-requests \
-    --port 8004 \
-    --gpu-memory-utilization 0.98 \
-    --max-num-seqs $max_num_seqs \
-    --max-num-batched-tokens $max_num_batched_tokens \
-    --tensor-parallel-size 1 \
-    --enable-prefix-caching \
-    --load-format dummy \
-    --download-dir $DOWNLOAD_DIR \
-    --max-model-len $(( INPUT_LEN+OUTPUT_LEN )) > "$vllm_log" 2>&1 &
-  echo "wait for 10 minutes.."
-  echo
-  # wait for 10 minutes...
-  server_started=0
-  for i in {1..60}; do
-    if grep -Fq "Application startup complete" "$vllm_log"; then
-      echo "Application started"
-      server_started=1
-      break
-    else
-      # echo "wait for 10 seconds..."
-      sleep 10
-    fi
-  done
-
-  if (( ! server_started )); then
-    echo "server did not start within 10 minutes, terminate the benchmarking. Please check server log at $vllm_log"
-    echo "pkill -f vllm"
-    echo
-    pkill vllm
-    sleep 10
-    return 1
+  echo "starting server..."
+  start_server $gpu_memory_utilization $max_num_seqs $max_num_batched_tokens $vllm_log
+  result=$?
+  if [[ "$result" -eq 1 ]]; then
+    echo "server failed to start. gpu_memory_utilization:$gpu_memory_utilization, max_num_seqs:$max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens"
+  else
+    echo "server started."
   fi
+  echo

   echo "run benchmark test..."
-  echo
   meet_latency_requirement=0
   # get a basic qps by using request-rate inf
   bm_log="$LOG_FOLDER/bm_log_${max_num_seqs}_${max_num_batched_tokens}_requestrate_inf.txt"
@@ -118,29 +134,29 @@ run_benchmark() {
   python benchmarks/benchmark_serving.py \
     --backend vllm \
     --model $MODEL \
-    --dataset-name sonnet \
-    --dataset-path benchmarks/sonnet_4x.txt \
-    --sonnet-input-len $INPUT_LEN \
-    --sonnet-output-len $OUTPUT_LEN \
+    --dataset-name random \
+    --random-input-len $INPUT_LEN \
+    --random-output-len $OUTPUT_LEN \
     --ignore-eos \
     --disable-tqdm \
     --request-rate inf \
     --percentile-metrics ttft,tpot,itl,e2el \
     --goodput e2el:$MAX_LATENCY_ALLOWED_MS \
-    --num-prompts 100 \
-    --sonnet-prefix-len $prefix_len \
-    --port 8004 > "$bm_log"
-  through_put=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')
+    --num-prompts 1000 \
+    --random-prefix-len $prefix_len \
+    --port 8004 &> "$bm_log"
+  throughput=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')
   e2el=$(grep "P99 E2EL (ms):" "$bm_log" | awk '{print $NF}')
   goodput=$(grep "Request goodput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')

   if (( $(echo "$e2el <= $MAX_LATENCY_ALLOWED_MS" | bc -l) )); then
     meet_latency_requirement=1
+    request_rate=inf
   fi

   if (( ! meet_latency_requirement )); then
-    # start from request-rate as int(through_put) + 1
-    request_rate=$((${through_put%.*} + 1))
+    # start from request-rate as int(throughput) + 1
+    request_rate=$((${throughput%.*} + 1))
     while ((request_rate > 0)); do
       # clear prefix cache
       curl -X POST http://0.0.0.0:8004/reset_prefix_cache
@@ -149,19 +165,18 @@ run_benchmark() {
       python benchmarks/benchmark_serving.py \
         --backend vllm \
         --model $MODEL \
-        --dataset-name sonnet \
-        --dataset-path benchmarks/sonnet_4x.txt \
-        --sonnet-input-len $INPUT_LEN \
-        --sonnet-output-len $OUTPUT_LEN \
-        --ignore_eos \
+        --dataset-name random \
+        --random-input-len $INPUT_LEN \
+        --random-output-len $OUTPUT_LEN \
+        --ignore-eos \
         --disable-tqdm \
         --request-rate $request_rate \
        --percentile-metrics ttft,tpot,itl,e2el \
         --goodput e2el:$MAX_LATENCY_ALLOWED_MS \
         --num-prompts 100 \
-        --sonnet-prefix-len $prefix_len \
-        --port 8004 > "$bm_log"
-      through_put=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')
+        --random-prefix-len $prefix_len \
+        --port 8004 &> "$bm_log"
+      throughput=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')
       e2el=$(grep "P99 E2EL (ms):" "$bm_log" | awk '{print $NF}')
       goodput=$(grep "Request goodput (req/s):" "$bm_log" | sed 's/[^0-9.]//g')
       if (( $(echo "$e2el <= $MAX_LATENCY_ALLOWED_MS" | bc -l) )); then
@@ -173,10 +188,10 @@ run_benchmark() {
   fi
   # write the results and update the best result.
   if ((meet_latency_requirement)); then
-    echo "max_num_seqs: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens, request_rate: $request_rate, e2el: $e2el, through put: $through_put, goodput: $goodput"
-    echo "max_num_seqs: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens, request_rate: $request_rate, e2el: $e2el, through put: $through_put, goodput: $goodput" >> "$RESULT"
-    if (( $(echo "$through_put > $best_throughput" | bc -l) )); then
-      best_throughput=$through_put
+    echo "max_num_seqs: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens, request_rate: $request_rate, e2el: $e2el, throughput: $throughput, goodput: $goodput"
+    echo "max_num_seqs: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens, request_rate: $request_rate, e2el: $e2el, throughput: $throughput, goodput: $goodput" >> "$RESULT"
+    if (( $(echo "$throughput > $best_throughput" | bc -l) )); then
+      best_throughput=$throughput
       best_max_num_seqs=$max_num_seqs
       best_num_batched_tokens=$max_num_batched_tokens
       best_goodput=$goodput
@@ -188,22 +203,39 @@ run_benchmark() {

   echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput"

-  echo "pkill -f vllm"
-  echo
   pkill vllm
   sleep 10
-  rm -f $vllm_log
   printf '=%.0s' $(seq 1 20)
   return 0
 }

+read -r -a num_seqs_list <<< "$NUM_SEQS_LIST"
+read -r -a num_batched_tokens_list <<< "$NUM_BATCHED_TOKENS_LIST"
+
-num_seqs_list="128 256"
-num_batched_tokens_list="512 1024 2048 4096"
-for num_seqs in $num_seqs_list; do
-  for num_batched_tokens in $num_batched_tokens_list; do
-    run_benchmark $num_seqs $num_batched_tokens
-    exit 0
+# first find out the max gpu-memory-utilization without HBM OOM.
+gpu_memory_utilization=0.98
+find_gpu_memory_utilization=0
+while (( $(echo "$gpu_memory_utilization >= 0.9" | bc -l) )); do
+  start_server $gpu_memory_utilization "${num_seqs_list[-1]}" "${num_batched_tokens_list[-1]}" "$LOG_FOLDER/vllm_log_gpu_memory_utilization_$gpu_memory_utilization.log"
+  result=$?
+  if [[ "$result" -eq 0 ]]; then
+    find_gpu_memory_utilization=1
+    break
+  else
+    gpu_memory_utilization=$(echo "$gpu_memory_utilization - 0.01" | bc)
+  fi
+done
+
+if [[ "$find_gpu_memory_utilization" -eq 1 ]]; then
+  echo "Using gpu_memory_utilization=$gpu_memory_utilization to serve model."
+else
+  echo "Cannot find a proper gpu_memory_utilization over 0.9 to serve the model, please check logs in $LOG_FOLDER."
+  exit 1
+fi
+
+for num_seqs in "${num_seqs_list[@]}"; do
+  for num_batched_tokens in "${num_batched_tokens_list[@]}"; do
+    run_benchmark $num_seqs $num_batched_tokens $gpu_memory_utilization
   done
 done
 echo "finish permutations"
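The utilization search introduced above is easier to see outside of shell syntax. A minimal Python sketch of the same loop (the function names and step size mirror the script; `start_server` stands in for the shell function and should return True once `/health` answers 200):

```python
def find_gpu_memory_utilization(start_server, lo=0.9, hi=0.98, step=0.01):
    """Walk gpu-memory-utilization down from `hi` until the server boots."""
    gpu_util = hi
    while gpu_util >= lo:
        if start_server(gpu_util):
            return gpu_util  # largest utilization that serves without OOM
        gpu_util = round(gpu_util - step, 2)
    raise RuntimeError("no gpu_memory_utilization >= 0.9 could serve the model")
```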
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import io
 import json
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """
 This module defines a framework for sampling benchmark requests from various
 datasets. Each dataset subclass of BenchmarkDataset must implement sample
@@ -864,7 +865,15 @@ class InstructCoderDataset(HuggingFaceDataset):
         for item in self.data:
             if len(sampled_requests) >= num_requests:
                 break
-            prompt = f"{item['instruction']}:\n{item['input']}"
+            prompt = f"{item['input']}\n\n{item['instruction']} Just output \
+                the code, do not include any explanation."
+
+            # apply template
+            prompt = tokenizer.apply_chat_template(
+                [{"role": "user", "content": prompt}],
+                add_generation_prompt=True,
+                tokenize=False,
+            )
             prompt_len = len(tokenizer(prompt).input_ids)
             sampled_requests.append(
                 SampleRequest(
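The `apply_chat_template` call added here is the standard Hugging Face tokenizer API. A self-contained sketch of what it does to an InstructCoder-style prompt (the model name is taken from the benchmark defaults elsewhere in this change set, not required by the dataset):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
prompt = "def add(a, b): ...\n\nAdd type hints. Just output the code, do not include any explanation."
# Wrap the raw prompt in the model's chat format. tokenize=False returns a
# string; add_generation_prompt=True appends the assistant header so the
# model starts generating the answer directly.
templated = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}],
    add_generation_prompt=True,
    tokenize=False,
)
prompt_len = len(tokenizer(templated).input_ids)
```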
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """Benchmark the latency of processing a single batch of requests."""

 import argparse
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """
 Offline benchmark to test the long document QA throughput.

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """
 Benchmark the efficiency of prefix caching.

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """Benchmark offline prioritization."""

 import argparse
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 r"""Benchmark online serving throughput.

 On the server side, run one of the following commands:
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 r"""Benchmark online serving throughput with structured outputs.

 On the server side, run one of the following commands:
@@ -11,7 +12,6 @@ On the client side, run:
         --model <your_model> \
         --dataset json \
         --structured-output-ratio 1.0 \
-        --structured-output-backend auto \
         --request-rate 10 \
         --num-prompts 1000

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """Benchmark offline inference throughput."""

 import argparse
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import argparse
 import json
@@ -65,4 +66,9 @@ class InfEncoder(json.JSONEncoder):

 def write_to_json(filename: str, records: list) -> None:
     with open(filename, "w") as f:
-        json.dump(records, f, cls=InfEncoder)
+        json.dump(
+            records,
+            f,
+            cls=InfEncoder,
+            default=lambda o: f"<{type(o).__name__} object is not JSON serializable>",
+        )
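The `default=` callback is plain `json` stdlib behavior: it is invoked for any object the encoder cannot serialize, so a stray tensor or dataclass no longer aborts the whole dump. For instance:

```python
import json

class Opaque:
    pass

records = [{"ok": 1, "bad": Opaque()}]
text = json.dumps(
    records,
    default=lambda o: f"<{type(o).__name__} object is not JSON serializable>",
)
# -> '[{"ok": 1, "bad": "<Opaque object is not JSON serializable>"}]'
```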
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import argparse
 import copy
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 # Cutlass bench utils
 from collections.abc import Iterable
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import argparse
 import copy
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 # Weight Shapes are in the format
 # ([K, N], TP_SPLIT_DIM)
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import os

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import asyncio
 import itertools
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import json

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import pickle as pkl
 import time
---
@@ -1,14 +1,15 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 import argparse
 import copy
 import itertools

 import torch
-import triton
 from weight_shapes import WEIGHT_SHAPES

 from vllm._custom_ops import cutlass_scaled_mm as vllm_scaled_mm
 from vllm._custom_ops import scaled_fp8_quant as vllm_scaled_fp8_quant
+from vllm.triton_utils import triton


 @triton.testing.perf_report(
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import os
 import sys
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT License.

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """
 Benchmark the performance of the cutlass_moe_fp4 kernel vs the triton_moe
 kernel. The cutlass_moe_fp4 kernel takes in fp4 quantized weights and 16-bit
@@ -90,7 +91,7 @@ def bench_run(

     score = torch.randn((m, num_experts), device=device, dtype=dtype)

-    topk_weights, topk_ids = fused_topk(a, score, topk, renormalize=False)
+    topk_weights, topk_ids, _ = fused_topk(a, score, topk, renormalize=False)

     quant_blocksize = 16
     w1_blockscale = torch.empty(
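The call-site change reflects that `fused_topk` now returns a third value, which this benchmark discards. A hedged sketch of the updated unpacking (tensor shapes are illustrative, not from the benchmark):

```python
import torch
from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk

m, hidden, num_experts, topk = 4, 16, 8, 2
a = torch.randn(m, hidden, device="cuda", dtype=torch.half)
score = torch.randn(m, num_experts, device="cuda", dtype=torch.half)
# All call sites now unpack three results; the third is ignored here.
topk_weights, topk_ids, _ = fused_topk(a, score, topk, renormalize=False)
```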
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import torch
 import torch.utils.benchmark as benchmark
@@ -6,8 +7,8 @@ from benchmark_shapes import WEIGHT_SHAPES_MOE

 from vllm import _custom_ops as ops
 from vllm.config import ParallelConfig, VllmConfig, set_current_vllm_config
+from vllm.model_executor.layers.fused_moe.cutlass_moe import cutlass_moe_fp8
 from vllm.model_executor.layers.fused_moe.fused_moe import (
-    cutlass_moe_fp8,
     fused_experts,
     fused_topk,
 )
@@ -69,18 +70,9 @@ def bench_run(
     w1_scale = torch.empty((num_experts, 1, 1), device="cuda", dtype=torch.float32)
     w2_scale = torch.empty((num_experts, 1, 1), device="cuda", dtype=torch.float32)

-    ab_strides1 = torch.full((num_experts,), k, device="cuda", dtype=torch.int64)
-    c_strides1 = torch.full((num_experts,), 2 * n, device="cuda", dtype=torch.int64)
-    ab_strides2 = torch.full((num_experts,), n, device="cuda", dtype=torch.int64)
-    c_strides2 = torch.full((num_experts,), k, device="cuda", dtype=torch.int64)
-
     for expert in range(num_experts):
         w1_q[expert], w1_scale[expert] = ops.scaled_fp8_quant(w1[expert])
         w2_q[expert], w2_scale[expert] = ops.scaled_fp8_quant(w2[expert])
-    w1_q_notransp = w1_q.clone()
-    w2_q_notransp = w2_q.clone()
-    w1_q = w1_q.transpose(1, 2)
-    w2_q = w2_q.transpose(1, 2)

     score = torch.randn((m, num_experts), device="cuda", dtype=dtype)

@@ -121,10 +113,6 @@ def bench_run(
         w2_scale: torch.Tensor,
         topk_weights: torch.Tensor,
         topk_ids: torch.Tensor,
-        ab_strides1: torch.Tensor,
-        c_strides1: torch.Tensor,
-        ab_strides2: torch.Tensor,
-        c_strides2: torch.Tensor,
         num_repeats: int,
     ):
         for _ in range(num_repeats):
@@ -132,14 +120,10 @@ def bench_run(
                 a,
                 w1,
                 w2,
-                w1_scale,
-                w2_scale,
                 topk_weights,
                 topk_ids,
-                ab_strides1,
-                c_strides1,
-                ab_strides2,
-                c_strides2,
+                w1_scale,
+                w2_scale,
                 a1_scale=a_scale,
             )

@@ -152,10 +136,6 @@ def bench_run(
         w2_scale: torch.Tensor,
         topk_weights: torch.Tensor,
         topk_ids: torch.Tensor,
-        ab_strides1: torch.Tensor,
-        c_strides1: torch.Tensor,
-        ab_strides2: torch.Tensor,
-        c_strides2: torch.Tensor,
     ):
         with set_current_vllm_config(
             VllmConfig(parallel_config=ParallelConfig(pipeline_parallel_size=1))
@@ -164,14 +144,10 @@ def bench_run(
                 a,
                 w1_q,
                 w2_q,
-                w1_scale,
-                w2_scale,
                 topk_weights,
                 topk_ids,
-                ab_strides1,
-                c_strides1,
-                ab_strides2,
-                c_strides2,
+                w1_scale,
+                w2_scale,
                 a1_scale=a_scale,
             )

@@ -217,10 +193,6 @@ def bench_run(
             w2_scale,
             topk_weights,
             topk_ids,
-            ab_strides1,
-            c_strides1,
-            ab_strides2,
-            c_strides2,
         )
         torch.cuda.synchronize()

@@ -229,8 +201,8 @@ def bench_run(
     with torch.cuda.graph(triton_graph, stream=triton_stream):
         run_triton_from_graph(
             a,
-            w1_q_notransp,
-            w2_q_notransp,
+            w1_q,
+            w2_q,
             topk_weights,
             topk_ids,
             w1_scale,
@@ -249,18 +221,12 @@ def bench_run(
         "w2": w2,
         "score": score,
         "topk": topk,
-        "w1_q_notransp": w1_q_notransp,
-        "w2_q_notransp": w2_q_notransp,
         # Cutlass params
         "a_scale": a_scale,
         "w1_q": w1_q,
         "w2_q": w2_q,
         "w1_scale": w1_scale,
         "w2_scale": w2_scale,
-        "ab_strides1": ab_strides1,
-        "c_strides1": c_strides1,
-        "ab_strides2": ab_strides2,
-        "c_strides2": c_strides2,
         # cuda graph params
         "cutlass_graph": cutlass_graph,
         "triton_graph": triton_graph,
@@ -278,8 +244,8 @@ def bench_run(
     # Warmup
     run_triton_moe(
         a,
-        w1_q_notransp,
-        w2_q_notransp,
+        w1_q,
+        w2_q,
         topk_weights,
         topk_ids,
         w1_scale,
@@ -290,7 +256,7 @@ def bench_run(

     results.append(
         benchmark.Timer(
-            stmt="run_triton_moe(a, w1_q_notransp, w2_q_notransp, topk_weights, topk_ids, w1_scale, w2_scale, a_scale, num_runs)",  # noqa: E501
+            stmt="run_triton_moe(a, w1_q, w2_q, topk_weights, topk_ids, w1_scale, w2_scale, a_scale, num_runs)",  # noqa: E501
             globals=globals,
             label=label,
             sub_label=sub_label,
@@ -321,16 +287,12 @@ def bench_run(
         w2_scale,
         topk_weights,
         topk_ids,
-        ab_strides1,
-        c_strides1,
-        ab_strides2,
-        c_strides2,
         num_warmup,
     )

     results.append(
         benchmark.Timer(
-            stmt="run_cutlass_moe(a, a_scale, w1_q, w2_q, w1_scale, w2_scale, topk_weights, topk_ids, ab_strides1, c_strides1, ab_strides2, c_strides2, num_runs)",  # noqa: E501
+            stmt="run_cutlass_moe(a, a_scale, w1_q, w2_q, w1_scale, w2_scale, topk_weights, topk_ids, num_runs)",  # noqa: E501
             globals=globals,
             label=label,
             sub_label=sub_label,
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import time

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import argparse
 import copy
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import argparse
 import copy
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import torch
 import torch.utils.benchmark as benchmark
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import argparse
 import json
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import argparse
 from typing import Any, TypedDict
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import random
 import time
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import time

---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import itertools
 from typing import Optional, Union
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 from itertools import accumulate
 from typing import Optional
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 WEIGHT_SHAPES = {
     "ideal": [[4 * 256 * 32, 256 * 32]],
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 # Adapted from sglang quantization/tuning_block_wise_kernel.py

 import argparse
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 # fmt: off
 # ruff: noqa: E501
 import time
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import math
 import pickle
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import dataclasses
 from collections.abc import Iterable
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 # Weight Shapes are in the format
 # ([K, N], TP_SPLIT_DIM)
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import cProfile
 import pstats
---
@@ -75,6 +75,7 @@ if (MACOSX_FOUND AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
 else()
     find_isa(${CPUINFO} "avx2" AVX2_FOUND)
     find_isa(${CPUINFO} "avx512f" AVX512_FOUND)
+    find_isa(${CPUINFO} "Power11" POWER11_FOUND)
     find_isa(${CPUINFO} "POWER10" POWER10_FOUND)
     find_isa(${CPUINFO} "POWER9" POWER9_FOUND)
     find_isa(${CPUINFO} "asimd" ASIMD_FOUND) # Check for ARM NEON support
@@ -106,13 +107,19 @@ elseif (AVX2_FOUND)
     list(APPEND CXX_COMPILE_FLAGS "-mavx2")
     message(WARNING "vLLM CPU backend using AVX2 ISA")

-elseif (POWER9_FOUND OR POWER10_FOUND)
+elseif (POWER9_FOUND OR POWER10_FOUND OR POWER11_FOUND)
     message(STATUS "PowerPC detected")
-    # Check for PowerPC VSX support
-    list(APPEND CXX_COMPILE_FLAGS
-        "-mvsx"
-        "-mcpu=native"
-        "-mtune=native")
+    if (POWER9_FOUND)
+        list(APPEND CXX_COMPILE_FLAGS
+            "-mvsx"
+            "-mcpu=power9"
+            "-mtune=power9")
+    elseif (POWER10_FOUND OR POWER11_FOUND)
+        list(APPEND CXX_COMPILE_FLAGS
+            "-mvsx"
+            "-mcpu=power10"
+            "-mtune=power10")
+    endif()

 elseif (ASIMD_FOUND)
     message(STATUS "ARMv8 or later architecture detected")
---
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 #
 # A command line tool for running pytorch's hipify preprocessor on CUDA
---
@@ -119,7 +119,7 @@ typename T::Fmha::Arguments args_from_options(
       {static_cast<ElementOut*>(out.data_ptr()), stride_O,
        static_cast<ElementAcc*>(nullptr), stride_LSE},
       hw_info,
-      -1,       // split_kv
+      1,        // split_kv
       nullptr,  // is_var_split_kv
   };
   // TODO(kaixih@nvidia): When split_kv=-1 and is_var_split_kv=false, we compute
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import enum
 from typing import Union
---
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 import glob
 import itertools
 import os
---
@@ -30,4 +30,8 @@ torch::Tensor moe_wna16_gemm(torch::Tensor input, torch::Tensor output,
                              int64_t BLOCK_SIZE_K, int64_t bit);
 #endif

 bool moe_permute_unpermute_supported();
+
+void shuffle_rows(const torch::Tensor& input_tensor,
+                  const torch::Tensor& dst2src_map,
+                  torch::Tensor& output_tensor);
@@ -130,6 +130,62 @@ void moe_unpermute(
   });
 }

+template <typename T>
+__global__ void shuffleInputRowsKernel(const T* input,
+                                       const int32_t* dst2src_map, T* output,
+                                       int64_t num_src_rows,
+                                       int64_t num_dst_rows, int64_t num_cols) {
+  int64_t dest_row_idx = blockIdx.x;
+  int64_t const source_row_idx = dst2src_map[dest_row_idx];
+
+  if (blockIdx.x < num_dst_rows) {
+    // Load 128-bits per thread
+    constexpr int64_t ELEM_PER_THREAD = 128 / sizeof(T) / 8;
+    using DataElem = cutlass::Array<T, ELEM_PER_THREAD>;
+
+    // Duplicate and permute rows
+    auto const* source_row_ptr =
+        reinterpret_cast<DataElem const*>(input + source_row_idx * num_cols);
+    auto* dest_row_ptr =
+        reinterpret_cast<DataElem*>(output + dest_row_idx * num_cols);
+
+    int64_t const start_offset = threadIdx.x;
+    int64_t const stride = blockDim.x;
+    int64_t const num_elems_in_col = num_cols / ELEM_PER_THREAD;
+
+    for (int elem_index = start_offset; elem_index < num_elems_in_col;
+         elem_index += stride) {
+      dest_row_ptr[elem_index] = source_row_ptr[elem_index];
+    }
+  }
+}
+
+void shuffle_rows(const torch::Tensor& input_tensor,
+                  const torch::Tensor& dst2src_map,
+                  torch::Tensor& output_tensor) {
+  TORCH_CHECK(input_tensor.scalar_type() == output_tensor.scalar_type(),
+              "Input and output tensors must have the same data type");
+
+  auto stream = at::cuda::getCurrentCUDAStream().stream();
+  int64_t const blocks = output_tensor.size(0);
+  int64_t const threads = 256;
+  int64_t const num_dest_rows = output_tensor.size(0);
+  int64_t const num_src_rows = input_tensor.size(0);
+  int64_t const num_cols = input_tensor.size(1);
+
+  TORCH_CHECK(!(num_cols % (128 / sizeof(input_tensor.scalar_type()) / 8)),
+              "num_cols must be divisible by 128 / "
+              "sizeof(input_tensor.scalar_type()) / 8");
+
+  MOE_DISPATCH(input_tensor.scalar_type(), [&] {
+    shuffleInputRowsKernel<scalar_t><<<blocks, threads, 0, stream>>>(
+        reinterpret_cast<scalar_t*>(input_tensor.data_ptr()),
+        dst2src_map.data_ptr<int32_t>(),
+        reinterpret_cast<scalar_t*>(output_tensor.data_ptr()), num_src_rows,
+        num_dest_rows, num_cols);
+  });
+}
+
 #else

 void moe_permute(const torch::Tensor& input, const torch::Tensor& topk_weights,
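Functionally, the new kernel is a row gather with possible duplication: destination row `d` copies source row `dst2src_map[d]`, and the output may have more rows than the input (MoE token duplication across experts). A plain-PyTorch reference of the semantics, not the vectorized CUDA path:

```python
import torch

def shuffle_rows_ref(input_tensor: torch.Tensor, dst2src_map: torch.Tensor) -> torch.Tensor:
    # Each destination row copies one source row; indices may repeat.
    return input_tensor[dst2src_map.long()]

x = torch.randn(4, 8)
dst2src = torch.tensor([2, 2, 0, 3, 1], dtype=torch.int32)
out = shuffle_rows_ref(x, dst2src)
assert out.shape == (5, 8) and torch.equal(out[0], x[2])
```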
---
@@ -14,12 +14,13 @@
     __VA_ARGS__();                                                \
     break;                                                        \
   }
 #define MOE_DISPATCH_FLOAT_CASE(...)                              \
   MOE_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)           \
   MOE_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)            \
   MOE_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)        \
   MOE_DISPATCH_CASE(at::ScalarType::Float8_e5m2, __VA_ARGS__)     \
-  MOE_DISPATCH_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__)
+  MOE_DISPATCH_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__)   \
+  MOE_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__)

 #define MOE_DISPATCH(TYPE, ...) \
   MOE_SWITCH(TYPE, MOE_DISPATCH_FLOAT_CASE(__VA_ARGS__))
@@ -39,6 +40,11 @@ template <>
 struct ScalarType2CudaType<at::ScalarType::BFloat16> {
   using type = __nv_bfloat16;
 };
+// uint8 for packed fp4
+template <>
+struct ScalarType2CudaType<at::ScalarType::Byte> {
+  using type = uint8_t;
+};

 // #if __CUDA_ARCH__ >= 890
 // fp8
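`at::ScalarType::Byte` maps to `uint8_t` here because packed fp4 stores two 4-bit values per byte. A small PyTorch illustration of that packing convention (low nibble first; the codes are stand-ins for fp4 bit patterns, not an implementation of this dispatch macro):

```python
import torch

codes = torch.randint(0, 16, (8,), dtype=torch.uint8)  # 4-bit codes
packed = codes[0::2] | (codes[1::2] << 4)              # two codes per uint8
lo, hi = packed & 0xF, packed >> 4                     # unpack the nibbles
assert torch.equal(torch.stack([lo, hi], dim=1).flatten(), codes)
```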
---
@@ -516,9 +516,8 @@ void topk_softmax(
         topk,
         stream);
   }
-  else
+  else if (topk_indices.scalar_type() == at::ScalarType::UInt32)
   {
-    assert(topk_indices.scalar_type() == at::ScalarType::UInt32);
     vllm::moe::topkGatingSoftmaxKernelLauncher(
         gating_output.data_ptr<float>(),
         topk_weights.data_ptr<float>(),
@@ -530,4 +529,17 @@ void topk_softmax(
         topk,
         stream);
   }
+  else {
+    assert(topk_indices.scalar_type() == at::ScalarType::Int64);
+    vllm::moe::topkGatingSoftmaxKernelLauncher(
+        gating_output.data_ptr<float>(),
+        topk_weights.data_ptr<float>(),
+        topk_indices.data_ptr<int64_t>(),
+        token_expert_indices.data_ptr<int>(),
+        softmax_workspace.data_ptr<float>(),
+        num_tokens,
+        num_experts,
+        topk,
+        stream);
+  }
 }
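The new branch matters because `torch.topk` produces int64 indices by default, so callers that pass such a tensor straight in no longer trip the old UInt32-only assert. Reference semantics of `topk_softmax` in plain PyTorch:

```python
import torch

def topk_softmax_ref(gating_output: torch.Tensor, topk: int):
    # Softmax over experts, then pick the top-k per token; torch.topk returns
    # int64 indices, which the new Int64 launcher branch accepts directly.
    probs = torch.softmax(gating_output.float(), dim=-1)
    topk_weights, topk_indices = probs.topk(topk, dim=-1)
    return topk_weights, topk_indices

logits = torch.randn(3, 8)
weights, ids = topk_softmax_ref(logits, topk=2)
assert ids.dtype == torch.int64
```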
---
@@ -81,6 +81,12 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, m) {
   m.def("moe_permute_unpermute_supported() -> bool");
   m.impl("moe_permute_unpermute_supported", &moe_permute_unpermute_supported);

+  // Row shuffle for MoE
+  m.def(
+      "shuffle_rows(Tensor input_tensor, Tensor dst2src_map, Tensor! "
+      "output_tensor) -> ()");
+  m.impl("shuffle_rows", torch::kCUDA, &shuffle_rows);
+
 #endif
 }

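With the schema registered, the op becomes callable from Python through `torch.ops`. A hypothetical invocation, assuming the MoE extension registers under the `_moe_C` namespace; the output tensor is preallocated because the `Tensor!` schema mutates it in place:

```python
import torch

num_dst, hidden = 5, 128
x = torch.randn(3, hidden, device="cuda", dtype=torch.half)
dst2src_map = torch.tensor([0, 2, 2, 1, 0], device="cuda", dtype=torch.int32)
output = torch.empty(num_dst, hidden, device="cuda", dtype=x.dtype)
torch.ops._moe_C.shuffle_rows(x, dst2src_map, output)  # fills `output` row by row
```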
---
csrc/ops.h (19 changes)
@@ -92,6 +92,11 @@ void rms_norm(torch::Tensor& out, torch::Tensor& input, torch::Tensor& weight,
 void fused_add_rms_norm(torch::Tensor& input, torch::Tensor& residual,
                         torch::Tensor& weight, double epsilon);

+void apply_repetition_penalties_(torch::Tensor& logits,
+                                 const torch::Tensor& prompt_mask,
+                                 const torch::Tensor& output_mask,
+                                 const torch::Tensor& repetition_penalties);
+
 void rms_norm_static_fp8_quant(torch::Tensor& out, torch::Tensor& input,
                                torch::Tensor& weight, torch::Tensor& scale,
                                double epsilon);
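A sketch of the usual repetition-penalty semantics behind this declaration: for every token already seen in the prompt or output, positive logits are divided by the per-sequence penalty and negative logits multiplied by it. The in-place CUDA op presumably follows the same rule; this is an out-of-place reference, not its implementation:

```python
import torch

def apply_repetition_penalties_ref(logits, prompt_mask, output_mask, penalties):
    seen = prompt_mask | output_mask                  # tokens already emitted
    p = penalties.unsqueeze(-1).expand_as(logits)     # per-sequence penalty
    penalized = torch.where(logits > 0, logits / p, logits * p)
    return torch.where(seen, penalized, logits)

logits = torch.randn(2, 10)
prompt_mask = torch.zeros(2, 10, dtype=torch.bool)
output_mask = torch.zeros(2, 10, dtype=torch.bool)
output_mask[0, 3] = True
out = apply_repetition_penalties_ref(logits, prompt_mask, output_mask,
                                     torch.tensor([1.2, 1.0]))
```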
@@ -231,7 +236,8 @@ void cutlass_moe_mm(
     torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
     torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
     torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
-    torch::Tensor const& b_strides, torch::Tensor const& c_strides);
+    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
+    bool per_act_token, bool per_out_ch);

 void cutlass_fp4_group_mm(
     torch::Tensor& output, const torch::Tensor& a, const torch::Tensor& b,
@@ -243,7 +249,16 @@ void get_cutlass_moe_mm_data(
     const torch::Tensor& topk_ids, torch::Tensor& expert_offsets,
     torch::Tensor& problem_sizes1, torch::Tensor& problem_sizes2,
     torch::Tensor& input_permutation, torch::Tensor& output_permutation,
-    const int64_t num_experts, const int64_t n, const int64_t k);
+    const int64_t num_experts, const int64_t n, const int64_t k,
+    const std::optional<torch::Tensor>& blockscale_offsets);
+
+void get_cutlass_pplx_moe_mm_data(torch::Tensor& expert_offsets,
+                                  torch::Tensor& problem_sizes1,
+                                  torch::Tensor& problem_sizes2,
+                                  const torch::Tensor& expert_num_tokens,
+                                  const int64_t num_local_experts,
+                                  const int64_t padded_m, const int64_t n,
+                                  const int64_t k);

 void cutlass_scaled_mm_azp(torch::Tensor& out, torch::Tensor const& a,
                            torch::Tensor const& b,
---
@@ -9,10 +9,6 @@ void cutlass_scaled_mm_blockwise_sm100_fp8(torch::Tensor& out,
                                            torch::Tensor const& b,
                                            torch::Tensor const& a_scales,
                                            torch::Tensor const& b_scales) {
-  TORCH_CHECK(
-      a.size(0) % 4 == 0,
-      "Input tensor must have a number of rows that is a multiple of 4. ",
-      "but got: ", a.size(0), " rows.");
   if (out.dtype() == torch::kBFloat16) {
     cutlass_gemm_blockwise_sm100_fp8_dispatch<cutlass::bfloat16_t>(
         out, a, b, a_scales, b_scales);
|
|||||||
@@ -1,5 +1,6 @@
 #pragma once
 
+#include "cuda_utils.h"
 #include "cutlass/cutlass.h"
 #include "cutlass/numeric_types.h"
 

@@ -22,49 +23,49 @@ namespace vllm {
 
 using namespace cute;
 
-template <typename OutType, typename MmaTileShape, typename ScalesPerTile,
-          class ClusterShape, typename EpilogueScheduler,
-          typename MainloopScheduler>
+// clang-format off
+template <class OutType, int ScaleGranularityM,
+          int ScaleGranularityN, int ScaleGranularityK,
+          class MmaTileShape, class ClusterShape,
+          class EpilogueScheduler, class MainloopScheduler,
+          bool swap_ab_ = false>
 struct cutlass_3x_gemm_fp8_blockwise {
+  static constexpr bool swap_ab = swap_ab_;
   using ElementAB = cutlass::float_e4m3_t;
 
   using ElementA = ElementAB;
   using LayoutA = cutlass::layout::RowMajor;
+  using LayoutA_Transpose = typename cutlass::layout::LayoutTranspose<LayoutA>::type;
   static constexpr int AlignmentA = 128 / cutlass::sizeof_bits<ElementA>::value;
 
   using ElementB = ElementAB;
   using LayoutB = cutlass::layout::ColumnMajor;
+  using LayoutB_Transpose = typename cutlass::layout::LayoutTranspose<LayoutB>::type;
   static constexpr int AlignmentB = 128 / cutlass::sizeof_bits<ElementB>::value;
 
-  using ElementC = void;
   using ElementD = OutType;
   using LayoutD = cutlass::layout::RowMajor;
+  using LayoutD_Transpose = typename cutlass::layout::LayoutTranspose<LayoutD>::type;
   static constexpr int AlignmentD = 128 / cutlass::sizeof_bits<ElementD>::value;
 
+  using ElementC = void;  // TODO: support bias
   using LayoutC = LayoutD;
+  using LayoutC_Transpose = LayoutD_Transpose;
   static constexpr int AlignmentC = AlignmentD;
 
   using ElementAccumulator = float;
   using ElementCompute = float;
   using ElementBlockScale = float;
 
-  // MMA and Cluster Tile Shapes
-  // Shape of the tile computed by tcgen05 MMA, could be across 2 SMs if Cluster
-  // Shape %2 == 0
-  using MmaTileShape_MNK = Shape<_128,_128,_128>;
-  static constexpr int ScaleMsPerTile = size<0>(ScalesPerTile{});
-  static constexpr int ScaleGranularityM =
-      size<0>(MmaTileShape{}) / ScaleMsPerTile;
-  static constexpr int ScaleGranularityN =
-      size<1>(MmaTileShape{}) / size<1>(ScalesPerTile{});
-  static constexpr int ScaleGranularityK =
-      size<2>(MmaTileShape{}) / size<2>(ScalesPerTile{});
-
-  // Shape of the threadblocks in a cluster
-  using ClusterShape_MNK = ClusterShape;
-
-  using ScaleConfig = cutlass::detail::Sm100BlockwiseScaleConfig<
-      ScaleGranularityM, ScaleGranularityN, ScaleGranularityK,
-      cute::UMMA::Major::MN, cute::UMMA::Major::K>;
+  using ScaleConfig = conditional_t<swap_ab,
+      cutlass::detail::Sm100BlockwiseScaleConfig<
+          ScaleGranularityM, ScaleGranularityN, ScaleGranularityK,
+          cute::UMMA::Major::K, cute::UMMA::Major::MN>,
+      cutlass::detail::Sm100BlockwiseScaleConfig<
+          ScaleGranularityM, ScaleGranularityN, ScaleGranularityK,
+          cute::UMMA::Major::MN, cute::UMMA::Major::K>>;
+
+  // layout_SFA and layout_SFB cannot be swapped since they are deduced.
   using LayoutSFA = decltype(ScaleConfig::deduce_layoutSFA());
   using LayoutSFB = decltype(ScaleConfig::deduce_layoutSFB());
 

@@ -73,7 +74,6 @@ struct cutlass_3x_gemm_fp8_blockwise {
 
   static constexpr auto RoundStyle = cutlass::FloatRoundStyle::round_to_nearest;
   using ElementScalar = float;
-  // clang-format off
   using DefaultOperation = cutlass::epilogue::fusion::LinearCombination<ElementD, ElementCompute, ElementC, ElementScalar, RoundStyle>;
   using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder<
       ArchTag,
@@ -84,33 +84,47 @@ struct cutlass_3x_gemm_fp8_blockwise {
       ElementAccumulator,
       ElementCompute,
       ElementC,
-      LayoutC,
+      conditional_t<swap_ab, LayoutC_Transpose, LayoutC>,
       AlignmentC,
       ElementD,
-      LayoutD,
+      conditional_t<swap_ab, LayoutD_Transpose, LayoutD>,
       AlignmentD,
       EpilogueScheduler,
       DefaultOperation
   >::CollectiveOp;
 
   using StageCountType = cutlass::gemm::collective::StageCountAuto;
-  using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
-      ArchTag,
-      OperatorClass,
-      ElementA,
-      cute::tuple<LayoutA, LayoutSFA>,
-      AlignmentA,
-      ElementB,
-      cute::tuple<LayoutB, LayoutSFB>,
-      AlignmentB,
-      ElementAccumulator,
-      MmaTileShape,
-      ClusterShape,
-      cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
-      MainloopScheduler
-  >::CollectiveOp;
-  // clang-format on
+  using CollectiveMainloop = conditional_t<swap_ab,
+      typename cutlass::gemm::collective::CollectiveBuilder<
+          ArchTag,
+          OperatorClass,
+          ElementB,
+          cute::tuple<LayoutB_Transpose, LayoutSFA>,
+          AlignmentB,
+          ElementA,
+          cute::tuple<LayoutA_Transpose, LayoutSFB>,
+          AlignmentA,
+          ElementAccumulator,
+          MmaTileShape,
+          ClusterShape,
+          cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
+          MainloopScheduler
+      >::CollectiveOp,
+      typename cutlass::gemm::collective::CollectiveBuilder<
+          ArchTag,
+          OperatorClass,
+          ElementA,
+          cute::tuple<LayoutA, LayoutSFA>,
+          AlignmentA,
+          ElementB,
+          cute::tuple<LayoutB, LayoutSFB>,
+          AlignmentB,
+          ElementAccumulator,
+          MmaTileShape,
+          ClusterShape,
+          cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>,
+          MainloopScheduler
+      >::CollectiveOp>;
 
   using KernelType = enable_sm100_only<cutlass::gemm::kernel::GemmUniversal<
       Shape<int, int, int, int>, CollectiveMainloop, CollectiveEpilogue>>;
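Note (editorial, not part of the diff): the `swap_ab` specialization builds the mainloop with B and A exchanged and all layouts transposed, which rests on the identity (A·B)^T = B^T·A^T. When m is tiny, launching the transposed problem lets n play the role of the tile's M dimension. A minimal host-side sketch of that algebra, with hypothetical sizes:

```cpp
#include <cassert>
#include <vector>

int main() {
  const int m = 2, k = 3, n = 4;
  std::vector<float> A(m * k), B(k * n), D(m * n, 0.f), Dt(n * m, 0.f);
  for (int i = 0; i < m * k; ++i) A[i] = 0.5f * i;
  for (int i = 0; i < k * n; ++i) B[i] = 0.25f * i;
  // D = A * B, row-major, problem shape (m, n, k)
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j)
      for (int p = 0; p < k; ++p) D[i * n + j] += A[i * k + p] * B[p * n + j];
  // Dt = B^T * A^T, the "swapped" launch: problem shape (n, m, k)
  for (int j = 0; j < n; ++j)
    for (int i = 0; i < m; ++i)
      for (int p = 0; p < k; ++p) Dt[j * m + i] += B[p * n + j] * A[i * k + p];
  // Writing Dt through a transposed (column-major) D layout recovers D exactly.
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j) assert(D[i * n + j] == Dt[j * m + i]);
  return 0;
}
```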
@@ -123,6 +137,7 @@ void cutlass_gemm_caller_blockwise(torch::Tensor& out, torch::Tensor const& a,
                                    torch::Tensor const& b,
                                    torch::Tensor const& a_scales,
                                    torch::Tensor const& b_scales) {
+  static constexpr bool swap_ab = Gemm::swap_ab;
   using GemmKernel = typename Gemm::GemmKernel;
   using StrideA = typename Gemm::GemmKernel::StrideA;
   using StrideB = typename Gemm::GemmKernel::StrideB;

@@ -136,7 +151,6 @@ void cutlass_gemm_caller_blockwise(torch::Tensor& out, torch::Tensor const& a,
   using ElementD = typename Gemm::ElementD;
 
   int32_t m = a.size(0), n = b.size(1), k = a.size(1);
-  auto prob_shape = cute::make_shape(m, n, k, 1);
 
   StrideA a_stride;
   StrideB b_stride;
@@ -146,11 +160,13 @@ void cutlass_gemm_caller_blockwise(torch::Tensor& out, torch::Tensor const& a,
   b_stride =
       cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(n, k, 1));
   c_stride =
-      cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(m, n, 1));
+      cutlass::make_cute_packed_stride(StrideC{}, swap_ab ? cute::make_shape(n, m, 1) : cute::make_shape(m, n, 1));
 
-  LayoutSFA layout_SFA =
+  LayoutSFA layout_SFA = swap_ab ?
+      ScaleConfig::tile_atom_to_shape_SFA(make_shape(n, m, k, 1)) :
       ScaleConfig::tile_atom_to_shape_SFA(make_shape(m, n, k, 1));
-  LayoutSFB layout_SFB =
+  LayoutSFB layout_SFB = swap_ab ?
+      ScaleConfig::tile_atom_to_shape_SFB(make_shape(n, m, k, 1)) :
      ScaleConfig::tile_atom_to_shape_SFB(make_shape(m, n, k, 1));
 
   auto a_ptr = static_cast<ElementAB*>(a.data_ptr());

@@ -158,9 +174,22 @@ void cutlass_gemm_caller_blockwise(torch::Tensor& out, torch::Tensor const& a,
   auto a_scales_ptr = static_cast<float*>(a_scales.data_ptr());
   auto b_scales_ptr = static_cast<float*>(b_scales.data_ptr());
 
-  typename GemmKernel::MainloopArguments mainloop_args{
-      a_ptr, a_stride, b_ptr, b_stride,
-      a_scales_ptr, layout_SFA, b_scales_ptr, layout_SFB};
+  auto mainloop_args = [&](){
+    // layout_SFA and layout_SFB cannot be swapped since they are deduced.
+    if (swap_ab) {
+      return typename GemmKernel::MainloopArguments{
+          b_ptr, b_stride, a_ptr, a_stride,
+          b_scales_ptr, layout_SFA, a_scales_ptr, layout_SFB
+      };
+    }
+    else {
+      return typename GemmKernel::MainloopArguments{
+          a_ptr, a_stride, b_ptr, b_stride,
+          a_scales_ptr, layout_SFA, b_scales_ptr, layout_SFB
+      };
+    }
+  }();
+  auto prob_shape = swap_ab ? cute::make_shape(n, m, k, 1) : cute::make_shape(m, n, k, 1);
 
   auto c_ptr = static_cast<ElementD*>(out.data_ptr());
   typename GemmKernel::EpilogueArguments epilogue_args{
@@ -175,29 +204,74 @@ void cutlass_gemm_blockwise_sm100_fp8_dispatch(torch::Tensor& out,
                                                torch::Tensor const& b,
                                                torch::Tensor const& a_scales,
                                                torch::Tensor const& b_scales) {
-  auto m = a.size(0);
-  auto k = a.size(1);
-  auto n = b.size(1);
-  int sms;
+  int32_t m = a.size(0), n = b.size(1), k = a.size(1), sms;
   cudaDeviceGetAttribute(&sms, cudaDevAttrMultiProcessorCount, a.get_device());
 
-  auto should_use_2sm = [&sms](int m, int n, int tile1SM = 128) {
-    return std::ceil(static_cast<float>(m) / tile1SM) *
-               std::ceil(static_cast<float>(n) / tile1SM) >=
-           sms;
-  };
-  bool use_2sm = should_use_2sm(m, n);
-  if (use_2sm) {
-    cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
-        OutType, Shape<_256, _128, _128>, Shape<_256, _1, _1>,
-        Shape<_2, _2, _1>, cutlass::epilogue::TmaWarpSpecialized2Sm,
-        cutlass::gemm::KernelTmaWarpSpecializedBlockwise2SmSm100>>(
-        out, a, b, a_scales, b_scales);
+  constexpr int TILE_K = 128;
+  // TODO: better heuristics
+  bool swap_ab = (m < 16) || (m % 4 != 0);
+  bool use_tma_epilogue = (m * n) % 4 == 0;
+  if (!swap_ab) {
+    constexpr int TILE_N = 128;
+    int tile_m = 256;
+    if (cuda_utils::ceil_div(n, TILE_N) * cuda_utils::ceil_div(m, 64) <= sms) {
+      tile_m = 64;
+    }
+    else if (cuda_utils::ceil_div(n, TILE_N) * cuda_utils::ceil_div(m, 128) <= sms) {
+      tile_m = 128;
+    }
+    if (tile_m == 64) {
+      if (use_tma_epilogue) {
+        cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
+            OutType, 1, TILE_N, TILE_K, Shape<_64, Int<TILE_N>, Int<TILE_K>>,
+            Shape<_1, _1, _1>, cutlass::epilogue::TmaWarpSpecialized1Sm,
+            cutlass::gemm::KernelTmaWarpSpecializedBlockwise1SmSm100>>(
+            out, a, b, a_scales, b_scales);
+      } else {
+        cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
+            OutType, 1, TILE_N, TILE_K, Shape<_64, Int<TILE_N>, Int<TILE_K>>,
+            Shape<_1, _1, _1>, cutlass::epilogue::NoSmemWarpSpecialized1Sm,
+            cutlass::gemm::KernelTmaWarpSpecializedBlockwise1SmSm100>>(
+            out, a, b, a_scales, b_scales);
+      }
+    } else if (tile_m == 128) {
+      if (use_tma_epilogue) {
+        cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
+            OutType, 1, TILE_N, TILE_K, Shape<_128, Int<TILE_N>, Int<TILE_K>>,
+            Shape<_1, _1, _1>, cutlass::epilogue::TmaWarpSpecialized1Sm,
+            cutlass::gemm::KernelTmaWarpSpecializedBlockwise1SmSm100>>(
+            out, a, b, a_scales, b_scales);
+      } else {
+        cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
+            OutType, 1, TILE_N, TILE_K, Shape<_128, Int<TILE_N>, Int<TILE_K>>,
+            Shape<_1, _1, _1>, cutlass::epilogue::NoSmemWarpSpecialized1Sm,
+            cutlass::gemm::KernelTmaWarpSpecializedBlockwise1SmSm100>>(
+            out, a, b, a_scales, b_scales);
+      }
+    } else {  // tile_m == 256
+      if (use_tma_epilogue) {
+        cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
+            OutType, 1, TILE_N, TILE_K, Shape<_256, Int<TILE_N>, Int<TILE_K>>,
+            Shape<_2, _1, _1>, cutlass::epilogue::TmaWarpSpecialized2Sm,
+            cutlass::gemm::KernelTmaWarpSpecializedBlockwise2SmSm100>>(
+            out, a, b, a_scales, b_scales);
+      } else {
+        cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
+            OutType, 1, TILE_N, TILE_K, Shape<_256, Int<TILE_N>, Int<TILE_K>>,
+            Shape<_2, _1, _1>, cutlass::epilogue::NoSmemWarpSpecialized2Sm,
+            cutlass::gemm::KernelTmaWarpSpecializedBlockwise2SmSm100>>(
+            out, a, b, a_scales, b_scales);
+      }
+    }
   } else {
+    // TODO: Test more tile N configs
+    constexpr int TILE_M = 128;
+    constexpr int TILE_N = 16;
+    // TMA epilogue isn't compatible with Swap A/B
     cutlass_gemm_caller_blockwise<cutlass_3x_gemm_fp8_blockwise<
-        OutType, Shape<_128, _128, _128>, Shape<_128, _1, _1>,
-        Shape<_1, _1, _1>, cutlass::epilogue::TmaWarpSpecialized1Sm,
-        cutlass::gemm::KernelTmaWarpSpecializedBlockwise1SmSm100>>(
+        OutType, TILE_M, 1, TILE_K, Shape<Int<TILE_M>, Int<TILE_N>, Int<TILE_K>>,
+        Shape<_1, _1, _1>, cutlass::epilogue::NoSmemWarpSpecialized1Sm,
+        cutlass::gemm::KernelTmaWarpSpecializedBlockwise1SmSm100, true>>(
         out, a, b, a_scales, b_scales);
   }
 }
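Note (editorial): the new `tile_m` selection is a one-wave occupancy check; it prefers the smallest M tile whose CTA grid still fits on the device in a single wave. A standalone sketch of the same arithmetic (TILE_N = 128, as in the diff; the device SM count is illustrative):

```cpp
#include <cstdio>

int ceil_div(int a, int b) { return (a + b - 1) / b; }

int pick_tile_m(int m, int n, int sms) {
  const int TILE_N = 128;
  // Small grids: smaller M tiles create more CTAs and keep more SMs busy.
  if (ceil_div(n, TILE_N) * ceil_div(m, 64) <= sms) return 64;
  if (ceil_div(n, TILE_N) * ceil_div(m, 128) <= sms) return 128;
  return 256;  // large grids: the biggest tile amortizes best
}

int main() {
  printf("%d\n", pick_tile_m(64, 4096, 148));    // 64: 32 CTAs fit in one wave
  printf("%d\n", pick_tile_m(8192, 8192, 148));  // 256: grid far exceeds one wave
  return 0;
}
```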
@@ -15,6 +15,7 @@ using c3x::cutlass_gemm_caller;
 template <typename InType, typename OutType,
           template <typename, typename, typename> typename Epilogue>
 struct sm100_fp8_config_default {
+  // M in (128, inf)
   static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
   using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto;
   using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto;

@@ -25,6 +26,34 @@ struct sm100_fp8_config_default {
                             KernelSchedule, EpilogueSchedule>;
 };
 
+template <typename InType, typename OutType,
+          template <typename, typename, typename> typename Epilogue>
+struct sm100_fp8_config_M128 {
+  // M in (64, 128]
+  static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
+  using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto;
+  using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto;
+  using TileShape = Shape<_128, _128, _64>;
+  using ClusterShape = Shape<_2, _2, _1>;
+  using Cutlass3xGemm =
+      cutlass_3x_gemm_sm100<InType, OutType, Epilogue, TileShape, ClusterShape,
+                            KernelSchedule, EpilogueSchedule>;
+};
+
+template <typename InType, typename OutType,
+          template <typename, typename, typename> typename Epilogue>
+struct sm100_fp8_config_M64 {
+  // M in [1, 64]
+  static_assert(std::is_same<InType, cutlass::float_e4m3_t>());
+  using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto;
+  using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto;
+  using TileShape = Shape<_64, _64, _256>;
+  using ClusterShape = Shape<_1, _8, _1>;
+  using Cutlass3xGemm =
+      cutlass_3x_gemm_sm100<InType, OutType, Epilogue, TileShape, ClusterShape,
+                            KernelSchedule, EpilogueSchedule>;
+};
+
 template <typename InType, typename OutType,
           template <typename, typename, typename> typename Epilogue,
           typename... EpilogueArgs>
@@ -39,8 +68,28 @@ inline void cutlass_gemm_sm100_fp8_dispatch(torch::Tensor& out,
   using Cutlass3xGemmDefault =
       typename sm100_fp8_config_default<InType, OutType,
                                         Epilogue>::Cutlass3xGemm;
-  return cutlass_gemm_caller<Cutlass3xGemmDefault>(
-      out, a, b, std::forward<EpilogueArgs>(args)...);
+  using Cutlass3xGemmM64 =
+      typename sm100_fp8_config_M64<InType, OutType, Epilogue>::Cutlass3xGemm;
+  using Cutlass3xGemmM128 =
+      typename sm100_fp8_config_M128<InType, OutType, Epilogue>::Cutlass3xGemm;
+
+  uint32_t const m = a.size(0);
+  uint32_t const mp2 =
+      std::max(static_cast<uint32_t>(64), next_pow_2(m));  // next power of 2
+
+  if (mp2 <= 64) {
+    // m in [1, 64]
+    return cutlass_gemm_caller<Cutlass3xGemmM64>(
+        out, a, b, std::forward<EpilogueArgs>(args)...);
+  } else if (mp2 <= 128) {
+    // m in (64, 128]
+    return cutlass_gemm_caller<Cutlass3xGemmM128>(
+        out, a, b, std::forward<EpilogueArgs>(args)...);
+  } else {
+    // m in (128, inf)
+    return cutlass_gemm_caller<Cutlass3xGemmDefault>(
+        out, a, b, std::forward<EpilogueArgs>(args)...);
+  }
 }
 
 template <template <typename, typename, typename> typename Epilogue,
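Note (editorial): rounding m up to a power of two makes the bucket boundaries in the dispatch explicit. The diff assumes a `next_pow_2` helper defined elsewhere in the tree; a sketch of the usual bit-twiddling form (GCC/Clang builtin):

```cpp
#include <cassert>
#include <cstdint>

static inline uint32_t next_pow_2(uint32_t x) {
  if (x <= 1) return 1;
  return 1u << (32 - __builtin_clz(x - 1));  // smallest power of two >= x
}

int main() {
  assert(next_pow_2(1) == 1 && next_pow_2(64) == 64);
  assert(next_pow_2(65) == 128);   // lands in the (64, 128] bucket
  assert(next_pow_2(129) == 256);  // falls through to the default config
  return 0;
}
```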
@@ -84,7 +84,8 @@ void run_cutlass_moe_mm_sm90(
     torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
     torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
     torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
-    torch::Tensor const& b_strides, torch::Tensor const& c_strides) {
+    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
+    bool per_act_token, bool per_out_ch) {
   TORCH_CHECK(a_tensors.size(0) > 0, "No input A tensors provided.");
   TORCH_CHECK(b_tensors.size(0) > 0, "No input B tensors provided.");
   TORCH_CHECK(out_tensors.size(0) > 0, "No output tensors provided.");

@@ -113,19 +114,23 @@ void run_cutlass_moe_mm_sm90(
   if (n >= 8192) {
     cutlass_group_gemm_caller<Cutlass3xGemmN8192>(
         out_tensors, a_tensors, b_tensors, a_scales, b_scales, expert_offsets,
-        problem_sizes, a_strides, b_strides, c_strides);
+        problem_sizes, a_strides, b_strides, c_strides, per_act_token,
+        per_out_ch);
   } else if (k >= 8192) {
     cutlass_group_gemm_caller<Cutlass3xGemmK8192>(
         out_tensors, a_tensors, b_tensors, a_scales, b_scales, expert_offsets,
-        problem_sizes, a_strides, b_strides, c_strides);
+        problem_sizes, a_strides, b_strides, c_strides, per_act_token,
+        per_out_ch);
   } else if (m <= 16) {
     cutlass_group_gemm_caller<Cutlass3xGemmM16>(
         out_tensors, a_tensors, b_tensors, a_scales, b_scales, expert_offsets,
-        problem_sizes, a_strides, b_strides, c_strides);
+        problem_sizes, a_strides, b_strides, c_strides, per_act_token,
+        per_out_ch);
   } else {
     cutlass_group_gemm_caller<Cutlass3xGemmDefault>(
         out_tensors, a_tensors, b_tensors, a_scales, b_scales, expert_offsets,
-        problem_sizes, a_strides, b_strides, c_strides);
+        problem_sizes, a_strides, b_strides, c_strides, per_act_token,
+        per_out_ch);
   }
 }
 

@@ -134,15 +139,18 @@ void dispatch_moe_mm_sm90(
     torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
     torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
     torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
-    torch::Tensor const& b_strides, torch::Tensor const& c_strides) {
+    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
+    bool per_act_token, bool per_out_ch) {
   if (out_tensors.dtype() == torch::kBFloat16) {
     run_cutlass_moe_mm_sm90<cutlass::float_e4m3_t, cutlass::bfloat16_t>(
         out_tensors, a_tensors, b_tensors, a_scales, b_scales, expert_offsets,
-        problem_sizes, a_strides, b_strides, c_strides);
+        problem_sizes, a_strides, b_strides, c_strides, per_act_token,
+        per_out_ch);
   } else {
     run_cutlass_moe_mm_sm90<cutlass::float_e4m3_t, cutlass::half_t>(
         out_tensors, a_tensors, b_tensors, a_scales, b_scales, expert_offsets,
-        problem_sizes, a_strides, b_strides, c_strides);
+        problem_sizes, a_strides, b_strides, c_strides, per_act_token,
+        per_out_ch);
   }
 }
 

@@ -153,8 +161,9 @@ void cutlass_moe_mm_sm90(
     torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
     torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
     torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
-    torch::Tensor const& b_strides, torch::Tensor const& c_strides) {
+    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
+    bool per_act_token, bool per_out_ch) {
   dispatch_moe_mm_sm90(out_tensors, a_tensors, b_tensors, a_scales, b_scales,
                        expert_offsets, problem_sizes, a_strides, b_strides,
-                       c_strides);
+                       c_strides, per_act_token, per_out_ch);
 }
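Note (editorial): `per_act_token` and `per_out_ch` used to be inferred inside `cutlass_group_gemm_caller` from the scale tensor sizes (see the lines removed in the next hunk); callers now pass them explicitly. A sketch of the equivalent inference, matching the removed logic:

```cpp
#include <cstdint>

bool infer_per_act_token(int64_t a_scales_numel) {
  // A single scalar means per-tensor scaling; anything larger is per-token.
  return a_scales_numel != 1;
}

bool infer_per_out_ch(int64_t b_scales_numel, int64_t num_experts) {
  // One scale per expert means per-tensor weights; more means per-channel.
  return b_scales_numel != num_experts;
}
```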
@@ -76,7 +76,8 @@ void cutlass_group_gemm_caller(
     torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
     torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
     torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
-    torch::Tensor const& b_strides, torch::Tensor const& c_strides) {
+    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
+    bool per_act_token, bool per_out_ch) {
   using ElementAB = typename Gemm::ElementAB;
   using ElementD = typename Gemm::ElementD;
 

@@ -84,9 +85,6 @@ void cutlass_group_gemm_caller(
   int k_size = a_tensors.size(1);
   int n_size = out_tensors.size(1);
 
-  bool per_act_token = a_scales.numel() != 1;
-  bool per_out_ch = b_scales.numel() != num_experts;
-
   auto stream = at::cuda::getCurrentCUDAStream(a_tensors.device().index());
 
   auto options_int =
@@ -7,7 +7,7 @@
 
 constexpr uint64_t THREADS_PER_EXPERT = 512;
 
-__global__ void compute_problem_sizes(const int* __restrict__ topk_ids,
+__global__ void compute_problem_sizes(const uint32_t* __restrict__ topk_ids,
                                       int32_t* problem_sizes1,
                                       int32_t* problem_sizes2,
                                       int32_t* atomic_buffer,

@@ -45,7 +45,24 @@ __global__ void compute_expert_offsets(
   }
 }
 
-__global__ void compute_arg_sorts(const int* __restrict__ topk_ids,
+__global__ void compute_expert_blockscale_offsets(
+    const int32_t* __restrict__ problem_sizes1, int32_t* expert_offsets,
+    int32_t* blockscale_offsets, int32_t* atomic_buffer,
+    const int num_experts) {
+  int32_t tot_offset = 0;
+  int32_t tot_offset_round = 0;
+  expert_offsets[0] = 0;
+  blockscale_offsets[0] = 0;
+  for (int i = 0; i < num_experts; ++i) {
+    atomic_buffer[i] = tot_offset;
+    tot_offset += problem_sizes1[i * 3];
+    expert_offsets[i + 1] = tot_offset;
+    tot_offset_round += (problem_sizes1[i * 3] + (128 - 1)) / 128 * 128;
+    blockscale_offsets[i + 1] = tot_offset_round;
+  }
+}
+
+__global__ void compute_arg_sorts(const uint32_t* __restrict__ topk_ids,
                                   const int32_t* __restrict__ expert_offsets,
                                   int32_t* input_permutation,
                                   int32_t* output_permutation,
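Note (editorial): blockscale offsets are the running token offsets with each expert's row count rounded up to a multiple of 128, so every expert's scale block starts on a 128-row boundary. A host-side sketch with a worked example:

```cpp
#include <cassert>
#include <vector>

int main() {
  std::vector<int> tokens_per_expert = {5, 130, 0, 128};
  std::vector<int> expert_off = {0}, blockscale_off = {0};
  int tot = 0, tot_round = 0;
  for (int t : tokens_per_expert) {
    tot += t;
    tot_round += (t + 127) / 128 * 128;  // round each expert up to 128 rows
    expert_off.push_back(tot);
    blockscale_off.push_back(tot_round);
  }
  // tokens:    5, 130, 0, 128 -> offsets 0,   5, 135, 135, 263
  // rounded: 128, 256, 0, 128 -> offsets 0, 128, 384, 384, 512
  assert(expert_off.back() == 263 && blockscale_off.back() == 512);
  return 0;
}
```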
@@ -77,7 +94,8 @@ void get_cutlass_moe_mm_data_caller(
     const torch::Tensor& topk_ids, torch::Tensor& expert_offsets,
     torch::Tensor& problem_sizes1, torch::Tensor& problem_sizes2,
     torch::Tensor& input_permutation, torch::Tensor& output_permutation,
-    const int64_t num_experts, const int64_t n, const int64_t k) {
+    const int64_t num_experts, const int64_t n, const int64_t k,
+    const std::optional<torch::Tensor>& blockscale_offsets) {
   auto stream = at::cuda::getCurrentCUDAStream(topk_ids.device().index());
   auto options_int32 =
       torch::TensorOptions().dtype(torch::kInt32).device(topk_ids.device());

@@ -85,19 +103,61 @@ void get_cutlass_moe_mm_data_caller(
 
   int num_threads = min(THREADS_PER_EXPERT, topk_ids.numel());
   compute_problem_sizes<<<num_experts, num_threads, 0, stream>>>(
-      static_cast<const int32_t*>(topk_ids.data_ptr()),
+      static_cast<const uint32_t*>(topk_ids.data_ptr()),
       static_cast<int32_t*>(problem_sizes1.data_ptr()),
       static_cast<int32_t*>(problem_sizes2.data_ptr()),
       static_cast<int32_t*>(atomic_buffer.data_ptr()), topk_ids.numel(), n, k);
-  compute_expert_offsets<<<1, 1, 0, stream>>>(
-      static_cast<const int32_t*>(problem_sizes1.data_ptr()),
-      static_cast<int32_t*>(expert_offsets.data_ptr()),
-      static_cast<int32_t*>(atomic_buffer.data_ptr()), num_experts);
+  if (blockscale_offsets.has_value()) {
+    compute_expert_blockscale_offsets<<<1, 1, 0, stream>>>(
+        static_cast<const int32_t*>(problem_sizes1.data_ptr()),
+        static_cast<int32_t*>(expert_offsets.data_ptr()),
+        static_cast<int32_t*>(blockscale_offsets.value().data_ptr()),
+        static_cast<int32_t*>(atomic_buffer.data_ptr()), num_experts);
+  } else {
+    compute_expert_offsets<<<1, 1, 0, stream>>>(
+        static_cast<const int32_t*>(problem_sizes1.data_ptr()),
+        static_cast<int32_t*>(expert_offsets.data_ptr()),
+        static_cast<int32_t*>(atomic_buffer.data_ptr()), num_experts);
+  }
   compute_arg_sorts<<<num_experts, num_threads, 0, stream>>>(
-      static_cast<const int32_t*>(topk_ids.data_ptr()),
+      static_cast<const uint32_t*>(topk_ids.data_ptr()),
       static_cast<const int32_t*>(expert_offsets.data_ptr()),
       static_cast<int32_t*>(input_permutation.data_ptr()),
       static_cast<int32_t*>(output_permutation.data_ptr()),
       static_cast<int32_t*>(atomic_buffer.data_ptr()), topk_ids.numel(),
       topk_ids.size(1));
 }
+
+__global__ void compute_pplx_data(int32_t* expert_offsets,
+                                  int32_t* problem_sizes1,
+                                  int32_t* problem_sizes2,
+                                  const int32_t* __restrict__ expert_num_tokens,
+                                  const int padded_m, const int n,
+                                  const int k) {
+  int expert_idx = threadIdx.x;
+
+  expert_offsets[expert_idx] = expert_idx * padded_m;
+  problem_sizes1[expert_idx * 3] = expert_num_tokens[expert_idx];
+  problem_sizes1[expert_idx * 3 + 1] = 2 * n;
+  problem_sizes1[expert_idx * 3 + 2] = k;
+  problem_sizes2[expert_idx * 3] = expert_num_tokens[expert_idx];
+  problem_sizes2[expert_idx * 3 + 1] = k;
+  problem_sizes2[expert_idx * 3 + 2] = n;
+}
+
+void get_cutlass_pplx_moe_mm_data_caller(torch::Tensor& expert_offsets,
+                                         torch::Tensor& problem_sizes1,
+                                         torch::Tensor& problem_sizes2,
+                                         const torch::Tensor& expert_num_tokens,
+                                         const int64_t num_local_experts,
+                                         const int64_t padded_m,
+                                         const int64_t n, const int64_t k) {
+  auto stream = at::cuda::getCurrentCUDAStream(expert_offsets.device().index());
+
+  compute_pplx_data<<<1, num_local_experts, 0, stream>>>(
+      static_cast<int32_t*>(expert_offsets.data_ptr()),
+      static_cast<int32_t*>(problem_sizes1.data_ptr()),
+      static_cast<int32_t*>(problem_sizes2.data_ptr()),
+      static_cast<const int32_t*>(expert_num_tokens.data_ptr()), padded_m, n,
+      k);
+}
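Note (editorial): each expert's GEMM shape is stored as an (M, N, K) triple. Reading the kernel above, the first grouped GEMM is (tokens, 2*n, k), where the doubled N is consistent with a fused gate/up projection, and the second is (tokens, k, n) for the projection back down. A toy illustration with hypothetical sizes:

```cpp
#include <cstdio>

int main() {
  const int n = 2048, k = 4096, padded_m = 128;
  const int expert_num_tokens[2] = {7, 128};
  for (int e = 0; e < 2; ++e) {
    // Mirrors what compute_pplx_data writes for each local expert.
    printf("expert %d: offset=%d gemm1=(%d,%d,%d) gemm2=(%d,%d,%d)\n", e,
           e * padded_m, expert_num_tokens[e], 2 * n, k,
           expert_num_tokens[e], k, n);
  }
  return 0;
}
```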
@@ -36,7 +36,8 @@ void cutlass_moe_mm_sm90(
     torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
     torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
     torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
-    torch::Tensor const& b_strides, torch::Tensor const& c_strides);
+    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
+    bool per_act_token, bool per_out_ch);
 
 #endif
 

@@ -54,7 +55,16 @@ void get_cutlass_moe_mm_data_caller(
     const torch::Tensor& topk_ids, torch::Tensor& expert_offsets,
     torch::Tensor& problem_sizes1, torch::Tensor& problem_sizes2,
     torch::Tensor& input_permutation, torch::Tensor& output_permutation,
-    const int64_t num_experts, const int64_t n, const int64_t k);
+    const int64_t num_experts, const int64_t n, const int64_t k,
+    const std::optional<torch::Tensor>& blockscale_offsets);
+
+void get_cutlass_pplx_moe_mm_data_caller(torch::Tensor& expert_offsets,
+                                         torch::Tensor& problem_sizes1,
+                                         torch::Tensor& problem_sizes2,
+                                         const torch::Tensor& expert_num_tokens,
+                                         const int64_t num_local_experts,
+                                         const int64_t padded_m,
+                                         const int64_t n, const int64_t k);
 #endif
 
 void cutlass_scaled_mm_azp_sm75(torch::Tensor& c, torch::Tensor const& a,
@@ -206,12 +216,13 @@ void cutlass_moe_mm(
     torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
     torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
     torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
-    torch::Tensor const& b_strides, torch::Tensor const& c_strides) {
+    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
+    bool per_act_token, bool per_out_ch) {
   int32_t version_num = get_sm_version_num();
 #if defined ENABLE_CUTLASS_MOE_SM90 && ENABLE_CUTLASS_MOE_SM90
   cutlass_moe_mm_sm90(out_tensors, a_tensors, b_tensors, a_scales, b_scales,
                       expert_offsets, problem_sizes, a_strides, b_strides,
-                      c_strides);
+                      c_strides, per_act_token, per_out_ch);
   return;
 #endif
   TORCH_CHECK_NOT_IMPLEMENTED(

@@ -224,7 +235,8 @@ void get_cutlass_moe_mm_data(
     const torch::Tensor& topk_ids, torch::Tensor& expert_offsets,
     torch::Tensor& problem_sizes1, torch::Tensor& problem_sizes2,
     torch::Tensor& input_permutation, torch::Tensor& output_permutation,
-    const int64_t num_experts, const int64_t n, const int64_t k) {
+    const int64_t num_experts, const int64_t n, const int64_t k,
+    const std::optional<torch::Tensor>& blockscale_offsets) {
   // This function currently gets compiled only if we have a valid cutlass moe
   // mm to run it for.
   int32_t version_num = get_sm_version_num();

@@ -232,7 +244,8 @@ void get_cutlass_moe_mm_data(
     (defined ENABLE_SCALED_MM_SM100 && ENABLE_SCALED_MM_SM90)
   get_cutlass_moe_mm_data_caller(topk_ids, expert_offsets, problem_sizes1,
                                  problem_sizes2, input_permutation,
-                                 output_permutation, num_experts, n, k);
+                                 output_permutation, num_experts, n, k,
+                                 blockscale_offsets);
   return;
 #endif
   TORCH_CHECK_NOT_IMPLEMENTED(

@@ -242,6 +255,29 @@ void get_cutlass_moe_mm_data(
       version_num, ". Required capability: 90");
 }
 
+void get_cutlass_pplx_moe_mm_data(torch::Tensor& expert_offsets,
+                                  torch::Tensor& problem_sizes1,
+                                  torch::Tensor& problem_sizes2,
+                                  const torch::Tensor& expert_num_tokens,
+                                  const int64_t num_local_experts,
+                                  const int64_t padded_m, const int64_t n,
+                                  const int64_t k) {
+  // This function currently gets compiled only if we have a valid cutlass moe
+  // mm to run it for.
+  int32_t version_num = get_sm_version_num();
+#if defined ENABLE_CUTLASS_MOE_SM90 && ENABLE_CUTLASS_MOE_SM90
+  get_cutlass_pplx_moe_mm_data_caller(expert_offsets, problem_sizes1,
+                                      problem_sizes2, expert_num_tokens,
+                                      num_local_experts, padded_m, n, k);
+  return;
+#endif
+  TORCH_CHECK_NOT_IMPLEMENTED(
+      false,
+      "No compiled get_cutlass_pplx_moe_mm_data: no cutlass_scaled_mm kernel "
+      "for CUDA device capability: ",
+      version_num, ". Required capability: 90");
+}
+
 void cutlass_scaled_mm_azp(torch::Tensor& c, torch::Tensor const& a,
                            torch::Tensor const& b,
                            torch::Tensor const& a_scales,
@@ -39,8 +39,8 @@ __global__ void dynamic_per_token_scaled_fp8_quant_kernel(
   fp8_type* __restrict__ token_output = &out[offset];
 
   // For vectorization, token_input and token_output pointers need to be
-  // aligned at 8-byte and 4-byte addresses respectively.
-  bool const can_vectorize = hidden_size % 4 == 0;
+  // aligned at 32-byte and 16-byte addresses respectively.
+  bool const can_vectorize = hidden_size % 16 == 0;
 
   float absmax_val = 0.0f;
   if (can_vectorize) {

@@ -48,24 +48,24 @@ __global__ void dynamic_per_token_scaled_fp8_quant_kernel(
   } else {
     for (int i = tid; i < hidden_size; i += blockDim.x) {
       float const x = static_cast<float>(token_input[i]);
-      absmax_val = max(absmax_val, fabs(x));
+      absmax_val = fmaxf(absmax_val, fabsf(x));
     }
   }
 
-  using BlockReduce = cub::BlockReduce<float, 1024>;
+  using BlockReduce = cub::BlockReduce<float, 256>;
   __shared__ typename BlockReduce::TempStorage reduceStorage;
   float const block_absmax_val_maybe =
       BlockReduce(reduceStorage).Reduce(absmax_val, cub::Max{}, blockDim.x);
   __shared__ float token_scale;
   if (tid == 0) {
     if (scale_ub) {
-      token_scale = min(block_absmax_val_maybe, *scale_ub);
+      token_scale = fminf(block_absmax_val_maybe, *scale_ub);
     } else {
       token_scale = block_absmax_val_maybe;
     }
     // token scale computation
-    token_scale = max(token_scale / quant_type_max_v<fp8_type>,
-                      min_scaling_factor<fp8_type>::val());
+    token_scale = fmaxf(token_scale / quant_type_max_v<fp8_type>,
+                        min_scaling_factor<fp8_type>::val());
     scale[token_idx] = token_scale;
   }
   __syncthreads();
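Note (editorial): two independent fixes are folded together here. `fmaxf`/`fminf`/`fabsf` make single precision explicit (the unsuffixed forms can resolve to double-precision paths on device), and `cub::BlockReduce`'s thread-count template parameter must agree with the launch block size, which the surrounding hunks drop from 1024 to 256 threads. A host-side reference of the per-token scale math, assuming an FP8 e4m3 max of 448 and an illustrative minimum scaling factor:

```cpp
#include <cmath>

float per_token_scale(const float* row, int hidden, float fp8_max = 448.0f,
                      float min_scale = 1.0f / (448.0f * 512.0f)) {
  float absmax = 0.0f;
  for (int i = 0; i < hidden; ++i)
    absmax = std::fmax(absmax, std::fabs(row[i]));
  // A scale_ub clamp, when provided, would be applied to absmax before this.
  return std::fmax(absmax / fp8_max, min_scale);  // never emit a zero scale
}
```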
@@ -88,10 +88,11 @@ void static_scaled_fp8_quant(torch::Tensor& out,          // [..., d]
                              torch::Tensor const& input,  // [..., d]
                              torch::Tensor const& scale)  // [1]
 {
-  int64_t num_tokens = input.numel() / input.size(-1);
-  int64_t num_elems = input.numel();
-  dim3 grid(num_tokens);
-  dim3 block(1024);
+  int const block_size = 256;
+  int const num_tokens = input.numel() / input.size(-1);
+  int const num_elems = input.numel();
+  dim3 const grid(num_tokens);
+  dim3 const block(block_size);
   const at::cuda::OptionalCUDAGuard device_guard(device_of(input));
   const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   VLLM_DISPATCH_FLOATING_TYPES(

@@ -110,10 +111,11 @@ void dynamic_scaled_fp8_quant(torch::Tensor& out,          // [..., d]
                               torch::Tensor const& input,  // [..., d]
                               torch::Tensor& scale)        // [1]
 {
-  int64_t num_tokens = input.numel() / input.size(-1);
-  int64_t num_elems = input.numel();
-  dim3 grid(num_tokens);
-  dim3 block(1024);
+  int const block_size = 256;
+  int const num_tokens = input.numel() / input.size(-1);
+  int const num_elems = input.numel();
+  dim3 const grid(num_tokens);
+  dim3 const block(block_size);
   const at::cuda::OptionalCUDAGuard device_guard(device_of(input));
   const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   VLLM_DISPATCH_FLOATING_TYPES(

@@ -141,8 +143,9 @@ void dynamic_per_token_scaled_fp8_quant(
 
   int const hidden_size = input.size(-1);
   int const num_tokens = input.numel() / hidden_size;
+  int const block_size = 256;
   dim3 const grid(num_tokens);
-  dim3 const block(std::min(hidden_size, 1024));
+  dim3 const block(std::min(hidden_size, block_size));
 
   const at::cuda::OptionalCUDAGuard device_guard(device_of(input));
   const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
@@ -46,7 +46,7 @@ __device__ __forceinline__ fp8_type scaled_fp8_conversion(float const val,
   }
 
   float r =
-      fmax(-quant_type_max_v<fp8_type>, fmin(x, quant_type_max_v<fp8_type>));
+      fmaxf(-quant_type_max_v<fp8_type>, fminf(x, quant_type_max_v<fp8_type>));
 #ifndef USE_ROCM
   return static_cast<fp8_type>(r);
 #else

@@ -65,7 +65,7 @@ template <typename scalar_t, typename fp8_type>
 __global__ void segmented_max_reduction(float* __restrict__ scale,
                                         const scalar_t* __restrict__ input,
                                         int64_t num_elems) {
-  __shared__ float cache[1024];
+  __shared__ float cache[256];
   int64_t i = blockDim.x * blockIdx.x + threadIdx.x;
 
   // First store maximum for all values processes by

@@ -73,7 +73,7 @@ __global__ void segmented_max_reduction(float* __restrict__ scale,
   scalar_t tmp = 0.0;
   while (i < num_elems) {
     float x = static_cast<float>(input[i]);
-    tmp = max(tmp, fabs(x));
+    tmp = fmaxf(tmp, fabsf(x));
     i += blockDim.x * gridDim.x;
   }
   cache[threadIdx.x] = tmp;
@@ -100,25 +100,27 @@ template <typename scalar_t>
 __device__ float thread_max_vec(scalar_t const* __restrict__ input,
                                 int64_t const num_elems, int const tid,
                                 int const step) {
+  constexpr size_t VEC_SIZE = 16;
+  using scalarxN_t = vec_n_t<scalar_t, VEC_SIZE>;
   // Vectorized input/output to better utilize memory bandwidth.
-  vec4_t<scalar_t> const* vectorized_in =
-      reinterpret_cast<vec4_t<scalar_t> const*>(input);
+  auto const* vectorized_in = reinterpret_cast<scalarxN_t const*>(input);
 
-  int64_t const num_vec_elems = num_elems >> 2;
+  // num_elems / VEC_SIZE (which is 16)
+  int64_t const num_vec_elems = num_elems >> 4;
   float absmax_val = 0.0f;
 
-#pragma unroll 4
+#pragma unroll
   for (int64_t i = tid; i < num_vec_elems; i += step) {
-    vec4_t<scalar_t> in_vec = vectorized_in[i];
-    absmax_val = max(absmax_val, fabs(in_vec.x));
-    absmax_val = max(absmax_val, fabs(in_vec.y));
-    absmax_val = max(absmax_val, fabs(in_vec.z));
-    absmax_val = max(absmax_val, fabs(in_vec.w));
+    scalarxN_t in_vec = vectorized_in[i];
+#pragma unroll
+    for (int j = 0; j < VEC_SIZE; ++j) {
+      absmax_val = fmaxf(absmax_val, fabsf(in_vec.val[j]));
+    }
   }
 
-  // Handle the remaining elements if num_elems is not divisible by 4
-  for (int64_t i = num_vec_elems * 4 + tid; i < num_elems; i += step) {
-    absmax_val = max(absmax_val, fabs(input[i]));
+  // Handle the remaining elements if num_elems is not divisible by VEC_SIZE
+  for (int64_t i = num_vec_elems * VEC_SIZE + tid; i < num_elems; i += step) {
+    absmax_val = fmaxf(absmax_val, fabsf(input[i]));
   }
 
   return absmax_val;

@@ -130,31 +132,31 @@ __device__ void scaled_fp8_conversion_vec(fp8_type* __restrict__ out,
                                           float const scale,
                                           int64_t const num_elems,
                                           int const tid, int const step) {
-  using float8x4_t = q8x4_t<fp8_type>;
+  constexpr size_t VEC_SIZE = 16;
+  using scalarxN_t = vec_n_t<scalar_t, VEC_SIZE>;
+  using float8xN_t = q8_n_t<fp8_type, VEC_SIZE>;
   // Vectorized input/output to better utilize memory bandwidth.
-  auto const* vectorized_in = reinterpret_cast<vec4_t<scalar_t> const*>(input);
-  auto* vectorized_out = reinterpret_cast<float8x4_t*>(out);
+  auto const* vectorized_in = reinterpret_cast<scalarxN_t const*>(input);
+  auto* vectorized_out = reinterpret_cast<float8xN_t*>(out);
 
-  int64_t const num_vec_elems = num_elems >> 2;
+  // num_elems / VEC_SIZE (which is 16)
+  int64_t const num_vec_elems = num_elems >> 4;
 
-#pragma unroll 4
+#pragma unroll
   for (int64_t i = tid; i < num_vec_elems; i += step) {
-    vec4_t<scalar_t> in_vec = vectorized_in[i];
-    float8x4_t out_vec;
+    scalarxN_t in_vec = vectorized_in[i];
+    float8xN_t out_vec;
 
-    out_vec.x = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
-        static_cast<float>(in_vec.x), scale);
-    out_vec.y = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
-        static_cast<float>(in_vec.y), scale);
-    out_vec.z = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
-        static_cast<float>(in_vec.z), scale);
-    out_vec.w = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
-        static_cast<float>(in_vec.w), scale);
+#pragma unroll
+    for (int j = 0; j < VEC_SIZE; ++j) {
+      out_vec.val[j] = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
+          static_cast<float>(in_vec.val[j]), scale);
+    }
     vectorized_out[i] = out_vec;
   }
 
-  // Handle the remaining elements if num_elems is not divisible by 4
-  for (int64_t i = num_vec_elems * 4 + tid; i < num_elems; i += step) {
+  // Handle the remaining elements if num_elems is not divisible by VEC_SIZE
+  for (int64_t i = num_vec_elems * VEC_SIZE + tid; i < num_elems; i += step) {
     out[i] = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
         static_cast<float>(input[i]), scale);
   }
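Note (editorial): widening the vector from 4 to 16 elements means each half-precision load is 32 bytes and each fp8 store is 16 bytes, matching the updated alignment comment earlier in the diff. A minimal host-side sketch of the same "wide struct plus scalar tail" pattern:

```cpp
#include <cmath>
#include <cstdio>

template <typename T, size_t N>
struct alignas(N * sizeof(T)) vec_n {
  T val[N];
};

int main() {
  constexpr size_t VEC_SIZE = 16;
  alignas(64) float data[37];  // aligned so the wide reinterpret is valid
  for (int i = 0; i < 37; ++i) data[i] = (i % 7) - 3.0f;

  float absmax = 0.0f;
  const size_t num_vec = 37 / VEC_SIZE;  // 2 full vectors, 5-element tail
  auto const* v = reinterpret_cast<vec_n<float, VEC_SIZE> const*>(data);
  for (size_t i = 0; i < num_vec; ++i)
    for (size_t j = 0; j < VEC_SIZE; ++j)
      absmax = std::fmax(absmax, std::fabs(v[i].val[j]));
  for (size_t i = num_vec * VEC_SIZE; i < 37; ++i)  // scalar tail
    absmax = std::fmax(absmax, std::fabs(data[i]));
  printf("absmax = %f\n", absmax);  // 3.0
  return 0;
}
```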
@@ -140,6 +140,7 @@ __device__ void compute_rms(float* rms, scalar_t const* __restrict__ input,
   // sum of squares
   float ss = 0.0f;
 
+  const int VEC_SIZE = 4;
   int32_t const num_vec_elems = hidden_size >> 2;
 
 #pragma unroll 4

@@ -147,22 +148,23 @@ __device__ void compute_rms(float* rms, scalar_t const* __restrict__ input,
     vec4_t<scalar_t> in = vec_input[i];
 
     vec4_t<float> x;
-    x.x = static_cast<float>(in.x);
-    x.y = static_cast<float>(in.y);
-    x.z = static_cast<float>(in.z);
-    x.w = static_cast<float>(in.w);
-    if constexpr (has_residual) {
-      vec4_t<scalar_t> r = vec_residual[i];
-      x.x += static_cast<float>(r.x);
-      x.y += static_cast<float>(r.y);
-      x.z += static_cast<float>(r.z);
-      x.w += static_cast<float>(r.w);
+#pragma unroll
+    for (int j = 0; j < VEC_SIZE; ++j) {
+      x.val[j] = static_cast<float>(in.val[j]);
     }
 
-    ss += x.x * x.x;
-    ss += x.y * x.y;
-    ss += x.z * x.z;
-    ss += x.w * x.w;
+    if constexpr (has_residual) {
+      vec4_t<scalar_t> r = vec_residual[i];
+#pragma unroll
+      for (int j = 0; j < VEC_SIZE; ++j) {
+        x.val[j] += static_cast<float>(r.val[j]);
+      }
+    }
+
+#pragma unroll
+    for (int j = 0; j < VEC_SIZE; ++j) {
+      ss += x.val[j] * x.val[j];
+    }
   }
 
   using BlockReduce = cub::BlockReduce<float, 1024>;
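Note (editorial): the loop above accumulates the per-thread partial sum of squares that feeds the RMS. A scalar host reference of the quantity being computed (the block reduction and epsilon term live outside this hunk, so the epsilon below is illustrative):

```cpp
#include <cmath>

float rms_norm_factor(const float* x, const float* residual, int hidden,
                      float eps = 1e-6f) {
  float ss = 0.0f;
  for (int i = 0; i < hidden; ++i) {
    float v = x[i] + (residual ? residual[i] : 0.0f);  // optional fused add
    ss += v * v;
  }
  return 1.0f / std::sqrt(ss / hidden + eps);  // multiply by this to normalize
}
```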
@@ -203,6 +205,7 @@ __device__ void compute_dynamic_per_token_scales(
 
   constexpr scalar_out_t qmax{quant_type_max_v<scalar_out_t>};
 
+  const int VEC_SIZE = 4;
   int32_t const num_vec_elems = hidden_size >> 2;
   float block_absmax_val_maybe = 0.0f;
 

@@ -212,26 +215,25 @@ __device__ void compute_dynamic_per_token_scales(
     vec4_t<scalar_t> const w = vec_weight[i];
 
     vec4_t<float> x;
-    x.x = static_cast<float>(in.x);
-    x.y = static_cast<float>(in.y);
-    x.z = static_cast<float>(in.z);
-    x.w = static_cast<float>(in.w);
-    if constexpr (has_residual) {
-      vec4_t<scalar_t> r = vec_residual[i];
-      x.x += static_cast<float>(r.x);
-      x.y += static_cast<float>(r.y);
-      x.z += static_cast<float>(r.z);
-      x.w += static_cast<float>(r.w);
+#pragma unroll
+    for (int j = 0; j < VEC_SIZE; ++j) {
+      x.val[j] = static_cast<float>(in.val[j]);
     }
 
-    block_absmax_val_maybe = fmaxf(
-        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.x * rms) * w.x));
-    block_absmax_val_maybe = fmaxf(
-        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.y * rms) * w.y));
-    block_absmax_val_maybe = fmaxf(
-        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.z * rms) * w.z));
-    block_absmax_val_maybe = fmaxf(
-        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.w * rms) * w.w));
+    if constexpr (has_residual) {
+      vec4_t<scalar_t> r = vec_residual[i];
+#pragma unroll
+      for (int j = 0; j < VEC_SIZE; ++j) {
+        x.val[j] += static_cast<float>(r.val[j]);
+      }
+    }
+
+#pragma unroll
+    for (int j = 0; j < VEC_SIZE; ++j) {
+      block_absmax_val_maybe =
+          fmaxf(block_absmax_val_maybe,
+                fabs(static_cast<scalar_t>(x.val[j] * rms) * w.val[j]));
+    }
   }
 
   using BlockReduce = cub::BlockReduce<float, 1024>;
@ -282,6 +284,7 @@ __device__ void norm_and_quant(scalar_out_t* __restrict__ output,
|
|||||||
vec_residual = reinterpret_cast<vec4_t<scalar_t>*>(&residual[token_offset]);
|
vec_residual = reinterpret_cast<vec4_t<scalar_t>*>(&residual[token_offset]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const int VEC_SIZE = 4;
|
||||||
int32_t const num_vec_elems = hidden_size >> 2;
|
int32_t const num_vec_elems = hidden_size >> 2;
|
||||||
|
|
||||||
// TODO(luka/varun) extract into type-agnostic vectorized quant function to
|
// TODO(luka/varun) extract into type-agnostic vectorized quant function to
|
||||||
@ -292,33 +295,31 @@ __device__ void norm_and_quant(scalar_out_t* __restrict__ output,
|
|||||||
vec4_t<scalar_t> const w = vec_weight[i];
|
vec4_t<scalar_t> const w = vec_weight[i];
|
||||||
|
|
||||||
vec4_t<float> x;
|
vec4_t<float> x;
|
||||||
x.x = static_cast<float>(in.x);
|
#pragma unroll
|
||||||
x.y = static_cast<float>(in.y);
|
for (int j = 0; j < VEC_SIZE; ++j) {
|
||||||
x.z = static_cast<float>(in.z);
|
x.val[j] = static_cast<float>(in.val[j]);
|
||||||
x.w = static_cast<float>(in.w);
|
}
|
||||||
|
|
||||||
if constexpr (has_residual) {
|
if constexpr (has_residual) {
|
||||||
vec4_t<scalar_t> r = vec_residual[i];
|
vec4_t<scalar_t> r = vec_residual[i];
|
||||||
x.x += static_cast<float>(r.x);
|
#pragma unroll
|
||||||
x.y += static_cast<float>(r.y);
|
for (int j = 0; j < VEC_SIZE; ++j) {
|
||||||
x.z += static_cast<float>(r.z);
|
x.val[j] += static_cast<float>(r.val[j]);
|
||||||
x.w += static_cast<float>(r.w);
|
}
|
||||||
// Update residual
|
// Update residual
|
||||||
r.x = static_cast<scalar_t>(x.x);
|
#pragma unroll
|
||||||
r.y = static_cast<scalar_t>(x.y);
|
for (int j = 0; j < VEC_SIZE; ++j) {
|
||||||
r.z = static_cast<scalar_t>(x.z);
|
r.val[j] = static_cast<scalar_t>(x.val[j]);
|
||||||
r.w = static_cast<scalar_t>(x.w);
|
}
|
||||||
vec_residual[i] = r;
|
vec_residual[i] = r;
|
||||||
}
|
}
|
||||||
|
|
||||||
q8x4_t<scalar_out_t> out;
|
q8x4_t<scalar_out_t> out;
|
||||||
out.x = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
|
#pragma unroll
|
||||||
static_cast<scalar_t>(x.x * rms) * w.x, scale);
|
for (int j = 0; j < VEC_SIZE; ++j) {
|
||||||
out.y = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
|
out.val[j] = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
|
||||||
static_cast<scalar_t>(x.y * rms) * w.y, scale);
|
static_cast<scalar_t>(x.val[j] * rms) * w.val[j], scale);
|
||||||
out.z = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
|
}
|
||||||
static_cast<scalar_t>(x.z * rms) * w.z, scale);
|
|
||||||
out.w = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
|
|
||||||
static_cast<scalar_t>(x.w * rms) * w.w, scale);
|
|
||||||
vec_output[i] = out;
|
vec_output[i] = out;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
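Side note on the rewrite pattern above: because VEC_SIZE is a compile-time constant, `#pragma unroll` lets the compiler expand the val[] loop into the same straight-line code the old x/y/z/w statements produced, so the refactor is mechanical and should not change generated code. A minimal standalone sketch (the struct and function names here are invented for illustration, not taken from the diff):

struct vec4f {
  float val[4];  // stand-in for vec4_t<float> after the refactor
};

__device__ float sum_squares(vec4f v) {
  float ss = 0.0f;
#pragma unroll
  for (int j = 0; j < 4; ++j) {
    // unrolls to ss += v.val[0] * v.val[0]; ... ss += v.val[3] * v.val[3];
    ss += v.val[j] * v.val[j];
  }
  return ss;
}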
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 import glob
 import itertools
 import os
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import itertools
 import math
@@ -10,23 +10,22 @@
 namespace vllm {

 // Vectorization containers
-template <typename scalar_t>
-struct __align__(8) vec4_t {
-  scalar_t x;
-  scalar_t y;
-  scalar_t z;
-  scalar_t w;
+template <typename scalar_t, size_t vec_size>
+struct __align__(vec_size * sizeof(scalar_t)) vec_n_t {
+  scalar_t val[vec_size];
 };

-template <typename quant_type_t>
-struct __align__(4) q8x4_t {
+template <typename quant_type_t, size_t vec_size>
+struct __align__(vec_size * sizeof(quant_type_t)) q8_n_t {
   static_assert(std::is_same_v<quant_type_t, int8_t> ||
                 std::is_same_v<quant_type_t, c10::Float8_e4m3fn> ||
                 std::is_same_v<quant_type_t, c10::Float8_e4m3fnuz>);
-  quant_type_t x;
-  quant_type_t y;
-  quant_type_t z;
-  quant_type_t w;
+  quant_type_t val[vec_size];
 };

+template <typename scalar_t>
+using vec4_t = vec_n_t<scalar_t, 4>;
+template <typename quant_type_t>
+using q8x4_t = q8_n_t<quant_type_t, 4>;
+
 }  // namespace vllm
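Why make vec_size a template parameter? Wider element counts keep memory transactions at the full 16 bytes for smaller dtypes, and the alignment now tracks vec_size * sizeof(scalar_t) automatically. A hedged sketch (vec8_half and sum8 are illustrative names, not part of the diff):

#include <cuda_fp16.h>

using vec8_half = vllm::vec_n_t<__half, 8>;  // 8 x 2 bytes = one 16-byte load
static_assert(sizeof(vec8_half) == 16, "eight halves pack into 16 bytes");

__device__ float sum8(const vec8_half& v) {
  float ss = 0.0f;
#pragma unroll
  for (int j = 0; j < 8; ++j) {
    ss += __half2float(v.val[j]);  // per-lane loop, same shape as the vec4 code
  }
  return ss;
}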
csrc/sampler.cu (new file, 86 lines)
@@ -0,0 +1,86 @@
+#include "dispatch_utils.h"
+
+#include <torch/cuda.h>
+#include <c10/cuda/CUDAGuard.h>
+
+#ifndef USE_ROCM
+  #include <cub/cub.cuh>
+#else
+  #include <hipcub/hipcub.hpp>
+#endif
+
+namespace vllm {
+
+template <typename scalar_t>
+__global__ void apply_repetition_penalties_kernel(
+    scalar_t* __restrict__ logits,          // [num_seqs, vocab_size]
+    const bool* __restrict__ prompt_mask,   // [num_seqs, vocab_size]
+    const bool* __restrict__ output_mask,   // [num_seqs, vocab_size]
+    const scalar_t* __restrict__ repetition_penalties,  // [num_seqs]
+    const int num_seqs, const int vocab_size, const int tile_size) {
+  // Each block handles one sequence and a tile of vocab
+  const int seq_idx = blockIdx.x;
+  if (seq_idx >= num_seqs) return;
+
+  const int tile_start = blockIdx.y * tile_size;
+  const int tile_end = min(tile_start + tile_size, vocab_size);
+
+  // Load repetition penalty for this sequence
+  const scalar_t penalty = repetition_penalties[seq_idx];
+
+  // Each thread processes multiple vocab items within the tile
+  for (int vocab_idx = tile_start + threadIdx.x; vocab_idx < tile_end;
+       vocab_idx += blockDim.x) {
+    const int64_t idx = static_cast<int64_t>(seq_idx) * vocab_size + vocab_idx;
+    const bool is_repeated = prompt_mask[idx] || output_mask[idx];
+    if (is_repeated) {
+      scalar_t logit = logits[idx];
+      if (logit > 0) {
+        logits[idx] = logit / penalty;
+      } else {
+        logits[idx] = logit * penalty;
+      }
+    }
+  }
+}
+
+}  // namespace vllm
+
+void apply_repetition_penalties_(
+    torch::Tensor& logits,             // [num_seqs, vocab_size], in-place
+    const torch::Tensor& prompt_mask,  // [num_seqs, vocab_size]
+    const torch::Tensor& output_mask,  // [num_seqs, vocab_size]
+    const torch::Tensor& repetition_penalties) {  // [num_seqs]
+  TORCH_CHECK(logits.is_contiguous());
+  TORCH_CHECK(prompt_mask.is_contiguous());
+  TORCH_CHECK(output_mask.is_contiguous());
+  TORCH_CHECK(repetition_penalties.is_contiguous());
+
+  int vocab_size = logits.size(-1);
+  int num_seqs = logits.size(0);
+
+  // Get number of SMs on the current device
+  int sms = 0;
+  cudaDeviceGetAttribute(&sms, cudaDevAttrMultiProcessorCount,
+                         logits.get_device());
+
+  // Compute tile_num and tile_size
+  int tile_num =
+      std::min(vocab_size, std::max(1, (sms + num_seqs - 1) / num_seqs));
+  int tile_size = (vocab_size + tile_num - 1) / tile_num;
+
+  // Each block handles one sequence and a tile of vocab
+  dim3 grid(num_seqs, tile_num);
+  dim3 block(std::min(tile_size, 1024));
+  const at::cuda::OptionalCUDAGuard device_guard(device_of(logits));
+  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  VLLM_DISPATCH_FLOATING_TYPES(
+      logits.scalar_type(), "apply_repetition_penalties_kernel", [&] {
+        vllm::apply_repetition_penalties_kernel<scalar_t>
+            <<<grid, block, 0, stream>>>(
+                logits.data_ptr<scalar_t>(), prompt_mask.data_ptr<bool>(),
+                output_mask.data_ptr<bool>(),
+                repetition_penalties.data_ptr<scalar_t>(), num_seqs, vocab_size,
+                tile_size);
+      });
+}
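For intuition on the launch geometry above: each (sequence, vocab-tile) pair gets one block. With, say, 132 SMs and 8 sequences, tile_num = min(vocab_size, ceil(132 / 8)) = 17, so each logits row is cut into 17 tiles and the grid is 8 x 17 blocks, roughly saturating the GPU. The penalty rule itself divides positive logits and multiplies non-positive ones, which lowers a repeated token's probability whenever penalty > 1. A small CPU reference (a testing sketch, not part of the diff; the function name is invented):

#include <cstddef>
#include <vector>

void reference_repetition_penalty(std::vector<float>& logits,
                                  const std::vector<bool>& repeated,
                                  float penalty) {
  for (std::size_t i = 0; i < logits.size(); ++i) {
    if (repeated[i]) {
      // same branch as the kernel: divide positives, multiply non-positives
      logits[i] = logits[i] > 0.0f ? logits[i] / penalty : logits[i] * penalty;
    }
  }
}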
@@ -170,6 +170,13 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
       "float epsilon) -> ()");
   ops.impl("fused_add_rms_norm", torch::kCUDA, &fused_add_rms_norm);

+  // Apply repetition penalties to logits in-place
+  ops.def(
+      "apply_repetition_penalties_(Tensor! logits, Tensor prompt_mask, "
+      "Tensor output_mask, Tensor repetition_penalties) -> ()");
+  ops.impl("apply_repetition_penalties_", torch::kCUDA,
+           &apply_repetition_penalties_);
+
   // Layernorm-quant
   // Apply Root Mean Square (RMS) Normalization to the input tensor.
   ops.def(

@@ -428,7 +435,8 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
       "cutlass_moe_mm(Tensor! out_tensors, Tensor a_tensors, Tensor b_tensors, "
      "                Tensor a_scales, Tensor b_scales, Tensor expert_offsets, "
      "                Tensor problem_sizes, Tensor a_strides, "
-      "                Tensor b_strides, Tensor c_strides) -> ()",
+      "                Tensor b_strides, Tensor c_strides, bool per_act_token, "
+      "                bool per_out_ch) -> ()",
       {stride_tag});
   ops.impl("cutlass_moe_mm", torch::kCUDA, &cutlass_moe_mm);

@@ -443,10 +451,26 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
      "       Tensor! problem_sizes1, Tensor! problem_sizes2, "
      "       Tensor! input_permutation, "
      "       Tensor! output_permutation, int num_experts, "
-      "       int n, int k) -> ()",
+      "       int n, int k, Tensor? blockscale_offsets) -> ()",
       {stride_tag});
   ops.impl("get_cutlass_moe_mm_data", torch::kCUDA, &get_cutlass_moe_mm_data);

+  // A function that computes data required to run fused MoE with w8a8 grouped
+  // GEMM and PPLX. It takes expert_num_tokens and non_zero_expert_idxs
+  // as an input, and computes expert_offsets (token start indices of each
+  // expert). In addition to this, it computes problem sizes for each expert's
+  // multiplication used by the two mms called from fused MoE operation.
+  ops.def(
+      "get_cutlass_pplx_moe_mm_data(Tensor! expert_offsets, "
+      "                             Tensor! problem_sizes1, "
+      "                             Tensor! problem_sizes2, "
+      "                             Tensor expert_num_tokens, "
+      "                             int num_local_experts, int padded_m, "
+      "                             int n, int k) -> ()",
+      {stride_tag});
+  ops.impl("get_cutlass_pplx_moe_mm_data", torch::kCUDA,
+           &get_cutlass_pplx_moe_mm_data);
+
   // Check if cutlass scaled_mm supports block quantization (used by DeepSeekV3)
   ops.def(
       "cutlass_scaled_mm_supports_block_fp8(int cuda_device_capability) -> "
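Once these registrations land, the op is reachable from Python as torch.ops.&lt;extension&gt;.apply_repetition_penalties_ or from C++ through the dispatcher. A hedged C++ sketch; the "_C" namespace is an assumption about what TORCH_EXTENSION_NAME expands to here, and the wrapper name is invented:

#include <ATen/core/dispatch/Dispatcher.h>
#include <torch/torch.h>

void call_apply_repetition_penalties(torch::Tensor& logits,
                                     const torch::Tensor& prompt_mask,
                                     const torch::Tensor& output_mask,
                                     const torch::Tensor& penalties) {
  // Look the schema up once; the typed<> signature mirrors the registered
  // C++ function exactly (void return, mutable logits, const inputs).
  static auto op =
      c10::Dispatcher::singleton()
          .findSchemaOrThrow("_C::apply_repetition_penalties_", "")
          .typed<void(torch::Tensor&, const torch::Tensor&,
                      const torch::Tensor&, const torch::Tensor&)>();
  op.call(logits, prompt_mask, output_mask, penalties);
}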
@@ -75,6 +75,7 @@ RUN --mount=type=bind,source=.git,target=.git \
 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=cache,target=/root/.cache/ccache \
+    --mount=type=cache,target=/workspace/vllm/.deps,sharing=locked \
     --mount=type=bind,source=.git,target=.git \
     VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel

@@ -85,7 +86,7 @@ WORKDIR /workspace/vllm
 RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
     --mount=type=cache,target=/var/lib/apt,sharing=locked \
-    apt-get install -y --no-install-recommends vim numactl
+    apt-get install -y --no-install-recommends vim numactl xz-utils

 # install development dependencies (for testing)
 RUN --mount=type=cache,target=/root/.cache/uv \

@@ -108,8 +109,11 @@ FROM base AS vllm-test
 WORKDIR /workspace/

 RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,src=requirements/test.txt,target=requirements/test.txt \
-    uv pip install -r requirements/test.txt
+    --mount=type=bind,src=requirements/test.in,target=requirements/test.in \
+    cp requirements/test.in requirements/test-cpu.in && \
+    sed -i '/mamba_ssm/d' requirements/test-cpu.in && \
+    uv pip compile requirements/test-cpu.in -o requirements/cpu-test.txt && \
+    uv pip install -r requirements/cpu-test.txt

 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,from=vllm-build,src=/workspace/vllm/dist,target=dist \
@@ -34,7 +34,7 @@ RUN --mount=type=bind,source=.git,target=.git \
     if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh ; fi

 RUN python3 -m pip install -U \
-        'cmake>=3.26' ninja packaging 'setuptools-scm>=8' wheel jinja2 \
+        'cmake>=3.26.1' ninja packaging 'setuptools-scm>=8' wheel jinja2 \
         -r requirements/neuron.txt

 ENV VLLM_TARGET_DEVICE neuron
@@ -312,4 +312,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # Logging to confirm the torch versions
 RUN pip freeze | grep -E 'torch|xformers|vllm|flashinfer'

+# Logging to confirm all the packages are installed
+RUN pip freeze
+
 #################### UNITTEST IMAGE #############################
@@ -1,10 +1,41 @@
 ARG BASE_UBI_IMAGE_TAG=9.5-1741850109

+###############################################################
+# Stage to build openblas
+###############################################################
+
+FROM registry.access.redhat.com/ubi9/ubi-minimal:${BASE_UBI_IMAGE_TAG} AS openblas-builder
+
+ARG MAX_JOBS
+ARG OPENBLAS_VERSION=0.3.29
+RUN microdnf install -y dnf && dnf install -y gcc-toolset-13 make wget unzip \
+    && source /opt/rh/gcc-toolset-13/enable \
+    && wget https://github.com/OpenMathLib/OpenBLAS/releases/download/v$OPENBLAS_VERSION/OpenBLAS-$OPENBLAS_VERSION.zip \
+    && unzip OpenBLAS-$OPENBLAS_VERSION.zip \
+    && cd OpenBLAS-$OPENBLAS_VERSION \
+    && make -j${MAX_JOBS} TARGET=POWER9 BINARY=64 USE_OPENMP=1 USE_THREAD=1 NUM_THREADS=120 DYNAMIC_ARCH=1 INTERFACE64=0 \
+    && cd /tmp && touch control
+
+
+###############################################################
+# base stage with dependencies coming from centos mirrors
+###############################################################
+FROM registry.access.redhat.com/ubi9/ubi-minimal:${BASE_UBI_IMAGE_TAG} AS centos-deps-builder
+RUN microdnf install -y dnf && \
+    dnf install -y https://mirror.stream.centos.org/9-stream/BaseOS/`arch`/os/Packages/centos-gpg-keys-9.0-24.el9.noarch.rpm \
+                   https://mirror.stream.centos.org/9-stream/BaseOS/`arch`/os/Packages/centos-stream-repos-9.0-24.el9.noarch.rpm \
+                   https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm && \
+    dnf config-manager --set-enabled crb
+
+RUN dnf install -y openjpeg2-devel lcms2-devel tcl-devel tk-devel fribidi-devel && \
+    dnf remove -y centos-gpg-keys-9.0-24.el9.noarch centos-stream-repos-9.0-24.el9.noarch
+
+
 ###############################################################
 # base stage with basic dependencies
 ###############################################################

-FROM registry.access.redhat.com/ubi9/ubi-minimal:${BASE_UBI_IMAGE_TAG} AS base-builder
+FROM centos-deps-builder AS base-builder

 ARG PYTHON_VERSION=3.12
 ARG OPENBLAS_VERSION=0.3.29

@@ -20,25 +51,27 @@ ENV UV_LINK_MODE=copy
 # Note: A symlink for libatomic.so is created for gcc-13 (linker fails to find libatomic otherwise - reqd. for sentencepiece)
 # Note: A dummy file 'control' is created in /tmp/ to artificially create dependencies between stages when building stages in parallel
 # when `--jobs=<N>` is passed with podman build command
-RUN microdnf install -y openssl-devel dnf \
-    && dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm \
-    && dnf config-manager --set-enabled codeready-builder-for-rhel-9-ppc64le-rpms \
+COPY --from=openblas-builder /tmp/control /dev/null
+
+RUN --mount=type=bind,from=openblas-builder,source=/OpenBLAS-$OPENBLAS_VERSION/,target=/openblas/,rw \
+    dnf install -y openssl-devel \
     && dnf install -y \
-       git tar gcc-toolset-13 automake libtool numactl-devel lapack-devel \
+       git tar gcc-toolset-13 automake libtool \
        pkgconfig xsimd zeromq-devel kmod findutils protobuf* \
-       libtiff-devel libjpeg-devel openjpeg2-devel zlib-devel \
-       freetype-devel lcms2-devel libwebp-devel tcl-devel tk-devel \
-       harfbuzz-devel fribidi-devel libraqm-devel libimagequant-devel libxcb-devel \
+       libtiff-devel libjpeg-devel zlib-devel freetype-devel libwebp-devel \
+       harfbuzz-devel libraqm-devel libimagequant-devel libxcb-devel \
        python${PYTHON_VERSION}-devel python${PYTHON_VERSION}-pip \
     && dnf clean all \
+    && PREFIX=/usr/local make -C /openblas install \
     && ln -sf /usr/lib64/libatomic.so.1 /usr/lib64/libatomic.so \
     && python${PYTHON_VERSION} -m venv ${VIRTUAL_ENV} \
     && python -m pip install -U pip uv \
     && uv pip install wheel build "setuptools<70" setuptools_scm setuptools_rust meson-python 'cmake<4' ninja cython scikit_build_core scikit_build \
-    && curl -sL https://ftp2.osuosl.org/pub/ppc64el/openblas/latest/Openblas_${OPENBLAS_VERSION}_ppc64le.tar.gz | tar xvf - -C /usr/local \
     && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \
     && cd /tmp && touch control


 ###############################################################
 # Stage to build torch family
 ###############################################################

@@ -48,6 +81,8 @@ FROM base-builder AS torch-builder
 ARG MAX_JOBS
 ARG TORCH_VERSION=2.6.0
 ARG _GLIBCXX_USE_CXX11_ABI=1
+ARG OPENBLAS_VERSION=0.3.29
+
 RUN --mount=type=cache,target=/root/.cache/uv \
     source /opt/rh/gcc-toolset-13/enable && \
     git clone --recursive https://github.com/pytorch/pytorch.git -b v${TORCH_VERSION} && \

@@ -109,7 +144,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     .. && \
     make install -j ${MAX_JOBS:-$(nproc)} && \
     cd ../../python/ && \
-    uv pip install -v -r requirements-wheel-build.txt && \
+    uv pip install -v -r requirements-build.txt && uv pip install numpy==2.1.3 && \
+    pip show numpy && ls -lrt /opt/vllm/lib/python3.12/site-packages/numpy && \
     PYARROW_PARALLEL=${PYARROW_PARALLEL:-$(nproc)} \
     python setup.py build_ext \
     --build-type=release --bundle-arrow-cpp \

@@ -132,47 +168,9 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     cd opencv-python && \
     sed -i -E -e 's/"setuptools.+",/"setuptools",/g' pyproject.toml && \
     cd opencv && git cherry-pick --no-commit $OPENCV_PATCH && cd .. && \
+    uv pip install scikit-build && \
     python -m build --wheel --installer=uv --outdir /opencvwheels/

-###############################################################
-# Stage to build vllm - this stage builds and installs
-# vllm, tensorizer and vllm-tgis-adapter and builds uv cache
-# for transitive dependencies - eg. grpcio
-###############################################################
-
-FROM base-builder AS vllmcache-builder
-
-COPY --from=torch-builder /tmp/control /dev/null
-COPY --from=arrow-builder /tmp/control /dev/null
-COPY --from=cv-builder /tmp/control /dev/null
-
-ARG VLLM_TARGET_DEVICE=cpu
-ARG GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1
-
-# this step installs vllm and populates uv cache
-# with all the transitive dependencies
-RUN --mount=type=cache,target=/root/.cache/uv \
-    source /opt/rh/gcc-toolset-13/enable && \
-    git clone https://github.com/huggingface/xet-core.git && cd xet-core/hf_xet/ && \
-    uv pip install maturin && \
-    uv build --wheel --out-dir /hf_wheels/
-RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,from=torch-builder,source=/torchwheels/,target=/torchwheels/,ro \
-    --mount=type=bind,from=arrow-builder,source=/arrowwheels/,target=/arrowwheels/,ro \
-    --mount=type=bind,from=cv-builder,source=/opencvwheels/,target=/opencvwheels/,ro \
-    --mount=type=bind,src=.,dst=/src/,rw \
-    source /opt/rh/gcc-toolset-13/enable && \
-    uv pip install /opencvwheels/*.whl /arrowwheels/*.whl /torchwheels/*.whl && \
-    sed -i -e 's/.*torch.*//g' /src/pyproject.toml /src/requirements/*.txt && \
-    uv pip install pandas pythran pybind11 /hf_wheels/*.whl && \
-    # sentencepiece.pc is in some pkgconfig inside uv cache
-    export PKG_CONFIG_PATH=$(find / -type d -name "pkgconfig" 2>/dev/null | tr '\n' ':') && \
-    uv pip install -r /src/requirements/common.txt -r /src/requirements/cpu.txt -r /src/requirements/build.txt --no-build-isolation && \
-    cd /src/ && \
-    uv build --wheel --out-dir /vllmwheel/ --no-build-isolation && \
-    uv pip install /vllmwheel/*.whl
-
-
 ###############################################################
 # Stage to build numactl
 ###############################################################

@@ -188,6 +186,49 @@ RUN git clone --recursive https://github.com/numactl/numactl.git -b v${NUMACTL_V
     && autoreconf -i && ./configure \
     && make -j ${MAX_JOBS:-$(nproc)}

+
+###############################################################
+# Stage to build vllm - this stage builds and installs
+# vllm, tensorizer and vllm-tgis-adapter and builds uv cache
+# for transitive dependencies - eg. grpcio
+###############################################################
+
+FROM base-builder AS vllmcache-builder
+
+COPY --from=torch-builder /tmp/control /dev/null
+COPY --from=arrow-builder /tmp/control /dev/null
+COPY --from=cv-builder /tmp/control /dev/null
+COPY --from=numa-builder /tmp/control /dev/null
+
+ARG VLLM_TARGET_DEVICE=cpu
+ARG GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1
+
+# this step installs vllm and populates uv cache
+# with all the transitive dependencies
+RUN --mount=type=cache,target=/root/.cache/uv \
+    source /opt/rh/gcc-toolset-13/enable && \
+    git clone https://github.com/huggingface/xet-core.git && cd xet-core/hf_xet/ && \
+    uv pip install maturin && \
+    uv build --wheel --out-dir /hf_wheels/
+RUN --mount=type=cache,target=/root/.cache/uv \
+    --mount=type=bind,from=torch-builder,source=/torchwheels/,target=/torchwheels/,ro \
+    --mount=type=bind,from=arrow-builder,source=/arrowwheels/,target=/arrowwheels/,ro \
+    --mount=type=bind,from=cv-builder,source=/opencvwheels/,target=/opencvwheels/,ro \
+    --mount=type=bind,from=numa-builder,source=/numactl/,target=/numactl/,rw \
+    --mount=type=bind,src=.,dst=/src/,rw \
+    source /opt/rh/gcc-toolset-13/enable && \
+    uv pip install /opencvwheels/*.whl /arrowwheels/*.whl /torchwheels/*.whl && \
+    sed -i -e 's/.*torch.*//g' /src/pyproject.toml /src/requirements/*.txt && \
+    uv pip install pandas pythran pybind11 /hf_wheels/*.whl && \
+    make -C /numactl install && \
+    # sentencepiece.pc is in some pkgconfig inside uv cache
+    export PKG_CONFIG_PATH=$(find / -type d -name "pkgconfig" 2>/dev/null | tr '\n' ':') && \
+    uv pip install -r /src/requirements/common.txt -r /src/requirements/cpu.txt -r /src/requirements/build.txt --no-build-isolation && \
+    cd /src/ && \
+    uv build --wheel --out-dir /vllmwheel/ --no-build-isolation && \
+    uv pip install /vllmwheel/*.whl
+
+
 ###############################################################
 # Stage to build lapack
 ###############################################################

@@ -217,6 +258,7 @@ ENV PATH=${VIRTUAL_ENV}/bin:$PATH
 ENV PKG_CONFIG_PATH=/usr/local/lib/pkgconfig/
 ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib64:/usr/local/lib:/usr/lib64:/usr/lib
 ENV UV_LINK_MODE=copy
+ENV OMP_NUM_THREADS=16

 # create artificial dependencies between stages for independent stages to build in parallel
 COPY --from=torch-builder /tmp/control /dev/null

@@ -225,11 +267,13 @@ COPY --from=cv-builder /tmp/control /dev/null
 COPY --from=vllmcache-builder /tmp/control /dev/null
 COPY --from=numa-builder /tmp/control /dev/null
 COPY --from=lapack-builder /tmp/control /dev/null
+COPY --from=openblas-builder /tmp/control /dev/null

 # install gcc-11, python, openblas, numactl, lapack
 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,from=numa-builder,source=/numactl/,target=/numactl/,rw \
     --mount=type=bind,from=lapack-builder,source=/lapack/,target=/lapack/,rw \
+    --mount=type=bind,from=openblas-builder,source=/OpenBLAS-$OPENBLAS_VERSION/,target=/openblas/,rw \
     rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm && \
     microdnf install --nodocs -y \
     tar findutils openssl \

@@ -241,8 +285,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     && microdnf clean all \
     && python${PYTHON_VERSION} -m venv ${VIRTUAL_ENV} \
     && python -m pip install -U pip uv --no-cache \
-    && curl -sL https://ftp2.osuosl.org/pub/ppc64el/openblas/latest/Openblas_${OPENBLAS_VERSION}_ppc64le.tar.gz | tar xvf - -C /usr/local \
     && make -C /numactl install \
+    && PREFIX=/usr/local make -C /openblas install \
     && uv pip install 'cmake<4' \
     && cmake --install /lapack/build \
     && uv pip uninstall cmake
@@ -1,7 +1,5 @@
 # default base image
 ARG REMOTE_VLLM="0"
-ARG USE_CYTHON="0"
-ARG BUILD_RPD="1"
 ARG COMMON_WORKDIR=/app
 ARG BASE_IMAGE=rocm/vllm-dev:base

@@ -15,7 +13,7 @@ RUN apt-get update -q -y && apt-get install -q -y \
     sqlite3 libsqlite3-dev libfmt-dev libmsgpack-dev libsuitesparse-dev \
     apt-transport-https ca-certificates wget curl
 # Remove sccache
-RUN python3 -m pip install --upgrade pip && pip install setuptools_scm
+RUN python3 -m pip install --upgrade pip
 RUN apt-get purge -y sccache; python3 -m pip uninstall -y sccache; rm -f "$(which sccache)"
 ARG COMMON_WORKDIR
 WORKDIR ${COMMON_WORKDIR}

@@ -30,18 +28,17 @@ ARG VLLM_REPO="https://github.com/vllm-project/vllm.git"
 ARG VLLM_BRANCH="main"
 ONBUILD RUN git clone ${VLLM_REPO} \
	    && cd vllm \
-	    && git checkout ${VLLM_BRANCH}
+	    && git fetch -v --prune -- origin ${VLLM_BRANCH} \
+	    && git checkout FETCH_HEAD
 FROM fetch_vllm_${REMOTE_VLLM} AS fetch_vllm

 # -----------------------
 # vLLM build stages
 FROM fetch_vllm AS build_vllm
-ARG USE_CYTHON
 # Build vLLM
 RUN cd vllm \
     && python3 -m pip install -r requirements/rocm.txt \
     && python3 setup.py clean --all \
-    && if [ ${USE_CYTHON} -eq "1" ]; then python3 tests/build_cython.py build_ext --inplace; fi \
     && python3 setup.py bdist_wheel --dist-dir=dist
 FROM scratch AS export_vllm
 ARG COMMON_WORKDIR

@@ -90,13 +87,6 @@ RUN case "$(which python3)" in \
     *) ;; esac

 RUN python3 -m pip install --upgrade huggingface-hub[cli]
-ARG BUILD_RPD
-RUN if [ ${BUILD_RPD} -eq "1" ]; then \
-    git clone -b nvtx_enabled https://github.com/ROCm/rocmProfileData.git \
-    && cd rocmProfileData/rpd_tracer \
-    && pip install -r requirements.txt && cd ../ \
-    && make && make install \
-    && cd hipMarker && python3 setup.py install ; fi

 # Install vLLM
 RUN --mount=type=bind,from=export_vllm,src=/,target=/install \

@@ -117,12 +107,6 @@ ENV TOKENIZERS_PARALLELISM=false
 # ENV that can improve safe tensor loading, and end-to-end time
 ENV SAFETENSORS_FAST_GPU=1

-# User-friendly environment setting for multi-processing to avoid below RuntimeError.
-# RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing,
-# you must use the 'spawn' start method
-# See https://pytorch.org/docs/stable/notes/multiprocessing.html#cuda-in-multiprocessing
-ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
-
 # Performance environment variable.
 ENV HIP_FORCE_DEV_KERNARG=1
Some files were not shown because too many files have changed in this diff.