FROM intel/deep-learning-essentials:2025.2.2-0-devel-ubuntu24.04 AS vllm-base
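
# Register the Intel oneAPI apt repository and the kobuk-team intel-graphics
# PPA that provide the oneAPI and GPU driver packages installed below.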
RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \
    echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list && \
    add-apt-repository -y ppa:kobuk-team/intel-graphics
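
# System dependencies: media libraries for multimodal inputs (ffmpeg,
# libsndfile1, libgl1, libsm6, libxext6), NUMA tooling, and Python 3.12.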
RUN apt-get clean && apt-get update -y && \
    apt-get install -y --no-install-recommends --fix-missing \
        curl \
        ffmpeg \
        git \
        libsndfile1 \
        libsm6 \
        libxext6 \
        libgl1 \
        lsb-release \
        libaio-dev \
        numactl \
        wget \
        vim \
        python3.12 \
        python3.12-dev \
        python3-pip
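
# Make python3 and python resolve to Python 3.12.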
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1
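
# Intel GPU user-space stack: Level Zero loader and GPU driver, OpenCL ICD,
# ray-tracing support, and the ocloc offline compiler.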
RUN apt-get install -y libze1 libze-dev libze-intel-gpu1 intel-opencl-icd libze-intel-gpu-raytracing intel-ocloc

# This oneCCL release adds BMG (Battlemage) support, which the default oneAPI
# 2025.2 version lacks.
RUN wget https://github.com/uxlfoundation/oneCCL/releases/download/2021.15.6/intel-oneccl-2021.15.6.9_offline.sh
RUN bash intel-oneccl-2021.15.6.9_offline.sh -a --silent --eula accept && \
    echo "source /opt/intel/oneapi/setvars.sh --force" >> /root/.bashrc && \
    echo "source /opt/intel/oneapi/ccl/2021.15/env/vars.sh --force" >> /root/.bashrc

# Use bash for subsequent RUN instructions and load the oneAPI environment in
# interactive shells.
SHELL ["bash", "-c"]
CMD ["bash", "-c", "source /root/.bashrc && exec bash"]
WORKDIR /workspace/vllm
COPY requirements/xpu.txt /workspace/vllm/requirements/xpu.txt
COPY requirements/common.txt /workspace/vllm/requirements/common.txt

# suppress the Python "externally managed environment" (PEP 668) error
RUN python3 -m pip config set global.break-system-packages true

RUN --mount=type=cache,target=/root/.cache/pip \
    pip install --no-cache-dir \
    -r requirements/xpu.txt

ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/"
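
# Copy the full source tree; when GIT_REPO_CHECK is set, verify the repository
# state with tools/check_repo.sh.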
COPY . .
ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh; fi
ENV VLLM_TARGET_DEVICE=xpu
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
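
# Build and install vLLM from source; --no-build-isolation lets the build use
# the preinstalled torch/XPU toolchain instead of an isolated build environment.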
RUN --mount=type=cache,target=/root/.cache/pip \
    --mount=type=bind,source=.git,target=.git \
    pip install --no-build-isolation .

CMD ["/bin/bash"]

FROM vllm-base AS vllm-openai
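
# Serving image: extra Python dependencies, NIXL, and the `vllm serve`
# entrypoint.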
# install additional dependencies for the OpenAI-compatible API server
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install accelerate hf_transfer pytest pytest_asyncio lm_eval[api] modelscope

# install development dependencies (for testing)
RUN python3 -m pip install -e tests/vllm_test_utils

# install nixl from source
ENV NIXL_VERSION=0.7.0
RUN python3 /workspace/vllm/tools/install_nixl_from_source_ubuntu.py

# remove the torch-bundled oneCCL packages so they don't conflict with the
# standalone oneCCL installed above
RUN --mount=type=cache,target=/root/.cache/pip \
    pip uninstall oneccl oneccl-devel -y

ENTRYPOINT ["vllm", "serve"]
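
# Example usage (illustrative; the Dockerfile path, image tag, and model name
# below are placeholders):
#   docker build -f docker/Dockerfile.xpu --target vllm-openai -t vllm-xpu .
#   docker run --rm --device /dev/dri --ipc=host vllm-xpu <model-name>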