Dockerfile.2204cu115trt

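# Triton Inference Server r22.04 with the Python and ONNX Runtime (CUDA + TensorRT)
# backends, built from source on a CUDA 11.5 / cuDNN 8 devel base image.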
FROM nvidia/cuda:11.5.0-cudnn8-devel-ubuntu18.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y git curl
RUN curl https://repo.anaconda.com/miniconda/Miniconda3-py38_4.12.0-Linux-x86_64.sh -o /tmp/Miniconda3-py38_4.12.0-Linux-x86_64.sh && \
    bash /tmp/Miniconda3-py38_4.12.0-Linux-x86_64.sh -b -p /opt/miniconda
RUN apt-get install -y libboost-dev libre2-dev rapidjson-dev libnuma-dev libssl-dev libb64-dev libarchive-dev libzip-dev
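# Fetch the Triton r22.04 sources and a recent CMake, then build the server core
# (HTTP/gRPC endpoints, logging, Python backend) with build.py.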
RUN cd /tmp && git clone https://github.com/triton-inference-server/server.git -b r22.04 --recursive
RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.24.3/cmake-3.24.3-linux-x86_64.tar.gz | tar -xz -C /tmp
ENV PATH=/tmp/cmake-3.24.3-linux-x86_64/bin:/opt/miniconda/bin/:$PATH
RUN cd /tmp/server && /opt/miniconda/bin/python build.py --build-dir=/tmp/build \
    --enable-gpu --no-container-source --no-container-build --enable-logging \
    --endpoint=http --endpoint=grpc --backend=python
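# Build ONNX Runtime 1.10.0 from source with the CUDA and TensorRT execution
# providers. The TensorRT 8.2.5.1 tarball must be supplied in the build context.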
RUN cd /tmp && git clone https://github.com/microsoft/onnxruntime.git -b rel-1.10.0 --recursive
ENV CUDA_VERSION=11.5
COPY TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz /workspace/TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
RUN cd /workspace && tar zxvf TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz && mv TensorRT-8.2.5.1 tensorrt
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-$CUDA_VERSION/lib64
ENV C_INCLUDE_PATH=$C_INCLUDE_PATH:/usr/local/cuda-$CUDA_VERSION/include
ENV CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/local/cuda-$CUDA_VERSION/include
RUN cd /tmp/onnxruntime && ./build.sh --config Release --build_shared_lib --parallel \
    --skip_tests --skip_onnx_tests \
    --use_cuda --cuda_version=$CUDA_VERSION --cuda_home=/usr/local/cuda-$CUDA_VERSION --cudnn_home=/usr/local/cuda-$CUDA_VERSION \
    --use_tensorrt --tensorrt_home=/workspace/tensorrt \
    --cmake_extra_defines 'CMAKE_CUDA_ARCHITECTURES=70;80'
RUN cd /tmp/onnxruntime/build/Linux/Release/ && make install
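# Build the Triton ONNX Runtime backend against the onnxruntime just installed
# under /usr/local, rather than letting it do its own Docker-based build.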
RUN cd /tmp && git clone https://github.com/triton-inference-server/onnxruntime_backend.git -b r22.04 --recursive
RUN cd /tmp/onnxruntime_backend && mkdir build && cd build && \
    cmake -DCMAKE_INSTALL_PREFIX:PATH=`pwd`/install \
          -DTRITON_ENABLE_GPU=ON \
          -DTRITON_BUILD_CONTAINER_VERSION=22.04 \
          -DTRITON_ONNXRUNTIME_DOCKER_BUILD=OFF \
          -DTRITON_ONNXRUNTIME_LIB_PATHS=/usr/local/lib/ \
          -DTRITON_BUILD_ONNXRUNTIME_VERSION=1.10.0 \
          -DTRITON_BACKEND_REPO_TAG=r22.04 -DTRITON_CORE_REPO_TAG=r22.04 -DTRITON_COMMON_REPO_TAG=r22.04 ..
ENV C_INCLUDE_PATH=$C_INCLUDE_PATH:/usr/local/include/onnxruntime/core/session
ENV CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:/usr/local/include/onnxruntime/core/session
RUN cd /tmp/onnxruntime_backend/build && make -j && make install
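# Assemble the final /opt/tritonserver tree, then remove sources, build trees,
# installers, and the TensorRT tarball to keep the image smaller.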
RUN cp -r /tmp/build/tritonserver/install /opt/tritonserver && \
    cp -r /tmp/build/python/install/backends /opt/tritonserver && \
    cp -r /tmp/onnxruntime_backend/build/install/backends/onnxruntime /opt/tritonserver/backends
RUN cd /tmp && rm -rf server build Miniconda3-py38_4.12.0-Linux-x86_64.sh onnxruntime onnxruntime_backend cmake-3.24.3-linux-x86_64 && \
    rm /workspace/TensorRT-8.2.5.1.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
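# Runtime environment; Triton serves HTTP on 8000 and gRPC on 8001.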
ENV PATH=$PATH:/opt/tritonserver/bin
# The TensorRT shared libraries stay under /workspace/tensorrt, so make them
# resolvable at runtime for the TensorRT execution provider.
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/workspace/tensorrt/lib
EXPOSE 8000
EXPOSE 8001
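The COPY step expects the TensorRT 8.2.5.1 tarball to sit next to the Dockerfile, so download it from NVIDIA before building. A minimal build-and-run sketch follows; the image tag and the model-repository path are placeholders, and running with --gpus requires the NVIDIA Container Toolkit on the host:

    docker build -f Dockerfile.2204cu115trt -t tritonserver:22.04-cu115-trt .
    docker run --gpus=all --rm -p 8000:8000 -p 8001:8001 \
        -v /path/to/model_repository:/models \
        tritonserver:22.04-cu115-trt tritonserver --model-repository=/models
    curl -v localhost:8000/v2/health/ready

The last command polls Triton's standard HTTP readiness endpoint and should return 200 once the server has loaded the model repository.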