#!/usr/bin/env bash
# Build ONNX Runtime (shared lib, CUDA + TensorRT execution providers) inside
# the NVIDIA DeepStream 6.3 container, then copy the packaged release
# directory into the host's current working directory via the /output mount.
#
# Tunables (passed to the container as env vars):
#   ONNXRUNTIME_REPO / ONNXRUNTIME_COMMIT - source location and tag to build
#   BUILD_CONFIG                          - CMake config (Release/Debug/...)
#   CMAKE_VERSION                         - upstream CMake release to install
#   CUDA_ARCHITECTURES                    - semicolon-separated SM list
docker run \
  --rm \
  -it \
  -e ONNXRUNTIME_REPO=https://github.com/microsoft/onnxruntime \
  -e ONNXRUNTIME_COMMIT=v1.17.0 \
  -e BUILD_CONFIG=Release \
  -e CMAKE_VERSION=3.28.3 \
  -e CUDA_ARCHITECTURES="70;75;80;86;89" \
  --entrypoint "" \
  -v "$(pwd)":/output \
  -w /tmp \
  nvcr.io/nvidia/deepstream:6.3-triton-multiarch \
  /bin/bash -c "
  # The distro cmake is too old for this onnxruntime revision; replace it
  # with the pinned upstream release. (apt-get, not apt: apt's CLI is not
  # stable for scripting.)
  apt-get remove -y cmake &&\
  rm -rf /usr/local/bin/cmake &&\
  mkdir /tmp/cmake &&\
  wget https://github.com/Kitware/CMake/releases/download/v\${CMAKE_VERSION}/cmake-\${CMAKE_VERSION}-linux-\$(uname -m).tar.gz &&\
  tar zxf cmake-\${CMAKE_VERSION}-linux-\$(uname -m).tar.gz --strip-components=1 -C /tmp/cmake &&\
  export PATH=\$PATH:/tmp/cmake/bin &&\
  # Clone onnxruntime at the pinned commit and build with CUDA + TensorRT.
  # (apt-get update first: NVIDIA images often ship with package lists
  # stripped, which would make the install fail.)
  apt-get update &&\
  apt-get install -y patch &&\
  git clone \${ONNXRUNTIME_REPO} onnxruntime &&\
  cd onnxruntime &&\
  git checkout \${ONNXRUNTIME_COMMIT} &&\
  /bin/sh build.sh \
    --parallel 8 \
    --build_shared_lib \
    --allow_running_as_root \
    --cuda_home /usr/local/cuda \
    --cudnn_home /usr/lib/\$(uname -m)-linux-gnu/ \
    --use_tensorrt \
    --tensorrt_home /usr/lib/\$(uname -m)-linux-gnu/ \
    --config \${BUILD_CONFIG} \
    --skip_tests \
    --cmake_extra_defines '\"CMAKE_CUDA_ARCHITECTURES='\${CUDA_ARCHITECTURES}'\"' 'onnxruntime_BUILD_UNIT_TESTS=OFF' &&\
  # Package the stripped binaries (copy_strip_binary.sh reads its inputs
  # from the env vars set on its invocation line) and stage the result
  # where the host bind-mount can see it.
  export ONNXRUNTIME_VERSION=\$(cat /tmp/onnxruntime/VERSION_NUMBER) &&\
  rm -rf /tmp/onnxruntime/build/onnxruntime-linux-\$(uname -m)-gpu-\${ONNXRUNTIME_VERSION} &&\
  BINARY_DIR=build \
  ARTIFACT_NAME=onnxruntime-linux-\$(uname -m)-gpu-\${ONNXRUNTIME_VERSION} \
  LIB_NAME=libonnxruntime.so \
  BUILD_CONFIG=Linux/\${BUILD_CONFIG} \
  SOURCE_DIR=/tmp/onnxruntime \
  COMMIT_ID=\$(git rev-parse HEAD) \
  tools/ci_build/github/linux/copy_strip_binary.sh &&\
  cd /tmp/onnxruntime/build/onnxruntime-linux-\$(uname -m)-gpu-\${ONNXRUNTIME_VERSION}/lib/ &&\
  # Versioned symlink so downstream consumers that link against the
  # versioned name can resolve it.
  ln -s libonnxruntime.so libonnxruntime.so.\${ONNXRUNTIME_VERSION} &&\
  cp -r /tmp/onnxruntime/build/onnxruntime-linux-\$(uname -m)-gpu-\${ONNXRUNTIME_VERSION} /output
  "