OpenCV CUDA build on Ubuntu 22.04
Get the CUDA 12.8 runfile and install it, then get cuDNN for CUDA 12.x.
Install any recent GeForce driver, or use the driver bundled with the CUDA runfile. Also get the NVIDIA Video Codec SDK.
Links:
https://developer.nvidia.com/cudnn-archive
https://developer.nvidia.com/cuda-12-8-1-download-archive
https://developer.nvidia.com/rdp/cudnn-archive
https://developer.nvidia.com/downloads/designworks/video-codec-sdk/secure/13.0.19/video_codec_sdk_13.0.19.zip
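Before continuing, a quick sanity check that the driver and toolkit are visible (assuming the runfile installed to /usr/local/cuda-12.8):
nvidia-smi
/usr/local/cuda-12.8/bin/nvcc --version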
Note: when you install cuDNN from NVIDIA's apt repository, add the repository keyring first, as shown in NVIDIA's install instructions.
sudo apt update
sudo apt install cudnn cudnn-dev
sudo apt install libeigen3-dev libgoogle-glog-dev libgflags-dev libgtkglext1-dev libtesseract-dev libvtk7-dev openjpeg-tools libopenblas-dev libavcodec-dev libavformat-dev libavutil-dev libswscale-dev libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
If you have the archive version:
tar -xf cudnn-linux-x86_64-8.*_cuda12-archive.tar.xz
cd cudnn-linux-x86_64-8.*_cuda12-archive
sudo cp include/cudnn*.h /usr/local/cuda-12.8/include/
sudo cp lib/libcudnn* /usr/local/cuda-12.8/lib64/
cd /usr/local/cuda/targets/x86_64-linux/lib
sudo ln -sf libcudnn.so.8.9.7 libcudnn.so.8
sudo ln -sf libcudnn.so.8 libcudnn.so
sudo ln -sf libcudnn_ops_infer.so.8.9.7 libcudnn_ops_infer.so.8
sudo ln -sf libcudnn_ops_infer.so.8 libcudnn_ops_infer.so
sudo ln -sf libcudnn_ops_train.so.8.9.7 libcudnn_ops_train.so.8
sudo ln -sf libcudnn_ops_train.so.8 libcudnn_ops_train.so
sudo ln -sf libcudnn_cnn_infer.so.8.9.7 libcudnn_cnn_infer.so.8
sudo ln -sf libcudnn_cnn_infer.so.8 libcudnn_cnn_infer.so
sudo ln -sf libcudnn_cnn_train.so.8.9.7 libcudnn_cnn_train.so.8
sudo ln -sf libcudnn_cnn_train.so.8 libcudnn_cnn_train.so
sudo ln -sf libcudnn_adv_infer.so.8.9.7 libcudnn_adv_infer.so.8
sudo ln -sf libcudnn_adv_infer.so.8 libcudnn_adv_infer.so
sudo ln -sf libcudnn_adv_train.so.8.9.7 libcudnn_adv_train.so.8
sudo ln -sf libcudnn_adv_train.so.8 libcudnn_adv_train.so
sudo ldconfig
sudo chmod a+r /usr/local/cuda-12.8/include/cudnn*.h /usr/local/cuda-12.8/lib64/libcudnn*
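To confirm the dynamic linker can now resolve cuDNN, a quick check:
ldconfig -p | grep libcudnn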
We also need to copy the NVIDIA Video Codec SDK files to the appropriate locations. Inside the unzipped SDK:
ls ./Interface/
cuviddec.h nvcuvid.h nvEncodeAPI.h
ls ./Lib/linux/stubs/x86_64/
libnvcuvid.so libnvidia-encode.so
ls /usr/local/cuda-12.8
bin DOCS extras gds-12.8 lib64 nsight-compute-2025.1.1 nsight-systems-2024.6.2 nvvm share targets version.json
compute-sanitizer EULA.txt gds include libnvvp nsightee_plugins nvml README src tools
sudo cp ./Interface/* /usr/local/cuda-12.8/include
sudo cp ./Lib/linux/stubs/x86_64/* /usr/local/cuda-12.8/lib64
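To confirm the SDK files landed where OpenCV's CMake will look for them:
ls /usr/local/cuda-12.8/include | grep -E 'cuviddec|nvcuvid|nvEncodeAPI'
ls /usr/local/cuda-12.8/lib64 | grep -E 'libnvcuvid|libnvidia-encode'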
1. Create a clean workspace
mkdir -p ~/src/opencv_cuda
cd ~/src/opencv_cuda
2. Create a venv:
uv venv venv --python 3.12
source ./venv/bin/activate
uv pip install -U pip
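The Python bindings need NumPy at configure time, so install it into the venv (the Python3_NumPy_INCLUDE_DIRS flag below points at a venv's NumPy headers):
uv pip install numpy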
3. Clone OpenCV and opencv_contrib:
git clone https://github.com/opencv/opencv.git
cd opencv
git fetch --tags
git checkout 4.12.0
cd ..
git clone https://github.com/opencv/opencv_contrib.git
cd opencv_contrib
git fetch --tags
git checkout 4.12.0
cd ..
4. Verify layout
~/src/opencv_cuda/
├── opencv/
├── opencv_contrib/
mkdir opencv_build
cd opencv_build
opencv_cuda/
├── opencv/
├── opencv_contrib/
├── opencv_build/
5. (Optional but recommended) Verify version alignment
cd ../opencv
git describe --tags
cd ../opencv_contrib
git describe --tags
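Both commands should print 4.12.0; if they disagree, redo the checkouts so opencv and opencv_contrib are on the same tag.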
If you need to start over, clean and rebuild:
cd ~/src/opencv_cuda
rm -rf opencv_build
mkdir opencv_build
cd opencv_build
This is what I used. Two notes: Python3_NumPy_INCLUDE_DIRS points at the NumPy headers inside my venv, so adjust that path for yours; and CUDA_ARCH_BIN=7.5 targets a Turing GPU, so set it to your card's compute capability.
Make (default generator) version:
cmake -S ../opencv -B . \
-D CMAKE_BUILD_TYPE=Release \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D CMAKE_C_COMPILER=/usr/bin/gcc-12 \
-D CMAKE_CXX_COMPILER=/usr/bin/g++-12 \
-D CMAKE_CUDA_HOST_COMPILER=/usr/bin/g++-12 \
-D CUDAToolkit_ROOT=/usr/local/cuda-12.8 \
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-12.8 \
-D OPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules \
-D WITH_CUDA=ON \
-D WITH_CUDNN=ON \
-D OPENCV_DNN_CUDA=ON \
-D WITH_CUBLAS=ON \
-D WITH_CUFFT=ON \
-D CUDA_ARCH_BIN=7.5 \
-D ENABLE_FAST_MATH=ON \
-D CUDA_FAST_MATH=ON \
-D WITH_TBB=ON \
-D WITH_OPENGL=ON \
-D WITH_GSTREAMER=ON \
-D WITH_V4L=ON \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_opencv_python3=ON \
-D BUILD_opencv_python2=OFF \
-D BUILD_opencv_world=OFF \
-D Python3_EXECUTABLE=/usr/bin/python3.12 \
-D Python3_INCLUDE_DIR=/usr/include/python3.12 \
-D Python3_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.12.so \
-D Python3_NumPy_INCLUDE_DIRS=/media/john/5bd86d4c-f31e-4f83-9624-912cb737cf62/image-matcher/venv/lib/python3.12/site-packages/numpy/_core/include \
-D OPENCV_GENERATE_PKGCONFIG=ON \
-D OPENCV_PC_FILE_NAME=opencv.pc
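Before building, check the configuration summary CMake just printed for lines like NVIDIA CUDA: YES and cuDNN: YES. You can also grep the cache (variable names can vary slightly between OpenCV versions):
grep -E 'WITH_CUDA|CUDNN|NVCUVID' CMakeCache.txt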
cmake --build . --parallel $(nproc)
sudo make install
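Assuming the default apps were built, the install also puts the opencv_version tool under the prefix, which is a quick way to confirm the system-wide install:
/usr/local/bin/opencv_version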
Ninja version (identical flags, plus -G Ninja):
cmake -S ../opencv -B . -G Ninja \
-D CMAKE_BUILD_TYPE=Release \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D CMAKE_C_COMPILER=/usr/bin/gcc-12 \
-D CMAKE_CXX_COMPILER=/usr/bin/g++-12 \
-D CMAKE_CUDA_HOST_COMPILER=/usr/bin/g++-12 \
-D CUDAToolkit_ROOT=/usr/local/cuda-12.8 \
-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-12.8 \
-D OPENCV_EXTRA_MODULES_PATH=../opencv_contrib/modules \
-D WITH_CUDA=ON \
-D WITH_CUDNN=ON \
-D OPENCV_DNN_CUDA=ON \
-D WITH_CUBLAS=ON \
-D WITH_CUFFT=ON \
-D CUDA_ARCH_BIN=7.5 \
-D ENABLE_FAST_MATH=ON \
-D CUDA_FAST_MATH=ON \
-D WITH_TBB=ON \
-D WITH_OPENGL=ON \
-D WITH_GSTREAMER=ON \
-D WITH_V4L=ON \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_opencv_python3=ON \
-D BUILD_opencv_python2=OFF \
-D BUILD_opencv_world=OFF \
-D Python3_EXECUTABLE=/usr/bin/python3.12 \
-D Python3_INCLUDE_DIR=/usr/include/python3.12 \
-D Python3_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.12.so \
-D Python3_NumPy_INCLUDE_DIRS=/media/john/5bd86d4c-f31e-4f83-9624-912cb737cf62/image-matcher/venv/lib/python3.12/site-packages/numpy/_core/include \
-D OPENCV_GENERATE_PKGCONFIG=ON \
-D OPENCV_PC_FILE_NAME=opencv.pc
ninja -j8
sudo ninja install
You can try the Ninja build; it is usually faster than Make.
When the build finishes, create the wheel (with the venv still active):
cd opencv_build
uv pip install --upgrade pip setuptools wheel
python3 setup.py bdist_wheel
ls ./modules/python/dist/
uv pip install ./modules/python/dist/opencv_python-4.12.0-*.whl
Test:
python3 -c "import cv2; print(cv2.__version__)"
And check for CUDA:
python - <<EOF
import cv2
print(cv2.__version__)
print(cv2.cuda.getCudaEnabledDeviceCount())
EOF
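As a further smoke test you can round-trip an image through GPU memory. A minimal sketch; cv2.cuda.createGaussianFilter comes from the cudafilters contrib module built above:
python3 - <<'EOF'
import numpy as np
import cv2

# Upload a small random image to the GPU, blur it there, download the result.
src = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
gpu = cv2.cuda_GpuMat()
gpu.upload(src)
blur = cv2.cuda.createGaussianFilter(cv2.CV_8UC1, cv2.CV_8UC1, (5, 5), 0)
out = blur.apply(gpu).download()
print("GPU round-trip OK:", out.shape)
EOF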
Note that sudo make install installs the cv2 bindings for the Python interpreter configured at CMake time (Python3_EXECUTABLE above), while the wheel built here goes into whatever environment you install it in.
On the same system, the built wheel should work across different virtual environments as long as they use the same Python version (3.12).
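Since the build was configured with OPENCV_GENERATE_PKGCONFIG=ON and OPENCV_PC_FILE_NAME=opencv.pc, C++ consumers can find the system install via pkg-config:
pkg-config --modversion opencv
pkg-config --cflags --libs opencv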