Skip to content

Instantly share code, notes, and snippets.

@maulikmadhavi
Last active September 29, 2024 14:11
Show Gist options
  • Select an option

  • Save maulikmadhavi/7da97c8754ba23ad90c7a9543afd03b3 to your computer and use it in GitHub Desktop.

Select an option

Save maulikmadhavi/7da97c8754ba23ad90c7a9543afd03b3 to your computer and use it in GitHub Desktop.
Efficient batched video reading with decord
import decord
import numpy as np
class EfficientVideoReader:
    """Batched, strided frame reader over a video file, backed by decord.

    Iterating over an instance yields numpy arrays of frames in batches of
    up to ``batch_size``, sampling every ``skip_frames``-th frame index in
    ``[0, max_frames)``. decord decodes only the requested frames, which is
    what makes the strided read efficient.
    """

    def __init__(self, video_path, skip_frames=1, max_frames=None, batch_size=32):
        """
        Initialize the EfficientVideoReader.

        Args:
            video_path (str): Path to the video file.
            skip_frames (int): Sampling step between frame indices.
                1 reads every frame, 2 reads every other frame, etc.
                Must be >= 1. (Despite the name, a value of 1 skips nothing.)
            max_frames (int): Upper bound on the frame *index* range to
                sample from (None for the whole video). Note: with
                skip_frames > 1 the number of frames actually read is
                about max_frames / skip_frames, not max_frames.
            batch_size (int): Maximum number of frames returned per
                iteration step. Must be >= 1.

        Raises:
            ValueError: If skip_frames < 1 or batch_size < 1.
        """
        if skip_frames < 1:
            raise ValueError("skip_frames must be >= 1")
        if batch_size < 1:
            raise ValueError("batch_size must be >= 1")
        # Ask decord to hand frames back as numpy arrays.
        decord.bridge.set_bridge('numpy')
        self.vr = decord.VideoReader(video_path)
        self.total_frames = len(self.vr)
        self.skip_frames = skip_frames
        # Clamp to the real video length; `max_frames or total` treats
        # None (and 0) as "use all frames".
        self.max_frames = min(max_frames or self.total_frames, self.total_frames)
        self.batch_size = batch_size
        # Precomputed frame indices that will be decoded, in order.
        self.indices = np.arange(0, self.max_frames, skip_frames)
        # Bug fix: initialize the cursor here, not only in __iter__, so
        # __next__()/reset() work even before iter() has been called.
        self.current_index = 0

    def __iter__(self):
        """Restart iteration from the first sampled frame and return self."""
        self.current_index = 0
        return self

    def __next__(self):
        """Return the next batch of decoded frames.

        Returns:
            numpy.ndarray: Up to ``batch_size`` frames; the final batch may
            be smaller.

        Raises:
            StopIteration: When all sampled indices have been consumed.
        """
        if self.current_index >= len(self.indices):
            raise StopIteration
        batch_end = min(self.current_index + self.batch_size, len(self.indices))
        batch_indices = self.indices[self.current_index:batch_end]
        # NOTE(review): with the 'numpy' bridge, get_batch may already
        # return an ndarray without .asnumpy() — confirm against the
        # installed decord version before relying on this chain.
        frames = self.vr.get_batch(batch_indices).asnumpy()
        self.current_index = batch_end
        return frames

    def __len__(self):
        """Return the total number of frames (not batches) to be read."""
        return len(self.indices)

    @property
    def frame_shape(self):
        """Shape of a single decoded frame (decodes frame 0 to inspect it)."""
        return self.vr[0].shape

    def reset(self):
        """Reset the reader to the beginning of the video."""
        self.current_index = 0
# Usage example
def process_video(video_path, model):
    """Example driver: stream a video in strided batches and run per-frame
    inference with `model` (the actual predict call is left as a placeholder).
    """
    reader = EfficientVideoReader(
        video_path,
        skip_frames=2,
        max_frames=1000,
        batch_size=32,
    )
    print(f"Processing {len(reader)} frames, shape: {reader.frame_shape}")
    for batch in reader:
        for frame in batch:
            # Your ML model inference here
            # result = model.predict(frame)
            pass
# Assuming you have a ML model
# model = load_your_ml_model()
# process_video('path/to/your/video.mp4', model)
# To use this in your ML demonstration:
# 1. Create an instance of `EfficientVideoReader` with your desired parameters.
# 2. Iterate over the reader to process frames in batches.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment