Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Select an option

  • Save toonarmycaptain/65887b08940b08ee1e2851317cbfaace to your computer and use it in GitHub Desktop.

Select an option

Save toonarmycaptain/65887b08940b08ee1e2851317cbfaace to your computer and use it in GitHub Desktop.
Download all images and videos from your ClassDojo story feed — single directory output location instead of per-file dialog, re-run safe, deduplicates
// ClassDojo Story Feed Downloader (images + videos)
// Based on https://gist.github.com/travishorn/c2b6111a4e63efdbf87a1de84c833ab1#gistcomment-4647516
//
// Usage:
// 1. Log into https://home.classdojo.com in Chrome/Edge
// 2. Open DevTools console (F12 -> Console)
// 3. Paste this entire script and press Enter
// 4. Pick your download folder (e.g. TobiasDojo)
// 5. Images download directly; videos are saved as download_videos.sh
// 6. Run in terminal: cd ~/Pictures/TobiasDojo && bash download_videos.sh
//
// Re-run safe: hashes existing files first and skips anything already downloaded.
// Uses `var` throughout so you can re-paste without "redeclaration of const" errors.
// First page of the story feed API. Replace <YourStudentId> with the id
// visible in the URL when viewing that child's story on home.classdojo.com
// (or drop the &studentId=... parameter entirely to fetch all children).
var FIRST_FEED =
"https://home.classdojo.com/api/storyFeed?withStudentCommentsAndLikes=true&studentId=<YourStudentId>";
// Delay between downloads in ms (throttle so the browser isn't overwhelmed)
var DELAY_MS = 300;
// Fetch one page of the story feed API and parse the JSON body.
// Mirrors the headers the ClassDojo web client sends so the API accepts
// the request; the session cookie rides along via credentials: "include".
function getFeed(url) {
  var options = {
    method: "GET",
    mode: "cors",
    credentials: "include",
    body: null,
    referrer: "https://home.classdojo.com/",
    referrerPolicy: "strict-origin-when-cross-origin",
    headers: {
      accept: "*/*",
      "accept-language": "en-US,en;q=0.9",
      "cache-control": "no-cache",
      pragma: "no-cache",
      "sec-fetch-dest": "empty",
      "sec-fetch-mode": "cors",
      "sec-fetch-site": "same-origin",
      "x-client-identifier": "Web",
      // Ask the API to return signed CDN URLs for attachments.
      "x-sign-attachment-urls": "true",
    },
  };
  return fetch(url, options).then(function (res) { return res.json(); });
}
// Flatten one feed page into a list of downloadable attachments.
// Each result carries the signed URL, the post timestamp, and the
// student's name (or "unknown" when the post has none).
function grabFeedAttachments(feed) {
  var results = [];
  feed._items.forEach(function (item) {
    var attachments = item.contents.attachments ?? [];
    attachments
      .filter(function (att) { return typeof att.path === "string"; })
      .forEach(function (att) {
        results.push({
          url: att.path,
          time: item.time,
          studentName: item.contents.studentName ?? "unknown",
        });
      });
  });
  return results;
}
// Derive a file extension (with leading dot) from a URL's path.
// Falls back to ".jpg" for unrecognized extensions or unparseable URLs.
// Fix: "webm" was missing here although isVideoUrl treats it as a video,
// so .webm videos were previously misnamed with the ".jpg" fallback.
function extensionFromUrl(url) {
  var KNOWN = ["jpg", "jpeg", "png", "gif", "webp", "mp4", "mov", "webm", "heic"];
  try {
    var pathname = new URL(url).pathname;
    var ext = pathname.split(".").pop()?.toLowerCase();
    if (KNOWN.includes(ext)) {
      return "." + ext;
    }
  } catch (e) {
    // Not a valid absolute URL; use the fallback below.
  }
  return ".jpg";
}
// True when the URL's path ends in a known video extension
// (case-insensitive); false for anything else, including bad URLs.
function isVideoUrl(url) {
  var VIDEO_EXTS = ["mp4", "mov", "webm"];
  try {
    var last = new URL(url).pathname.split(".").pop();
    return VIDEO_EXTS.includes(last?.toLowerCase());
  } catch (e) {
    return false;
  }
}
// Promise-based delay: resolves (with no value) after ms milliseconds.
function sleep(ms) {
  return new Promise(function (done) {
    setTimeout(done, ms);
  });
}
async function hashBlob(blob) {
var buffer = await blob.arrayBuffer();
var hashBuffer = await crypto.subtle.digest("SHA-256", buffer);
var hashArray = Array.from(new Uint8Array(hashBuffer));
return hashArray.map(function (b) { return b.toString(16).padStart(2, "0"); }).join("");
}
// Build a SHA-256 index of every file already in the chosen folder so a
// re-run can skip content that was downloaded before. Ignores directories,
// the generated shell script, and any entry that cannot be read.
async function hashExistingFiles(dirHandle) {
  var hashes = new Set();
  var count = 0;
  for await (var entry of dirHandle.values()) {
    // Only regular files participate in dedup; skip our own output script.
    if (entry.kind !== "file" || entry.name === "download_videos.sh") {
      continue;
    }
    try {
      var file = await entry.getFile();
      hashes.add(await hashBlob(file));
      count++;
      // Progress heartbeat for large folders.
      if (count % 100 === 0) {
        console.log(" Hashed " + count + " existing files...");
      }
    } catch (e) {
      // Unreadable entry (permissions, transient error): best-effort skip.
    }
  }
  console.log("Indexed " + count + " existing files (" + hashes.size + " unique hashes).");
  return hashes;
}
// Main driver. Orchestrates the whole export:
//   1. prompt once for a target folder (File System Access API),
//   2. hash its existing files so re-runs skip known content,
//   3. paginate the storyFeed API collecting attachment URLs,
//   4. download images directly into the folder,
//   5. emit a wget shell script for CORS-blocked videos.
(async function downloadAll() {
var dirHandle;
try {
// readwrite so we can create files in the chosen directory.
dirHandle = await window.showDirectoryPicker({ mode: "readwrite" });
} catch (e) {
// User dismissed the picker (or the browser denied access).
console.error("Folder selection cancelled. Aborting.");
return;
}
console.log("Saving to: " + dirHandle.name + "/");
// Phase 1: Hash existing files for dedup
console.log("Hashing existing files for dedup...");
var existingHashes = await hashExistingFiles(dirHandle);
// Phase 2: Scan all feed pages
console.log("Fetching story feed pages...");
var attachments = [];
var feed = await getFeed(FIRST_FEED);
attachments.push(...grabFeedAttachments(feed));
var pageCount = 1;
// Follow the API's pagination links until there is no next page
// or a page comes back empty.
while (feed._links?.next && feed._items.length > 0) {
feed = await getFeed(feed._links.next.href);
attachments.push(...grabFeedAttachments(feed));
pageCount++;
console.log(" Scanned page " + pageCount + " (" + attachments.length + " attachments so far)");
}
// Split into images and videos
// (videos go through the generated shell script; see Phase 4).
var images = [];
var videos = [];
for (var att of attachments) {
if (isVideoUrl(att.url)) {
videos.push(att);
} else {
images.push(att);
}
}
console.log("Found " + attachments.length + " total: " + images.length + " images, " + videos.length + " videos across " + pageCount + " pages.");
// Show student breakdown
var studentCounts = {};
for (var a of attachments) {
studentCounts[a.studentName] = (studentCounts[a.studentName] ?? 0) + 1;
}
console.log("By student:", studentCounts);
// Phase 3: Download images (fetch + File System Access API)
var saved = 0;
var skipped = 0;
var failed = 0;
console.log("\n--- Downloading " + images.length + " images ---");
for (var i = 0; i < images.length; i++) {
var img = images[i];
var ext = extensionFromUrl(img.url);
// Filename: index + ISO date (date part of the post timestamp) + student.
var datePart = img.time.split("T")[0];
var safeName = img.studentName.replace(/[^a-zA-Z0-9_-]/g, "_");
var filename = "img_" + String(i).padStart(4, "0") + "_" + datePart + "_" + safeName + ext;
try {
var response = await fetch(img.url);
if (!response.ok) throw new Error("HTTP " + response.status);
var blob = await response.blob();
// Dedup by content hash, not by filename: the index prefix can shift
// between runs, but identical bytes are always skipped.
var hash = await hashBlob(blob);
if (existingHashes.has(hash)) {
skipped++;
} else {
var fileHandle = await dirHandle.getFileHandle(filename, { create: true });
var writable = await fileHandle.createWritable();
await writable.write(blob);
await writable.close();
// Remember the hash so in-run duplicates are also skipped.
existingHashes.add(hash);
saved++;
}
// Progress line every 50 images and on the last one.
if ((i + 1) % 50 === 0 || i === images.length - 1) {
console.log(" Images: " + (i + 1) + "/" + images.length + " (" + saved + " new, " + skipped + " skipped, " + failed + " failed)");
}
} catch (e) {
failed++;
console.warn(" Failed [" + i + "] " + filename + ": " + e.message);
}
// Throttle between downloads (no delay after the final one).
if (i < images.length - 1) {
await sleep(DELAY_MS);
}
}
console.log("Images done: " + saved + " new, " + skipped + " already had, " + failed + " failed.");
// Phase 4: Generate shell script for videos (CORS-blocked CDN)
// The video CDN rejects cross-origin fetches, so instead of downloading
// here we emit a bash script of wget commands using the signed URLs.
if (videos.length > 0) {
console.log("\n--- Generating download script for " + videos.length + " videos ---");
var lines = [
"#!/bin/bash",
"# ClassDojo video download script",
"# Generated " + new Date().toISOString(),
"# " + videos.length + " videos",
"",
'cd "$(dirname "$0")"',
"",
"downloaded=0",
"skipped=0",
"failed=0",
"",
];
for (var v = 0; v < videos.length; v++) {
var vid = videos[v];
var vExt = extensionFromUrl(vid.url);
var vDate = vid.time.split("T")[0];
var vName = vid.studentName.replace(/[^a-zA-Z0-9_-]/g, "_");
var vFilename = "vid_" + String(v).padStart(4, "0") + "_" + vDate + "_" + vName + vExt;
// Single-quote-escape the URL for safe embedding in the bash script.
var escapedUrl = vid.url.replace(/'/g, "'\\''");
// Download to temp file, check if content already exists, then keep or discard
lines.push("tmpfile=$(mktemp)");
lines.push("if wget -q -O \"$tmpfile\" '" + escapedUrl + "'; then");
lines.push(" newh=$(sha256sum \"$tmpfile\" | cut -d' ' -f1)");
lines.push(" if grep -qxF \"$newh\" .content_hashes.txt 2>/dev/null; then");
lines.push(' skipped=$((skipped + 1))');
lines.push(" else");
lines.push(" mv \"$tmpfile\" '" + vFilename + "'");
lines.push(" echo \"$newh\" >> .content_hashes.txt");
lines.push(' downloaded=$((downloaded + 1))');
// Clear tmpfile so the cleanup line below doesn't remove the kept file.
lines.push(" tmpfile=''");
lines.push(" fi");
lines.push("else");
lines.push(' failed=$((failed + 1))');
lines.push("fi");
lines.push('[ -n "$tmpfile" ] && rm -f "$tmpfile"');
lines.push("");
}
lines.push('echo "Videos done: ${downloaded} new, ${skipped} already had, ${failed} failed."');
var script = lines.join("\n") + "\n";
// Write the hash index of existing files so the video script can dedup too
var hashLines = [];
for (var h of existingHashes) {
hashLines.push(h);
}
var hashFile = await dirHandle.getFileHandle(".content_hashes.txt", { create: true });
var hashWritable = await hashFile.createWritable();
await hashWritable.write(hashLines.join("\n") + "\n");
await hashWritable.close();
var scriptHandle = await dirHandle.getFileHandle("download_videos.sh", { create: true });
var scriptWritable = await scriptHandle.createWritable();
await scriptWritable.write(script);
await scriptWritable.close();
console.log("Saved download_videos.sh (" + videos.length + " videos).");
console.log("Run from terminal: cd ~/Pictures/TobiasDojo && bash download_videos.sh");
}
console.log("\nAll done! " + saved + " new images, " + skipped + " skipped, " + failed + " failed, " + videos.length + " videos queued in shell script.");
})();

Purpose

ClassDojo is a classroom communication app used to share reports between parents and teachers. Teachers track student behavior and upload photos or videos. ClassDojo doesn't provide a native export, so this script downloads all your media via the API.

This is a complete rewrite of the original DOM-scraping approach (forked from @Patrick330). Instead of scrolling through the feed and scraping CSS background-image styles, this version hits the storyFeed API directly and paginates through the entire history automatically. Based on @Loksly's approach from the original gist by @travishorn.

Browser compatibility

Chrome or Edge only. This script uses the File System Access API (showDirectoryPicker) which Firefox does not support.

Usage

  1. Log into home.classdojo.com in Chrome or Edge
  2. (Optional) Edit FIRST_FEED in the script to add &studentId=YOUR_STUDENT_ID for single-child filtering — find the ID in the URL when viewing that child's story
  3. Open DevTools console (F12 → Console), paste the script, press Enter
  4. Pick your download folder when prompted
  5. Wait for images to finish downloading
  6. For videos, run the generated shell script from your terminal:
    cd /path/to/your/folder && bash download_videos.sh

Features

  • Single folder prompt — uses File System Access API so you pick the download folder once, then all files write directly there instead of triggering the save dialog for every file.
  • Re-run safe — SHA-256 hashes existing files before downloading. Only fetches new content. Safe to re-run weeks later.
  • Deduplication — the feed can return the same image under different story posts; duplicates are detected and skipped.
  • Video handling — videos on svideos.classdojo.com block CORS, so the script generates a download_videos.sh shell script with wget commands using the signed CloudFront URLs. Also dedup-aware via a shared .content_hashes.txt hash index.
  • Single-student filtering — add &studentId= to only download one child's feed. withArchived=false is omitted as it can cause feed mixing between children.
  • Re-pasteable — uses var instead of const/let so you can re-paste without SyntaxError: redeclaration errors.
  • Throttled — 300ms delay between downloads to avoid overwhelming the browser.
  • Descriptive filenames — img_0042_2026-03-15_KidName.jpg, vid_0003_2026-04-16_KidName.mp4
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment