Skip to content

Instantly share code, notes, and snippets.

@quantumxt
Created April 7, 2026 09:20
Show Gist options
  • Select an option

  • Save quantumxt/c6fb704c1f2ed5d4146a70a3ed57f31d to your computer and use it in GitHub Desktop.

Select an option

Save quantumxt/c6fb704c1f2ed5d4146a70a3ed57f31d to your computer and use it in GitHub Desktop.
Star detection utility
import cv2
import numpy as np
import os
TARGET_WIDTH = 2048 # expected image width (px); other sizes are skipped
TARGET_HEIGHT = 1536 # expected image height (px)
FOV_WIDTH = 3.54 # presumably camera horizontal field of view (deg) -- TODO confirm against optics
FOV_HEIGHT = 2.62 # presumably camera vertical field of view (deg) -- TODO confirm against optics
DPP_HORIZONTAL = FOV_WIDTH/TARGET_WIDTH # Degree/px conversion, horizontal
DPP_VERTICAL = FOV_HEIGHT/TARGET_HEIGHT # Degree/px conversion, vertical
# --- Tunable parameters ---
MIN_VAL = 25 # minimum pixel value to consider as a bright candidate
MIN_AREA = 2 # minimum connected-component area to be a star (not a hot pixel)
BLUR_SIGMA = 2 # gaussian blur sigma for weighted centroid
PAD_RADIUS = 10 # extra padding (px) added to star circle radius
CENTROID_R = 15 # patch radius for weighted centroid refinement
SHOW_STAR_MARKERS = True # set False to hide the yellow circle and crosshair
def detect_star(img: np.ndarray):
    """
    Detect the brightest real star in a grayscale image.

    Strategy:
      - Connected components on pixels above MIN_VAL
      - Ignore isolated single pixels (hot pixels, area < MIN_AREA)
      - Pick the brightest multi-pixel cluster
      - Refine centroid with a Gaussian-weighted centroid for subpixel accuracy

    Returns (cx, cy, circle_radius, peak_val) or None if no star found.
    """
    smoothed = cv2.GaussianBlur(img.astype(np.float32), (0, 0), BLUR_SIGMA)

    # Label every bright region; label 0 is the background.
    candidate_mask = (img > MIN_VAL).astype(np.uint8)
    n_labels, _, stats, cents = cv2.connectedComponentsWithStats(
        candidate_mask, connectivity=8
    )

    winner = None
    for label in range(1, n_labels):
        if stats[label, cv2.CC_STAT_AREA] < MIN_AREA:
            continue  # too small to be a star (likely a hot pixel)
        left = stats[label, cv2.CC_STAT_LEFT]
        top = stats[label, cv2.CC_STAT_TOP]
        width = stats[label, cv2.CC_STAT_WIDTH]
        height = stats[label, cv2.CC_STAT_HEIGHT]
        # Peak value is taken from the raw image inside the bounding box.
        peak = int(img[top:top + height, left:left + width].max())
        if winner is None or peak > winner[0]:
            winner = (peak, left, top, width, height, cents[label])

    if winner is None:
        return None

    peak, _, _, width, height, (rough_cx, rough_cy) = winner

    # Subpixel weighted centroid over a patch around the rough centre; the
    # patch half-size grows with the star's bounding box so the PSF fits.
    row_c, col_c = int(rough_cy), int(rough_cx)
    half = max(CENTROID_R, max(width, height))
    r0, r1 = max(0, row_c - half), min(img.shape[0], row_c + half)
    c0, c1 = max(0, col_c - half), min(img.shape[1], col_c + half)

    patch = smoothed[r0:r1, c0:c1].astype(np.float64)
    patch = np.clip(patch - patch.min(), 0, None)  # remove local background level
    weight_sum = patch.sum()
    if weight_sum == 0:
        return None

    rows, cols = np.mgrid[r0:r1, c0:c1]
    wcx = float((cols * patch).sum() / weight_sum)
    wcy = float((rows * patch).sum() / weight_sum)

    # Circle radius = half PSF bounding-box diagonal + padding.
    radius = int(np.sqrt(width**2 + height**2) / 2) + PAD_RADIUS
    return wcx, wcy, radius, peak
def draw_translucent(canvas: np.ndarray, draw_fn, alpha: float = 0.4):
    """Run *draw_fn* on a copy of *canvas*, then alpha-blend the result back in place."""
    scratch = canvas.copy()
    draw_fn(scratch)
    # Blend scratch over canvas at the requested opacity (writes into canvas).
    cv2.addWeighted(scratch, alpha, canvas, 1.0 - alpha, 0, canvas)
def annotate_image(img_path: str, out_path: str):
    """
    Detect the brightest star in the image at *img_path*, annotate it, and
    write the annotated BGR image to *out_path*.

    Annotations: a translucent grey crosshair at the image centre, an optional
    translucent yellow circle around the detected star (SHOW_STAR_MARKERS),
    and a top-left text box with the star position, pixel/degree offsets from
    centre, distance, and peak brightness.

    Unreadable images and images that are not TARGET_WIDTH x TARGET_HEIGHT
    are reported on stdout and skipped (no file written).
    """
    img_gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    if img_gray is None:
        print(f" ERROR: could not read {img_path}")
        return
    h, w = img_gray.shape
    if (w, h) != (TARGET_WIDTH, TARGET_HEIGHT):
        print(f" SKIPPED (wrong size {w}x{h}): {img_path}")
        return
    img_cx, img_cy = w // 2, h // 2
    canvas = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2BGR)

    # --- Translucent grey crosshair at image centre ---
    def draw_center_cross(ov):
        cv2.line(ov, (img_cx, 0), (img_cx, h), (200, 200, 200), 1)
        cv2.line(ov, (0, img_cy), (w, img_cy), (200, 200, 200), 1)
        cv2.circle(ov, (img_cx, img_cy), 4, (200, 200, 200), -1)

    draw_translucent(canvas, draw_center_cross, alpha=0.25)

    # --- Text overlay settings ---
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = 0.65
    thickness = 1
    pad = 18
    line_h = 26
    text_color = (0, 255, 255)  # yellow (BGR)

    def draw_text_box(lines):
        # Darken a rectangle behind the text, then draw each line over it.
        max_tw = max(cv2.getTextSize(l, font, font_scale, thickness)[0][0] for l in lines)
        rect_h = len(lines) * line_h + pad
        rect_w = max_tw + pad * 2
        sub = canvas[8:8 + rect_h, 8:8 + rect_w]
        black = np.zeros_like(sub)
        cv2.addWeighted(black, 0.55, sub, 0.45, 0, sub)
        canvas[8:8 + rect_h, 8:8 + rect_w] = sub
        for idx, line in enumerate(lines):
            cv2.putText(canvas, line,
                        (8 + pad // 2, 8 + pad + idx * line_h),
                        font, font_scale, text_color, thickness, cv2.LINE_AA)

    # --- Detect star ---
    result = detect_star(img_gray)
    if result is None:
        print(f" No star detected: {os.path.basename(img_path)}")
        draw_text_box(["No star detected"])
        cv2.imwrite(out_path, canvas)
        return
    scx, scy, star_r, peak_val = result

    # Offsets from image centre: +dx = right, +dy = up (image y axis is flipped).
    dx = scx - img_cx
    dy = img_cy - scy
    dist = np.sqrt(dx**2 + dy**2)
    isx, isy = int(round(scx)), int(round(scy))
    print(f" Star at ({scx:.1f}, {scy:.1f}) "
          f"dx={dx:+.1f} dy={dy:+.1f} dist={dist:.1f}px peak={peak_val}")

    # --- Translucent yellow circle at the star ---
    if SHOW_STAR_MARKERS:
        def draw_star_markers(ov):
            cv2.circle(ov, (isx, isy), star_r, (0, 255, 255), 1, cv2.LINE_AA)

        draw_translucent(canvas, draw_star_markers, alpha=0.45)

    # --- Info text top-left ---
    # BUG FIX: the vertical FoV offset previously used dx instead of dy.
    draw_text_box([
        f"Star: ({scx:.1f}, {scy:.1f})",
        f"dX: {dx:+.1f} px (FoV: {abs(dx*DPP_HORIZONTAL):.4f} deg) [{'L' if dx < 0 else 'R'}]",
        f"dY: {dy:+.1f} px (FoV: {abs(dy*DPP_VERTICAL):.4f} deg) [{'U' if dy > 0 else 'D'}]",
        f"Dist: {dist:.1f} px",
        f"Peak: {peak_val}",
    ])
    cv2.imwrite(out_path, canvas)
# ------------------------------------------------------------------
# Batch: walk current directory, process all 2048x1536 images
# ------------------------------------------------------------------
processed = 0
skipped = 0
output_dir = os.path.join(os.getcwd(), "annotated")
os.makedirs(output_dir, exist_ok=True)

for root, dirs, files in os.walk("."):
    # Prune the output folder in place so os.walk never descends into it
    # (avoids reprocessing our own results).
    dirs[:] = [d for d in dirs if d != "annotated"]
    for file in sorted(files):
        if not file.lower().endswith((".png", ".jpg", ".jpeg", ".bmp", ".tiff")):
            continue
        if file.startswith("annotated_"):
            continue  # belt-and-braces: skip any stray output files
        path = os.path.join(root, file)
        # Pre-check readability and size here so skips are counted; the same
        # checks inside annotate_image are then redundant but harmless.
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        if img is None:
            continue
        h, w = img.shape[:2]
        if (w, h) != (TARGET_WIDTH, TARGET_HEIGHT):
            print(f"Skipped (wrong size {w}x{h}): {path}")
            skipped += 1
            continue
        out_path = os.path.join(output_dir, f"annotated_{file}")
        print(f"Processing: {path}")
        annotate_image(path, out_path)
        processed += 1

print(f"\nDone! Processed: {processed} | Skipped: {skipped}")
print(f"Output folder: {output_dir}")
import cv2
import os
import numpy as np
TARGET_WIDTH = 2048 # expected image width (px); other sizes are skipped
TARGET_HEIGHT = 1536 # expected image height (px)
# --- Tunable parameters ---
# Minimum pixel value to consider as a candidate (ignore pure noise floor)
MIN_VAL = 25
# Minimum deviation from the local median to flag an isolated pixel as hot
MIN_DIFF = 9
# Neighbourhood size for the local median filter (must be odd, per cv2.medianBlur)
KERNEL_SIZE = 5
# Save a debug overlay (red marks) showing removed hot pixels?
SAVE_DEBUG_MASK = False
def remove_hot_pixels(img: np.ndarray):
    """
    Remove hot pixels from a grayscale image while preserving stars/real features.

    A hot pixel is a single isolated bright pixel (connected-component area of
    exactly 1) whose value deviates from the local median by more than
    MIN_DIFF. Multi-pixel clusters are treated as real features (stars, etc.)
    and left untouched. Flagged pixels are filled in via Telea inpainting.

    Returns (cleaned_image, hot_pixel_mask).
    """
    neighbourhood = cv2.medianBlur(img, KERNEL_SIZE)
    deviation = cv2.absdiff(img, neighbourhood).astype(np.float32)

    # Label all bright pixels as connected regions (label 0 = background).
    candidates = (img > MIN_VAL).astype(np.uint8)
    n_labels, _, stats, _ = cv2.connectedComponentsWithStats(candidates, connectivity=8)

    hot_mask = np.zeros_like(img, dtype=np.uint8)
    for label in range(1, n_labels):
        if stats[label, cv2.CC_STAT_AREA] != 1:
            continue  # multi-pixel cluster = real feature, leave alone
        col = stats[label, cv2.CC_STAT_LEFT]
        row = stats[label, cv2.CC_STAT_TOP]
        # Flag only if the pixel deviates significantly from its neighbours.
        if deviation[row, col] > MIN_DIFF:
            hot_mask[row, col] = 255

    cleaned = cv2.inpaint(img, hot_mask, 3, cv2.INPAINT_TELEA)
    return cleaned, hot_mask
# ------------------------------------------------------------------
# Batch: clean every 2048x1536 image below the current directory.
# ------------------------------------------------------------------
processed = 0
skipped = 0

for root, dirs, files in os.walk('.'):
    for fname in files:
        # Skip files we already produced on a previous run.
        if fname.startswith('cleaned_') or fname.startswith('mask_'):
            continue
        if not fname.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tiff')):
            continue
        path = os.path.join(root, fname)
        img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
        if img is None:
            continue
        h, w = img.shape[:2]
        if (w, h) != (TARGET_WIDTH, TARGET_HEIGHT):
            print(f"Skipped (wrong size): {path} ({w}x{h})")
            skipped += 1
            continue
        print(f"Processing: {path}")
        cleaned, hot_mask = remove_hot_pixels(img)
        hot_count = int(cv2.countNonZero(hot_mask))
        print(f" → {hot_count} hot pixels removed")
        base, ext = os.path.splitext(fname)
        cv2.imwrite(os.path.join(root, f"cleaned_{base}{ext}"), cleaned)
        if SAVE_DEBUG_MASK:
            debug = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            debug[hot_mask > 0] = (0, 0, 255)  # red = removed hot pixel
            cv2.imwrite(os.path.join(root, f"mask_{base}{ext}"), debug)
        processed += 1

print(f"\nDone! Processed: {processed} | Skipped: {skipped}")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment