Last active
April 14, 2026 16:28
-
-
Save Limbicnation/6763b69ab6a406790f3b7d4b56a2f6e8 to your computer and use it in GitHub Desktop.
ubuntu_cleanup.sh — A comprehensive system cleanup script for Ubuntu 24.04 that safely removes unnecessary files to free up disk space. This script includes system maintenance tasks like package cleanup, log rotation, cache removal, and system optimization. Features include progress tracking, disk space reporting, resource limiting, and extensive safety checks.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/usr/bin/env bash | |
| # Security-Hardened Ubuntu Cleanup Script | |
| # This script performs comprehensive system cleanup with enterprise-grade security | |
| # EXCLUDES: hy3dgen folder from any deletion operations | |
| # | |
| # Security improvements: | |
| # - Comprehensive error handling with trap handlers | |
| # - Safe configuration loading without arbitrary code execution | |
| # - APT and script-level locking mechanisms | |
| # - Kernel retention validation (N-1 policy) | |
| # - System snapshot before destructive operations | |
| # - Syslog integration for audit trail | |
| # - Resource limit enforcement | |
| # - Proper exit codes for monitoring | |
# Exit codes — one per failure class so external monitoring can distinguish causes.
readonly EXIT_SUCCESS=0
readonly EXIT_NETWORK_ERROR=1
readonly EXIT_DISK_SPACE_ERROR=2
readonly EXIT_LOCK_ERROR=3          # another instance already running
readonly EXIT_APT_ERROR=4           # apt/dpkg lock or recovery failure
readonly EXIT_PERMISSION_ERROR=5
readonly EXIT_INTEGRITY_ERROR=6     # package DB or GPG key problems
readonly EXIT_CONFIG_ERROR=7        # invalid ~/.config/ubuntu_cleanup.conf
readonly EXIT_DEPENDENCY_ERROR=8    # required external tool missing
readonly EXIT_USER_ABORT=130        # conventional 128+SIGINT code
# Strict error handling: abort on command failure, unset variables,
# and any failing stage of a pipeline.
set -euo pipefail
# Restrict word splitting to newlines and tabs (paths with spaces stay intact).
IFS=$'\n\t'
# Configuration variables
# Determine real user's home directory if running with sudo, so per-user
# paths (~/.cache, Trash, browser profiles) target the invoking user, not /root.
if [ -n "${SUDO_USER:-}" ]; then
  # Look the home dir up in the passwd database rather than trusting $HOME.
  REAL_HOME=$(getent passwd "$SUDO_USER" | cut -d: -f6)
  # Validate REAL_HOME path exists; otherwise fall back to $HOME.
  if [[ -z "$REAL_HOME" || ! -d "$REAL_HOME" ]]; then
    REAL_HOME="${HOME}"
  fi
else
  REAL_HOME="${HOME}"
fi
CONFIG_FILE="${REAL_HOME}/.config/ubuntu_cleanup.conf"  # optional, parsed by load_safe_config
LOG_DIR="/var/log/system_cleanup"
# Timestamp + PID keeps log files from concurrent/previous runs distinct.
LOG_FILE="${LOG_DIR}/cleanup_$(date +%Y%m%d_%H%M%S)_$$.log"
LOCK_FILE="/var/lock/ubuntu_cleanup.lock"
LOCK_FD=""                    # descriptor number allocated by acquire_lock
DRY_RUN=0                     # -d / -n: report only, change nothing
VERBOSE=0                     # -v
TIMEOUT_DURATION=60           # seconds to wait at interactive prompts (-t)
PARALLEL_JOBS=2               # -j (reserved for parallel operations)
MAX_RESOURCE_USAGE=50         # -r (reserved CPU cap, percent)
DEFAULT_RETENTION_DAYS=10     # -k: age threshold for log//tmp cleanup
HAS_ROOT=0                    # set by check_root
STRICT_GPG_CHECK=${STRICT_GPG_CHECK:-1}  # 1 = abort on expired repository keys
MIN_FREE_SPACE_KB=512000 # 500MB
# EXCLUSION PATTERNS - Add folders/files to protect here.
# Matched as plain substrings of candidate paths (see is_excluded / safe_find).
# NOTE(review): short patterns like "env" exclude ANY path containing that
# substring (e.g. ".cache/envelope") — confirm this breadth is intended.
EXCLUDED_PATTERNS=(
  "hy3dgen"
  "Hunyuan3D"
  "huggingface"
  ".git"
  ".venv"
  "node_modules"
  "venv"
  "env"
  ".env"
)
# Cleanup handler for trap: logs failure context, releases held resources,
# and exits with the original status code.
# Fixes:
#  - The handler is bound to both ERR and EXIT, so on error it used to run
#    twice (ERR fires, its `exit` fires EXIT) — duplicate logs and cleanup.
#    We now disarm the traps on entry.
#  - The trap is installed before log_operation/release_lock are defined;
#    a very early failure must not crash inside the handler, so both calls
#    are guarded with `type`.
cleanup_on_error() {
  local exit_code=$?
  # Disarm so this handler cannot re-enter itself via the final `exit`.
  trap - ERR EXIT
  if [ "$exit_code" -ne 0 ]; then
    if type log_operation &>/dev/null; then
      log_operation "err" "Script failed with exit code $exit_code at line ${BASH_LINENO[0]}"
      log_operation "err" "Failed command: ${BASH_COMMAND}"
    fi
  fi
  # Release lock if held (release_lock is defined after the trap is set)
  if type release_lock &>/dev/null; then
    release_lock
  fi
  # Remove temp config
  [ -f "${APT_CONF_TMP:-}" ] && rm -f -- "$APT_CONF_TMP"
  # Remove snapshot ref if it exists
  [ -f "${SNAPSHOT_REF:-}" ] && rm -f -- "$SNAPSHOT_REF"
  # Drain tee buffer so final log lines are written to disk
  type flush_log &>/dev/null && flush_log
  exit "$exit_code"
}
# Set trap handlers early to catch temp file leaks.
trap cleanup_on_error ERR EXIT
# NOTE(review): this interrupt trap calls log_operation, which is defined
# further down — an interrupt arriving before that definition would error
# inside the trap; confirm that window is acceptable.
trap 'log_operation "warning" "Script interrupted by user"; exit $EXIT_USER_ABORT' SIGTERM SIGINT
# Create temporary APT config (after trap is set, so it gets cleaned up on failure).
# APT_CONFIG makes every apt invocation in this script pick up these
# timeout/retry settings without touching the system configuration.
APT_CONF_TMP=$(mktemp)
echo 'Acquire::http::Timeout "60";' > "$APT_CONF_TMP"
echo 'Acquire::Retries "3";' >> "$APT_CONF_TMP"
export APT_CONFIG="$APT_CONF_TMP"
# Emit a timestamped log line on stdout and mirror it to syslog when possible.
#   $1 - severity (used as the syslog priority suffix: info/warning/err)
#   $2 - message text
log_operation() {
  local severity="$1" message="$2"
  local stamp
  stamp=$(date '+%Y-%m-%d %H:%M:%S')
  printf '[%s] [%s] %s\n' "$stamp" "$severity" "$message"
  # Mirror to syslog for the audit trail when logger(1) is installed.
  if command -v logger &>/dev/null; then
    logger -t ubuntu_cleanup -p "user.$severity" "$message"
  fi
}
# Script-level locking mechanism: single-instance guard via flock(2).
# Tries /var/lock first, falling back to a per-user location when that
# is not writable (unprivileged runs).
acquire_lock() {
  # Set restrictive umask for lock file creation (results in 0600 perms)
  local old_umask
  old_umask=$(umask)
  umask 077
  # {LOCK_FD} lets bash allocate a free descriptor number (bash >= 4.1).
  if ! exec {LOCK_FD}>"$LOCK_FILE" 2>/dev/null; then
    # Fall back to user-writable lock location
    LOCK_FILE="${REAL_HOME}/.local/share/system_cleanup/cleanup.lock"
    mkdir -p "$(dirname "$LOCK_FILE")" 2>/dev/null || true
    exec {LOCK_FD}>"$LOCK_FILE"
  fi
  umask "$old_umask"
  chmod 600 "$LOCK_FILE" 2>/dev/null || true
  # Non-blocking: if another instance holds the lock, bail out immediately.
  if ! flock -n "$LOCK_FD"; then
    log_operation "err" "Another instance is already running"
    exit $EXIT_LOCK_ERROR
  fi
  # Record our PID in the lock file (informational only).
  echo $$ >&"$LOCK_FD"
  log_operation "info" "Lock acquired (PID: $$)"
}
# Release the script lock.
# Fix: the lock file is intentionally NOT deleted anymore. Unlinking a
# flock(2)-based lock file is racy: another instance may already hold an
# open fd on the old inode, after which a third instance can "lock" a new
# file of the same name and two processes run concurrently. Holding the
# flock only for the process lifetime (and leaving the file in place) is
# the safe pattern; /var/lock is cleared on boot anyway.
release_lock() {
  if [ -n "${LOCK_FD:-}" ] && [ "$LOCK_FD" -gt 0 ] 2>/dev/null; then
    flock -u "$LOCK_FD" 2>/dev/null || true
    # Close the descriptor so it is not leaked to child processes.
    exec {LOCK_FD}>&- 2>/dev/null || true
    LOCK_FD=""
  fi
}
# Create log directory if it doesn't exist with proper permissions.
# Fall back to user-writable location when running without root.
if ! mkdir -p "$LOG_DIR" 2>/dev/null || ! [ -w "$LOG_DIR" ]; then
  LOG_DIR="${REAL_HOME}/.local/share/system_cleanup"
  mkdir -p "$LOG_DIR"
  LOG_FILE="${LOG_DIR}/cleanup_$(date +%Y%m%d_%H%M%S)_$$.log"
fi
chmod 750 "$LOG_DIR" 2>/dev/null || true
# Setup logging: mirror all stdout/stderr into the log file via tee.
exec > >(tee -a "$LOG_FILE") 2>&1
# $! right after a process substitution is the tee process PID (bash-specific).
TEE_PID=$!
# Flush tee buffer before exit to prevent losing final log lines:
# point our output elsewhere so tee sees EOF, then wait for it to drain.
flush_log() {
  exec >/dev/null 2>&1
  wait "${TEE_PID:-}" 2>/dev/null || true
}
# Safe configuration loader - no arbitrary code execution.
# The config file is never sourced; only whitelisted KEY=VALUE pairs with
# validated values are assigned. Returns 1 on syntax violations.
load_safe_config() {
  if [ ! -f "$CONFIG_FILE" ]; then
    return 0
  fi
  log_operation "info" "Loading configuration from $CONFIG_FILE"
  # Validate config file syntax - ensure ALL lines match allowed patterns
  # (comments, KEY=VALUE assignments, or blank lines).
  # NOTE: \s with grep -E is a GNU extension — fine on Ubuntu.
  if grep -vE '^\s*(#|[A-Z_]+=|$)' "$CONFIG_FILE" | grep -q '.'; then
    log_operation "err" "Config file contains invalid syntax"
    return 1
  fi
  # Load only whitelisted variables with validation.
  # With IFS='=' and two read variables, any '=' inside the value is kept
  # (the remainder of the line lands in $value).
  while IFS='=' read -r key value; do
    # Skip comments and empty lines
    [[ "$key" =~ ^#.*$ || -z "$key" ]] && continue
    # Remove leading/trailing whitespace (safe, no special char interpretation)
    key="${key#"${key%%[![:space:]]*}"}"
    key="${key%"${key##*[![:space:]]}"}"
    value="${value#"${value%%[![:space:]]*}"}"
    value="${value%"${value##*[![:space:]]}"}"
    case "$key" in
      TIMEOUT_DURATION|PARALLEL_JOBS|MAX_RESOURCE_USAGE|DEFAULT_RETENTION_DAYS)
        # Validate value is numeric and within reasonable range
        if [[ "$value" =~ ^[0-9]+$ ]] && [ "$value" -ge 1 ] && [ "$value" -le 9999 ]; then
          # declare -g assigns the GLOBAL variable from inside this function
          declare -g "$key=$value"
          log_operation "info" "Loaded config: $key=$value"
        else
          log_operation "warning" "Invalid value for $key: $value (skipped)"
        fi
        ;;
      STRICT_GPG_CHECK)
        if [[ "$value" =~ ^[0-1]$ ]]; then
          declare -g "$key=$value"
          log_operation "info" "Loaded config: $key=$value"
        else
          log_operation "warning" "Invalid value for $key: $value (must be 0 or 1)"
        fi
        ;;
      *)
        log_operation "warning" "Unknown config variable: $key (skipped)"
        ;;
    esac
  done < <(grep -v '^#' "$CONFIG_FILE" | grep -v '^$')
}
# Run a privileged APT command, retrying up to 3 times with a 5s pause
# between attempts. Returns 0 on the first success, 1 after all fail.
apt_with_retry() {
  local -r max_attempts=3
  local attempt
  for (( attempt = 1; attempt <= max_attempts; attempt++ )); do
    run_with_privileges "$@" && return 0
    log_operation "warning" "APT command failed (attempt $attempt/$max_attempts). Retrying in 5s..."
    sleep 5
  done
  log_operation "err" "APT command failed after $max_attempts attempts."
  return 1
}
# APT lock checking with timeout: wait (up to 5 minutes, polling every 5s)
# until no process holds any of the dpkg/apt lock files, then return 0.
# Exits with EXIT_APT_ERROR if the lock is still held after max_wait.
check_apt_lock() {
  local max_wait=300 # 5 minutes
  local waited=0
  local lock_files=(
    "/var/lib/dpkg/lock-frontend"
    "/var/lib/apt/lists/lock"
    "/var/cache/apt/archives/lock"
  )
  while true; do
    local locked=0
    for lock_file in "${lock_files[@]}"; do
      # NOTE(review): unprivileged fuser may fail to see root-owned holders
      # of these locks — consider `run_with_privileges fuser`; confirm.
      if fuser "$lock_file" >/dev/null 2>&1; then
        locked=1
        break
      fi
    done
    if [ $locked -eq 0 ]; then
      return 0
    fi
    if [ $waited -ge $max_wait ]; then
      log_operation "err" "APT lock held for $max_wait seconds, aborting"
      exit $EXIT_APT_ERROR
    fi
    log_operation "warning" "Waiting for APT lock to be released... ($waited/$max_wait seconds)"
    sleep 5
    waited=$((waited + 5))
  done
}
# Determine the privilege level for this run. Sets the global HAS_ROOT to 1
# when running as root, else 0 (privileged commands then go through sudo).
# Warns when sudo has no cached credentials and may prompt for a password.
check_root() {
  if (( EUID == 0 )); then
    HAS_ROOT=1
    log_operation "info" "Running with root privileges"
    return 0
  fi
  HAS_ROOT=0
  log_operation "info" "Running without root, will use sudo for privileged operations"
  # Probe for cached sudo credentials without ever prompting (-n).
  sudo -n true 2>/dev/null || log_operation "warning" "Sudo password may be required"
}
# Execute a command vector directly when root, otherwise via sudo.
# Arguments pass through verbatim — no eval, no word re-splitting.
run_with_privileges() {
  if (( HAS_ROOT == 1 )); then
    "$@"
  else
    sudo "$@"
  fi
}
# Lower this script's scheduling impact: CPU niceness +10 and, when ionice
# is available, idle-class disk I/O priority. All failures (e.g. missing
# permissions) are deliberately non-fatal.
enforce_resource_limits() {
  renice -n 10 -p $$ >/dev/null 2>&1 || true
  if command -v ionice &>/dev/null; then
    ionice -c 3 -p $$ >/dev/null 2>&1 || true
  fi
  log_operation "info" "Resource limits enforced: nice=10, ionice=idle"
}
# Enhanced path validation with parent directory checking.
# Returns 0 only when $1 resolves cleanly, contains no traversal tricks,
# and does not lie under a critical system directory. Used as a gate by
# safe_remove before any destructive operation.
is_safe_path() {
  local path=$1
  # Check for relative path components that could enable path traversal.
  # NOTE(review): the "./" substring test is conservative — it also rejects
  # legitimate names like "foo./bar"; confirm acceptable.
  if [[ "$path" == *".."* ]] || [[ "$path" == *"./"* ]]; then
    log_operation "err" "Path contains relative components: $path"
    return 1
  fi
  # Check for double slashes that could be used in path manipulation
  if [[ "$path" == *"//"* ]]; then
    log_operation "err" "Path contains double slashes: $path"
    return 1
  fi
  # Resolve to absolute path, following symlinks for security validation.
  # A path that cannot be resolved (e.g. nonexistent) is treated as unsafe.
  local abs_path
  if command -v realpath &>/dev/null; then
    abs_path=$(realpath -- "$path" 2>/dev/null) || {
      log_operation "warning" "Cannot resolve path: $path"
      return 1
    }
  else
    # Fallback to readlink -f if realpath is not available
    abs_path=$(readlink -f "$path" 2>/dev/null) || {
      log_operation "warning" "Cannot resolve path: $path"
      return 1
    }
  fi
  # Verify no symlinks in parent path (except last component).
  # NOTE(review): this tests only the immediate parent, not every ancestor,
  # and realpath already resolved symlinks above — confirm intent.
  local parent_path
  parent_path=$(dirname "$abs_path")
  if [ -L "$parent_path" ]; then
    log_operation "err" "Path contains symlink in parent directory: $path -> $parent_path"
    return 1
  fi
  # Critical system paths and their subdirectories (deny-list; /home, /tmp,
  # /var etc. are intentionally absent so cleanup can operate there).
  local dangerous_paths=(
    "/"
    "/bin"
    "/boot"
    "/dev"
    "/etc"
    "/lib"
    "/lib64"
    "/proc"
    "/root"
    "/sbin"
    "/sys"
    "/usr"
  )
  for dangerous_path in "${dangerous_paths[@]}"; do
    # Check if path is exactly the dangerous path or under it
    if [[ "$abs_path" == "$dangerous_path" ]] || [[ "$abs_path" == "$dangerous_path"/* ]]; then
      log_operation "err" "Refusing to remove path under critical system directory: $abs_path"
      return 1
    fi
  done
  return 0
}
# Return 0 (excluded/protected) when $1 contains any substring listed in
# EXCLUDED_PATTERNS; return 1 when no pattern matches.
is_excluded() {
  local candidate="$1" pattern
  for pattern in "${EXCLUDED_PATTERNS[@]}"; do
    case "$candidate" in
      *"$pattern"*)
        log_operation "info" "Excluding protected path: $candidate (matches pattern: $pattern)"
        return 0
        ;;
    esac
  done
  return 1
}
# Ask a yes/no question, defaulting to "no" on timeout or EOF.
#   $1 - prompt text
# Returns 0 only for an explicit single-character y/Y answer, 1 otherwise.
# Fix: `local response` without an assignment leaves the variable UNSET in
# bash; when read times out before any input, the subsequent "$response"
# expansion tripped `set -u` and aborted the whole script. Initialize to "".
prompt_with_timeout() {
  local prompt=$1
  local timeout=$TIMEOUT_DURATION
  local response=""
  # read returns >128 on timeout; `|| true` keeps set -e from firing.
  read -r -t "$timeout" -p "$prompt" response || true
  if [ -z "$response" ]; then
    echo "Timeout reached, assuming default answer (n)"
    return 1
  fi
  if [[ "$response" =~ ^[Yy]$ ]]; then
    return 0
  else
    return 1
  fi
}
# Function to safely remove files/directories with exclusion check.
#   $1 - target path
#   $2 - force flag (1 = rm -f / rm -rf), default 0
# Returns 0 when the target is excluded, missing, or removed; 1 when the
# path fails safety validation. Directory targets have their CONTENTS
# removed, not the directory itself.
safe_remove() {
  local target=$1
  local force=${2:-0}
  # Check if target should be excluded
  if is_excluded "$target"; then
    return 0
  fi
  # Check if path is safe
  if ! is_safe_path "$target"; then
    return 1
  fi
  # Check if target exists
  if [ ! -e "$target" ]; then
    [ "$VERBOSE" -eq 1 ] && log_operation "info" "Target does not exist, skipping: $target"
    return 0
  fi
  # Additional safety: ensure target is not empty and not a critical path
  if [ -z "$target" ] || [ "$target" = "/" ]; then
    log_operation "err" "Refusing to remove root or empty path"
    return 1
  fi
  # Safe removal with enhanced validation
  if [ -d "$target" ] && [ ! -L "$target" ]; then
    # For directories, require at least 2 slashes in the path (e.g. /home/user)
    # so top-level directories can never be bulk-emptied here.
    local path_depth
    path_depth=$(echo "$target" | tr -cd '/' | wc -c)
    if [ "$path_depth" -lt 2 ]; then
      log_operation "err" "Path too shallow for bulk removal: $target (depth: $path_depth)"
      return 1
    fi
    log_operation "info" "Removing contents of directory $target"
    # Use find instead of glob to avoid potential issues
    if [ "$force" -eq 1 ]; then
      find "$target" -mindepth 1 -maxdepth 1 -exec rm -rf -- {} + 2>/dev/null || true
    else
      # NOTE(review): `rm -r` without -f can prompt on write-protected files
      # when stdin is a terminal — confirm the non-force path cannot hang.
      find "$target" -mindepth 1 -maxdepth 1 -exec rm -r -- {} + 2>/dev/null || true
    fi
  else
    log_operation "info" "Removing $target"
    if [ "$force" -eq 1 ]; then
      rm -f -- "$target" 2>/dev/null || true
    else
      rm -- "$target" 2>/dev/null || true
    fi
  fi
  return 0
}
# find(1) wrapper that prunes every protected pattern from results.
#   $1   - base path to search
#   $2.. - additional find predicates (tests/actions)
# All errors are suppressed so missing base paths never abort the script.
safe_find() {
  local base_path="$1"
  shift
  local -a filters=()
  local pattern
  for pattern in "${EXCLUDED_PATTERNS[@]}"; do
    filters+=(-not -path "*${pattern}*")
  done
  find "$base_path" "${filters[@]}" "$@" 2>/dev/null || true
}
# Empty the user's freedesktop trash (files/, info/, expunged/) using
# safe_find so EXCLUDED_PATTERNS are honoured. Logs the size beforehand.
# No-op when the trash directory does not exist.
clean_user_trash() {
  local trash_dir="${REAL_HOME}/.local/share/Trash"
  [ -d "$trash_dir" ] || return 0
  log_operation "info" "Cleaning user trash: $trash_dir"
  # Report size before deletion ("unknown" when du fails).
  local trash_size
  trash_size=$(du -sh "$trash_dir" 2>/dev/null | cut -f1) || trash_size="unknown"
  log_operation "info" "Trash size before: $trash_size"
  # Delete the contents of each trash subdirectory (files, metadata, expunged).
  local sub
  for sub in files info expunged; do
    safe_find "$trash_dir/$sub" -mindepth 1 -delete
  done
}
# Prune Docker resources conservatively: dangling images and stopped
# containers older than 24h. Deliberately NOT `prune -a`, which would also
# delete cached build layers. No-op when docker is not installed.
clean_docker() {
  if command -v docker &>/dev/null; then
    log_operation "info" "Docker detected. Checking for unused resources..."
    if [ "$DRY_RUN" -eq 0 ]; then
      log_operation "info" "Pruning docker system (dangling images, stopped containers)..."
      docker system prune -f --filter "until=24h" 2>/dev/null || true
    else
      # Fix: keep the dry-run message in sync with the real command — it
      # previously omitted the --filter argument, misrepresenting the action.
      log_operation "info" "Would run: docker system prune -f --filter \"until=24h\""
    fi
  fi
}
# System snapshot before destructive operations: records package selections,
# manually-installed marks, the kernel list and APT sources under
# /var/backups so state can be restored after an over-aggressive cleanup.
create_system_snapshot() {
  local snapshot_dir
  snapshot_dir="/var/backups/ubuntu_cleanup_$(date +%Y%m%d_%H%M%S)"
  log_operation "info" "Creating system snapshot: $snapshot_dir"
  run_with_privileges mkdir -p "$snapshot_dir"
  # Backup package state (needs privileges to write to root-owned snapshot dir)
  run_with_privileges bash -c "dpkg --get-selections > '$snapshot_dir/package_selections.txt'" 2>/dev/null || true
  run_with_privileges bash -c "apt-mark showmanual > '$snapshot_dir/manual_packages.txt'" 2>/dev/null || true
  # Backup kernel list
  run_with_privileges bash -c "dpkg -l | grep -E 'linux-image|linux-headers' > '$snapshot_dir/kernel_list.txt'" 2>/dev/null || true
  # Backup sources list
  run_with_privileges cp -r /etc/apt/sources.list* "$snapshot_dir/" 2>/dev/null || true
  log_operation "info" "System snapshot created: $snapshot_dir"
  # Leave a pointer to the snapshot in a private temp file; the EXIT trap
  # removes this pointer (not the snapshot itself) on completion.
  SNAPSHOT_REF=$(mktemp /tmp/ubuntu_cleanup_snapshot.XXXXXX)
  chmod 600 "$SNAPSHOT_REF"
  echo "$snapshot_dir" > "$SNAPSHOT_REF"
}
# Safe kernel cleanup with N-1 retention validation.
# Refuses to act unless more than 2 kernels are installed, places an
# apt-mark hold on the running kernel during autoremove, then verifies
# that the running kernel survived and at least 2 kernels remain.
# Fix: `grep -c` prints "0" but exits non-zero when nothing matches; under
# `set -e`/pipefail the count assignments would abort the whole script on
# an empty kernel list. Both counts are now guarded with `|| true`.
safe_kernel_cleanup() {
  local current_kernel
  current_kernel=$(uname -r)
  local current_kernel_pkg="linux-image-$current_kernel"
  local installed_kernels
  installed_kernels=$(dpkg -l | grep -E '^ii.*linux-image-[0-9]' | awk '{print $2}' | grep -v "linux-image-generic" || true)
  local kernel_count
  kernel_count=$(echo "$installed_kernels" | grep -c -v '^$' || true)
  log_operation "info" "Current kernel: $current_kernel"
  log_operation "info" "Current kernel package: $current_kernel_pkg"
  log_operation "info" "Installed kernels count: $kernel_count"
  # Verify current kernel package is in the installed list
  if ! echo "$installed_kernels" | grep -q "$current_kernel_pkg"; then
    log_operation "err" "Current kernel package not found in installed list!"
    log_operation "err" "This may indicate a kernel naming mismatch or system issue"
    return 1
  fi
  # N-1 policy: never go below 2 installed kernels.
  if [ "$kernel_count" -le 2 ]; then
    log_operation "warning" "Only $kernel_count kernels installed. Skipping cleanup to maintain N-1 policy (minimum 2 kernels required)"
    return 0
  fi
  echo "Installed kernels:"
  echo "$installed_kernels"
  echo ""
  echo "Current kernel: $current_kernel_pkg (WILL BE PROTECTED)"
  echo "This will retain current kernel + at least 1 previous version"
  if prompt_with_timeout "Remove old kernels (keeping current + 1)? (y/n): "; then
    check_apt_lock
    # Explicitly hold current kernel to prevent accidental removal
    log_operation "info" "Protecting current kernel: $current_kernel_pkg"
    if ! run_with_privileges apt-mark hold "$current_kernel_pkg" 2>/dev/null; then
      log_operation "err" "CRITICAL: Cannot hold current kernel. Aborting."
      return 1
    fi
    if apt_with_retry apt-get autoremove --purge -y; then
      # Unhold current kernel
      run_with_privileges apt-mark unhold "$current_kernel_pkg" 2>/dev/null || true
      log_operation "info" "Kernel cleanup completed successfully"
      # Verify we still have enough kernels (guarded grep -c, see above)
      local remaining_kernels
      remaining_kernels=$(dpkg -l | grep -E '^ii.*linux-image-[0-9]' | awk '{print $2}' | grep -cv "linux-image-generic" || true)
      if [ "$remaining_kernels" -lt 2 ]; then
        log_operation "err" "CRITICAL: Less than 2 kernels remaining after cleanup!"
        return 1
      fi
      # Verify current kernel is still installed
      if ! dpkg -l | grep -q "^ii.*$current_kernel_pkg"; then
        log_operation "err" "CRITICAL: Current running kernel was removed!"
        log_operation "err" "System may not boot properly. Kernel reinstallation required."
        return 1
      fi
      log_operation "info" "Remaining kernels: $remaining_kernels"
      log_operation "info" "Current kernel verified: $current_kernel_pkg"
    else
      # Unhold kernel even if autoremove failed
      run_with_privileges apt-mark unhold "$current_kernel_pkg" 2>/dev/null || true
      log_operation "err" "Kernel cleanup failed"
      return 1
    fi
  fi
}
# Log rotation for this script's own logs: drop logs older than 30 days,
# then cap the directory at the 50 most recent files. Skipped in dry-run.
rotate_old_logs() {
  if [ "$DRY_RUN" -eq 1 ]; then
    log_operation "info" "DRY RUN: skipping log rotation"
    return 0
  fi
  log_operation "info" "Rotating old cleanup logs"
  # Keep only last 30 days of cleanup logs
  find "$LOG_DIR" -name "cleanup_*.log" -mtime +30 -delete 2>/dev/null || true
  # Keep max 50 log files (use find+sort instead of ls for safe filename handling).
  # Pipeline: NUL-delimited "<mtime> <path>" records, newest first; everything
  # from record 51 onward is deleted.
  # NOTE: -printf, sort -z, tail -z and cut -z are GNU-specific extensions.
  find "$LOG_DIR" -maxdepth 1 -name "cleanup_*.log" -printf '%T@ %p\0' 2>/dev/null | \
    sort -zrn | tail -zn +51 | cut -zd' ' -f2- | \
    xargs -0 rm -f -- 2>/dev/null || true
}
# Verify the external tools this script depends on are installed; exit with
# EXIT_DEPENDENCY_ERROR listing everything that is missing.
check_dependencies() {
  local -a missing=()
  local tool
  for tool in apt-get find grep awk fuser flock; do
    command -v "$tool" &>/dev/null || missing+=("$tool")
  done
  if (( ${#missing[@]} > 0 )); then
    log_operation "err" "Missing required tools: ${missing[*]}"
    exit $EXIT_DEPENDENCY_ERROR
  fi
}
# Check for interrupted dpkg operations (any output from `dpkg --audit`
# indicates a problem) and attempt recovery with `dpkg --configure -a`
# before any package operations run. Exits EXIT_APT_ERROR if recovery fails.
check_dpkg_interrupted() {
  if dpkg --audit 2>&1 | grep -q .; then
    log_operation "warning" "Detected interrupted dpkg operation, attempting recovery..."
    if ! run_with_privileges dpkg --configure -a; then
      log_operation "err" "Failed to recover interrupted dpkg operation"
      exit $EXIT_APT_ERROR
    fi
    log_operation "info" "Successfully recovered interrupted dpkg operation"
  fi
}
# Verify repository signatures: scan both the modern keyring directory and
# the legacy keyring for expired GPG keys. With STRICT_GPG_CHECK=1 (default)
# any expired key aborts the script with EXIT_INTEGRITY_ERROR; with 0 it
# only warns.
verify_repo_signatures() {
  log_operation "info" "Verifying repository signatures..."
  local expired_keys_found=0
  local keyring_dir="/etc/apt/trusted.gpg.d"
  local legacy_keyring="/etc/apt/trusted.gpg"
  # Check modern keyring directory
  if [ -d "$keyring_dir" ]; then
    for keyring in "$keyring_dir"/*.gpg; do
      # Guard against a literal unmatched glob when the dir is empty.
      [ -f "$keyring" ] || continue
      # NOTE(review): matching the word "expired" in gpg's human-readable
      # output is locale/format sensitive — confirm robustness.
      if gpg --no-default-keyring --keyring="$keyring" --list-keys 2>/dev/null | grep -qi "expired"; then
        log_operation "err" "Found expired GPG keys in $(basename "$keyring")"
        expired_keys_found=1
      fi
    done
  fi
  # Check legacy keyring (apt-key is deprecated; fall back to raw gpg)
  if [ -f "$legacy_keyring" ]; then
    if command -v apt-key &>/dev/null; then
      if apt-key list 2>/dev/null | grep -qi "expired"; then
        log_operation "err" "Found expired GPG keys in legacy keyring"
        expired_keys_found=1
      fi
    else
      if gpg --no-default-keyring --keyring="$legacy_keyring" --list-keys 2>/dev/null | grep -qi "expired"; then
        log_operation "err" "Found expired GPG keys in legacy keyring"
        expired_keys_found=1
      fi
    fi
  fi
  if [ $expired_keys_found -eq 1 ]; then
    if [ "${STRICT_GPG_CHECK:-1}" -eq 1 ]; then
      log_operation "err" "Expired GPG keys detected. To bypass, set STRICT_GPG_CHECK=0 in config."
      exit $EXIT_INTEGRITY_ERROR
    else
      log_operation "warning" "STRICT_GPG_CHECK disabled - proceeding with expired keys"
    fi
  fi
  log_operation "info" "Repository signature verification completed"
}
# Abort with EXIT_INTEGRITY_ERROR unless `apt-get check` reports a
# consistent package database (no broken dependencies).
verify_package_integrity() {
  log_operation "info" "Verifying package database integrity..."
  if run_with_privileges apt-get check; then
    log_operation "info" "Package database integrity check passed"
  else
    log_operation "err" "Package database integrity check failed"
    exit $EXIT_INTEGRITY_ERROR
  fi
}
# Ensure /var has at least MIN_FREE_SPACE_KB of free space (cleanup itself
# needs scratch room); exit with EXIT_DISK_SPACE_ERROR otherwise.
check_disk_space() {
  local free_space min_space
  # tail -1 copes with df wrapping long device names onto two lines.
  free_space=$(df /var | tail -1 | awk '{print $4}')
  min_space=${MIN_FREE_SPACE_KB:-512000}
  if (( free_space < min_space )); then
    log_operation "err" "Insufficient disk space in /var (need $((min_space/1024))MB free, have $((free_space/1024))MB)"
    exit $EXIT_DISK_SPACE_ERROR
  fi
  log_operation "info" "Disk space check passed ($((free_space/1024))MB free in /var)"
}
# Log current disk usage and remember the available space on / (in KB)
# in the global `initial_space` for a later savings comparison.
record_disk_space() {
  log_operation "info" "Initial disk space usage:"
  # Include /home only when it is a separate mount.
  local -a targets=(/)
  mountpoint -q /home 2>/dev/null && targets+=(/home)
  df -h "${targets[@]}"
  initial_space=$(df / | awk 'NR==2 {print $4}')
}
# Parse command line options. Numeric arguments are validated before use;
# anything unrecognized prints usage and exits 1.
while getopts "dnvt:j:r:k:" opt; do
  case $opt in
    d|n) DRY_RUN=1 ;;   # -d and -n are synonyms for dry-run mode
    v) VERBOSE=1 ;;
    t) [[ "$OPTARG" =~ ^[0-9]+$ ]] && TIMEOUT_DURATION=$OPTARG || { echo "Error: -t requires a number" >&2; exit 1; } ;;
    j) [[ "$OPTARG" =~ ^[0-9]+$ ]] && PARALLEL_JOBS=$OPTARG || { echo "Error: -j requires a number" >&2; exit 1; } ;;
    r) [[ "$OPTARG" =~ ^[0-9]+$ ]] && MAX_RESOURCE_USAGE=$OPTARG || { echo "Error: -r requires a number" >&2; exit 1; } ;;
    k) [[ "$OPTARG" =~ ^[0-9]+$ ]] && DEFAULT_RETENTION_DAYS=$OPTARG || { echo "Error: -k requires a number" >&2; exit 1; } ;;
    *) echo "Usage: $0 [-d|-n] [-v] [-t timeout] [-j jobs] [-r max_cpu] [-k retention_days]" >&2
       echo " -d,-n Dry run (show what would be done)" >&2
       echo " -v Verbose output" >&2
       echo " -t Timeout for user prompts in seconds (default: 60)" >&2
       echo " -j Number of parallel jobs (default: 2)" >&2
       echo " -r Maximum CPU percentage (default: 50)" >&2
       echo " -k Retention days for logs (default: 10)" >&2
       exit 1 ;;
  esac
done
# Progress tracking across the 23 numbered top-level cleanup steps.
total_steps=23
current_step=0
# Log one progress line of the form "[step/total - pct%] message".
# (current_step and percentage remain globals, as callers may inspect them.)
progress() {
  (( current_step += 1 ))
  percentage=$(( current_step * 100 / total_steps ))
  log_operation "info" "[${current_step}/${total_steps} - ${percentage}%] $1"
}
# Main execution starts here
log_operation "info" "=== Ubuntu Cleanup Script (Hardened) Started ==="
log_operation "info" "PID: $$"
# Acquire lock first so two runs can never interleave
acquire_lock
# Load configuration safely (whitelisted KEY=VALUE pairs only, never sourced)
load_safe_config || exit $EXIT_CONFIG_ERROR
# Initialize: verify tools, privileges and system health before touching anything
check_dependencies
check_root
enforce_resource_limits
rotate_old_logs
check_disk_space
check_dpkg_interrupted
verify_repo_signatures
verify_package_integrity
record_disk_space
log_operation "info" "Protected patterns: $(printf '%s ' "${EXCLUDED_PATTERNS[@]}")"
[ "$DRY_RUN" -eq 1 ] && log_operation "warning" "DRY RUN MODE - No changes will be made"
# Confirmation prompt (skipped in dry-run mode, which changes nothing anyway)
if [ "$DRY_RUN" -eq 0 ]; then
  if ! prompt_with_timeout "Proceed with cleanup? (y/n): "; then
    log_operation "warning" "User aborted cleanup"
    exit $EXIT_USER_ABORT
  fi
  # Create system snapshot before any destructive operations
  create_system_snapshot
fi
# 1. Update package list
progress "Updating package list"
if [ "$DRY_RUN" -eq 0 ]; then
  check_apt_lock
  apt_with_retry apt-get update || log_operation "warning" "Failed to update package list"
fi
# 2. Clear user cache (with exclusions)
progress "Clearing user cache (excluding protected folders)"
if [ "$DRY_RUN" -eq 0 ]; then
  cache_size_before=$(du -sh "$REAL_HOME/.cache" 2>/dev/null | cut -f1) || cache_size_before="unknown"
  log_operation "info" "Cache size before: $cache_size_before"
  # Remove cache files older than 3 days, excluding protected patterns
  safe_find "$REAL_HOME/.cache" -type f -mtime +3 -delete
  cache_size_after=$(du -sh "$REAL_HOME/.cache" 2>/dev/null | cut -f1) || cache_size_after="unknown"
  log_operation "info" "Cache size after: $cache_size_after"
fi
# 3. Clean APT cache (removes all downloaded .deb files)
progress "Cleaning APT cache"
if [ "$DRY_RUN" -eq 0 ]; then
  check_apt_lock
  apt_with_retry apt-get clean
fi
# 4. Remove obsolete packages (autoclean drops only .debs no longer downloadable)
progress "Removing obsolete packages"
if [ "$DRY_RUN" -eq 0 ]; then
  check_apt_lock
  apt_with_retry apt-get autoclean
fi
# 5. Remove unused packages: preview first, then ask for confirmation
progress "Removing unused packages"
if [ "$DRY_RUN" -eq 0 ]; then
  check_apt_lock
  log_operation "info" "Packages to be removed:"
  apt_with_retry apt-get autoremove -y --dry-run | grep "^Remv" || log_operation "info" "No packages to remove"
  if prompt_with_timeout "Remove these packages? (y/n): "; then
    apt_with_retry apt-get autoremove -y
  fi
fi
# 6. Remove old kernels (SAFE with N-1 validation, see safe_kernel_cleanup)
progress "Checking old kernel versions"
if [ "$DRY_RUN" -eq 0 ]; then
  safe_kernel_cleanup
fi
# 7. Clean Snap packages: remove only DISABLED (superseded) snap revisions
progress "Cleaning Snap packages"
if [ "$DRY_RUN" -eq 0 ] && command -v snap &> /dev/null; then
  # Wrap in subshell to protect against pipefail when snap list fails
  (
    run_with_privileges snap list --all 2>/dev/null | awk '/disabled/{print $1, $3}' | \
    while read -r snapname revision; do
      # Validate parsed fields before feeding them back to snap (defense in depth)
      if [[ "$snapname" =~ ^[a-zA-Z0-9_-]+$ ]] && [[ "$revision" =~ ^[0-9]+$ ]]; then
        log_operation "info" "Removing $snapname revision $revision"
        run_with_privileges snap remove "$snapname" --revision="$revision" || true
      fi
    done
  ) || true
fi
# 8. Clean Flatpak: uninstall runtimes/extensions no application needs
progress "Cleaning Flatpak"
if [ "$DRY_RUN" -eq 0 ] && command -v flatpak &> /dev/null; then
  run_with_privileges flatpak uninstall --unused -y || true
fi
# 9. Clear thumbnails older than 30 days (with exclusions via safe_find)
progress "Clearing old thumbnails"
if [ "$DRY_RUN" -eq 0 ]; then
  safe_find "$REAL_HOME/.cache/thumbnails" -type f -mtime +30 -delete
fi
# 10. Clean journal logs: cap by retention days, then by total size
progress "Cleaning systemd journal logs"
if [ "$DRY_RUN" -eq 0 ] && command -v journalctl &> /dev/null; then
  run_with_privileges journalctl --vacuum-time="${DEFAULT_RETENTION_DAYS}d"
  run_with_privileges journalctl --vacuum-size=50M
fi
# 11. Clean /tmp (carefully)
progress "Cleaning /tmp directory"
if [ "$DRY_RUN" -eq 0 ]; then
  # Only remove files older than retention days and not in use.
  # Note: fuser returns non-zero for both "not in use" AND "error".
  # We use a wrapper to distinguish the two cases and only delete when
  # fuser confirms the file is genuinely not in use (exit code 1).
  # shellcheck disable=SC2016
  run_with_privileges find /tmp -type f -atime +"$DEFAULT_RETENTION_DAYS" \
    -exec sh -c 'fuser -s "$1" 2>/dev/null; [ $? -eq 1 ]' _ {} \; -delete 2>/dev/null || true
fi
# 12. Clean browser caches (with exclusions)
progress "Cleaning browser caches"
if [ "$DRY_RUN" -eq 0 ]; then
  # Firefox - use -xdev to prevent crossing filesystem boundaries (symlink protection)
  if [ -d "$REAL_HOME/.mozilla/firefox" ]; then
    safe_find "$REAL_HOME/.mozilla/firefox" -xdev -name "*Cache*" -type d -exec rm -rf {} + 2>/dev/null || true
  fi
  # Chrome/Chromium - use -xdev to prevent crossing filesystem boundaries (symlink protection)
  for browser_dir in "$REAL_HOME/.config/google-chrome" "$REAL_HOME/.config/chromium"; do
    if [ -d "$browser_dir" ]; then
      safe_find "$browser_dir" -xdev -name "Cache" -type d -exec rm -rf {} + 2>/dev/null || true
    fi
  done
fi
# 13. Clean User Trash (files/, info/, expunged/ — see clean_user_trash)
progress "Emptying User Trash"
if [ "$DRY_RUN" -eq 0 ]; then
  clean_user_trash
fi
# 14. Clean Docker (conservative prune — see clean_docker)
progress "Cleaning Docker resources"
if [ "$DRY_RUN" -eq 0 ]; then
  clean_docker
fi
# 15. Manage log files
progress "Managing log files"
if [ "$DRY_RUN" -eq 0 ]; then
# Compress old logs (exclude our own log directory — managed by rotate_old_logs)
# Only compress logs not currently in use (fuser exit 1 = not in use)
# The '\( -path ... -prune \) -o \( ... \)' shape means: anything under
# $LOG_DIR is skipped entirely; every other path falls through to the
# action branch after the -o.
# shellcheck disable=SC2016
run_with_privileges find /var/log \
\( -path "${LOG_DIR}" -prune \) -o \
\( -type f -name "*.log" -mtime +7 \
-exec sh -c 'fuser -s "$1" 2>/dev/null; [ $? -eq 1 ]' _ {} \; \
-exec gzip -9 {} \; \) 2>/dev/null || true
# Remove very old compressed logs (exclude our own log directory)
# Same prune pattern; only .gz files older than the retention window go.
run_with_privileges find /var/log \
\( -path "${LOG_DIR}" -prune \) -o \
\( -type f -name "*.gz" -mtime +"$DEFAULT_RETENTION_DAYS" -delete \) 2>/dev/null || true
fi
# 16. Check core dumps (apport crash captures; deletion is prompted because
# the dumps may still be wanted for debugging).
progress "Checking core dumps"
if [ "$DRY_RUN" -eq 0 ] && [ -d /var/lib/apport/coredump ]; then
    # Count with elevated privileges: the coredump directory is typically
    # root-owned, and an unprivileged find would silently report 0 files
    # (the deletion below already runs via run_with_privileges — keep the
    # count consistent with it, as section 17 does for /var/backups).
    core_count=$(run_with_privileges find /var/lib/apport/coredump -type f -name 'core*' 2>/dev/null | wc -l) || core_count=0
    if [ "$core_count" -gt 0 ]; then
        log_operation "info" "Found $core_count core dump files"
        if prompt_with_timeout "Delete core dumps? (y/n): "; then
            run_with_privileges find /var/lib/apport/coredump -type f -name 'core*' -delete
        fi
    fi
fi
# 17. Clean package backups — keep only the 5 most recent dpkg status
# snapshots in /var/backups.
progress "Cleaning package backups"
if [ "$DRY_RUN" -eq 0 ]; then
    backup_count=$(run_with_privileges find /var/backups -name "dpkg.status.*" -type f 2>/dev/null | wc -l) || backup_count=0
    if [ "$backup_count" -gt 5 ]; then
        # Emit NUL-delimited "mtime path" records so arbitrary filenames are
        # safe, sort newest-first, skip the 5 most recent, delete the rest.
        # The trailing '|| true' keeps a transient find/sort failure from
        # aborting the whole cleanup under 'set -e -o pipefail' (consistent
        # with every other best-effort step in this script).
        run_with_privileges find /var/backups -name "dpkg.status.*" -type f -printf '%T@ %p\0' 2>/dev/null | \
            sort -zrn | tail -zn +6 | cut -zd' ' -f2- | \
            while IFS= read -r -d '' backup_file; do
                # Re-check existence: the file may have vanished since listing.
                # An 'if' (rather than '[ -f ] && rm') ensures a vanished file
                # cannot trip 'set -e' as the final command of the loop body.
                if [ -f "$backup_file" ]; then
                    run_with_privileges rm -f -- "$backup_file"
                fi
            done || true
        log_operation "info" "Cleaned old dpkg backups (kept 5 most recent)"
    else
        log_operation "info" "No old dpkg backups to clean (only $backup_count found)"
    fi
fi
# 18./19. Purge Python package-manager caches when the tools are installed;
# failures are non-fatal (best-effort cleanup).
progress "Cleaning pip cache"
if command -v pip &> /dev/null && [ "$DRY_RUN" -eq 0 ]; then
    pip cache purge 2>/dev/null || true
fi
progress "Cleaning conda cache"
if command -v conda &> /dev/null && [ "$DRY_RUN" -eq 0 ]; then
    conda clean --all -y 2>/dev/null || true
fi
# 20. Clean npm cache
progress "Cleaning npm cache"
if [ "$DRY_RUN" -eq 0 ] && [ -d "$REAL_HOME/.npm" ]; then
    # npm's content-addressable cache lives in ~/.npm/_cacache; the previous
    # "_cache" path does not exist in modern npm, so this step never matched
    # anything. Sweep both names defensively. node_modules and other npm
    # directories are untouched.
    for npm_cache_dir in "$REAL_HOME/.npm/_cacache" "$REAL_HOME/.npm/_cache"; do
        [ -d "$npm_cache_dir" ] || continue
        safe_find "$npm_cache_dir" -type f -mtime +7 -delete 2>/dev/null || true
    done
fi
# 21. Clean Hunyuan3D model cache (prompted — the models are large downloads
# the user may not want to re-fetch).
progress "Cleaning Hunyuan3D model cache"
if [ -d "$REAL_HOME/.cache/hy3dgen" ]; then
    # Existence check is separated from the dry-run gate so a dry run with a
    # cache present no longer logs the misleading "No Hunyuan3D cache found".
    if [ "$DRY_RUN" -eq 0 ]; then
        local_cache_size=$(du -sh "$REAL_HOME/.cache/hy3dgen" 2>/dev/null | cut -f1) || local_cache_size="unknown"
        log_operation "info" "Hunyuan3D cache size: $local_cache_size"
        if prompt_with_timeout "Remove Hunyuan3D model cache ($local_cache_size)? (y/n): "; then
            # Use find directly — safe_find would skip due to hy3dgen exclusion pattern
            find "$REAL_HOME/.cache/hy3dgen" -mindepth 1 -delete 2>/dev/null || true
            log_operation "info" "Hunyuan3D model cache cleaned"
        fi
    fi
else
    log_operation "info" "No Hunyuan3D cache found, skipping"
fi
# 22. REMOVED: Automatic PPA addition (security risk)
progress "Skipping automatic third-party repository addition"
log_operation "info" "Automatic PPA addition removed for security. Install ucaresystem-core manually if needed."
# 23. Final update — prompted full package refresh, guarded by the APT lock.
progress "Final system update"
if [ "$DRY_RUN" -eq 0 ]; then
if prompt_with_timeout "Update all packages? (y/n): "; then
check_apt_lock
# '&&' chains the steps so upgrade only runs if the index refresh
# succeeded; under 'set -e' a failed update aborts the script here.
apt_with_retry apt-get update && apt_with_retry apt-get upgrade -y
fi
fi
# Show results: report free space before/after, plus log and snapshot paths.
log_operation "info" "Final disk space usage:"
# Include /home in the report only when it is a separate mount.
df_targets=("/")
if mountpoint -q /home 2>/dev/null; then
    df_targets+=("/home")
fi
df -h "${df_targets[@]}"
post_cleanup_kb=$(df / | awk 'NR==2 {print $4}')
log_operation "info" "=== Cleanup completed successfully ==="
log_operation "info" "Initial free space: $initial_space KB"
log_operation "info" "Final free space: $post_cleanup_kb KB"
# Defaults guard against either measurement being empty.
reclaimed_kb=$(( ${post_cleanup_kb:-0} - ${initial_space:-0} ))
log_operation "info" "Space freed: $reclaimed_kb KB"
log_operation "info" "Log file: $LOG_FILE"
log_operation "info" "Protected patterns excluded: $(printf '%s ' "${EXCLUDED_PATTERNS[@]}")"
# If a pre-cleanup snapshot was taken, surface its location and drop the
# pointer file now that it has been reported.
if [ -n "${SNAPSHOT_REF:-}" ] && [ -f "$SNAPSHOT_REF" ]; then
    snapshot_location=$(<"$SNAPSHOT_REF")
    log_operation "info" "System snapshot available at: $snapshot_location"
    rm -f -- "$SNAPSHOT_REF"
fi
flush_log
exit "$EXIT_SUCCESS"
Author
You're welcome! Thank you for reporting the issue. :)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Thank you so much : )