macOS load simulator: a Bash script that generates CPU, memory, disk, and network load.
#!/usr/bin/env bash
set -euo pipefail
# macOS load simulator: CPU + Memory + Disk + Network
# Works on macOS Sequoia / Apple Silicon.
#
# Examples:
# ./loadsim.sh cpu --workers 8 --duration 30
# ./loadsim.sh mem --gb 8 --duration 60
# ./loadsim.sh disk --mbps 200 --duration 45
# ./loadsim.sh net --mbps 50 --duration 30
# ./loadsim.sh all --workers 8 --gb 6 --mbps 100 --disk-mbps 150 --duration 60
#
# Notes:
# - Disk test writes to a temp file and deletes it.
# - Memory test fills a temp file and streams it repeatedly with cat -> /dev/null; it
#   consumes RAM through the file cache and raises memory pressure.
# - Network test runs parallel curl downloads from public hosts; it's "best effort" and
#   depends on your ISP and the remote servers.
#
# Ctrl+C stops everything cleanly.
#
# Author: Jon LaBelle
# License: MIT
# Date: 2026-02-18
# https://jonlabelle.com/snippets/view/shell/macos-load-simulator
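#
# To watch the load from a second terminal, the stock macOS tools are usually
# enough (the flags below are the common ones; check each man page on your OS
# version):
#   top -o cpu          # per-process CPU usage
#   vm_stat 1           # paging / memory pressure counters
#   iostat -w 1         # disk throughput
#   nettop -m tcp       # per-connection network throughput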
SCRIPT_NAME="$(basename "$0")"
TMPDIR_BASE="${TMPDIR:-/tmp}"
WORKDIR="$(mktemp -d "${TMPDIR_BASE%/}/loadsim.XXXXXX")"
PIDS=()
CLEANUP_DONE=0
cleanup() {
    if [[ "${CLEANUP_DONE}" -eq 1 ]]; then
        return 0
    fi
    CLEANUP_DONE=1
    local pid
    local deadline
    local any_alive
    # Ask tracked workers and their direct children to stop first.
    for pid in "${PIDS[@]:-}"; do
        kill -TERM "${pid}" > /dev/null 2>&1 || true
        pkill -TERM -P "${pid}" > /dev/null 2>&1 || true
    done
    # Give workers a short grace period before forcing termination.
    deadline=$((SECONDS + 3))
    while ((SECONDS < deadline)); do
        any_alive=0
        for pid in "${PIDS[@]:-}"; do
            if kill -0 "${pid}" > /dev/null 2>&1; then
                any_alive=1
                break
            fi
        done
        if [[ "${any_alive}" -eq 0 ]]; then
            break
        fi
        sleep 0.1
    done
    # Forcefully stop any leftovers and reap child processes.
    for pid in "${PIDS[@]:-}"; do
        kill -KILL "${pid}" > /dev/null 2>&1 || true
        pkill -KILL -P "${pid}" > /dev/null 2>&1 || true
        wait "${pid}" > /dev/null 2>&1 || true
    done
    # Remove all temp resources.
    rm -rf "${WORKDIR}" > /dev/null 2>&1 || true
}
on_signal() {
    local signal_name="$1"
    cleanup
    trap - EXIT INT TERM
    case "${signal_name}" in
        INT) exit 130 ;;
        TERM) exit 143 ;;
        *) exit 1 ;;
    esac
}
trap cleanup EXIT
trap 'on_signal INT' INT
trap 'on_signal TERM' TERM
usage() {
    cat << EOF
Usage:
  ${SCRIPT_NAME} <cpu|mem|disk|net|all> [options]

Common options:
  --duration <sec>    How long to run (default: 30)

CPU options:
  --workers <n>       Number of CPU workers (default: logical cores)

Memory options:
  --gb <n>            Approx memory pressure in GB (default: 4)

Disk options:
  --disk-mbps <n>     Approx write+read MB/s target (default: 200)

Network options:
  --mbps <n>          Approx download Mbps target (default: 50)
  --streams <n>       Parallel download streams (default: 4)

Examples:
  ${SCRIPT_NAME} all --workers 8 --gb 6 --mbps 80 --disk-mbps 150 --duration 60
EOF
}
# Defaults
DURATION=30
CPU_WORKERS=""
MEM_GB=4
NET_MBPS=50
NET_STREAMS=4
DISK_MBPS=200
# Helpers
logical_cores() {
    sysctl -n hw.logicalcpu 2> /dev/null || echo 4
}
# Parse args (simple)
if [[ $# -lt 1 ]]; then
    usage
    exit 1
fi
MODE="$1"
shift
while [[ $# -gt 0 ]]; do
    case "$1" in
        --duration)
            DURATION="$2"
            shift 2
            ;;
        --workers)
            CPU_WORKERS="$2"
            shift 2
            ;;
        --gb)
            MEM_GB="$2"
            shift 2
            ;;
        --mbps)
            NET_MBPS="$2"
            shift 2
            ;;
        --streams)
            NET_STREAMS="$2"
            shift 2
            ;;
        --disk-mbps)
            DISK_MBPS="$2"
            shift 2
            ;;
        -h | --help)
            usage
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            usage
            exit 1
            ;;
    esac
done
[[ -z "${CPU_WORKERS}" ]] && CPU_WORKERS="$(logical_cores)"
echo "Workdir: ${WORKDIR}"
echo "Mode: ${MODE} | duration=${DURATION}s | cpu_workers=${CPU_WORKERS} | mem=${MEM_GB}GB | net=${NET_MBPS}Mbps (streams=${NET_STREAMS}) | disk=${DISK_MBPS}MB/s"
# --- CPU load ---
start_cpu() {
    local workers="$1"
    echo "[cpu] starting ${workers} workers"
    for _ in $(seq 1 "${workers}"); do
        # 'yes' is a classic busy loop; redirect to /dev/null.
        (yes > /dev/null) &
        PIDS+=("$!")
    done
}
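# Each 'yes > /dev/null' instance keeps roughly one logical core busy, so setting
# --workers to the core count should saturate the CPU. If you would rather not
# rely on 'yes', a pure-shell busy loop is a hypothetical drop-in replacement:
#
#   (while :; do :; done) &
#   PIDS+=("$!")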
# --- Memory pressure ---
# Approach: create a big file in $WORKDIR and repeatedly stream it, encouraging caching + memory pressure.
# Also uses optional 'purge' (if available) before starting to better observe the ramp.
start_mem() {
    local gb="$1"
    echo "[mem] applying approx ${gb}GB memory pressure"
    if command -v purge > /dev/null 2>&1; then
        echo "[mem] running 'purge' (optional) to clear file cache first"
        sudo -n purge > /dev/null 2>&1 || true
    fi
    local bytes=$((gb * 1024 * 1024 * 1024))
    local memfile="${WORKDIR}/mem.bin"
    # /dev/urandom would make file creation CPU-heavy, so write zeros instead;
    # streaming the file still creates cache-driven memory pressure.
    dd if=/dev/zero of="${memfile}" bs=16m count=$((bytes / (16 * 1024 * 1024))) status=none
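    # Sizing example: --gb 4 -> bytes = 4 * 1024^3, count = bytes / 16 MiB = 256,
    # i.e. a 4 GiB file written in 16 MB chunks.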
    # Stream it in a loop to keep it hot in cache and create memory pressure.
    (while :; do cat "${memfile}" > /dev/null; done) &
    PIDS+=("$!")
}
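# While the mem worker runs, the Memory Pressure graph in Activity Monitor (or
# 'vm_stat 1' in a terminal) shows the effect. macOS also ships a dedicated
# 'memory_pressure' tool that can apply synthetic pressure on its own; its flags
# vary by OS release, so check 'man memory_pressure' before relying on it.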
# --- Disk load ---
# Write then read a file repeatedly, trying to approximate MB/s by chunk size and sleeps.
# This is best-effort; real throughput depends on SSD, caching, APFS, etc.
start_disk() {
    local mbps="$1"
    echo "[disk] targeting ~${mbps} MB/s write+read (best effort)"
    local block_mb=64
    local file="${WORKDIR}/disk.bin"
    # A loop that alternates write+read. Add small sleeps to roughly hit the target.
    (
        while :; do
            # Write ~block_mb MB
            dd if=/dev/zero of="${file}" bs=1m count="${block_mb}" conv=fsync status=none
            # Read it back
            dd if="${file}" of=/dev/null bs=1m status=none
            # Rough throttle: total moved ~ 2*block_mb MB.
            # If the target is mbps MB/s, the desired seconds per cycle are:
            #   t = (2*block_mb) / mbps
            # Sleep a portion of that to avoid constant maxing (still bursts).
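            # Worked example with the defaults: block_mb=64 and a 200 MB/s target
            # give t = 128 / 200 = 0.64 s per cycle, so the sleep below is about
            # 0.25 * 0.64 = 0.16 s (clamped to the 0..1 s range).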
            python3 - << PY 2> /dev/null || true
import time
block_mb = ${block_mb}
mbps = ${mbps}
t = (2*block_mb)/mbps
# sleep at most 1s, at least 0
s = max(0.0, min(1.0, t*0.25))
time.sleep(s)
PY
        done
    ) &
    PIDS+=("$!")
}
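# 'iostat -w 1' (or Activity Monitor's Disk tab) shows the resulting throughput.
# Expect the numbers to drift from the target: APFS, SSD caching, and fsync
# behaviour all affect how the bursts land on the physical device.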
# --- Network load ---
# Uses multiple curl streams downloading from fast endpoints.
# Best effort: actual Mbps depends on your path + server limits.
start_net() {
    local mbps="$1"
    local streams="$2"
    echo "[net] targeting ~${mbps} Mbps (best effort) with ${streams} parallel streams"
    # A few known "big file" endpoints commonly used for testing.
    # If one fails, curl exits non-zero; we ignore per-loop failures.
    local urls=(
        "https://speed.hetzner.de/1GB.bin"
        "https://mirror.init7.net/speedtest/1G"
        "https://ipv4.download.thinkbroadband.com/1GB.zip"
    )
    # Per-stream target in bytes/sec (approx)
    local per_stream_bps=$(((mbps * 1000 * 1000 / 8) / streams))
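    # Worked example with the defaults: 50 Mbps = 50,000,000 / 8 = 6,250,000 bytes/s
    # total, split across 4 streams -> 1,562,500 bytes/s (~1.5 MB/s) per curl.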
    for _ in $(seq 1 "${streams}"); do
        (
            local idx=0
            while :; do
                local url="${urls[$((idx % ${#urls[@]}))]}"
                idx=$((idx + 1))
                # Throttle via --limit-rate (bytes/sec). Keepalive to reuse connections.
                curl -L --silent --show-error --max-time 10 \
                    --limit-rate "${per_stream_bps}" \
                    "${url}" -o /dev/null > /dev/null 2>&1 || true
            done
        ) &
        PIDS+=("$!")
    done
}
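# 'nettop -m tcp' (or Activity Monitor's Network tab) shows the per-connection
# rates. The aggregate only approaches the --mbps target when every endpoint can
# sustain its share; the --max-time 10 cap just makes each stream reconnect and
# keep pulling data for the whole run.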
run_mode() {
    case "$1" in
        cpu) start_cpu "${CPU_WORKERS}" ;;
        mem) start_mem "${MEM_GB}" ;;
        disk) start_disk "${DISK_MBPS}" ;;
        net) start_net "${NET_MBPS}" "${NET_STREAMS}" ;;
        all)
            start_cpu "${CPU_WORKERS}"
            start_mem "${MEM_GB}"
            start_disk "${DISK_MBPS}"
            start_net "${NET_MBPS}" "${NET_STREAMS}"
            ;;
        *)
            echo "Unknown mode: $1"
            usage
            exit 1
            ;;
    esac
}
run_mode "${MODE}"
echo "Running for ${DURATION}s... (Ctrl+C to stop)"
sleep "${DURATION}"
echo "Done."