Download files from the Web. Supports HTTP, HTTPS, and FTP.

# To download the contents of a URL to a file (named "foo" in this case):
wget https://example.com/foo

# To download a single web page and all its resources (scripts, stylesheets, images, etc.):
wget --page-requisites --convert-links https://example.com/somepage.html
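
# If some of the page's resources live on other hosts (e.g. a CDN), -H spans
# hosts; this is one common variant of the command above, not a fixed recipe
# (-E adds .html extensions, -K keeps pre-conversion backups):
wget -E -H -k -K -p https://example.com/somepage.html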

# To download a full website, with 3-second intervals between requests:
wget --mirror --page-requisites --convert-links --wait=3 https://example.com
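
# If the mirror should stay below the starting directory, add --no-parent
# (the /docs/ path here is only illustrative):
wget --mirror --page-requisites --convert-links --no-parent --wait=3 https://example.com/docs/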

# To download the contents of a URL via authenticated FTP:
wget --ftp-user=username --ftp-password=password ftp://example.com
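
# To avoid leaving the password in shell history, let wget prompt for it
# instead (username is a placeholder):
wget --ftp-user=username --ask-password ftp://example.com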

# To limit download speed to 200 kB/s:
wget --limit-rate=200k https://example.com

# To continue an incomplete download:
wget -c https://example.com
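
# On a flaky connection, -c can be combined with unlimited retries
# (-t 0 means retry indefinitely; large.iso is a placeholder filename):
wget -c -t 0 https://example.com/large.iso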

# To retry a given number of times if the download doesn't succeed at first:
wget -t number_of_retries https://example.com
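
# For example, to try up to 3 times, waiting between retries
# (--waitretry backs off from 1 second up to the given maximum; file.iso is a placeholder):
wget -t 3 --waitretry=5 https://example.com/file.iso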

# To download a table of contents page to depth 3, staying on example.com, not
# ascending to the parent directory, and converting links for local viewing:
wget -rkp -l3 -np -nH --cut-dirs=1 --domains example.com http://example.com/toc

# To download a website politely, with 20-second waits, a 20 kB/s rate cap,
# and a Mozilla user agent:
wget --wait=20 --limit-rate=20K -r -p -U Mozilla http://www.somesite.com/

# To download Apple's Safari transitions showcase, sending a Mozilla/5.0 user agent:
wget --wait=20 --limit-rate=20K -r -p -U Mozilla/5.0 http://developer.apple.com/safaridemos/showcase/transitions/

# To do a quick-and-dirty recursive grab with page requisites and a spoofed user agent:
wget -r -p -U Mozilla http://somesite.com/

# To crawl a site without saving anything, checking for broken links (404s):
wget --spider -nd -r <URL>
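
# To keep a log of the crawl and search it for 404s afterwards
# (spider.log is an arbitrary filename; message wording varies by wget version):
wget --spider -nd -r -o spider.log <URL>
grep -B2 '404' spider.log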

# To download a list of URLs from a text file:
wget -i ~/Desktop/urls.txt
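
# To save everything from the list into one directory, skipping files that
# already exist (urls.txt and downloads/ are placeholder paths):
wget -i urls.txt -P downloads -nc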

# To save a download into a specific directory:
wget --directory-prefix=Downloads http://example.com/video.mov
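
# -P is the short form of --directory-prefix:
wget -P Downloads http://example.com/video.mov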

# To mirror a website:
wget -m http://textfiles.com/hacking/INTERNET/
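
# For a mirror meant for offline browsing, -k rewrites links, -E adds .html
# extensions where needed, and -p pulls in page requisites; one common
# combination, offered as a suggestion:
wget -m -k -E -p -np http://textfiles.com/hacking/INTERNET/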

# To grab all .mp3 links from a page (using lynx to extract the URLs):
mp3=$(lynx -dump http://server1.cyberciti.biz/media/index.html | grep 'http://' | awk '/mp3/{print $2}')
for i in $mp3; do
    wget "$i"
done
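
# A wget-only alternative (no lynx): recurse one level and accept only .mp3
# files; the URL is the same example index page as above:
wget -r -l1 -nd -A mp3 http://server1.cyberciti.biz/media/index.html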