Download files from the Web. Supports HTTP, HTTPS, and FTP.
# To download the contents of a URL to a file (named "foo" in this case):
wget <https://example.com/foo>
# To download the contents of a URL to a file (named "bar" in this case):
wget --output-document <bar> <https://example.com/foo>
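# A related sketch (GNU wget treats "-" as stdout): print the response body to the terminal instead of saving a file:
wget --quiet --output-document=- <https://example.com/foo>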
# To download a single web page and all its resources (scripts, stylesheets, images, etc.), waiting 3 seconds between requests:
wget --page-requisites --convert-links --wait=3 <https://example.com/somepage.html>
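# A hedged variant of the above: --random-wait jitters the delay between 0.5x and 1.5x of --wait, which is gentler on rate limiters:
wget --page-requisites --convert-links --wait=3 --random-wait <https://example.com/somepage.html>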
# To download all listed files within a directory and its sub-directories (does not download embedded page elements):
wget --mirror --no-parent <https://example.com/somepath/>
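# A sketch building on the above, assuming you only want certain file types (--accept takes a comma-separated suffix list):
wget --mirror --no-parent --accept=<pdf,zip> <https://example.com/somepath/>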
# To limit the download speed and the number of connection retries:
wget --limit-rate=<300k> --tries=<100> <https://example.com/somepath/>
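# A hedged companion: --quota caps the total bytes fetched over a whole recursive run (it has no effect on single-file downloads):
wget --quota=<100m> --limit-rate=<300k> --recursive <https://example.com/somepath/>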
# To download a file from an HTTP server using Basic Auth (also works for FTP):
wget --user=<username> --password=<password> <https://example.com>
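# A safer sketch of the same idea: --ask-password prompts interactively, keeping the password out of shell history:
wget --user=<username> --ask-password <https://example.com>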
# To continue an incomplete download:
wget --continue <https://example.com>
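# A hedged extension: GNU wget accepts --tries=inf for unlimited retries, useful with --continue on flaky connections:
wget --continue --tries=inf <https://example.com>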
# To download all URLs stored in a text file to a specific directory:
wget --directory-prefix <path/to/directory> --input-file <URLs.txt>
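# A minimal sketch of building URLs.txt first, assuming a numbered file series (part1.zip ... part10.zip are hypothetical names):
for n in $(seq 1 10); do echo "https://example.com/part$n.zip"; done > URLs.txt
wget --directory-prefix <path/to/directory> --input-file URLs.txt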
# ---
# To retry a given number of times if the download doesn't succeed at first:
wget --tries=<number_of_retries> <https://example.com>
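# A hedged companion: --waitretry backs off linearly between retries of the same file, up to the given ceiling in seconds:
wget --tries=<number_of_retries> --waitretry=<seconds> <https://example.com>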
# To download the toc page from example.com up to 3 levels deep, converting links for local viewing and staying within the example.com domain:
wget -r -k -p -l3 -np -nH --cut-dirs=1 --domains example.com http://example.com/toc
# To download a whole website politely: wait 20 seconds between requests, cap the rate at 20 KB/s, and identify as Mozilla:
wget --wait=20 --limit-rate=20K -r -p -U Mozilla http://www.somesite.com/
# To fetch Apple's Safari 5 transitions demo, identifying as Mozilla/5.0:
wget --wait=20 --limit-rate=20K -r -p -U Mozilla/5.0 http://developer.apple.com/safaridemos/showcase/transitions/
# Quick and dirty: recursively grab a site with its page requisites, identifying as Mozilla:
wget -r -p -U Mozilla http://somesite.com/
# To check a site for broken links (404s) without saving anything:
wget --spider -nd -r <URL>
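# A follow-up sketch: log the crawl to a file (wget.log is an assumed name), then grep the log for 404 responses:
wget --spider -nd -r -o wget.log <URL>
grep -B1 'ERROR 404' wget.log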
# To download every URL listed in a text file:
wget -i ~/Desktop/urls.txt
# To download a file into a specific directory (here, Downloads):
wget --directory-prefix=Downloads http://example.com/video.mov
# To mirror a website (or one of its subdirectories):
wget -m http://textfiles.com/hacking/INTERNET/
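# A hedged variant for offline reading: add -k (convert links) and -p (page requisites) to the mirror:
wget -m -k -p http://textfiles.com/hacking/INTERNET/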
# To grab all .mp3 files linked from a page (uses lynx to extract the URLs):
mp3=$(lynx -dump http://server1.cyberciti.biz/media/index.html | grep 'http://' | awk '/mp3/{print $2}')
for i in $mp3; do
    wget "$i"
done
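# A wget-only alternative sketch (same index page assumed): recurse one level deep and accept only .mp3 files:
wget -r -l1 -nd -A mp3 http://server1.cyberciti.biz/media/index.html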