脚本用法
./down.sh dir_to_save book_link
book_link中请不要包含index.htm
比如要下载 http://www.chinalinuxpub.com/doc/oreillybookself/perl/learn/index.htm
到 prog_perl中
./down.sh prog_perl http://www.chinalinuxpub.com/doc/oreillybookself/perl/learn/
如有什么问题
Just Read The ****ing Codes
复制内容到剪贴板
代码:
[No.616 04:24:22 Downloads]$ cat down.sh
#! /bin/bash
#
# author: li-jiahuan@sohu.com
# date : 06/02/01
# usage : ./down.sh dir_to_save book_link
# set -n
# Default book location; overridden by the optional second argument below.
link="http://210.82.89.226/doc/oreillybookself/perl/learn"
# ${para-default}, ${para:-default}
dir=${1:-$PWD}
link=${2:-$link}
# BUG FIX: derive the index URL *after* the user-supplied link has been
# applied — the original computed it from the default link only, so a
# custom book_link still fetched the default book's index page.
index="${link}/index.htm"
mkdir -p "$dir"
# Abort if we cannot enter the target directory (the original left this
# check commented out and would have downloaded into the wrong place).
cd "$dir" || { echo "Can not cd to $dir" >&2; exit 1; }
# download the index page (skip if it is already present)
[[ -f "index.htm" ]] || curl "$index" > index.htm
# web="http://210.82.89.226/doc/oreillybookself/perl/learn/prf1_01.htm"
# download the Preface: pages are named prf<section>_<page>.htm
for i in {1..5}; do
  for j in $(seq -w 1 15); do          # -w zero-pads: 01, 02, ... 15
    web="${link}/prf${i}_${j}.htm"
    preface=${web##*/}                 # basename of the URL
    # skip pages we already downloaded on a previous run
    [[ -e "$preface" ]] && continue
    printf '\nDownloading %s ...\n' "$web"
    curl "$web" > "$preface"
    # The server answers missing pages with a 200 + HTML error page, so
    # detect that text, drop the bogus file, and stop probing this section.
    # -q keeps the error page off stdout (the original printed it).
    if grep -q "was not found on this server" "$preface"; then
      rm -- "$preface"
      break
    fi
  done
done
# web="http://210.82.89.226/doc/oreillybookself/perl/learn/ch0_01.htm"
# download chapters: pages are named ch<chapter>_<page>.htm
for i in $(seq -w 1 19); do            # -w zero-pads: 01 ... 19
  for j in $(seq -w 1 50); do
    web="${link}/ch${i}_${j}.htm"
    chapter=${web##*/}                 # basename of the URL
    # skip pages we already downloaded on a previous run
    [[ -e "$chapter" ]] && continue
    printf '\nDownloading %s ...\n' "$web"
    curl "$web" > "$chapter"
    # Missing pages come back as an HTML error page; drop it and stop
    # probing further pages of this chapter. -q keeps it off stdout.
    if grep -q "was not found on this server" "$chapter"; then
      rm -- "$chapter"
      break
    fi
  done
done
# web="http://210.82.89.226/doc/oreillybookself/perl/learn/appa_01.htm"
# download Appendix: pages are named app<letter>_<page>.htm
for i in {a..z}; do
  for j in $(seq -w 1 50); do          # -w zero-pads: 01 ... 50
    web="${link}/app${i}_${j}.htm"
    appendix=${web##*/}                # basename of the URL
    # skip pages we already downloaded on a previous run
    [[ -e "$appendix" ]] && continue
    printf '\nDownloading %s ...\n' "$web"
    curl "$web" > "$appendix"
    # Missing pages come back as an HTML error page; drop it and stop
    # probing further pages of this appendix. -q keeps it off stdout.
    if grep -q "was not found on this server" "$appendix"; then
      rm -- "$appendix"
      break
    fi
  done
done
# web="http://210.82.89.226/doc/oreillybookself/perl/learn/index/idx_a.htm"
# download the book index: idx_0.htm plus idx_a.htm .. idx_z.htm in index/
link="${link}/index"
mkdir -p index
# Abort rather than downloading into the wrong directory on failure.
cd index || { echo "Can not cd to index" >&2; exit 1; }
[[ -f idx_0.htm ]] || curl "$link/idx_0.htm" > idx_0.htm
for j in {a..z}; do
  web="${link}/idx_${j}.htm"
  idx=${web##*/}                       # basename of the URL
  # skip pages we already downloaded on a previous run
  [[ -e "$idx" ]] && continue
  printf '\nDownloading %s ...\n' "$web"
  curl "$web" > "$idx"
  # Drop the server's HTML "not found" page; -q keeps it off stdout.
  if grep -q "was not found on this server" "$idx"; then
    rm -- "$idx"
    # no break: index letters are not contiguous, keep probing the rest
  fi
done
[No.617 04:24:25 Downloads]$