Starting from a given id, this script crawls the book information on 知轩藏书 (zxcs.me) one entry at a time up to id 15000 (book ids on the site do not currently exceed 15000) and saves the results as a CSV file, which can be opened in Excel or imported into a database. The interval attribute can be set to a smaller value so that a single run crawls fewer entries (see the usage sketch after the code).
The code is as follows:
from urllib import request
import time
import random
from ua_info import ua_list  # local module providing a list of User-Agent strings
import re
import csv
import os
class ZxcsSpider:
    # Define common variables such as URL templates and counters
    def __init__(self):
        self.url = 'http://www.zxcs.me/post/{}'
        self.url_review = 'http://www.zxcs.me/content/plugins/cgz_xinqing/cgz_xinqing_action.php?action=show&id={}&m={}'
        self.header = ['编号', '书名', '作者', '类型', '仙草', '毒草']
        self.interval = 15000  # how many ids to crawl in one run
    # Fetch a page and return its content, using a random User-Agent
    def get_html(self, url):
        req = request.Request(
            url=url, headers={'User-Agent': random.choice(ua_list)})
        res = request.urlopen(req, timeout=5)
        html = res.read().decode("utf-8")
        return html
    # Parse the page with a regular expression and extract the data
    def parse_html(self, bookid, html):
        pattern = re.compile(
            'title.*?《(.*?)》.*?作者:(.*?) -.*?keywords.*?精校电子书,(.*?)"', re.S)
        title = pattern.search(html)
        return [bookid, title.group(1), title.group(2), title.group(3)]
    # Fetch the rating (review) data for one book
    def get_review(self, bookid):
        url = self.url_review.format(bookid, random.random())
        return self.get_html(url)
    # Write the extracted rows to a CSV file
    def write2csv(self, filename, header, data):
        with open(filename, 'w', encoding='utf_8_sig', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(header)
            writer.writerows(data)
    # Main routine
    def run(self):
        bookid = input('input bookid:')
        # filename = input('input filename:')
        startid = int(bookid)
        data = []
        start = time.time()
        for i in range(startid, startid + self.interval):
            url = self.url.format(i)
            url_review = self.url_review.format(i, random.random())
            try:
                html = self.get_html(url)
                r_list = self.parse_html(i, html)
                # The rating endpoint returns comma-separated counts;
                # index 0 is the 仙草 count and index 4 is the 毒草 count
                review = self.get_html(url_review).split(',')
                r_list.append(review[0])
                r_list.append(review[4])
            except Exception:
                print(url + ' request failed')
            else:
                data.append(r_list[:])
        try:
            filename = bookid + '.csv'
            self.write2csv(filename, self.header, data)
        except Exception as e:
            print('failed to save', e)
        else:
            print('saved successfully')
        end = time.time()
        print('elapsed: {}s'.format(end - start))
if __name__ == '__main__':
    spider = ZxcsSpider()
    spider.run()
    os.system('pause')  # keep the console window open on Windows
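The script imports ua_list from a local ua_info module that is not shown here. A minimal sketch of such a module, assuming it is saved as ua_info.py next to the script, might look like this (the User-Agent strings are just examples and can be swapped for any valid ones):

# ua_info.py -- a small pool of User-Agent strings for random.choice()
ua_list = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0',
]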
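As mentioned at the top, interval can be lowered for a small test run, and the resulting CSV can be imported into a database. The sketch below assumes the scraper above is saved as zxcs_spider.py and that 100 was entered at the prompt (so the output file is 100.csv); the database file and table names are likewise just examples:

import csv
import sqlite3

from zxcs_spider import ZxcsSpider  # assumes the class above lives in zxcs_spider.py

spider = ZxcsSpider()
spider.interval = 100   # crawl only 100 ids instead of 15000
spider.run()            # entering 100 at the prompt produces 100.csv

# Load the CSV into a SQLite database
conn = sqlite3.connect('books.db')
conn.execute('CREATE TABLE IF NOT EXISTS books '
             '(id TEXT, title TEXT, author TEXT, category TEXT, xiancao TEXT, ducao TEXT)')
with open('100.csv', encoding='utf_8_sig', newline='') as f:
    rows = list(csv.reader(f))[1:]  # skip the header row
conn.executemany('INSERT INTO books VALUES (?, ?, ?, ?, ?, ?)', rows)
conn.commit()
conn.close()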