Baidu Tieba Crawler

# encoding: utf-8
import urllib.request
import urllib.parse
import time
import random


def load_page(url):
	"""
	通过url来获取网页内容jfa
	:param url: 待获取的页面
	:return: url对应的网页内容
	"""
	headers = {
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
	}

	request = urllib.request.Request(url=url, headers=headers)
	response = urllib.request.urlopen(request)
	content = response.read()
	return content.decode("utf-8")


def write_page(html, filename):
	"""
	:param html: 要保持的页面内容
	:param filename: 要保持页面内容的文件名
	:return:
	"""
	print("正在保存文件:" + filename)
	with open(filename, "w", encoding="utf-8") as f:
		f.write(html)
	print("保持文件完毕:" + filename)


def tieba_spider(keyword, start, end):
	"""
	爬取百度贴吧内容
	:param keyword: 指定要爬取的贴吧
	:param start: 开始的页面
	:param end: 终止的页面
	:return:
	"""
	search = {'kw': keyword}
	kw = urllib.parse.urlencode(search)
	# kw = urllib.parse.quote(keyword)
	url = "http://tieba.baidu.com/f?"
	url = url + kw + "&ie=utf-8"
	urls = []
	# Tieba paginates via the pn query parameter in steps of 50 posts per page
	for val in range(start-1, end):
		temp = url + "&pn=" + str(val * 50)
		urls.append(temp)
	# print(urls)

	page = 1
	for url in urls:
		html = load_page(url)
		write_page(html, keyword + str(page) + ".html")
		# pause 3-14 seconds between requests so the crawl looks less like a bot
		time.sleep(random.randrange(3, 15))
		page = page + 1


if __name__ == "__main__":
	# content = load_page(url)
	# write_page(content, "baidutieba.html")
	keyword = input("请输入要爬取的贴吧")
	tieba_spider(keyword, 1, 5)
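
A note on the commented-out quote() line in tieba_spider: urllib.parse.urlencode() takes a dict and produces complete key=value pairs, while urllib.parse.quote() only percent-encodes a single string, so with quote() the "kw=" prefix would have to be added by hand. A quick comparison:

import urllib.parse

print(urllib.parse.urlencode({'kw': 'python'}))  # kw=python
print(urllib.parse.quote('百度'))                 # %E7%99%BE%E5%BA%A6
print(urllib.parse.urlencode({'kw': '百度'}))     # kw=%E7%99%BE%E5%BA%A6

Both approaches lead to the same final URL here; urlencode() is simply more convenient when there are multiple query parameters.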
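
As written, load_page() will raise on any network error or timeout and abort the whole crawl. Below is a minimal defensive sketch; load_page_safe and its retries parameter are hypothetical additions, not part of the original script, built only on the standard urllib.error and socket modules:

import socket
import time
import urllib.error
import urllib.request


def load_page_safe(url, retries=3):
	"""Hypothetical variant of load_page() with basic retry handling."""
	headers = {
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
	}
	request = urllib.request.Request(url=url, headers=headers)
	for attempt in range(1, retries + 1):
		try:
			response = urllib.request.urlopen(request, timeout=10)
			return response.read().decode("utf-8")
		except (urllib.error.URLError, socket.timeout) as e:
			print("Request failed (attempt %d/%d): %s" % (attempt, retries, e))
			time.sleep(2 * attempt)  # back off a little longer each retry
	return None  # caller decides how to handle a page that never loaded

With this variant, tieba_spider() could check for None and skip a page instead of crashing mid-crawl.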
