import scrapy

class PostdemoSpider(scrapy.Spider):
    name = 'postDemo'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['https://fanyi.baidu.com/sug']
    '''
    Scrapy ships with a default start_requests method, which issues GET requests:
    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)
    Common ways to issue a POST request instead (start_requests must be overridden):
    - assign 'POST' to the method attribute of scrapy.Request(), or
    - send the POST with scrapy.FormRequest()
    '''
    def start_requests(self):
        print('start_requests')
        data = {'kw': 'dog'}  # form payload; values must be strings
        for url in self.start_urls:
            yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse)

    def parse(self, response):
        # print(response.text)
        pass
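For completeness, the first option mentioned in the docstring, a plain scrapy.Request with method set to POST, looks roughly like the sketch below. Unlike FormRequest there is no formdata argument, so the body has to be URL-encoded by hand; the header value is the standard form content type, and the rest mirrors the spider above.

from urllib.parse import urlencode

def start_requests(self):
    # a minimal sketch of the Request-based POST variant,
    # meant as a drop-in replacement for the start_requests above
    data = {'kw': 'dog'}
    for url in self.start_urls:
        yield scrapy.Request(
            url=url,
            method='POST',
            body=urlencode(data),  # encode the form payload manually
            headers={'Content-Type': 'application/x-www-form-urlencoded'},
            callback=self.parse,
        )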
cookie
After the login POST, Scrapy handles cookies automatically; you can go straight on to requesting the pages you care about, just mind which callback each request points to.
Also be sure to adjust the settings of a newly created project.
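The settings that usually matter for a fresh project are sketched below; the exact values are illustrative assumptions, but ROBOTSTXT_OBEY in particular will block the login POST if left at its default of True.

# settings.py -- a minimal sketch; values are illustrative assumptions
ROBOTSTXT_OBEY = False          # otherwise robots.txt can block the login request
USER_AGENT = 'Mozilla/5.0 ...'  # a browser-like UA string; fill in a real one
COOKIES_ENABLED = True          # the default; needed for the session cookie to carry over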
import scrapy

class DoubanSpider(scrapy.Spider):
    name = 'douban'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://accounts.douban.com/j/mobile/login/basic']

    def start_requests(self):
        print('start')
        data = {
            'ck': '',
            'name': '',        # fill in the account name
            'password': '',    # fill in the password
            'remember': 'false',
            'ticket': ''
        }
        for url in self.start_urls:
            yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse)

    def parse(self, response):
        print('logged in')
        # the session cookie set by the login response is reused automatically
        url = 'https://www.douban.com/people/193627830/'
        yield scrapy.Request(url=url, callback=self.parseBySecond)

    def parseBySecond(self, response):
        print('writing')
        with open('./test.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
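In other words, no explicit cookie handling is needed here: Scrapy's built-in CookiesMiddleware stores the cookies set by the login response and attaches them to the follow-up Request, so parseBySecond receives the profile page as the logged-in user.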
proxy
Define a custom class in the middlewares module and write a process_request method that sets
request.meta['proxy'] = 'https://proxy-ip'
then enable it in settings, and every outgoing request will be rewritten to go through the proxy.
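A minimal sketch of such a downloader middleware follows; the class name, the 'myproject' module path, the proxy address, and the priority 543 are all assumptions for illustration, not from the source.

# middlewares.py -- a minimal sketch; names and address are assumptions
class ProxyDownloaderMiddleware:
    def process_request(self, request, spider):
        # rewrite every outgoing request to go through the proxy
        request.meta['proxy'] = 'https://127.0.0.1:8888'  # replace with a real proxy ip:port
        return None  # returning None lets normal processing continue

# settings.py -- enable the middleware so it runs for every request
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.ProxyDownloaderMiddleware': 543,
}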
import scrapy

class DoubanSpider(scrapy.Spider):
    name = 'douban'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://www.baidu.com/s?wd=ip']

    def parse(self, response):
        with open('baidu.html', 'w', encoding='utf-8') as f:
            f.write(response.text)
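With the middleware enabled, running this spider and opening baidu.html should show the proxy's IP in the search result rather than your own, which confirms that each request was rewritten on its way out.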