1. The Crawler
# -*- coding:utf-8 -*-
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium import webdriver
from bs4 import BeautifulSoup
from urllib import parse
import time
import pymysql
chrome_options = webdriver.ChromeOptions()
# Run Chrome headless so the crawler works without a visible browser window
chrome_options.add_argument('--headless')
# Note: Selenium 4 renamed the chrome_options keyword argument to options
browser = webdriver.Chrome(options=chrome_options)
wait = WebDriverWait(browser, 10)
def get_url(n, word, pinpai):
    print('Crawling page ' + str(n))
    # The keyword to search for
    keyword = {'keyword': word}
    # JD maps result page n to the URL parameter page=2n-1
    page = '&page=%s' % (2 * n - 1)
    # Brand filter, passed as ev=exbrand_<brand>
    pinpai = '&ev=exbrand_%s' % (pinpai)
    url = 'https://search.jd.com/Search?' + parse.urlencode(keyword) + pinpai + '&enc=utf-8' + page
    print(url)
    return url
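# Illustrative check (not part of the original script; the inputs are
# hypothetical): calling get_url(1, '手机', 'Apple') would print
# https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&ev=exbrand_Apple&enc=utf-8&page=1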
def parse_page(url, pinpai):
    print('Scraping and saving item data...')
    browser.get(url)
    # Scroll down to the bottom in small steps; this triggers the ajax
    # requests that lazy-load the second half of the item list
    for y in range(100):
        js = 'window.scrollBy(0,100)'
        browser.execute_script(js)
        time.sleep(0.1)
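    # Alternative sketch (not in the original): a single jump to the bottom
    # followed by a fixed pause also triggers the lazy load on most pages,
    # at the cost of a less reliable render:
    # browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')
    # time.sleep(2)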
    # Wait until at least one item node is present in the goods list
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_goodsList .gl-item')))
    html = browser.page_source
    soup = BeautifulSoup(html, 'lxml')
    # Every product on the results page is an <li class="gl-item"> node
    goods = soup.find_all('li', class_="gl-item")
    # Walk through each product node and pull out its fields
    for good in goods:
        # data-sku is JD's unique product id
        num = good['data-sku']
        # Inside the price block, <em> typically holds the currency sign
        # and <i> the numeric price
        tag = good.find('div', class_="p-price").strong.em.string
        money = good.find('div', class_="p-price").strong.i.string
        # Some JD listings have no shop name at all, so looking up the
        # store node can fail to find a matching element
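        # Hedged sketch (the original snippet is cut off here; the selector
        # 'p-shop' and the variable name store are assumptions): guard the
        # shop lookup so items without a shop node don't raise AttributeError.
        shop_tag = good.find('div', class_="p-shop")
        store = shop_tag.get_text(strip=True) if shop_tag else ''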