This scrapes movie information from the Douban API and saves it to a file.
Note: the Douban API is no longer publicly accessible.
import json
import urllib.request as urlrequest
id_list=[26387939,11803087,20451290]
with open('movie.txt',"w") as file:
    for i in id_list:
        url_visit='https://api.douban.com/v2/movie/{}'.format(i)
        crawl_content=urlrequest.urlopen(url_visit).read()
        json_content=json.loads(crawl_content.decode('utf-8'))  # the API returns JSON
        rank=json_content['rating']['average']
        file.write("{} {}\n".format(i,rank))  # one "id rating" pair per line
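Because the API is no longer reachable, the loop above fails on the first request. A minimal sketch of the same loop with error handling (an assumption on my part: IDs that cannot be fetched are simply skipped):

import json
import urllib.request as urlrequest
from urllib.error import URLError, HTTPError

id_list=[26387939,11803087,20451290]
with open('movie.txt',"w") as file:
    for i in id_list:
        url_visit='https://api.douban.com/v2/movie/{}'.format(i)
        try:
            crawl_content=urlrequest.urlopen(url_visit).read()
        except (HTTPError, URLError) as e:
            print("skip {}: {}".format(i, e))  # the API is unreachable, so log and move on
            continue
        json_content=json.loads(crawl_content.decode('utf-8'))
        rank=json_content['rating']['average']
        file.write("{} {}\n".format(i,rank))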
This scrapes Douban's web pages directly.
Load urllib and fetch each movie page; id_list holds the Douban movie IDs.
import urllib.request as urlrequest
id_list=[26387939,11803087,20451290]
for i in id_list:
    url_visit='https://movie.douban.com/subject/{}'.format(i)  # page URL
    crawl_content=urlrequest.urlopen(url_visit).read()
    aa=crawl_content.decode('utf-8')
    print(aa)
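Douban may reject requests that use the default Python User-Agent. A minimal sketch (assuming a browser-like User-Agent string is accepted) that sends the header through urllib.request.Request:

import urllib.request as urlrequest

url_visit='https://movie.douban.com/subject/26387939'
req=urlrequest.Request(url_visit, headers={'User-Agent': 'Mozilla/5.0'})  # present a browser-like User-Agent
crawl_content=urlrequest.urlopen(req).read()
print(crawl_content.decode('utf-8'))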
Use BeautifulSoup to parse the HTML of a weather page and scrape the temperature forecast for a city over the next few days.
URL: https://forecast.weather.gov/MapClick.php?lat=37.777120000000025&lon=-122.41963999999996#.XY6v5W5uIuU
pip install beautifulsoup4
pip install requests
Two ways of fetching the page are shown:
one uses requests directly, the other uses urllib.request.
Fetching with requests:
import requests
from bs4 import BeautifulSoup
# fetch the page with requests
html = requests.get('https://forecast.weather.gov/MapClick.php?lat=37.777120000000025&lon=-122.41963999999996')
soup = BeautifulSoup(html.text, 'html.parser')
# parsing a local HTML file instead
soup = BeautifulSoup(open('*******.html'), 'html.parser')
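requests also exposes the HTTP status of the response. A small optional sketch (not part of the original steps, and assuming the page has a <title> tag) that fails early on a bad response before parsing:

import requests
from bs4 import BeautifulSoup

html = requests.get('https://forecast.weather.gov/MapClick.php?lat=37.777120000000025&lon=-122.41963999999996')
html.raise_for_status()  # raise an exception if the server returned an error status
soup = BeautifulSoup(html.text, 'html.parser')
print(soup.title.get_text())  # quick sanity check that the page parsed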
Fetching with urllib.request:
from bs4 import BeautifulSoup
import urllib.request as urlrequest
url='https://forecast.weather.gov/MapClick.php?lat=37.777120000000025&lon=-122.41963999999996'
url_crawl=urlrequest.urlopen(url).read()
print(url_crawl.decode('utf-8'))
url_content=BeautifulSoup(url_crawl,'html.parser')
We want the information inside the element with id='seven-day-forecast-container':
print(url_content.find(id='seven-day-forecast-container'))  # everything inside that element
print(url_content.find(id='seven-day-forecast-container').get_text())  # only the text content
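find() returns None when the id is not present, so calling get_text() directly would raise an error if the page layout changes. A hedged defensive variant of the lookup above (reusing url_content from the previous block):

forecast=url_content.find(id='seven-day-forecast-container')
if forecast is None:
    print('forecast section not found')  # the page layout may have changed
else:
    print(forecast.get_text())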
Extract the date, weather description, and temperature for each period:
from bs4 import BeautifulSoup
import urllib.request as urlrequest
url='https://forecast.weather.gov/MapClick.php?lat=37.777120000000025&lon=-122.41963999999996'
url_crawl=urlrequest.urlopen(url).read()
url_content=BeautifulSoup(url_crawl,'html.parser')
text=url_content.find(id='seven-day-forecast-container')  # locate the forecast section by its id
# collect all dates, weather descriptions, and temperatures inside it
date_list=text.find_all(class_='period-name')
desc_list=text.find_all(class_='short-desc')
temp_list=text.find_all(class_='temp')
# the page lists nine forecast periods in total
for i in range(9):
    date=date_list[i].get_text()  # extract the text content
    desc=desc_list[i].get_text()
    temp=temp_list[i].get_text()
    print("{} {} {}".format(date,desc,temp))