案例需求:
1.使用selenium自动化爬虫爬取哔哩哔哩排行榜中舞蹈类的数据(包括视频标题、up主、播放量和评论量)
2.利用bs4进行数据解析和提取
3.将爬取的数据保存在本地json文件中
4.保存在excel文件中
分析:
1.请求url地址:https://www.bilibili.com/v/popular/rank/dance
2.加载等待事件,否则获取数据不充分
wait = WebDriverWait(self.browsers, 280) wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'rank-item'))) time.sleep(5)
3.获取响应内容
last_height = self.browsers.execute_script("return document.body.scrollHeight") while True: self.browsers.execute_script('window.scrollTo(0, document.body.scrollHeight);') time.sleep(5) data = self.browsers.page_source # 获取网页源码 self.parse_data(data=data) new_height = self.browsers.execute_script("return document.body.scrollHeight") if new_height == last_height: break last_height = new_height
4.使用bs4解析数据
soup = BeautifulSoup(data, 'lxml') titles = soup.select('.info .title') # 标题 up_names = soup.select('.info .up-name') # up主 # :nth-of-type(2) 用于选择指定类型的第二个元素 play_counts = soup.select('.info .detail-state .data-box:nth-of-type(1)') # 播放量 comment_counts = soup.select('.info .detail-state .data-box:nth-of-type(2)') # 评论量 rank_data = {} print(len(titles)) for title, name, play_count, comment_count in zip(titles, up_names, play_counts, comment_counts): t = title.get_text().strip() n = name.get_text().strip() p = play_count.get_text().strip() c = comment_count.get_text().strip() print('标题:', t) print('up主:', n) print('播放量:', p) print('评论量:', c) print('==========================')
5.保存在本地json文件中
with open('rank_data.json', 'a', encoding='utf-8') as f: f.write(json.dumps(rank_data, ensure_ascii=False) + '\n')
6.保存在excel文件中
wb = workbook.Workbook()  # 创建一个Excel对象 就相当于是要生成一个excel 程序 ws = wb.active  # 激活当前表 ws.append(['标题','up主','播放量','评论量'])
#保存数据 def save_data(self,title,name,play,comment): ws.append([title,name,play,comment]) # 保存为Excel数据 wb.save('哔哩哔哩排行榜数据.xlsx')
案例代码:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from openpyxl import workbook #第三方模块 需要安装
import time
import json
# Module-level Excel state: one in-memory workbook whose active sheet
# receives a header row now and one data row per scraped video later.
_HEADER = ['标题', 'up主', '播放量', '评论量']
wb = workbook.Workbook()  # new in-memory workbook ("Excel program")
ws = wb.active            # the default (active) worksheet
ws.append(_HEADER)        # header row written once at import time
class Spider:
    """Scrape the bilibili dance popularity ranking with Selenium.

    Workflow: open the ranking page (``get_bili``), scroll until the page
    stops growing and parse the final HTML (``get_data`` ->
    ``parse_data``), persisting every row to ``rank_data.json`` (one JSON
    object per line) and to an Excel workbook (``save_data``).
    """

    def __init__(self):
        self.url = 'https://www.bilibili.com/v/popular/rank/dance'
        self.options = webdriver.ChromeOptions()
        # Hide the "Chrome is being controlled by automated software"
        # infobar so the page behaves like a normal browsing session.
        self.options.add_experimental_option('excludeSwitches', ['enable-automation'])
        self.browsers = webdriver.Chrome(options=self.options)

    # 访问哔哩哔哩排行榜
    def get_bili(self):
        """Open the ranking page and block until rank items are present."""
        self.browsers.get(self.url)
        wait = WebDriverWait(self.browsers, 280)
        wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'rank-item')))
        time.sleep(5)  # extra settle time for lazily loaded content

    # 获取响应内容
    def get_data(self):
        """Scroll to the bottom until the page height stops growing, then parse.

        BUG FIX: the original called ``parse_data`` on *every* scroll
        iteration, so any page that grew while scrolling had all of its
        rows written to Excel/JSON multiple times.  Parsing once, after
        the height stabilizes, writes each row exactly once.
        """
        last_height = self.browsers.execute_script("return document.body.scrollHeight")
        while True:
            self.browsers.execute_script('window.scrollTo(0, document.body.scrollHeight);')
            time.sleep(5)  # let newly revealed content load
            new_height = self.browsers.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                break
            last_height = new_height
        self.parse_data(data=self.browsers.page_source)

    # 解析信息
    def parse_data(self, data):
        """Extract title / up-name / play / comment fields and persist them.

        :param data: full HTML page source of the ranking page.
        """
        soup = BeautifulSoup(data, 'lxml')
        titles = soup.select('.info .title')  # 标题
        up_names = soup.select('.info .up-name')  # up主
        # :nth-of-type(n) selects the n-th .data-box within .detail-state.
        play_counts = soup.select('.info .detail-state .data-box:nth-of-type(1)')  # 播放量
        comment_counts = soup.select('.info .detail-state .data-box:nth-of-type(2)')  # 评论量
        print(len(titles))
        with open('rank_data.json', 'a', encoding='utf-8') as f:
            for title, name, play_count, comment_count in zip(titles, up_names, play_counts, comment_counts):
                t = title.get_text().strip()
                n = name.get_text().strip()
                p = play_count.get_text().strip()
                c = comment_count.get_text().strip()
                print('标题:', t)
                print('up主:', n)
                print('播放量:', p)
                print('评论量:', c)
                print('==========================')
                self.save_data(t, n, p, c)
                # BUG FIX: the original reused one dict and dumped it only
                # after the loop, so just the LAST video reached the JSON
                # file.  Write one JSON line per video instead.
                row = {'标题': t, 'up主': n, '播放量': p, '评论量': c}
                f.write(json.dumps(row, ensure_ascii=False) + '\n')

    # 保存数据
    def save_data(self, title, name, play, comment):
        """Append one row to the worksheet and save the workbook.

        Saving on every row is slow but keeps the .xlsx usable even if
        the scrape aborts midway.  (Also fixes the ``paly`` typo.)
        """
        ws.append([title, name, play, comment])
        # 保存为Excel数据
        wb.save('哔哩哔哩排行榜数据.xlsx')
if __name__ == '__main__':
    # Script entry point: launch the browser, load the ranking page,
    # then scroll/parse/persist the data.
    spider = Spider()
    spider.get_bili()
    spider.get_data()
运行结果: