# Scrape news listings from chnmuseum.cn and save the first 100 rows to CSV.
import csv

import requests
from bs4 import BeautifulSoup
# Starting URL: page 1 of the news listing on chnmuseum.cn.
url = 'https://www.chnmuseum.cn/zx/xingnew/index_1.shtml'
# Accumulates (index, date, title, href) tuples scraped from every page.
all_data = []
page = 1  # current page number, used to build each subsequent page URL
global_index = 1  # running row number across all pages, starting at 1
# Walk the paginated news index, collecting (index, date, title, href)
# rows into all_data until at least 100 rows have been gathered.
while True:
    # Fetch each page exactly once and reuse the response
    # (the original issued two identical GET requests per page).
    response = requests.get(url)
    if response.status_code == 200:
        print(f"第 {page} 页,请求成功")
    else:
        # A non-200 status means the page run has ended (or the request
        # failed); stop instead of parsing an error page and looping forever.
        print(f"第 {page} 页,请求失败")
        break
    # 'lxml' is a fast HTML/XML parser backend for BeautifulSoup.
    soup = BeautifulSoup(response.text, 'lxml')
    for item in soup.select('li'):
        a = item.select_one('a')
        span = item.select_one('span')
        # Only <li> entries carrying both a link and a date are news rows.
        if span and a:
            my_date = span.get_text()
            my_title = a.get_text()
            # Hrefs look like './xxxx/xxxx.shtml'; drop the leading './'.
            my_href = a.get('href')[2:]
            print(global_index, my_title, my_date, my_href)
            all_data.append((global_index, my_date, my_title, my_href))
            global_index += 1
    # Stop once 100 rows are collected; trim any overshoot from the last
    # page so exactly 100 rows are kept (the original could exceed 100).
    if len(all_data) >= 100:
        all_data = all_data[:100]
        break
    # Advance to the next page of the listing.
    page += 1
    url = f'https://www.chnmuseum.cn/zx/xingnew/index_{page}.shtml'
# Save the scraped rows to a CSV file. The csv module handles quoting, so
# titles that themselves contain commas no longer corrupt the columns (the
# original hand-formatted each line). newline='' is required by the csv
# module to prevent blank lines between rows on Windows.
with open("数据保存.csv", 'w', encoding='utf-8', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['序号', '时间', '标题', '网址'])
    writer.writerows(all_data)
# The output is as follows: