1. Saving the scraped data to a txt file
You first need to install the requests and beautifulsoup4 packages (pip install requests beautifulsoup4); the BeautifulSoup class is then imported from the bs4 module.
python
import requests
from bs4 import BeautifulSoup

# URL of the news site to scrape
url = 'https://www.chinadaily.com.cn/'  # China Daily website

# Send a request to fetch the page content
response = requests.get(url)

# Check whether the request succeeded
if response.status_code == 200:
    print('Successfully retrieved the website.')
    # Parse the page content
    soup = BeautifulSoup(response.text, 'html.parser')
    # Open a file to write the scraped data to
    with open('news_data.txt', 'w', encoding='utf-8') as f:
        # Pick suitable news tags on the site; <a> tags are used here because they carry the links
        for item in soup.find_all('a', href=True):
            title = item.get_text().strip()  # the link text (title)
            link = item['href']              # the link URL
            # Skip entries with an empty title or a non-absolute link
            if title and 'http' in link:
                # Write the title and link to the file
                f.write(f'Title: {title}\nLink: {link}\n\n')
    print("Data saved to 'news_data.txt'.")
else:
    print(f'Failed to retrieve the website. Status code: {response.status_code}')
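The condition 'http' in link keeps only absolute links, so relative hrefs such as /a/xxx.html are silently dropped. If you also want those, a minimal sketch of the same loop that resolves relative links against the page URL with urllib.parse.urljoin (the output file name and the startswith filter are illustrative assumptions, not part of the original script):
python
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

url = 'https://www.chinadaily.com.cn/'
response = requests.get(url)

if response.status_code == 200:
    soup = BeautifulSoup(response.text, 'html.parser')
    with open('news_data_full.txt', 'w', encoding='utf-8') as f:
        for item in soup.find_all('a', href=True):
            title = item.get_text().strip()
            # urljoin turns relative hrefs (e.g. '/a/xxx.html') into absolute URLs
            link = urljoin(url, item['href'])
            if title and link.startswith('http'):
                f.write(f'Title: {title}\nLink: {link}\n\n')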
2. Saving the scraped data to a CSV file
python
import requests
from bs4 import BeautifulSoup
import csv

# URL of the news site to scrape
url = 'https://www.chinadaily.com.cn/'  # example site

# Send a request to fetch the page content
response = requests.get(url)
# Set the encoding to utf-8 explicitly (if the page uses utf-8 encoding)
response.encoding = 'utf-8'  # make sure the correct encoding is used

# Check whether the request succeeded
if response.status_code == 200:
    print('Successfully retrieved the website.')
    # Parse the page content
    soup = BeautifulSoup(response.text, 'html.parser')
    # Open a CSV file to write the scraped data to
    with open('news_data.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['Title', 'Link'])  # write the header row
        # Find all <a> tags that carry an href attribute
        for item in soup.find_all('a', href=True):
            title = item.get_text().strip()  # the link text (title)
            link = item['href']              # the link URL
            # Skip entries with an empty title or link
            if title and link:
                writer.writerow([title, link])
    print("Data saved to 'news_data.csv'.")
else:
    print(f'Failed to retrieve the website. Status code: {response.status_code}')
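To spot-check the result, here is a minimal sketch (an addition, not part of the original tutorial) that reads news_data.csv back with the standard csv module and prints the first few rows:
python
import csv

# Read the CSV written by the script above and print the first five data rows
with open('news_data.csv', 'r', encoding='utf-8') as f:
    reader = csv.reader(f)
    header = next(reader)  # skip the 'Title', 'Link' header row
    for i, row in enumerate(reader):
        if i >= 5:
            break
        print(row[0], '->', row[1])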