"""Download hero portrait images from the Honor of Kings hero-list page.

Fetches the hero list HTML, parses each <li> entry for the portrait
image URL and hero name, and saves each image as imgs/<name>.jpg,
pausing one second between downloads.
"""
import os  # file-system helpers (directory creation, path joining)
import time  # polite delay between image downloads

import bs4  # BeautifulSoup: HTML parsing
import requests  # HTTP client
from fake_useragent import UserAgent  # random User-Agent generation

# Target page: the official hero list.
url = "https://pvp.qq.com/web201605/herolist.shtml"

# Random User-Agent so requests look like a real browser.
us = UserAgent()

# Browser-like headers to reduce the chance of being blocked.
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "cache-control": "max-age=0",
    "priority": "u=0, i",
    "sec-ch-ua": "\"Chromium\";v=\"128\", \"Not;A=Brand\";v=\"24\", \"Microsoft Edge\";v=\"128\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": us.random,  # randomized User-Agent per run
}

# Fetch the hero-list page. Add a timeout so a stalled connection
# cannot hang the script forever, and fail fast on HTTP errors
# instead of silently parsing an error page.
res = requests.get(url=url, headers=headers, timeout=10)
res.raise_for_status()

# The page is GBK-encoded; decode explicitly rather than trusting
# requests' guess. errors="replace" keeps one stray byte from
# aborting the whole run.
html_text = res.content.decode("gbk", errors="replace")

# Parse the HTML and locate the <ul class="herolist clearfix"> that
# holds one <li> per hero.
soup = bs4.BeautifulSoup(html_text, "html.parser")
ul = soup.find("ul", {"class": "herolist clearfix"})
if ul is None:
    # The layout changed (or the list is rendered by JS) — a clear
    # error beats an AttributeError on the next line.
    raise RuntimeError("hero list <ul class='herolist clearfix'> not found")
lis = ul.find_all("li")

# Directory the images are written to; created if missing.
save_dir = "imgs"
os.makedirs(save_dir, exist_ok=True)

for li in lis:
    # Guard against malformed entries: skip instead of crashing with
    # AttributeError when <a> or <img> is absent.
    img = li.a.img if li.a is not None else None
    if img is None:
        continue
    src = img.get("src")   # portrait image URL (protocol-relative)
    name = img.get("alt")  # hero name, used as the file name
    if not src or not name:
        continue
    # The page uses protocol-relative URLs ("//game.gtimg.cn/...");
    # prepend the scheme.
    src_url = "https:" + src
    # Download the image with the same browser-like headers.
    img_res = requests.get(src_url, headers=headers, timeout=10)
    img_res.raise_for_status()
    # Save as imgs/<hero name>.jpg.
    base = os.path.join(save_dir, f"{name}.jpg")
    with open(base, "wb") as f:
        f.write(img_res.content)
    print(f"{name} 皮肤下载完成,保存路径为 {base}")
    # Throttle to one request per second to avoid hammering the server.
    time.sleep(1)