Scraping Grade 12 math exam papers from 第一试卷网 (shijuan1.com) and downloading them locally

Python:
import requests
import re
import os

# Create the output folder for the downloaded papers if it does not exist yet
filename = '试卷\\'
if not os.path.exists(filename):
    os.mkdir(filename)

url = 'https://www.shijuan1.com/a/sjsxg3/list_727_1.html'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}

# Fetch the list page and let requests guess the correct encoding
response = requests.get(url=url, headers=headers)
response.encoding = response.apparent_encoding

# Extract the relative detail-page links and the paper titles from the list page
href_list = re.findall("<td width='52%' height='23'><a href=\"(.*?)\" class=\"title\" target='_blank'>", response.text)
title_list = re.findall("class=\"title\" target='_blank'>(.*?)</a>", response.text)

# Example detail page: https://www.shijuan1.com/a/sjywg3/243565.html
for title, href in zip(title_list, href_list):
    href = 'https://www.shijuan1.com' + href
    data_html = requests.get(url=href, headers=headers)
    data_html.encoding = data_html.apparent_encoding
    # The detail page contains a "本地下载" (local download) link pointing to the archive
    data_url = 'https://www.shijuan1.com' + re.findall('<li><a href="(.*?)" target="_blank">本地下载</a></li>', data_html.text)[0]
    doc = requests.get(url=data_url, headers=headers).content
    with open('试卷\\' + title + '.rar', mode='wb') as f:
        f.write(doc)
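
One caveat: the scraped titles may contain characters that Windows forbids in file names (such as / \ : * ? " < > |), in which case the open() call above raises an error. A minimal sanitizing helper (a sketch only; safe_filename is not part of the script above) could look like this:

Python:
import re

def safe_filename(title):
    # Hypothetical helper: replace characters invalid in Windows file names with "_"
    return re.sub(r'[\\/:*?"<>|]', '_', title).strip()

# Usage in the loop above:
# with open('试卷\\' + safe_filename(title) + '.rar', mode='wb') as f: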

Results:

Improved code:

Python:
import requests
import os
import re


def get_html_data(url):
    # Fetch a page (or file) and fix the encoding guessed by requests
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    response = requests.get(url=url, headers=headers)
    response.encoding = response.apparent_encoding
    return response


def get_analyse_html(response):
    # Parse the list page: extract the detail-page links and the paper titles
    href_list = re.findall("<td width='52%' height='23'><a href=\"(.*?)\" class=\"title\" target='_blank'>",
                           response.text)
    title_list = re.findall("class=\"title\" target='_blank'>(.*?)</a>", response.text)
    return title_list, href_list


def save(title_list, doc_list):
    # Create the output folder if needed, then write each archive to disk
    filename = '试卷\\'
    if not os.path.exists(filename):
        os.mkdir(filename)
    for title, doc in zip(title_list, doc_list):
        with open('试卷\\' + title + '.rar', mode='wb') as f:
            f.write(doc)
            print(f'{title}已经下载完成')


def get_doc(href_list):
    # Visit each detail page, find its "本地下载" link, and download the archive
    doc_list = []
    for href in href_list:
        href = 'https://www.shijuan1.com' + href
        doc_html = get_html_data(href)
        data_url = 'https://www.shijuan1.com' + re.findall('<li><a href="(.*?)" target="_blank">本地下载</a></li>', doc_html.text)[0]
        doc = get_html_data(data_url).content
        doc_list.append(doc)
    return doc_list


if __name__ == '__main__':
    url = 'https://www.shijuan1.com/a/sjsxg3/list_727_1.html'
    response = get_html_data(url)
    title_list, href_list = get_analyse_html(response)
    doc_list = get_doc(href_list)
    save(title_list, doc_list)

Taking it a step further and rewriting it as a class:

Python:
import requests
import os
import re


# The same functions as above, wrapped in a class; the caller still has to
# pass each method's return value into the next one by hand.
class save_doc():

    def get_html_data(self, href):
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
        }
        response = requests.get(url=href, headers=headers)
        response.encoding = response.apparent_encoding
        return response

    def get_analyse_html(self, response):
        href_list = re.findall("<td width='52%' height='23'><a href=\"(.*?)\" class=\"title\" target='_blank'>",
                               response.text)
        title_list = re.findall("class=\"title\" target='_blank'>(.*?)</a>", response.text)
        return title_list, href_list

    def save(self, title_list, doc_list):
        filename = '试卷\\'
        if not os.path.exists(filename):
            os.mkdir(filename)
        for title, doc in zip(title_list, doc_list):
            with open('试卷\\' + title + '.rar', mode='wb') as f:
                f.write(doc)
                print(f'{title}已经下载完成')

    def get_doc(self, href_list):
        doc_list = []
        for href in href_list:
            href = 'https://www.shijuan1.com' + href
            doc_html = self.get_html_data(href)
            data_url = 'https://www.shijuan1.com' + re.findall('<li><a href="(.*?)" target="_blank">本地下载</a></li>', doc_html.text)[0]
            doc = self.get_html_data(data_url).content
            doc_list.append(doc)
        return doc_list


save = save_doc()
response = save.get_html_data('https://www.shijuan1.com/a/sjsxg3/list_727_1.html')
title_list, href_list = save.get_analyse_html(response)
doc_list = save.get_doc(href_list)
save.save(title_list, doc_list)

I'm still not very comfortable with classes. How should I write it so that the value returned by one method of the class can be fed directly into the other methods? What I want is a class that takes a URL and downloads the required data straight away, so that the final code is simply

Python:
save = save_doc("https://www.shijuan1.com/a/sjsxg3/list_727_1.html")

without all the complicated passing of values back and forth as above. How can this be done?
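
One possible way (a minimal sketch, assuming the same page structure, regexes and URLs as above): keep the intermediate results on self as instance attributes instead of returning them and passing them back in, and let __init__ accept the list-page URL and kick off the whole pipeline. Each method then reads whatever the previous step stored on self:

Python:
import requests
import os
import re


class save_doc():
    """Takes the list-page URL and downloads every paper; state lives on self."""

    def __init__(self, url):
        self.base_url = 'https://www.shijuan1.com'
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
        }
        self.url = url
        self.title_list = []
        self.href_list = []
        # Run the steps in order; they communicate through instance attributes
        self.get_analyse_html()
        self.save()

    def get_html_data(self, href):
        # Fetch a page (or file) with the shared headers and fix the encoding
        response = requests.get(url=href, headers=self.headers)
        response.encoding = response.apparent_encoding
        return response

    def get_analyse_html(self):
        # Parse the list page and store the titles and detail links on self
        response = self.get_html_data(self.url)
        self.href_list = re.findall(
            "<td width='52%' height='23'><a href=\"(.*?)\" class=\"title\" target='_blank'>", response.text)
        self.title_list = re.findall("class=\"title\" target='_blank'>(.*?)</a>", response.text)

    def save(self):
        # Download each paper via its "本地下载" link and write it to disk
        if not os.path.exists('试卷'):
            os.mkdir('试卷')
        for title, href in zip(self.title_list, self.href_list):
            doc_html = self.get_html_data(self.base_url + href)
            data_url = self.base_url + re.findall(
                '<li><a href="(.*?)" target="_blank">本地下载</a></li>', doc_html.text)[0]
            doc = self.get_html_data(data_url).content
            with open('试卷\\' + title + '.rar', mode='wb') as f:
                f.write(doc)
                print(f'{title}已经下载完成')


# A single line now does everything:
save = save_doc("https://www.shijuan1.com/a/sjsxg3/list_727_1.html")

Whether the downloading should happen inside __init__ is a matter of taste: many people keep __init__ for setup only and expose an explicit run() or download() method, which makes the class easier to test. The essential idea either way is that self carries the data from one method to the next, so nothing has to be returned and passed back in by the caller.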
