Python crawler: scraping a web novel

The first script pulls the chapter index of a novel from xjwxsw.com with requests and BeautifulSoup, then fetches each chapter and appends its text to a local file. On this site every chapter is split across two pages (xxx.html and xxx_2.html), so both are fetched:
# Import BeautifulSoup for HTML parsing
from bs4 import BeautifulSoup as bf
from fastapi import FastAPI
import time
import random
import requests
import traceback

# FastAPI scaffold; no route is registered yet (see the sketch at the end of this post)
app = FastAPI(
    title='crawler',
    description='regex web: https://regexr-cn.com/  \n  eg : <a href="https://www.zbytb.com/s-zb-.*?</a>  \n eg : <a href="[./].*?</a>',
    version='1.0.0')

# Pool of User-Agent strings; each request picks one at random to look less like a bot
headers = [
    {"User-Agent":"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"},
    {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36"},
    {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0"},
    {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14"},
    {"User-Agent":"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)"},
    {"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11"},
    {"User-Agent":"Opera/9.25 (Windows NT 5.1; U; en)"},
    {"User-Agent":"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)"},
    {"User-Agent":"Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)"},
    {"User-Agent":"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12"},
    {"User-Agent":"Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9"},
    {"User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7"},
    {"User-Agent":"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0"}
]

# Proxy pool, empty for now; getProxy() will raise IndexError until it is filled
proxys = []

def wait():
    # Throttle between requests so the target site is not hammered
    time.sleep(0.2)

def getHeader():
    return random.choice(headers)

def getProxy():
    return random.choice(proxys)

def parseUrl(url):
    # Normalize relative links of the form './chapter.html' (helper, unused below)
    if url.startswith('./'):
        url = url[2:]
    return url

def start():
    try:
        # Fetch the chapter index and collect all chapter links
        list_html = requests.get('https://www.xjwxsw.com/xsmulu/27614204/', headers=getHeader())
        list_html.encoding = list_html.apparent_encoding
        list_obj = bf(list_html.text, 'html.parser')
        atags = list_obj.find_all('div', id='content_1')[0].find_all('a')
        # 'with' guarantees the file is closed even if a request fails mid-crawl
        with open('C:/Users/admin/Desktop/777.txt', 'a', encoding='utf-8') as f:
            for atag in atags:
                title = atag.text
                print(title)
                f.write(title)
                f.write("\n")
                # Each chapter is split across two pages: xxx.html and xxx_2.html
                href1 = 'https://www.xjwxsw.com' + atag.get('href')
                href2 = href1.split('.html')[0] + '_2.html'
                for href in (href1, href2):
                    wait()  # be polite between requests
                    context = requests.get(href, headers=getHeader())
                    context.encoding = context.apparent_encoding
                    context_obj = bf(context.text, 'html.parser')
                    ptags = context_obj.find_all('div', id='booktxt')[0].find_all('p')
                    for ptag in ptags:
                        f.write(ptag.text)
                        f.write("\n")
    except Exception:
        traceback.print_exc()

if __name__ == '__main__':
    start()
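
The scaffold defines a proxys pool and getProxy() but never passes a proxy to requests. A minimal sketch of how it could be wired in, assuming the pool is filled with entries in the proxies-dict format that requests expects (the addresses and the fetch helper are placeholders, not part of the original):

# Hypothetical: fill the pool with working proxies in requests' proxies format
proxys = [
    {"http": "http://127.0.0.1:8080", "https": "http://127.0.0.1:8080"},
    {"http": "http://127.0.0.1:8081", "https": "http://127.0.0.1:8081"},
]

def fetch(url):
    # Combine a random User-Agent with a random proxy; time out on dead proxies
    return requests.get(url, headers=getHeader(), proxies=getProxy(), timeout=10)

Each requests.get(...) call inside start() could then be replaced with fetch(...).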
The second script does the same for a novel on uuks5.com. The imports, User-Agent pool, and helper functions are identical to the first script, so only start() is shown; here the chapter list sits in ul#chapterList, the body text in div#TextContent, and each chapter fits on a single page:
def start():
    try:
        # Fetch the chapter index; on this site the links live in ul#chapterList
        list_html = requests.get('https://www.uuks5.com/book/766295/', headers=getHeader())
        list_html.encoding = list_html.apparent_encoding
        list_obj = bf(list_html.text, 'html.parser')
        atags = list_obj.find_all('ul', id='chapterList')[0].find_all('a')
        # 'with' guarantees the file is closed even if a request fails mid-crawl
        with open('C:/Users/admin/Desktop/123.txt', 'a', encoding='utf-8') as f:
            for atag in atags:
                title = atag.text
                print(title)
                f.write(title)
                f.write("\n")
                wait()  # be polite between requests
                # A single fetch per chapter is enough here
                href = 'https://www.uuks5.com/' + atag.get('href')
                context = requests.get(href, headers=getHeader())
                context.encoding = context.apparent_encoding
                context_obj = bf(context.text, 'html.parser')
                ptags = context_obj.find_all('div', id='TextContent')[0].find_all('p')
                for ptag in ptags:
                    f.write(ptag.text)
                    f.write("\n")
    except Exception:
        traceback.print_exc()

if __name__ == '__main__':
    start()
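
The first script also builds a FastAPI app but never registers a route or starts a server. A minimal sketch of how the crawler could be exposed over HTTP, building on that scaffold; the /crawl path, the BackgroundTasks usage, and the uvicorn call are assumptions, not part of the original:

import uvicorn
from fastapi import BackgroundTasks

@app.post('/crawl')
def crawl(background_tasks: BackgroundTasks):
    # Run the crawl in the background so the HTTP request returns immediately
    background_tasks.add_task(start)
    return {"status": "started"}

if __name__ == '__main__':
    uvicorn.run(app, host='127.0.0.1', port=8000)

This would replace the direct start() call under if __name__ == '__main__'.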