Revisiting the Scrapy Framework

Using the Scrapy framework to fetch "simp quotes" (舔狗语录), run them through Baidu Translate, and write the results to a txt file. The spider pulls a random quote from api.oick.cn/api/dog, POSTs it to the fanyi.baidu.com/ait/text/translate endpoint, parses the streamed translation events, and an item pipeline appends each original/translation pair (plus a timestamp) to result.txt.

Spider script

python
import scrapy
import json
from post.items import PostItem


class BaidufanyiSpider(scrapy.Spider):
    name = "baidufanyi"
    allowed_domains = ["fanyi.baidu.com", "api.oick.cn"]
    start_urls = ["https://api.oick.cn/api/dog"]
    headers = {
        "Content-Type": "application/json",
    }

    def parse_post(self, response):
        """Parse the SSE-style stream returned by Baidu's translate endpoint."""
        word = response.meta["word"]
        for line in response.body.decode("utf-8").split("\n"):
            # Each useful line looks like 'data: {...}'; strip the prefix and parse the JSON payload.
            # removeprefix (Python 3.9+) is used instead of lstrip, which strips a character set, not a prefix.
            try:
                new_data = json.loads(line.removeprefix("data: "))
                event = new_data["data"]["event"]
                if event == "Translating":
                    pi = PostItem()
                    pi["org"] = new_data["data"]["list"][0]["src"]
                    pi["res"] = new_data["data"]["list"][0]["dst"]
                    # Only yield the event whose source text matches the quote we submitted.
                    if word == pi["org"]:
                        yield pi

            except (json.JSONDecodeError, KeyError, IndexError):
                # Skip keep-alive lines and events that carry no translation payload.
                continue

    def parse(self, response):
        # The first response from start_urls already carries a quote, so hand it straight to parse_detail.
        yield from self.parse_detail(response)

    def parse_detail(self, response):
        # The quote API returns a JSON-encoded string, so strip the surrounding quotes.
        word = response.text.strip('"')
        url = "https://fanyi.baidu.com/ait/text/translate"
        data = {
            "query": word,
            "from": "zh",
            "to": "en",
            "needPhonetic": True,
        }
        # The translate endpoint expects a JSON body, so POST the payload as a raw request.
        yield scrapy.Request(url=url, method="POST", headers=self.headers, body=json.dumps(data),
                             callback=self.parse_post, meta={"word": word})

        # Queue the next quote; dont_filter=True is required because the URL never changes.
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse_detail, dont_filter=True)
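
To try the spider without the scrapy command line, a small launcher script can be used. This is only a sketch: it assumes the project is named post (as in settings.py below), that the spider module is post/spiders/baidufanyi.py, and that the script sits next to scrapy.cfg in the project root.

python
# run.py - minimal launcher sketch (assumed filename), placed next to scrapy.cfg
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from post.spiders.baidufanyi import BaidufanyiSpider  # assumed module path


if __name__ == "__main__":
    process = CrawlerProcess(get_project_settings())  # loads the project's settings.py
    process.crawl(BaidufanyiSpider)
    process.start()  # blocks until the crawl is stopped, e.g. with Ctrl+C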

items.py

python
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class PostItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    org = scrapy.Field()
    res = scrapy.Field()
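
PostItem only needs two fields: org holds the original quote and res holds its Baidu translation. Both are filled in parse_post and consumed by the pipeline below.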

pipelines.py

python
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from datetime import datetime



class PostPipeline:
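    """Append each translated pair to result.txt as a tab-separated line: original, translation, timestamp."""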

    def open_spider(self, spider):
        self.f = open('result.txt', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        new_item = f'{item["org"]}\t{item["res"]}\t{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
        self.f.write(new_item)
        self.f.flush()
        return item

    def close_spider(self, spider):
        self.f.close()
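
The pipeline opens result.txt in append mode, so repeated runs keep adding to the same file, and the flush() after every write makes new lines visible while the crawl is still running.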

settings.py

python
# Scrapy settings for post project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "post"

SPIDER_MODULES = ["post.spiders"]
NEWSPIDER_MODULE = "post.spiders"

ADDONS = {}

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "post (+http://www.yourdomain.com)"

# Obey robots.txt rules
# ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
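# 60 / 120 = 0.5 s between requests, i.e. at most ~120 requests per minute to the same site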
DOWNLOAD_DELAY = 60 / 120
RANDOMIZE_DOWNLOAD_DELAY = True
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    "post.middlewares.PostSpiderMiddleware": 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    "post.middlewares.PostDownloaderMiddleware": 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "post.pipelines.PostPipeline": 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
FEED_EXPORT_ENCODING = "utf-8"
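
With all of the pieces above in place, running scrapy crawl baidufanyi from the project root starts the loop: the spider keeps fetching quotes and translations until it is stopped (e.g. with Ctrl+C), and the translated pairs accumulate in result.txt.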