"Simp quotes" (舔狗语录) + Baidu Translate with the Scrapy framework, writing the results to a txt file
The spider script. It pulls a random quote from api.oick.cn/api/dog, POSTs it to Baidu Translate's streaming endpoint, extracts the finished translation from the event stream, and then queues the next quote so the crawl loops indefinitely:
```python
import json

import scrapy

from post.items import PostItem


class BaidufanyiSpider(scrapy.Spider):
    name = "baidufanyi"
    allowed_domains = ["fanyi.baidu.com", "api.oick.cn"]
    start_urls = ["https://api.oick.cn/api/dog"]
    headers = {
        "Content-Type": "application/json",
    }

    def parse(self, response):
        # The first response already holds a quote from the dog-quote API,
        # so hand it straight to parse_detail. (Re-requesting the same URL
        # here without dont_filter would be dropped by the dupe filter.)
        yield from self.parse_detail(response)

    def parse_detail(self, response):
        # The quote API returns a JSON-encoded string, so strip the quotes.
        word = response.text.strip('"')
        url = "https://fanyi.baidu.com/ait/text/translate"
        data = {
            "query": word,
            "from": "zh",
            "to": "en",
            "needPhonetic": True,
        }
        # POST the quote to Baidu Translate as a JSON body.
        yield scrapy.Request(url=url, method="POST", headers=self.headers,
                             body=json.dumps(data), callback=self.parse_post,
                             meta={"word": word})
        # Queue the next quote; dont_filter lets us hit the same URL again.
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse_detail,
                             dont_filter=True)

    def parse_post(self, response):
        word = response.meta["word"]
        # The endpoint answers with a server-sent-event stream; every payload
        # line is prefixed with "data: " followed by a JSON document.
        for line in response.text.split("\n"):
            if not line.startswith("data: "):
                continue
            try:
                new_data = json.loads(line.removeprefix("data: "))  # Python 3.9+
                if new_data["data"]["event"] != "Translating":
                    continue
                seg = new_data["data"]["list"][0]
                # Keep only the event whose source text is the full quote.
                if seg["src"] == word:
                    pi = PostItem()
                    pi["org"] = seg["src"]
                    pi["res"] = seg["dst"]
                    yield pi
            except (json.JSONDecodeError, KeyError, IndexError):
                # Skip keep-alive lines and events without a translation list.
                continue
```
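For context, the translate endpoint answers as an SSE stream rather than a single JSON document. Based on the keys parse_post reads, each useful line has roughly the following shape (a reconstructed sketch with invented values, not a captured response):

```python
import json

# Reconstructed sample of one SSE payload line; the field values are
# invented, but the key layout matches what parse_post expects.
line = ('data: {"data": {"event": "Translating", '
        '"list": [{"src": "舔狗日记", "dst": "Simp diary"}]}}')

payload = json.loads(line.removeprefix("data: "))
seg = payload["data"]["list"][0]
print(payload["data"]["event"], seg["src"], "->", seg["dst"])
# Translating 舔狗日记 -> Simp diary
```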
The item definition (items.py), with one field for the original quote and one for its translation:

```python
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class PostItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    org = scrapy.Field()  # original Chinese quote
    res = scrapy.Field()  # English translation
```
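Items behave like dicts, so a quick sanity check is easy (the values here are made up, just to show field access):

```python
from post.items import PostItem

pi = PostItem(org="舔狗日记", res="Simp diary")
print(pi["org"], pi["res"])  # dict-style access; unknown keys raise KeyError
print(dict(pi))              # {'org': '舔狗日记', 'res': 'Simp diary'}
```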
The pipeline (pipelines.py), which appends each item to result.txt as a tab-separated line:

```python
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from datetime import datetime
class PostPipeline:
    def open_spider(self, spider):
        # Open in append mode so results accumulate across runs.
        self.f = open('result.txt', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        # One tab-separated line per item: original, translation, timestamp.
        new_item = f'{item["org"]}\t{item["res"]}\t{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
        self.f.write(new_item)
        # Flush immediately so the file can be tailed while the spider runs.
        self.f.flush()
        return item

    def close_spider(self, spider):
        self.f.close()
```
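The pipeline has no Scrapy-specific dependencies beyond the item's dict-style access, so it can be exercised on its own; a minimal sketch with hand-made values:

```python
from post.pipelines import PostPipeline

# Plain dicts work here because process_item only indexes item["org"]/["res"].
p = PostPipeline()
p.open_spider(None)
p.process_item({"org": "舔狗日记", "res": "Simp diary"}, None)
p.close_spider(None)
# result.txt now ends with a tab-separated line such as:
# 舔狗日记	Simp diary	2024-01-01 12:00:00
```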
The project settings (settings.py); compared with the generated template, the notable changes are the download delay and the pipeline registration:

```python
# Scrapy settings for post project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "post"
SPIDER_MODULES = ["post.spiders"]
NEWSPIDER_MODULE = "post.spiders"
ADDONS = {}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "post (+http://www.yourdomain.com)"
# Obey robots.txt rules
# ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# Throttle to about 120 requests per minute (0.5 s between requests).
DOWNLOAD_DELAY = 60 / 120
RANDOMIZE_DOWNLOAD_DELAY = True
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "Accept-Language": "en",
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# "post.middlewares.PostSpiderMiddleware": 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# "post.middlewares.PostDownloaderMiddleware": 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# "scrapy.extensions.telnet.TelnetConsole": None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
"post.pipelines.PostPipeline": 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
FEED_EXPORT_ENCODING = "utf-8"
```
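To run the project, `scrapy crawl baidufanyi` from the project root is enough; equivalently, a minimal runner script, assuming the standard project layout next to scrapy.cfg:

```python
# run.py (run with: python run.py)
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# get_project_settings() picks up settings.py via scrapy.cfg.
process = CrawlerProcess(get_project_settings())
process.crawl("baidufanyi")
process.start()  # blocks until the crawl is stopped (e.g. Ctrl+C)
```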