Scrapy spider: concurrently crawl the Weibo hot-search ranking, follow each entry into its detail page to grab the first post, and save the results to a local text file (or to Excel)

If you want to save the results to an Excel file instead, have a look at my other spider write-up:

Using the Scrapy framework with concurrent requests to crawl Beike (贝壳) data into an Excel file, including both paginated list data and detail-page data; beginner-friendly. For learning and reference only, don't misuse it (CSDN blog)

Final data preview

QuotesSpider: the spider code

python
import scrapy
import re

from weibo_top.items import WeiboTopItem

class QuotesSpider(scrapy.Spider):
    name = "weibo_top"
    allowed_domains = ['s.weibo.com']
    def start_requests(self):
        yield scrapy.Request(url="https://s.weibo.com/top/summary?cate=realtimehot")

    def parse(self, response, **kwargs):
        trs = response.css('#pl_top_realtimehot > table > tbody > tr')
        count = 0
        for tr in trs:
            if count >= 30:  # only keep the top 30 hot-search entries
                break
            item = WeiboTopItem()
            title = tr.css('.td-02 a::text').get()
            href = tr.css('.td-02 a::attr(href)').get()  # may be None for ad rows
            item['title'] = title
            if href:
                # urljoin avoids both the TypeError on a missing href and the
                # double slash that plain string concatenation would produce
                item['link'] = response.urljoin(href)
                count += 1
                yield scrapy.Request(url=item['link'], callback=self.parse_detail, meta={'item': item})
            else:
                item['link'] = ''
                yield item

    def parse_detail(self, response, **kwargs):
        item = response.meta['item']
        list_items = response.css('div.card-wrap[action-type="feed_list_item"]')
        for li in list_items[:1]:  # only the first post on the detail page
            content = li.xpath('.//p[@class="txt"]/text()').getall()
            # keep only CJK characters, ASCII letters/digits and the 【】, marks
            processed_content = [re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9【】,]', '', text) for text in content]
            processed_content = [text.strip() for text in processed_content if text.strip()]
            processed_content = ','.join(processed_content).replace('【,', '【')
            item['desc'] = processed_content
            yield item
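The cleanup regex above keeps only CJK characters, ASCII letters and digits, plus the 【】 and , marks, then glues the surviving fragments together. A quick standalone check of that behaviour; the post strings below are made up for illustration:

python
import re

raw = ['【example topic】 this is a sample post http://t.cn/xxxx ', '  repost reason: sample  ']
cleaned = [re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9【】,]', '', text) for text in raw]
cleaned = [text.strip() for text in cleaned if text.strip()]
print(','.join(cleaned).replace('【,', '【'))
# 【exampletopic】thisisasampleposthttptcnxxxx,repostreasonsample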

items: defining the data structure

python
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class WeiboTopItem(scrapy.Item):
    title = scrapy.Field()  # hot-search title
    link = scrapy.Field()   # detail-page URL
    desc = scrapy.Field()   # text of the first post
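Scrapy items behave like dicts, so the fields are easy to sanity-check in a shell; the values here are made up:

python
from weibo_top.items import WeiboTopItem

item = WeiboTopItem(title='example topic', link='https://s.weibo.com/weibo?q=example')
item['desc'] = 'text of the first post'
print(dict(item))  # {'title': 'example topic', 'link': '...', 'desc': 'text of the first post'}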

Middleware: setting the Cookie, User-Agent and Host headers

python
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals
from fake_useragent import UserAgent
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter


class WeiboTopSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class WeiboTopDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    def __init__(self):
        self.cookie_string = "SUB=_2AkMS10-nf8NxqwFRmfoXyG3jaoxxygHEieKki758JRMxHRl-yT9vqhIrtRB6OVdhSYUGwRsrtuQyFPy_aLfaay7wguyu; SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9WhBJpfihr9Mo_TDhk.fIHFo; _s_tentry=www.baidu.com; UOR=www.baidu.com,s.weibo.com,www.baidu.com; Apache=5259811159487.941.1709629772294; SINAGLOBAL=5259811159487.941.1709629772294; ULV=1709629772313:1:1:1:5259811159487.941.1709629772294:"
        # self.referer = "https://sh.ke.com/chengjiao/"

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        cookie_dict = self.get_cookie()
        request.cookies = cookie_dict
        request.headers['User-Agent'] = UserAgent().random
        request.headers['Host'] = 's.weibo.com'
        # request.headers["referer"] = self.referer
        return None

    def get_cookie(self):
        # parse the raw cookie string into a dict; partition on the first '='
        # so values containing '=' survive, and strip the stray spaces left
        # behind by the '; ' separators
        cookie_dict = {}
        for kv in self.cookie_string.split(";"):
            k, _, v = kv.strip().partition('=')
            cookie_dict[k] = v
        return cookie_dict

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)
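Hand-rolled cookie parsing is easy to get subtly wrong; the standard library's http.cookies.SimpleCookie does the same job. A minimal alternative sketch (note it silently skips pairs it considers malformed):

python
from http.cookies import SimpleCookie

def cookies_to_dict(cookie_string):
    """Parse a raw Cookie header value into a plain dict."""
    jar = SimpleCookie()
    jar.load(cookie_string)
    return {name: morsel.value for name, morsel in jar.items()}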

Pipeline: saving the data to a text file

python
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class WeiboTopPipeline:
    def __init__(self):
        self.items = []

    def process_item(self, item, spider):
        # collect items in memory; they are written out once the spider closes
        self.items.append(item)
        return item

    def close_spider(self, spider):
        # open the file and write all collected items
        with open('weibo_top_data.txt', 'w', encoding='utf-8') as file:
            for item in self.items:
                title = item.get('title', '')
                desc = item.get('desc', '')
                output_string = f'{title}\n{desc}\n\n'
                file.write(output_string)
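For the Excel output mentioned in the title, a minimal pipeline sketch using openpyxl (an assumption: it is installed via pip install openpyxl); register it in ITEM_PIPELINES the same way as the text pipeline:

python
from openpyxl import Workbook


class WeiboTopExcelPipeline:
    """Collects items and dumps them into an .xlsx file on close."""

    def __init__(self):
        self.items = []

    def process_item(self, item, spider):
        self.items.append(item)
        return item

    def close_spider(self, spider):
        wb = Workbook()
        ws = wb.active
        ws.append(['title', 'link', 'desc'])  # header row
        for item in self.items:
            ws.append([item.get('title', ''), item.get('link', ''), item.get('desc', '')])
        wb.save('weibo_top_data.xlsx')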

settings: configuring concurrency and delay

python
# Scrapy settings for weibo_top project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "weibo_top"

SPIDER_MODULES = ["weibo_top.spiders"]
NEWSPIDER_MODULE = "weibo_top.spiders"


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = "weibo_top (+http://www.yourdomain.com)"

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 8

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "weibo_top.middlewares.WeiboTopSpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   "weibo_top.middlewares.WeiboTopDownloaderMiddleware": 543,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   "weibo_top.pipelines.WeiboTopPipeline": 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# The start/max delays below only take effect while AutoThrottle is enabled
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 80
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 160
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
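
With the settings in place, run the spider from the project root with scrapy crawl weibo_top, or through a small Python entry point (a convenience sketch; the run.py file name is my own choice):

python
# run.py - launch the spider without typing the scrapy command by hand
from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'weibo_top'])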