Disclaimer

Please respect each target site's robots.txt rules; this article is for learning and exchange purposes only. The project includes several modules covering data storage, logging, error handling, and multithreaded or asynchronous requests (a concurrent-fetching sketch follows the crawler section below).
Install the required libraries
pip install requests beautifulsoup4

Note that sqlite3 is part of Python's standard library and does not need to be installed with pip.
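To confirm the environment is ready, a quick import check (version numbers will vary) looks like this:

# Quick environment check: all three imports should succeed
import requests
import bs4
import sqlite3

print(requests.__version__, bs4.__version__, sqlite3.sqlite_version)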
Project structure
Create the following files and directory structure:
my_crawler/
├── config.py
├── crawler.py
├── db.py
├── logger.py
└── main.py
Technical details
Configuration file: config.py
The configuration file stores constants and configuration values:
# config.py
BASE_URL = 'https://example.com'
ARTICLES_URL = f'{BASE_URL}/articles'
DATABASE_NAME = 'articles.db'
LOG_FILE = 'crawler.log'
Database operations: db.py
db.py creates the database table and inserts records:
# db.py
import sqlite3
from config import DATABASE_NAME
def init_db():
    """Create the articles table if it does not already exist."""
    conn = sqlite3.connect(DATABASE_NAME)
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS articles (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            title TEXT NOT NULL,
            url TEXT NOT NULL
        )
    ''')
    conn.commit()
    conn.close()
def insert_article(title, url):
    """Insert a single article record into the database."""
    conn = sqlite3.connect(DATABASE_NAME)
    cursor = conn.cursor()
    cursor.execute('INSERT INTO articles (title, url) VALUES (?, ?)', (title, url))
    conn.commit()
    conn.close()
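A quick way to sanity-check db.py from an interactive session in the project directory (the title and URL below are only placeholders):

# Sanity check for db.py: create the table and insert one placeholder row
from db import init_db, insert_article

init_db()
insert_article('Example title', 'https://example.com/articles/1')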
Logging: logger.py
logger.py configures logging for the whole project:
# logger.py
import logging
from config import LOG_FILE
def setup_logger():
    """Configure file logging and return a shared logger instance."""
    logging.basicConfig(
        filename=LOG_FILE,
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    # basicConfig returns None, so hand back an actual logger object
    return logging.getLogger('crawler')

logger = setup_logger()
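Because setup_logger() runs when the module is first imported, every module that imports logger shares the same configuration. A minimal check:

# Minimal check: importing the module configures logging once
from logger import logger

logger.info("Logging configured")  # appended to crawler.log rather than printed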
Crawler logic: crawler.py
crawler.py contains the main crawling logic:
# crawler.py
import requests
from bs4 import BeautifulSoup
from config import ARTICLES_URL
from db import insert_article
from logger import logger
def fetch_page(url):
    """获取网页内容"""
    try:
        response = requests.get(url)
        response.raise_for_status()  # 检查请求是否成功
        return response.text
    except requests.RequestException as e:
        logger.error(f"请求错误: {e}")
        return None
def parse_html(html):
    """解析HTML,提取文章标题和URL"""
    soup = BeautifulSoup(html, 'html.parser')
    articles = []
    for article in soup.find_all('div', class_='article'):
        title = article.find('h1', class_='article-title').get_text(strip=True)
        url = article.find('a')['href']
        articles.append((title, url))
    return articles
def crawl_articles():
    """抓取并保存文章"""
    html = fetch_page(ARTICLES_URL)
    if html:
        articles = parse_html(html)
        for title, url in articles:
            insert_article(title, url)
            logger.info(f"已保存文章: {title} - {url}")
Main program: main.py
main.py is the entry point that starts the crawler:
# main.py
from crawler import crawl_articles
from db import init_db
from logger import logger
def main():
    logger.info("开始爬虫任务")
    init_db()
    crawl_articles()
    logger.info("爬虫任务完成")
if __name__ == '__main__':
    main()
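After running python main.py from the project directory, the saved rows can be inspected directly from the articles.db file:

# Inspect the crawl results stored in articles.db
import sqlite3

conn = sqlite3.connect('articles.db')
for row in conn.execute('SELECT id, title, url FROM articles'):
    print(row)
conn.close()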
Summary
Please respect each target site's robots.txt rules; this article is intended for learning and exchange only. Thanks to the CSDN platform.