1. Project Overview
This project implements a complete keyword co-occurrence network analysis system for CNKI (知网) papers, covering data crawling, keyword extraction, co-occurrence matrix construction, and network visualization.
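At its core, the co-occurrence step simply increments a counter for every pair of keywords that appear together in the same paper. The following is a minimal, self-contained sketch of that idea using made-up keyword lists (the keyword data here is purely illustrative; the full system below builds its counts from crawled CNKI metadata instead):

```python
from collections import defaultdict
from itertools import combinations

# Toy input: each inner list is the keyword set of one hypothetical paper.
papers_keywords = [
    ["deep learning", "image recognition", "neural network"],
    ["deep learning", "neural network", "transfer learning"],
    ["image recognition", "transfer learning"],
]

cooccurrence = defaultdict(int)
for keywords in papers_keywords:
    # Count each unordered keyword pair once per paper.
    for a, b in combinations(sorted(set(keywords)), 2):
        cooccurrence[(a, b)] += 1

print(cooccurrence[("deep learning", "neural network")])  # -> 2
```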
The complete project code is shown below:
```python
import requests
import json
import time
import re
import pandas as pd
import numpy as np
from collections import defaultdict
from bs4 import BeautifulSoup
import networkx as nx
import matplotlib.pyplot as plt
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class CNKISpider:
    def __init__(self):
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        self.keyword_cooccurrence = defaultdict(int)
        self.papers_data = []
    def search_papers(self, keyword, pages=5):
        """Search CNKI for papers matching the given keyword."""
        all_papers = []
        for page in range(1, pages + 1):
            print(f"Searching page {page}...")
            # Build the CNKI search URL for this page of results
            url = f"http://search.cnki.net/search.aspx?q={keyword}&rank=relevant&cluster=all&val=CJFD&p={page}"
            try:
                response = self.session.get(url, headers=self.headers, timeout=10)
                if response.status_code == 200:
                    papers = self.parse_search_results(response.text)
                    all_papers.extend(papers)
                    time.sleep(1)  # avoid sending requests too quickly
                else:
                    print(f"Search failed, status code: {response.status_code}")
            except Exception as e:
                print(f"Search error: {e}")
        return all_papers
    def parse_search_results(self, html):
        """Parse the search results page."""
        soup = BeautifulSoup(html, 'html.parser')
        papers = []
        # Parse the list of paper entries
        paper_elements = soup.find_all('div', class_='list-item')
        for element in paper_elements:
            paper = {}
            # Extract the title
            title_elem = element.find('a', class_='title')
            if title_elem:
                paper['title'] = title_elem.text.strip()
                paper['u