```python
# Count word frequencies
import jieba
import pandas as pd

jieba.load_userdict(r'\百度分词词库.txt')  # load a user-defined dictionary so segmentation is more accurate
stops_word_path = r'\stopwords_all.txt'  # stopword list; the HIT (Harbin Institute of Technology) list is used here
stopwords = pd.read_table(stops_word_path, encoding='utf-8', quoting=3, names=['words'])['words'].tolist()
stopwords.append('\n')  # treat newlines and spaces as stopwords too; add any other words you don't want counted
stopwords.append(' ')
dic = dict()
file_path = r'C:\Users\Shy0418\Desktop\text.txt'  # path of the .txt file whose words will be counted
with open(file_path, encoding='utf-8', mode='r') as file_read:  # read-only mode is sufficient
    lines = file_read.readlines()
    for line in lines:
        for word in jieba.cut(line):  # segment each line with jieba
            if word in stopwords:
                continue  # skip stopwords
            if word in dic:
                dic[word] += 1
            else:
                dic[word] = 1
word_freq = sorted(dic.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)  # sort by frequency, descending
print(word_freq)  # a list of (word, frequency) pairs, highest frequency first
```
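For comparison, the same count can be written more compactly with `collections.Counter` from the standard library. A minimal sketch, assuming the same sample file path as above and a small placeholder stopword set (in practice, substitute the full stopword list loaded earlier):

```python
# Word count with collections.Counter (a sketch; path and stopwords are placeholders)
from collections import Counter

import jieba

file_path = r'C:\Users\Shy0418\Desktop\text.txt'  # same sample path as above
stopwords = {'\n', ' '}  # placeholder; use the full stopword list loaded above in practice

counter = Counter()
with open(file_path, encoding='utf-8') as f:
    for line in f:
        # count every segmented token that is not a stopword
        counter.update(w for w in jieba.cut(line) if w not in stopwords)

print(counter.most_common(10))  # the ten most frequent (word, count) pairs
```

Keeping the stopwords in a set rather than a list also makes each membership test constant-time, which helps noticeably on long texts.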