Keyword Extraction: TF-IDF and TextRank Algorithms
import pandas as pd
raw = pd.read_table('../data/金庸-射雕英雄传txt精校版.txt', names=['txt'], encoding="GBK")
# Preprocess the helper columns used for chapter detection
def m_head(tmpstr):
    return tmpstr[:1]

def m_mid(tmpstr):
    return tmpstr.find("回 ")
raw['head'] = raw.txt.apply(m_head)
raw['mid'] = raw.txt.apply(m_mid)
raw['len'] = raw.txt.apply(len)
# Chapter detection
chapnum = 0
for i in range(len(raw)):
    # A chapter heading starts with "第", contains "回 ", and is a short line
    if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
        chapnum += 1
    # Reset to 0 once the appendix starts, so it is excluded from the chapters
    if chapnum >= 40 and raw['txt'][i] == "附录一:成吉思汗家族":
        chapnum = 0
    raw.loc[i, 'chap'] = chapnum
# Drop the temporary helper columns
del raw['head']
del raw['mid']
del raw['len']
rawgrp = raw.groupby('chap')
chapter = rawgrp.agg(sum)  # when the columns contain only strings, sum concatenates them within each group
chapter = chapter[chapter.index != 0]
chapter
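# An equivalent, more explicit aggregation that does not depend on sum() concatenating
# strings; shown only as an illustrative alternative, assuming the txt column is all that is needed:
chapter_alt = raw.groupby('chap').agg({'txt': ''.join})
chapter_alt = chapter_alt[chapter_alt.index != 0]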
import jieba
import jieba.analyse
# Note: by default the function uses jieba's built-in TF-IDF model for the analysis
jieba.analyse.extract_tags(chapter.txt[1])
# Also return the weight of each keyword
jieba.analyse.extract_tags(chapter.txt[1], withWeight=True)
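# extract_tags also accepts a topK argument to control how many keywords are returned
# (illustrative call; topK defaults to 20 in jieba):
jieba.analyse.extract_tags(chapter.txt[1], topK=5, withWeight=True)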
# Load a custom user dictionary to improve segmentation
jieba.load_userdict('../data/金庸小说词库.txt')  # argument is the path to the custom dictionary
# Apply a stop word list directly in the TF-IDF calculation
jieba.analyse.set_stop_words('../data/停用词.txt')
TFres = jieba.analyse.extract_tags(chapter.txt[1], withWeight=True)
TFres
# Use a custom IDF frequency file
jieba.analyse.set_idf_path('../data/idf.txt.big')
TFres = jieba.analyse.extract_tags(chapter.txt[1], withWeight=True)
TFres
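# The section title also covers TextRank; jieba exposes it as jieba.analyse.textrank,
# called in the same way as extract_tags (a brief illustration only):
jieba.analyse.textrank(chapter.txt[1], withWeight=True)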
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
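# m_cut is used below but not defined in this section; a minimal sketch is given here,
# assuming it segments with jieba and drops stop words and single-character tokens
# (the stop word file is assumed to be the UTF-8 list loaded above):
stoplist = [w.strip() for w in open('../data/停用词.txt', encoding='utf-8').readlines()]
def m_cut(intxt):
    return [w for w in jieba.cut(intxt) if w not in stoplist and len(w) > 1]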
txtlist = [" ".join(m_cut(w)) for w in chapter.txt.iloc[:5]]
vectorizer = CountVectorizer()
x = vectorizer.fit_transform(txtlist)  # convert the texts into a term-frequency matrix
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(x)  # compute TF-IDF from the term-frequency matrix x
tfidf
# Convert to a dense array
tfidf.toarray()
# Convert to a dense matrix
tfidf.todense()
tfidf.todense().shape
print("字典长度:", len(vectorizer.vocabulary_))
vectorizer.vocabulary_
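# To read keywords off the sklearn result, each row can be sorted by TF-IDF weight and
# mapped back through the vocabulary (a sketch; get_feature_names_out needs scikit-learn >= 1.0):
import numpy as np
words = vectorizer.get_feature_names_out()
arr = tfidf.toarray()
for i, row in enumerate(arr):
    top = np.argsort(row)[::-1][:10]  # indices of the 10 highest-weighted terms in this document
    print(i, [words[j] for j in top])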
# Segment and preprocess the documents
chaplist = [m_cut(w) for w in chapter.txt.iloc[:5]]
chaplist