Automatic Document Summarization: A Worked Example
We use the first chapter of the novel 射雕英雄传 (The Legend of the Condor Heroes) as the running example:
import pandas as pd
raw = pd.read_table('../data/金庸-射雕英雄传txt精校版.txt', names=['txt'], encoding="GBK")
# Pre-compute helper columns for chapter-heading detection
def m_head(tmpstr):
    return tmpstr[:1]  # first character of the line

def m_mid(tmpstr):
    return tmpstr.find("回 ")  # position of "回 " in the line, -1 if absent
raw['head'] = raw.txt.apply(m_head)
raw['mid'] = raw.txt.apply(m_mid)
raw['len'] = raw.txt.apply(len)
# Detect chapter headings and number every line by chapter
chapnum = 0
for i in range(len(raw)):
    # a short line starting with "第" and containing "回 " is taken as a chapter heading
    if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
        chapnum += 1
    # reset to 0 at the appendix so it is dropped together with the front matter
    if chapnum >= 40 and raw['txt'][i] == "附录一:成吉思汗家族":
        chapnum = 0
    raw.loc[i, 'chap'] = chapnum
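A quick sanity check of the heading heuristic on a typical heading line (the test string here is our own, not read from the loaded file):

s = "第一回 风雪惊变"
m_head(s), m_mid(s), len(s)  # -> ('第', 2, 8): starts with "第", contains "回 ", well under 30 characters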
# Drop the temporary helper columns
del raw['head']
del raw['mid']
del raw['len']
rawgrp = raw.groupby('chap')
chapter = rawgrp.agg(sum)  # with string-only columns, sum concatenates the strings within each group
chapter = chapter[chapter.index != 0]  # drop the front matter and the appendix, both numbered 0
chapter
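The concatenation behavior relied on above is easy to verify on toy data (a throwaway check, not part of the pipeline):

pd.Series(['射雕', '英雄传']).sum()  # -> '射雕英雄传': summing a string Series concatenates it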
def cut_sentence(intxt):
    # Split text into sentences on Chinese end-of-sentence punctuation
    delimiters = frozenset('。!?')
    buf = []
    for ch in intxt:
        buf.append(ch)
        if ch in delimiters:
            yield ''.join(buf)
            buf = []
    if buf:  # yield any trailing fragment that lacks a terminal delimiter
        yield ''.join(buf)
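Before running it on a whole chapter, the generator can be exercised on a made-up string (hypothetical input):

list(cut_sentence('前文。后文!残句'))  # -> ['前文。', '后文!', '残句']: the trailing fragment is still yielded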
sentdf = pd.DataFrame(cut_sentence(chapter.txt[1]))
sentdf
# Drop very short sentences so the summary is not padded with meaningless fragments
sentdf['txtlen'] = sentdf[0].apply(len)
sentdf
sentlist = sentdf[0][sentdf.txtlen > 20]  # note: the original index is kept, so positional access (.iloc) is used below
print(len(sentlist))
sentlist
import jieba
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
txtlist = [" ".join(jieba.lcut(w)) for w in sentlist]  # space-join the tokens so the vectorizer can split them
vectorizer = CountVectorizer()
x = vectorizer.fit_transform(txtlist)  # convert the texts into a term-frequency (document-term) matrix
x
tfidf_matrix = TfidfTransformer().fit_transform(x)
tfidf_matrix
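Because TfidfTransformer L2-normalizes each row by default (norm='l2'), the product tfidf_matrix * tfidf_matrix.T used in the next step is exactly the pairwise cosine similarity between sentences. A quick check (sketch):

pairwise = (tfidf_matrix * tfidf_matrix.T).toarray()
pairwise[0, 0]  # ~1.0: each sentence has cosine similarity 1 with itself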
# Build a sentence-similarity graph with networkx and run PageRank on it
import networkx as nx
# note: from_scipy_sparse_matrix was removed in networkx 3.0; use from_scipy_sparse_array there
similarity = nx.from_scipy_sparse_matrix(tfidf_matrix * tfidf_matrix.T)
scores = nx.pagerank(similarity)
scores
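This is the TextRank recipe: sentences are nodes, cosine similarities are edge weights, and PageRank scores each sentence's centrality. The graph's node indices line up with positions in sentlist:

similarity.number_of_nodes() == len(sentlist)  # -> True: one node per sentence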
tops = sorted(scores.items(), key=lambda x: x[1], reverse=True)  # (sentence position, score) pairs, best first
tops
tops[:5]
# The three top-ranked sentences
print(sentlist.iloc[tops[0][0]])
print(sentlist.iloc[tops[1][0]])
print(sentlist.iloc[tops[2][0]])
topn = 5
topsent = sorted(tops[:topn])  # sort by sentence position to restore narrative order
abstract = ''
for item in topsent:
    abstract = abstract + sentlist.iloc[item[0]] + '......'
abstract[:-6]  # strip the trailing ellipsis
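For reuse, the whole pipeline can be folded into one helper. A sketch under two assumptions: summarize_text is our own name, and the graph call targets networkx 2.7+ (from_scipy_sparse_array; older versions use from_scipy_sparse_matrix as above):

def summarize_text(text, topn=5, min_len=20):
    # split into sentences and drop short fragments
    sents = [s for s in cut_sentence(text) if len(s) > min_len]
    # tokenize with jieba and weight by tf-idf
    docs = [" ".join(jieba.lcut(s)) for s in sents]
    tfidf = TfidfTransformer().fit_transform(CountVectorizer().fit_transform(docs))
    # cosine-similarity graph + PageRank centrality
    graph = nx.from_scipy_sparse_array(tfidf * tfidf.T)
    scores = nx.pagerank(graph)
    # keep the topn sentences, then restore their original order
    top = sorted(sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:topn])
    return '......'.join(sents[i] for i, _ in top)

summarize_text(chapter.txt[1])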