Extracting Document Topics with gensim
Example code:
import jieba
import pandas as pd
from gensim import corpora, models
from gensim.models.ldamodel import LdaModel
raw = pd.read_table('./金庸-射雕英雄传txt精校版.txt', names=['txt'], encoding="GBK")
# Pre-compute helper columns for chapter detection
def m_head(tmpstr):
    return tmpstr[:1]  # first character of the line

def m_mid(tmpstr):
    return tmpstr.find("回 ")  # position of "回 " in a chapter heading, -1 if absent
raw['head'] = raw.txt.apply(m_head)
raw['mid'] = raw.txt.apply(m_mid)
raw['len'] = raw.txt.apply(len)
# Chapter detection: a heading starts with "第", contains "回 ", and is a short line
chapnum = 0
for i in range(len(raw)):
    if raw['head'][i] == "第" and raw['mid'][i] > 0 and raw['len'][i] < 30:
        chapnum += 1
    if chapnum >= 40 and raw['txt'][i] == "附录一:成吉思汗家族":
        chapnum = 0  # reset so the appendix is grouped with the front matter (chap 0)
    raw.loc[i, 'chap'] = chapnum
# Drop the temporary helper columns
del raw['head']
del raw['mid']
del raw['len']
rawgrp = raw.groupby('chap')
chapter = rawgrp.agg(sum)  # with string-only columns, sum concatenates the rows of each group
chapter = chapter[chapter.index != 0]  # drop front matter and appendix (chap == 0)
# print(chapter)
# Define the segmentation and stop-word cleaning function
# sep='aaa' never occurs in the file, so each line is read whole as one stop word
stop_list = list(pd.read_csv('./停用词.txt', names=['w'], sep='aaa', encoding='utf-8').w)
# print(stop_list)
# jieba segmentation, dropping stop words and single-character tokens
def m_cut(intxt):
    return [w for w in jieba.cut(intxt) if w not in stop_list and len(w) > 1]
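# Optional (an assumption, not part of the original code): the default jieba
# dictionary splits many character and place names from the novel poorly;
# loading a custom user dictionary can improve segmentation. The file name
# below is hypothetical.
# jieba.load_userdict('./射雕人名地名.txt')  # one term per line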
# Preprocess each chapter into a token list
chap_list = [m_cut(w) for w in chapter.txt]
# 生成文档对应的字典和bow稀疏向量
dictionary = corpora.Dictionary(chap_list)
corpus = [dictionary.doc2bow(text) for text in chap_list]  # still a list of lists (bow vectors)
tfidf_model = models.TfidfModel(corpus)  # build the TF-IDF model
corpus_tfidf = tfidf_model[corpus]  # apply TF-IDF to the whole corpus
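# Illustrative check (not in the original): map the heaviest TF-IDF weights of the
# first chapter back to words, to verify that segmentation and weighting look sane.
doc0 = sorted(tfidf_model[corpus[0]], key=lambda x: -x[1])[:10]
print([(dictionary[wid], round(w, 3)) for wid, w in doc0])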
ldamodel = LdaModel(corpus_tfidf, id2word=dictionary, num_topics=10, passes=5)
# Show the most important terms of a single topic (here topic 6)
a = ldamodel.print_topic(6)
print(a)
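# A sketch of the related listing API (not in the original): print_topics()
# returns (topic_id, terms) pairs for several topics at once.
for idx, words in ldamodel.print_topics(num_topics=10, num_words=8):
    print(idx, words)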
# Compute the topic distribution of each document
corpus_lda = ldamodel[corpus_tfidf]  # use the same kind of matrix as during training
for doc in corpus_lda:
    print(doc)
b = ldamodel.get_topics()  # topic-word probability matrix, shape (num_topics, vocab_size)
print(b)
# Find the topics closest to a given text
query = chapter.txt[1]  # use chapter 1 as the query
query_bow = dictionary.doc2bow(m_cut(query))  # term-frequency (bow) vector
query_tfidf = tfidf_model[query_bow]  # TF-IDF vector
print('After transform:', query_tfidf[:10])
c = ldamodel.get_document_topics(query_tfidf)  # pass the same kind of vector the model was trained on
print(c)
# Equivalent shorthand: indexing the model gives the same topic distribution
d = ldamodel[query_tfidf]
print(d)
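# A small sketch (not in the original): pick the single best-matching topic by
# taking the pair with the highest probability.
best_topic, best_prob = max(d, key=lambda pair: pair[1])
print('Closest topic:', best_topic, 'probability:', round(best_prob, 3))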
One small caveat: the topics this code produces are not very accurate.
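A likely cause (my reading, not stated in the original): LDA's generative model assumes integer term counts, so training it on TF-IDF weights, as the code above does, tends to blur the topics. A minimal sketch of the usual remedy is to train on the bow corpus directly, with more passes and a fixed random_state for reproducibility:

# Sketch under the assumption that the weak topics come from training on TF-IDF:
# LDA expects term counts, so train on the bow corpus instead.
ldamodel_bow = LdaModel(corpus, id2word=dictionary, num_topics=10,
                        passes=15, random_state=42)
for idx, words in ldamodel_bow.print_topics(num_topics=10, num_words=8):
    print(idx, words)
# Query the count-trained model with the bow vector directly.
print(ldamodel_bow.get_document_topics(query_bow))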