Taken from here:
# Create a new corpus, made of previously unseen documents
# (assumes `common_dictionary` and `lda` from the gensim tutorial, set up below).
cnn_article = [
    ['This', 'is', 'my', 'cnn', 'article'],
]
other_corpus = [common_dictionary.doc2bow(text) for text in cnn_article]
unseen_doc = other_corpus[0]
vector = lda[unseen_doc]  # get topic probability distribution for a document
Then get the similarity with gensim's Similarity class.
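A minimal sketch of that step, assuming the `common_corpus` and `lda` objects set up in the update below; the "lda_index" shard prefix is an arbitrary name I chose for illustration:

from gensim.similarities import Similarity

# index the training corpus in LDA topic space
# ("lda_index" is just a prefix for the index shard files on disk)
index = Similarity("lda_index", lda[common_corpus], num_features=lda.num_topics)

# cosine similarity of the unseen document to every training document
sims = index[lda[unseen_doc]]
print(list(enumerate(sims)))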
Update:
To follow the tutorial more closely and work with your text file:
from gensim.corpora import Dictionary
from gensim.models import LdaModel
from gensim.test.utils import common_texts

# Create a corpus from a list of texts
common_dictionary = Dictionary(common_texts)
common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]

# Train the model on the corpus.
lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=10)

# optional: print the topics of your model
for topic in lda.print_topics(10):
    print(topic)
# load your CNN article from file
with open("cnn.txt", "r") as file:
    cnn = file.read()

# split the article into a list of words, then wrap that list in a list
cnn = [cnn.split(" ")]
cnn_corpus = [common_dictionary.doc2bow(text) for text in cnn]
unseen_doc = cnn_corpus[0]
vector = lda[unseen_doc]  # get the topic probability distribution for the document
# print out the "similarity" of the cnn article to each of the topics
# (a bigger number means more similar to that topic)
print(vector)
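If you only want the single best-matching topic, you can sort that distribution by probability. This follow-up is a hypothetical addition, not part of the original answer:

# vector is a list of (topic_id, probability) pairs;
# sort by probability, highest first
best_topics = sorted(vector, key=lambda item: item[1], reverse=True)
print(best_topics[0])  # the dominant topic for the article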