Сравните несколько целевых документов с несколькими исходными документами - PullRequest
1 голос
/ 10 мая 2019

Я новичок в искусственном интеллекте и сентиментальном анализе. Я делаю сентиментальный анализ между двумя документами. Этот код прекрасно работает, когда я добавляю только один исходный документ, а не список из нескольких исходных документов, чтобы сравнить его с несколькими целевыми документами.

Может кто-нибудь сказать, что мне нужно изменить, чтобы работать с несколькими исходными документами?

# Loading the pre-trained word2vec model

from gensim.models.keyedvectors import KeyedVectors
import numpy as np  # required by DocSim below (np.mean / np.dot / np.linalg.norm)

# You need to download the Google pre-trained model using the link below:
# https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
# Change the path according to your directory.

# Raw string: in a plain literal '\G' is an invalid escape sequence
# (SyntaxWarning on modern Python); r'...' keeps the backslash literally.
model_path = r'E:\GoogleNews_vectors_negative300.bin'
w2v_model = KeyedVectors.load_word2vec_format(model_path, binary=True)



#Setting Parameters for model

class DocSim(object):
    """Word2vec-based document similarity.

    A document vector is the mean of the vectors of its in-vocabulary
    words; similarity between two documents is the cosine of the angle
    between their vectors.
    """

    def __init__(self, w2v_model, stopwords=None):
        """
        :param w2v_model: mapping from word to vector (e.g. gensim KeyedVectors).
        :param stopwords: optional iterable of words to skip when vectorizing.
        """
        self.w2v_model = w2v_model
        # None sentinel instead of a mutable default argument, so the
        # stopword list is never shared between instances.
        self.stopwords = stopwords if stopwords is not None else []

    def vectorize(self, doc):
        """Return the mean word vector for *doc* (lowercased, space-split).

        Words missing from the model vocabulary are skipped.  If no word
        is found at all, returns NaN — same value np.mean([]) produced,
        but without the RuntimeWarning — which _cosine_sim maps to 0.
        """
        doc = doc.lower()
        words = [w for w in doc.split(" ") if w not in self.stopwords]
        word_vecs = []
        for word in words:
            try:
                word_vecs.append(self.w2v_model[word])
            except KeyError:
                # Ignore words that don't exist in the vocabulary.
                pass

        if not word_vecs:
            return np.nan
        # Document vector = mean of all word vectors.
        return np.mean(word_vecs, axis=0)

    def _cosine_sim(self, vecA, vecB):
        """Return the cosine similarity of two vectors (0 if undefined)."""
        csim = np.dot(vecA, vecB) / (np.linalg.norm(vecA) * np.linalg.norm(vecB))
        if np.isnan(np.sum(csim)):
            return 0
        return csim

    def calculate_similarity(self, source_doc, target_docs=None, threshold=0):
        """Calculate similarity scores between source document(s) and targets.

        :param source_doc: a single document string, or (generalization:
            the original raised AttributeError here) a list/tuple of
            document strings.
        :param target_docs: a target document string or list of them.
        :param threshold: only scores strictly above this are returned.
        :returns: for a string source, a list of {'score', 'doc'} dicts
            sorted by score descending; for a list source, one such
            result list per source document, in order.
        """
        if target_docs is None:
            target_docs = []
        if isinstance(target_docs, str):
            target_docs = [target_docs]

        if isinstance(source_doc, (list, tuple)):
            # One result list per source document.
            return [self._score_one_source(s, target_docs, threshold)
                    for s in source_doc]
        return self._score_one_source(source_doc, target_docs, threshold)

    def _score_one_source(self, source_doc, target_docs, threshold):
        """Score a single source document against all targets (sorted desc)."""
        source_vec = self.vectorize(source_doc)
        results = []
        for doc in target_docs:
            sim_score = self._cosine_sim(source_vec, self.vectorize(doc))
            if sim_score > threshold:
                results.append({
                    'score': sim_score,
                    'doc': doc
                })
        # Sort once after the loop; the original re-sorted the whole list
        # on every iteration for no benefit.
        results.sort(key=lambda r: r['score'], reverse=True)
        return results


ds = DocSim(w2v_model)


# Calculate the similarity score between source rules & target rules.

source_rule = [ '2.1.1 Context','2.2.3 Value']
target_rule = [ '2.1.1 Context','2.1.2.4 Assist Failed Train']

# calculate_similarity() vectorizes its first argument as ONE document
# (it calls .lower()/.split() on it), so passing the whole list raised
# AttributeError: 'list' object has no attribute 'lower'.
# Iterate over the source rules and score each one separately.
for rule in source_rule:
    sim_scores = ds.calculate_similarity(rule, target_rule)
    print(sim_scores)

Это ошибка, которую я получаю прямо сейчас.

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-22-041084a3f599> in <module>
      6 # This will return one target rules text with similarity score
      7 
----> 8 sim_scores = ds.calculate_similarity(source_rule, target_rule)
      9 
     10 print(sim_scores)

<ipython-input-20-055f5d25808f> in calculate_similarity(self, source_doc, target_docs, threshold)
     41             source_doc=[source_doc]
     42 
---> 43         source_vec = self.vectorize(source_doc)
     44         results = []
     45         for doc in target_docs:

<ipython-input-20-055f5d25808f> in vectorize(self, doc)
      8     def vectorize(self, doc):
      9         """Identify the vector values for each word in the given document"""
---> 10         doc = doc.lower()
     11         words = [w for w in doc.split(" ") if w not in self.stopwords]
     12         word_vecs = []

AttributeError: 'list' object has no attribute 'lower'

1 Ответ

0 голосов
/ 10 мая 2019

Вместо того чтобы передавать в функцию весь список, убедитесь, что source_rule является списком, а затем итерируйтесь по нему, вызывая calculate_similarity() для каждого элемента.

# Loading the pre-trained word2vec model

from gensim.models.keyedvectors import KeyedVectors
import numpy as np  # required by DocSim below (np.mean / np.dot / np.linalg.norm)

# You need to download the Google pre-trained model using the link below:
# https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
# Change the path according to your directory.

# Raw string: in a plain literal '\G' is an invalid escape sequence
# (SyntaxWarning on modern Python); r'...' keeps the backslash literally.
model_path = r'E:\GoogleNews_vectors_negative300.bin'
w2v_model = KeyedVectors.load_word2vec_format(model_path, binary=True)



#Setting Parameters for model

class DocSim(object):
    """Compare documents via word2vec: each document becomes the mean of
    its word vectors, and documents are scored by cosine similarity."""

    def __init__(self, w2v_model, stopwords=[]):
        self.w2v_model = w2v_model
        self.stopwords = stopwords

    def vectorize(self, doc):
        """Identify the vector values for each word in the given document."""
        tokens = doc.lower().split(" ")
        word_vecs = []
        for token in tokens:
            if token in self.stopwords:
                continue
            try:
                word_vecs.append(self.w2v_model[token])
            except KeyError:
                # Token is absent from the vocabulary; skip it.
                continue

        # The document vector is taken to be the mean of its word vectors.
        return np.mean(word_vecs, axis=0)

    def _cosine_sim(self, vecA, vecB):
        """Find the cosine similarity distance between two vectors."""
        denom = np.linalg.norm(vecA) * np.linalg.norm(vecB)
        csim = np.dot(vecA, vecB) / denom
        # An all-out-of-vocabulary document yields NaN; report 0 instead.
        return 0 if np.isnan(np.sum(csim)) else csim

    def calculate_similarity(self, source_doc, target_docs=[], threshold=0):
        """Calculates & returns similarity scores between given source document & all
        the target documents."""
        if isinstance(target_docs, str):
            target_docs = [target_docs]

        src_vec = self.vectorize(source_doc)
        results = []
        for candidate in target_docs:
            score = self._cosine_sim(src_vec, self.vectorize(candidate))
            if score > threshold:
                results.append({
                    'score': score,
                    'doc': candidate
                })
            # Keep the running result list ordered, best match first.
            results.sort(key=lambda entry: entry['score'], reverse=True)

        return results


ds = DocSim(w2v_model)


# Calculate the similarity score between a source rule & a target rule.

source_rule = [ '2.1.1 Context','2.2.3 Value']
target_rule = [ '2.1.1 Context','2.1.2.4 Assist Failed Train']

# Normalise a lone string into a one-element list before iterating.
source_rule = [source_rule] if isinstance(source_rule, str) else source_rule

# Score every source rule against all target rules, one at a time.
for rule in source_rule:
    sim_scores = ds.calculate_similarity(rule, target_rule)
    print("Similarity with {} is {}".format(rule, sim_scores))
Добро пожаловать на сайт PullRequest, где вы можете задавать вопросы и получать ответы от других членов сообщества.
...