I wrote neural-network code in a Jupyter notebook, and once the program has run I can simply test the model in another cell without re-running the whole thing. How can I get the same workflow when I write the code in some other text editor?
from __future__ import division, print_function
import pandas as pd
import numpy as np
import re
import nltk
from nltk.corpus import stopwords
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten
from keras.layers import GlobalMaxPooling1D
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
import string
data = pd.read_csv('totaldt.csv',encoding='UTF-8')
data.columns = ['Text', 'Label']
data.head()
data.Label.value_counts()
def remove_punct(text):
    # strip punctuation; re.escape keeps the regex metacharacters in string.punctuation literal
    return re.sub('[' + re.escape(string.punctuation) + ']', '', text)
data['Text_Clean'] = data['Text'].apply(lambda x: remove_punct(x))
from nltk import word_tokenize
tokens = [word_tokenize(sen) for sen in data.Text_Clean]
def lower_token(tokens):
    return [w.lower() for w in tokens]
lower_tokens = [lower_token(token) for token in tokens]
from gensim import models
from keras.callbacks import ModelCheckpoint
from keras.layers import Reshape, concatenate, Input
from keras.layers.recurrent import LSTM
from keras.models import Model
from gensim.models import Word2Vec
from gensim.models.wrappers import FastText
import os
import collections
model = FastText.load_fasttext_format('cc.te.300.bin')
def get_average_word2vec(tokens_list, vector, generate_missing=False, k=300):
    # average the embeddings of all tokens in a sentence; unknown words become
    # random vectors (generate_missing=True) or zero vectors otherwise
    if len(tokens_list) < 1:
        return np.zeros(k)
    if generate_missing:
        vectorized = [vector[word] if word in vector else np.random.rand(k) for word in tokens_list]
    else:
        vectorized = [vector[word] if word in vector else np.zeros(k) for word in tokens_list]
    length = len(vectorized)
    summed = np.sum(vectorized, axis=0)
    averaged = np.divide(summed, length)
    return averaged
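The training call below uses several variables (tokenizer, MAX_SEQUENCE_LENGTH, train_embedding_weights, train_word_index, x_train, y_tr, label_names) that are never defined in the snippet. A minimal sketch of how they are typically built from the FastText vectors loaded above (MAX_SEQUENCE_LENGTH and the split size are assumed values; note the LSTM pipeline feeds an embedding matrix into the Embedding layer, so get_average_word2vec is not actually used by it):

MAX_SEQUENCE_LENGTH = 50          # assumed; pick a length that covers most sentences
EMBEDDING_DIM = 300               # matches the cc.te.300 vectors

texts = [' '.join(toks) for toks in lower_tokens]
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
train_word_index = tokenizer.word_index
padded_data = pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=MAX_SEQUENCE_LENGTH)

# one FastText vector per word in the tokenizer vocabulary
train_embedding_weights = np.zeros((len(train_word_index) + 1, EMBEDDING_DIM))
for word, i in train_word_index.items():
    if word in model.wv:
        train_embedding_weights[i] = model.wv[word]

label_names = sorted(data.Label.unique())
y = pd.get_dummies(data.Label)[label_names].values
x_train, x_test, y_tr, y_te = train_test_split(padded_data, y, test_size=0.1)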
def recurrent_nn(embedding_weights, max_seq_len, num_words, embedding_dim, labels_index):
    # NOTE: the function header and the layers above the Dense block were missing
    # from the snippet; they are reconstructed here from the call below, so the
    # layer sizes are assumptions
    sequence_input = Input(shape=(max_seq_len,), dtype='int32')
    embedded_sequences = Embedding(num_words, embedding_dim, weights=[embedding_weights],
                                   input_length=max_seq_len, trainable=False)(sequence_input)
    lstm = LSTM(128)(embedded_sequences)  # assumed hidden size
    x = Dense(128, activation='relu')(lstm)
    x = Dropout(0.2)(x)
    preds = Dense(labels_index, activation='sigmoid')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
    model.summary()
    return model
model = recurrent_nn(train_embedding_weights, MAX_SEQUENCE_LENGTH, len(train_word_index)+1, EMBEDDING_DIM,
len(list(label_names)))
num_epochs = 4
batch_size = 34
hist = model.fit(x_train, y_tr, epochs=num_epochs, validation_split=0.1, shuffle=True, batch_size=batch_size)
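To get the Jupyter-style workflow outside a notebook, persist the fitted model and tokenizer right after training; a separate script can then test sentences without retraining. A minimal sketch (the file names are placeholders):

import pickle

model.save('sentiment_model.h5')          # Keras stores architecture + weights in one file
with open('tokenizer.pickle', 'wb') as f:
    pickle.dump(tokenizer, f)             # the same tokenizer must be reused at test time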
#predictions = model.predict(test_cnn_data, batch_size=10, verbose=1)
#labels = ["pos", "neg"]
#prediction_labels=[]
#for p in predictions:
# prediction_labels.append(labels[np.argmax(p)])
#sum(data_test.Label==prediction_labels)/len(prediction_labels)
This is the testing part. I need to be able to enter as many sentences as I want and test the model without re-running everything above.
s = [input("enter sentence")]  # a one-element list of strings; list(input(...)) would split the sentence into characters
test_sequence = tokenizer.texts_to_sequences(s)
test_cnn_dat = pad_sequences(test_sequence, maxlen=MAX_SEQUENCE_LENGTH)
prediction = model.predict(test_cnn_dat, batch_size=1, verbose=1)
print(prediction)
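A standalone test script can then reload both artifacts once and loop over input, so every new sentence is classified without re-running the training code, just like re-executing a notebook cell (again a sketch, using the placeholder file names from above):

from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
import pickle

MAX_SEQUENCE_LENGTH = 50                  # must match the value used for training

model = load_model('sentiment_model.h5')
with open('tokenizer.pickle', 'rb') as f:
    tokenizer = pickle.load(f)

while True:
    sentence = input("enter sentence (blank line to quit): ")
    if not sentence:
        break
    seq = tokenizer.texts_to_sequences([sentence])
    padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)
    print(model.predict(padded, batch_size=1, verbose=1))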