The code below works fine, but I'm wondering how I can build a CNN-BiLSTM model from it (see the sketch after the listing). The overall goal is to improve the model's accuracy. Also, what other techniques could be used to improve accuracy? The dataset consists of X (input = 120 columns) and Y (output = 30 columns).
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import LSTM
from keras.layers import Dense
from keras.models import Sequential
from keras.layers import Bidirectional
from sklearn.metrics import accuracy_score
np.random.seed(1)
# configurations
INPUT_SIZE = 120
OUTPUT_SIZE = 30
EVENT_THRESHOLD = 60
ITERATIONS = 10
LSTM_NEURONS = 30
NUM_ITERATIONS = 5
TEST_DATASET_SIZE = 40
avg_rmse_all_iterations = []
avg_accuracy_all_iterations = []
for _ in range(ITERATIONS):
    # load, shuffle and process dataset
    raw_dataset = np.genfromtxt('supervised120-15g.csv', skip_header=True, delimiter=',')
    np.random.shuffle(raw_dataset)
    dataset = np.diff(raw_dataset)
    # print dataset
    print('Shape: ', raw_dataset.shape)
    print('Before Processing: ', raw_dataset)
    print()
    print('Shape: ', dataset.shape)
    print('After Processing: ', dataset)
    # plot one data row
    plt.subplot(211)
    X = np.arange(raw_dataset.shape[-1])
    plt.plot(X[:INPUT_SIZE], raw_dataset[0, :INPUT_SIZE], 'g--')
    plt.plot(X[INPUT_SIZE:], raw_dataset[0, INPUT_SIZE:], 'r--')
    plt.axhline(y=EVENT_THRESHOLD, color='y')
    plt.axvline(x=INPUT_SIZE, color='k', linestyle='--')
    plt.xlabel('Time')
    plt.ylabel('ABP')
    plt.subplot(212)
    X = np.arange(dataset.shape[-1])
    plt.plot(X[:INPUT_SIZE - 1], dataset[0, :INPUT_SIZE - 1], 'g--')
    plt.plot(X[INPUT_SIZE - 1:], dataset[0, INPUT_SIZE - 1:], 'r--')
    plt.axvline(x=INPUT_SIZE - 1, color='k', linestyle='--')
    plt.xlabel('Time')
    plt.ylabel('Processed ABP')
    plt.title('Data Processing')
    plt.show()
    # Split test/train dataset
    X = dataset[:, :INPUT_SIZE - 1]
    y = dataset[:, INPUT_SIZE - 1:]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    train_x, test_x, train_y, test_y = X[TEST_DATASET_SIZE:], X[:TEST_DATASET_SIZE], y[TEST_DATASET_SIZE:], y[:TEST_DATASET_SIZE]
    print('number of data points:', X.shape[0])
    print('number of features:', X.shape[-1])
    # create model bi-lstm
    model = Sequential()
    model.add(Bidirectional(LSTM(LSTM_NEURONS), batch_input_shape=(1, X.shape[1], X.shape[2])))
    model.add(Dense(OUTPUT_SIZE, activation='tanh'))
    model.compile(loss='mean_squared_error', optimizer='RMSprop')
    model.summary()
    # train model
    for i in range(NUM_ITERATIONS):
        #np.random.seed(1)
        model.fit(train_x, train_y, epochs=1, batch_size=1, verbose=1, shuffle=False)
        model.reset_states()
    # print training results
    X = np.arange(INPUT_SIZE + OUTPUT_SIZE - 1)
    t1 = train_x[0].reshape(-1)
    t2 = train_y[0].reshape(-1)
    t3 = model.predict(train_x[0].reshape(1, 1, -1)).reshape(-1)
    plt.plot(X[:INPUT_SIZE - 1], t1, 'g--', label='X')
    plt.plot(X[INPUT_SIZE - 1:], t2, 'r--', label='y_real')
    plt.plot(X[INPUT_SIZE - 1:], t3, 'b--', label='y_pred')
    plt.axvline(x=INPUT_SIZE - 1, color='k', linestyle='--')
    plt.legend(loc='best')
    plt.xlabel('Time')
    plt.ylabel('ABP')
    plt.title('Training Results')
    plt.show()
    # model testing
    raw_predicted = []
    for i in range(test_x.shape[0]):
        x = test_x[i]
        y_pred = model.predict(x.reshape(1, 1, -1)).reshape(-1)
        # undo the differencing: cumulative sum of the starting value, the input diffs and the predicted diffs
        raw_predicted.append(np.cumsum(np.hstack([raw_dataset[i, 0], x.reshape(-1), y_pred])))
    raw_predicted = np.asarray(raw_predicted)
    X = np.arange(raw_dataset.shape[-1])
    # RMSE of the reconstructed series for one test sample (row 4)
    rmse_sample = ((raw_dataset[4] - raw_predicted[4]) ** 2).mean() ** 0.5
    #plt.plot(X[:INPUT_SIZE], raw_dataset[9, :INPUT_SIZE], 'g--', label='Input')
    plt.plot(X[INPUT_SIZE:], raw_dataset[4, INPUT_SIZE:], 'b--', label='Output')
    plt.plot(X[INPUT_SIZE:], raw_predicted[4, INPUT_SIZE:], 'r--', label='Predicted')
    plt.axvline(x=INPUT_SIZE, color='k', linestyle='--')
    plt.axhline(y=EVENT_THRESHOLD, color='y', label='Threshold')
    plt.legend(loc='best')
    plt.xlabel('Time')
    plt.ylabel('ABP')
    plt.title('RMSE: %.6f' % (rmse_sample, ))
    plt.show()
    # convert regression value to class
    true_class = [int(np.max(x[INPUT_SIZE:]) < EVENT_THRESHOLD) for x in raw_dataset[:TEST_DATASET_SIZE]]
    pred_class = [int(np.max(x[INPUT_SIZE:]) < EVENT_THRESHOLD) for x in raw_predicted]
    print('True Class:', true_class)
    print('Predicted Class:', pred_class)
    accuracy = accuracy_score(true_class, pred_class)
    print('Accuracy: ', accuracy)
    # calculate rmse
    total_rmse = (((raw_dataset[:TEST_DATASET_SIZE] - raw_predicted) ** 2).mean(axis=0) ** 0.5)
    print(total_rmse)
    # save rmse for later plot
    avg_rmse_all_iterations.append(total_rmse)
    avg_accuracy_all_iterations.append(accuracy)
# calculate average rmse over all iterations and plot it
avg_rmse_all_iterations = np.asarray(avg_rmse_all_iterations).mean(axis=0)[INPUT_SIZE:]
X = np.arange(avg_rmse_all_iterations.shape[0])
plt.plot(X, avg_rmse_all_iterations, 'b--', label='Average RMSE')
plt.legend(loc='best')
plt.xlabel('Time')
plt.ylabel('Average RMSE')
plt.title('Average RMSE over %d iterations' % (ITERATIONS, ))
plt.show()
avg_accuracy_all_iterations = np.asarray(avg_accuracy_all_iterations).mean(axis=0)
print('Average accuracy over all iterations: ', avg_accuracy_all_iterations)
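
Here is a minimal sketch of what a CNN-BiLSTM version of the model above could look like. The main assumption is that X is kept as one feature per time step, i.e. reshaped to (samples, INPUT_SIZE - 1, 1) instead of (samples, 1, INPUT_SIZE - 1), so the Conv1D layer actually slides over the 119 time steps; the filter count and kernel size are arbitrary starting points, not tuned values, and INPUT_SIZE, OUTPUT_SIZE and LSTM_NEURONS are the constants from the configuration block above.

from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D, Bidirectional, LSTM, Dense

# Assumes X was reshaped to (samples, INPUT_SIZE - 1, 1), one feature per time step,
# instead of the (samples, 1, INPUT_SIZE - 1) shape used in the listing above.
cnn_bilstm = Sequential()
cnn_bilstm.add(Conv1D(filters=64, kernel_size=3, activation='relu',
                      input_shape=(INPUT_SIZE - 1, 1)))   # extract local patterns
cnn_bilstm.add(MaxPooling1D(pool_size=2))                  # shorten the sequence
cnn_bilstm.add(Bidirectional(LSTM(LSTM_NEURONS)))          # same BiLSTM head as before
cnn_bilstm.add(Dense(OUTPUT_SIZE, activation='tanh'))
cnn_bilstm.compile(loss='mean_squared_error', optimizer='RMSprop')
cnn_bilstm.summary()

Because this version uses input_shape rather than a fixed batch_input_shape=(1, ...), training is no longer restricted to batch_size=1.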
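
As for other techniques that commonly help accuracy on a setup like this: scaling the inputs to a fixed range, training with a validation split plus early stopping instead of a fixed number of passes, and tuning the number of LSTM units and Conv1D filters. Below is a rough sketch of the first two, assuming the model is built with input_shape (so the batch size is free); MinMaxScaler, the patience value and the epoch/batch numbers are illustrative choices only.

from sklearn.preprocessing import MinMaxScaler
from keras.callbacks import EarlyStopping

# Fit the scaler on the training rows only and reuse it on the test rows,
# so no information from the test set leaks into training.
scaler = MinMaxScaler(feature_range=(-1, 1))
train_flat = train_x.reshape(train_x.shape[0], -1)
test_flat = test_x.reshape(test_x.shape[0], -1)
train_x_scaled = scaler.fit_transform(train_flat).reshape(train_x.shape)
test_x_scaled = scaler.transform(test_flat).reshape(test_x.shape)

# Stop when the validation loss stops improving and keep the best weights
# seen so far (restore_best_weights needs a reasonably recent Keras).
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
model.fit(train_x_scaled, train_y, epochs=200, batch_size=16,
          validation_split=0.1, callbacks=[early_stop], verbose=1)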