I built a Variational Autoencoder using Keras in R. I noticed that if I train the model in a regular R session everything works fine, but when I train it in a Shiny session things go terribly wrong: when the Shiny session reaches the line that trains the model:
history <- vae %>% fit(
  x_train, x_train,
  shuffle = TRUE,
  epochs = 25,
  batch_size = batch_size,
  validation_data = list(x_test, x_test)
)
there is no per-epoch feedback and the whole computer freezes (I don't get any errors; it simply brings the machine down).
Is there any special configuration that needs to be set when using Keras from Shiny?
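To make the question concrete, this is the kind of thing I mean by "special configuration" (purely a guess on my part, and not something I have verified helps): constraining TensorFlow's threading before the model is built, in case the freeze comes from TensorFlow trying to use every core inside the Shiny process. The thread counts below are illustrative only:

library(keras)
library(tensorflow)
use_condaenv("r-tensorflow")
# Illustrative only (TF 1.x style): limit TensorFlow's thread pools
config <- tf$ConfigProto(intra_op_parallelism_threads = 2L,
                         inter_op_parallelism_threads = 2L)
k_set_session(tf$Session(config = config))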
Edit: I am using the Variational Autoencoder for dimensionality reduction. Here is my function; I call it from the Shiny server:
get_variational_autoencoder <- function(data_set,
                                        reduce_to = 2) {
  library(keras)
  use_condaenv("r-tensorflow")
  # Data preparation ---------------------------------
  row_names <- row.names(data_set)
  data_set <- normalize(data_set)
  row.names(data_set) <- row_names
  partition <- data_partition(data_set, .80)
  x_train <- partition$train_set
  x_test <- partition$test_set
  # Parameters ---------------------------------------
  # DEAFULT_* constants are defined elsewhere in the project
  batch_size <- DEAFULT_BATCH_SIZE_FOR_AUTOANCODER
  original_dim <- dim(data_set)[2]
  latent_dim <- reduce_to
  # rounded so that layer_dense() receives an integer number of units
  intermediate_dim <- round((2/3) * original_dim + latent_dim)
  nb_epoch <- DEAFULT_NUMBER_OF_EPOCH_FOR_AUTOANCODER
  epsilon_std <- DEAFULT_EPSILON_FOR_AUTOANCODER
  # Encoder -------------------------------------------
  # input layer
  x <- layer_input(shape = c(original_dim))
  # hidden intermediate, lower-res
  h <- layer_dense(x, intermediate_dim, activation = "relu")
  # latent var 1, 2-dim (mainly for plotting!): mean
  z_mean <- layer_dense(h, latent_dim)
  # latent var 2, 2-dim: variance
  z_log_var <- layer_dense(h, latent_dim)
  # reparameterization trick: z = mean + sd * epsilon
  sampling <- function(arg) {
    z_mean <- arg[, 1:(latent_dim)]
    z_log_var <- arg[, (latent_dim + 1):(2 * latent_dim)]
    epsilon <- k_random_normal(
      shape = c(k_shape(z_mean)[[1]]),
      mean = 0.,
      stddev = epsilon_std
    )
    z_mean + k_exp(z_log_var / 2) * epsilon
  }
  z <- layer_concatenate(list(z_mean, z_log_var)) %>%
    layer_lambda(sampling)
  # hidden intermediate, higher-res
  decoder_h <- layer_dense(units = intermediate_dim, activation = "relu")
  # decoder for the mean, high-res again
  decoder_mean <- layer_dense(units = original_dim, activation = "sigmoid")
  h_decoded <- decoder_h(z)
  x_decoded_mean <- decoder_mean(h_decoded)
  # the complete model, from input to decoded output
  vae <- keras_model(x, x_decoded_mean)
  # encoder, from inputs to latent space
  encoder <- keras_model(x, z_mean)
  # generator, from latent space to reconstructed inputs
  decoder_input <- layer_input(shape = latent_dim)
  h_decoded_2 <- decoder_h(decoder_input)
  x_decoded_mean_2 <- decoder_mean(h_decoded_2)
  generator <- keras_model(decoder_input, x_decoded_mean_2)
  # VAE loss: reconstruction (binary cross-entropy) + KL divergence
  vae_loss <- function(x, x_decoded_mean) {
    xent_loss <- (original_dim / 1.0) * loss_binary_crossentropy(x, x_decoded_mean)
    kl_loss <- -0.5 * k_mean(1 + z_log_var - k_square(z_mean) - k_exp(z_log_var),
                             axis = -1L)
    xent_loss + kl_loss
  }
  vae %>% compile(optimizer = "rmsprop", loss = vae_loss, metrics = c("accuracy"))
  # Model training ---------------------------------------------------------
  history <- vae %>% fit(
    x_train, x_train,
    shuffle = TRUE,
    epochs = 25,
    batch_size = batch_size,
    validation_data = list(x_test, x_test)
  )
  # predict() dispatches to the keras model method; keras::predict() is not exported
  data_set_after_vae <- predict(encoder, data_set, batch_size = batch_size)
  vae_result <- list("v_autoencoder" = vae,
                     "data_set_after_vae" = data_set_after_vae %>% as_data_frame(),
                     "history" = history,
                     "encoder" = encoder)
  return(vae_result)
}
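For reference, the call from the Shiny server looks roughly like this (heavily simplified; the input/output ids and the data object below are illustrative, not my real code):

# server.R -- simplified sketch, illustrative names only
server <- function(input, output, session) {
  observeEvent(input$run_vae, {
    vae_result <- get_variational_autoencoder(my_data_set, reduce_to = 2)
    output$vae_plot <- renderPlot({
      plot(vae_result$data_set_after_vae)
    })
  })
}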