Hyperas hyperparameter optimization not working
0 votes
28 March 2020

Hi, I'm trying to use Hyperas for hyperparameter optimization. The same code worked a couple of months ago, but now it raises an error, and I have tried several things, none of which work. I also tried downgrading TensorFlow to 1.14, but that did not help either.
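For what it's worth, the downgrade attempt looked roughly like this in Colab (restarting the runtime afterwards; the exact pins may have differed slightly):

# rough sketch of the downgrade attempt in Colab (runtime restarted afterwards)
!pip install -q tensorflow==1.14
# alternatively, Colab's built-in version switch:
# %tensorflow_version 1.x
import tensorflow as tf
print(tf.__version__)  # confirm which version is actually active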

Can anyone suggest how to fix this?

Thanks

import sys
import random
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import seaborn as sns
sns.set_style("white")
from sklearn.utils import shuffle
#import bloscpack as bp

%matplotlib inline
import keras
# import cv2
from sklearn.model_selection import train_test_split

from tqdm import tqdm_notebook #, tnrange
#from itertools import chain
from skimage.io import imread, imshow #, concatenate_images
from skimage.transform import resize
from skimage.morphology import label

from keras.models import Model, load_model, save_model
from keras.layers import Input,Dropout,BatchNormalization,Activation,Add,Flatten,Dense,Reshape
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
from keras import optimizers

import tensorflow as tf

from keras.preprocessing.image import array_to_img, img_to_array, load_img#,save_img

import time
t_start = time.time()
from hyperas import optim
from hyperopt import STATUS_OK, Trials, tpe
from hyperopt.pyll.stochastic import choice, uniform, loguniform
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras import optimizers
import pickle
def data():
    ti = np.load("/content/gdrive/My Drive/ti1_hyperas.npy")
    ti = ti.reshape(ti.shape[0],ti.shape[1],ti.shape[2],1)
    tr = np.load("/content/gdrive/My Drive/tr1_hyperas.npy")
    return ti, tr
import numpy as np
def model(ti, tr):

    #Build Encoder
    inputs = Input((502, 200, 3))
    x = Conv2D(8, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(inputs)
    x = BatchNormalization(momentum={{uniform(0.01, 0.99)}})(x)
    x = LeakyReLU(alpha={{uniform(0.001, 0.2)}})(x)
    x = MaxPooling2D(pool_size=(2,1))(x)

    x = Conv2D(16, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.07885780376708844)(x)
    x = MaxPooling2D(pool_size=(2,2))(x)

    x = Conv2D(32, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.13056689715598363)(x)
    x = MaxPooling2D(pool_size=(2,2))(x)

    x = Conv2D(64, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.19340487593094036)(x)
    x = MaxPooling2D(pool_size=(2,2))(x)

    x = Conv2D(128, (8, 5), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.19340487593094036)(x)

    #use Conv2DTranspose to reverse the conv layers from the encoder
    x = Conv2DTranspose(64, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(64, 5, padding='same', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    x = Conv2DTranspose(32, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(32, 5, padding='same', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    x = Conv2DTranspose(16, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(16, 3, padding='valid', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    x = Conv2DTranspose(8, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(8, 3, padding='same', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    outputs = Conv2D(1, (1, 1), padding="same", activation="relu")(x)

    # encoder model statement
    model = Model(inputs, outputs)

    adam = keras.optimizers.Adam(lr={{uniform(0.0001, 0.1)}})
    rmsprop = keras.optimizers.RMSprop(lr={{uniform(0.0001, 0.1)}})
    sgd = keras.optimizers.SGD(lr={{uniform(0.0001, 0.1)}})

    choiceval = {{choice(['adam', 'sgd', 'rmsprop'])}}
    if choiceval == 'adam':
        optim = adam
    elif choiceval == 'rmsprop':
        optim = rmsprop
    else:
        optim = sgd

    model.compile(loss='mse', optimizer=optim)
    if 'results' not in globals():
        global results
        results = []
    results = []

    result = model.fit(x=tr, y=ti, batch_size={{choice([16,32,64])}}, epochs=3, verbose=2, validation_split=0.1, shuffle=True)
    valLoss = np.amin(result.history['val_loss'])
    if np.isnan(valLoss) == True:
        valLoss = 999.0              
    K.clear_session()
    return {'loss':  valLoss, 'status': STATUS_OK}
import os
import sys
import oauth2client.client

print('oauth2client version', oauth2client.__version__)
print('SETTINGS.env_name 1', oauth2client.client.SETTINGS.env_name)
print('In GAE environment:', oauth2client.client._in_gae_environment())
print('SETTINGS.env_name 2', oauth2client.client.SETTINGS.env_name)
print('Server software', os.environ.get(oauth2client.client._SERVER_SOFTWARE, ''))
if 'google.appengine' in sys.modules:
    print('google.appengine ', sys.modules['google.appengine'].__file__)
# See: https://stackoverflow.com/questions/49920031/get-the-path-of-the-notebook-on-google-colab
# Install the PyDrive wrapper & import libraries.
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

# Copy/download the file
fid = drive.ListFile({'q':"title='UNET_3shot_train.ipynb'"}).GetList()[0]['id']
f = drive.CreateFile({'id': fid})
f.GetContentFile('UNET_3shot_train.ipynb')
best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      max_evals=150,
                                      algo=tpe.suggest,
                                      notebook_name='UNET_3shot_train',  # This is important!
                                      trials=Trials())

I get the following error:

>>> Imports:
#coding=utf-8

try:
    from google.colab import drive
except:
    pass

try:
    import os
except:
    pass

try:
    import sys
except:
    pass

try:
    import random
except:
    pass

try:
    import cv2
except:
    pass

try:
    import pandas as pd
except:
    pass

try:
    import numpy as np
except:
    pass

try:
    import matplotlib.pyplot as plt
except:
    pass

try:
    import seaborn as sns
except:
    pass

try:
    from sklearn.utils import shuffle
except:
    pass

try:
    import keras
except:
    pass

try:
    from sklearn.model_selection import train_test_split
except:
    pass

try:
    from tqdm import tqdm_notebook
except:
    pass

try:
    from skimage.io import imread, imshow
except:
    pass

try:
    from skimage.transform import resize
except:
    pass

try:
    from skimage.morphology import label
except:
    pass

try:
    from keras.models import Model, load_model, save_model
except:
    pass

try:
    from keras.layers import Input, Dropout, BatchNormalization, Activation, Add, Flatten, Dense, Reshape
except:
    pass

try:
    from keras.layers.advanced_activations import LeakyReLU
except:
    pass

try:
    from keras.layers.core import Lambda
except:
    pass

try:
    from keras.layers.convolutional import Conv2D, Conv2DTranspose
except:
    pass

try:
    from keras.layers.pooling import MaxPooling2D
except:
    pass

try:
    from keras.layers.merge import concatenate
except:
    pass

try:
    from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
except:
    pass

try:
    from keras import backend as K
except:
    pass

try:
    from keras import optimizers
except:
    pass

try:
    import tensorflow as tf
except:
    pass

try:
    from keras.preprocessing.image import array_to_img, img_to_array, load_img
except:
    pass

try:
    import time
except:
    pass

try:
    from keras.callbacks import ReduceLROnPlateau
except:
    pass

try:
    import matplotlib as m
except:
    pass

try:
    from sklearn.metrics import mean_squared_error
except:
    pass

try:
    from hyperas import optim
except:
    pass

try:
    from hyperopt import STATUS_OK, Trials, tpe
except:
    pass

try:
    from hyperopt.pyll.stochastic import choice, uniform, loguniform
except:
    pass

try:
    from keras.datasets import mnist
except:
    pass

try:
    from keras.utils import np_utils
except:
    pass

try:
    from keras.models import Sequential
except:
    pass

try:
    from keras.layers import Dense, Dropout, Activation
except:
    pass

try:
    import pickle
except:
    pass

try:
    import oauth2client.client
except:
    pass

try:
    from pydrive.auth import GoogleAuth
except:
    pass

try:
    from pydrive.drive import GoogleDrive
except:
    pass

try:
    from google.colab import auth
except:
    pass

try:
    from oauth2client.client import GoogleCredentials
except:
    pass

>>> Hyperas search space:

def get_space():
    return {
        'momentum': hp.uniform('momentum', 0.01, 0.99),
        'choiceact': hp.choice('choiceact', ['Leaky', 'relu', 'sigmoid', 'tanh']),
        'alpha': hp.uniform('alpha', 0.001, 0.2),
        'alpha_1': hp.uniform('alpha_1', 0.001, 0.2),
        'latent_dim': hp.choice('latent_dim', [2,3,5]),
        'filter_size': hp.choice('filter_size', [2,3,5,7,9]),
        'alpha_2': hp.uniform('alpha_2', 0.001, 0.2),
        'alpha_3': hp.uniform('alpha_3', 0.001, 0.2),
        'alpha_4': hp.uniform('alpha_4', 0.001, 0.2),
        'alpha_5': hp.uniform('alpha_5', 0.001, 0.2),
        'alpha_6': hp.uniform('alpha_6', 0.001, 0.2),
        'alpha_7': hp.uniform('alpha_7', 0.001, 0.2),
        'alpha_8': hp.uniform('alpha_8', 0.001, 0.2),
        'alpha_9': hp.uniform('alpha_9', 0.001, 0.2),
        'alpha_10': hp.uniform('alpha_10', 0.001, 0.2),
        'kl_loss': hp.uniform('kl_loss', -5e-1, -5e-10),
        'lr': hp.uniform('lr', 0.0001, 0.1),
        'lr_1': hp.uniform('lr_1', 0.0001, 0.1),
        'lr_2': hp.uniform('lr_2', 0.0001, 0.1),
        'choiceval': hp.choice('choiceval', ['adam', 'sgd', 'rmsprop']),
        'batch_size': hp.choice('batch_size', [16,32,64]),
    }

>>> Data
ti = np.load("/content/gdrive/My Drive/OPTIC EARTH/DATA/GAN/ti_tr/training_data/ti1_hyperas.npy")
ti = ti.reshape(ti.shape[0],ti.shape[1],ti.shape[2],1)
tr = np.load("/content/gdrive/My Drive/OPTIC EARTH/DATA/GAN/ti_tr/training_data/tr1_hyperas.npy")
>>> Resulting replaced keras model:

def keras_fmin_fnct(space):


    #Build Encoder
    inputs = Input((502, 200, 3))
    x = Conv2D(8, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(inputs)
    x = BatchNormalization(momentum=space['momentum'])(x)
    # choiceact = space['choiceact']
    # if choiceact == 'Leaky':
    #     x = LeakyReLU(alpha=space['alpha'])(x)
    # if choiceact == 'relu':
    #     x = keras.activations.relu(x)
    # if choiceact == 'sigmoid':
    #     x = keras.activations.sigmoid(x)
    # if choiceact == 'tanh':
    #     x = keras.activations.tanh(x)
    # x = choiceact
    x = LeakyReLU(alpha=space['alpha_1'])(x)
    x = MaxPooling2D(pool_size=(2,1))(x)

    x = Conv2D(16, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.07885780376708844)(x)
    x = MaxPooling2D(pool_size=(2,2))(x)

    x = Conv2D(32, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.13056689715598363)(x)
    x = MaxPooling2D(pool_size=(2,2))(x)

    x = Conv2D(64, (6, 3), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.19340487593094036)(x)
    x = MaxPooling2D(pool_size=(2,2))(x)

    x = Conv2D(128, (8, 5), padding = "valid", strides = 1, activation=None, use_bias=False)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.19340487593094036)(x)

    #use Conv2DTranspose to reverse the conv layers from the encoder
    x = Conv2DTranspose(64, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(64, 5, padding='same', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    x = Conv2DTranspose(32, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(32, 5, padding='same', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    x = Conv2DTranspose(16, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(16, 3, padding='valid', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    x = Conv2DTranspose(8, (5, 5), padding = "same", activation='relu', strides=(2, 2))(x)
    x = Conv2D(8, 3, padding='same', activation=None)(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.1017579888760034)(x)

    outputs = Conv2D(1, (1, 1), padding="same", activation="relu")(x)

    # encoder model statement
    model = Model(inputs, outputs)
    #model.summary()


    # # sampling function
    # def sampling(args):
    #     z_mu, z_log_sigma = args
    #     epsilon = K.random_normal(shape=(K.shape(z_mu)[0], latent_dim),
    #                               mean=0., stddev=1.)
    #     return z_mu + K.exp(z_log_sigma) * epsilon

    # #Build Encoder
    # latent_dim = space['latent_dim']
    # inputs = Input((256, 256, 1))
    # filter_size = space['filter_size']
    # x = Conv2D(16, (filter_size, filter_size), padding = "same", strides = 2, activation=None, use_bias=False)(inputs)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_2'])(x)
    # x = Conv2D(32, (filter_size, filter_size), padding = "same", strides = 2, activation=None, use_bias=False)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_3'])(x)
    # x = Conv2D(64, (filter_size, filter_size), padding = "same", strides = 2, activation=None, use_bias=False)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_4'])(x)
    # x = Conv2D(128, (filter_size, filter_size), padding = "same", strides = 2, activation=None, use_bias=False)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_5'])(x)
    # # need to know the shape of the network here for the decoder
    # shape_before_flattening = K.int_shape(x)

    # x = Flatten()(x)

    # x = Dense(32, activation=None)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_6'])(x)
    # # Two outputs, latent mean and (log)variance
    # z_mean = Dense(latent_dim)(x)
    # z_log_var = Dense(latent_dim)(x)

    # # use reparameterization trick to push the sampling out as input
    # # note that "output_shape" isn't necessary with the TensorFlow backend
    # z = Lambda(sampling)([z_mean, z_log_var])

    # #Decoder
    # decoder_input = Input(K.int_shape(z)[1:])

    # # Expand to 784 total pixels
    # x = Dense(np.prod(shape_before_flattening[1:]),
    #                 activation='relu')(decoder_input)

    # # reshape
    # x = Reshape(shape_before_flattening[1:])(x)

    # # use Conv2DTranspose to reverse the conv layers from the encoder
    # x = Conv2DTranspose(128, filter_size, padding='same', activation='relu', strides=(2, 2))(x)
    # x = Conv2D(128, filter_size, padding='same', activation=None)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_7'])(x)

    # x = Conv2DTranspose(64, filter_size, padding='same', activation='relu', strides=(2, 2))(x)
    # x = Conv2D(64, filter_size, padding='same', activation=None)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_8'])(x)

    # x = Conv2DTranspose(32, filter_size, padding='same', activation='relu', strides=(2, 2))(x)
    # x = Conv2D(32, filter_size, padding='same', activation=None)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_9'])(x)

    # x = Conv2DTranspose(16, filter_size, padding='same', activation='relu', strides=(2, 2))(x)
    # x = Conv2D(16, filter_size, padding='same', activation=None)(x)
    # x = BatchNormalization(momentum=0.8)(x)
    # x = LeakyReLU(alpha=space['alpha_10'])(x)

    # x = Conv2D(1, filter_size, padding='same', activation='sigmoid')(x)
    # # decoder model statement
    # decoder = Model(decoder_input, x)
    # #decoder.summary()

    # # apply the decoder to the sample from the latent distribution
    # z_decoded = decoder(z)
    # vae = Model(inputs, z_decoded, name='vae_mlp')

    # def custom_loss():
    #     def kl_loss(y_true, y_pred):
    #         x = K.flatten(y_true)
    #         z_decoded = K.flatten(y_pred)
    #         # Reconstruction loss
    #         xent_loss = keras.metrics.binary_crossentropy(x, z_decoded)
    #         # KL divergence
    #         #kl_loss = -5e-5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    #         kl_loss = space['kl_loss'] * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    #         return K.mean(xent_loss + kl_loss)
    #     return kl_loss


    # model_loss = custom_loss()

    adam = keras.optimizers.Adam(lr=space['lr'])
    rmsprop = keras.optimizers.RMSprop(lr=space['lr_1'])
    sgd = keras.optimizers.SGD(lr=space['lr_2'])

    choiceval = space['choiceval']
    if choiceval == 'adam':
        optim = adam
    elif choiceval == 'rmsprop':
        optim = rmsprop
    else:
        optim = sgd

    model.compile(loss='mse', optimizer=optim)
    if 'results' not in globals():
        global results
        results = []
    results = []

    result = model.fit(x=tr, y=ti, batch_size=space['batch_size'], epochs=3, verbose=2, validation_split=0.1, shuffle=True)
    valLoss = np.amin(result.history['val_loss'])
    if np.isnan(valLoss) == True:
        valLoss = 999.0
    K.clear_session()
    return {'loss':  valLoss, 'status': STATUS_OK}

  0%|          | 0/150 [00:00<?, ?it/s, best loss: ?]
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py in _get_default_graph()
     65     try:
---> 66         return tf.get_default_graph()
     67     except AttributeError:

AttributeError: module 'tensorflow' has no attribute 'get_default_graph'

During handling of the above exception, another exception occurred:

RuntimeError                              Traceback (most recent call last)
15 frames
/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py in _get_default_graph()
     67     except AttributeError:
     68         raise RuntimeError(
---> 69             'It looks like you are trying to use '
     70             'a version of multi-backend Keras that '
     71             'does not support TensorFlow 2.0. We recommend '

RuntimeError: It looks like you are trying to use a version of multi-backend Keras that does not support TensorFlow 2.0. We recommend using `tf.keras`, or alternatively, downgrading to TensorFlow 1.14.
...
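
From the last message I understand that standalone (multi-backend) Keras no longer supports TensorFlow 2.0. I assume the fix would be to move every import to tf.keras, roughly as sketched below (these are just the tf.keras equivalents of my imports above; I have not managed to get this working with Hyperas yet):

# hypothetical tf.keras equivalents of the standalone keras imports above (untested with hyperas)
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.layers import (Input, Dropout, BatchNormalization, Activation, Add,
                                     Flatten, Dense, Reshape, LeakyReLU, Lambda,
                                     Conv2D, Conv2DTranspose, MaxPooling2D, concatenate)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers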