Как обновить TensorFlow с помощью Anaconda? - PullRequest
0 голосов
/ 17 июня 2020

Я следовал этому руководству, чтобы установить TensorFlow: https://www.youtube.com/watch?v=gDzAm25CORk. Вот мой код:

# Convolutional Neural Networks — binary cat/dog image classifier.
# Importing the libraries
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
tf.__version__

# Part 1 - Data Preprocessing
# Preprocessing the Training set: rescale pixels to [0, 1] and apply light
# augmentation (shear/zoom/horizontal flip) to reduce overfitting.
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
training_set = train_datagen.flow_from_directory(
        'dataset/training_set',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')
# Preprocessing the Test set: rescale only — evaluation data is never augmented.
test_datagen = ImageDataGenerator(rescale=1./255)
test_set = test_datagen.flow_from_directory(
        'dataset/test_set',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')

# Part 2 - Building the CNN
# Initialising the CNN
cnn = tf.keras.models.Sequential()
# Step 1 - Convolution
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))
# Step 2 - Pooling
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# Adding a second convolutional layer
# (FIX: this line was missing its leading '#' and was a SyntaxError as posted.)
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
# Step 3 - Flattening
cnn.add(tf.keras.layers.Flatten())
# Step 4 - Full Connection
cnn.add(tf.keras.layers.Dense(units=128, activation='relu'))
# Step 5 - Output Layer: sigmoid produces the probability of class 1 ('dog').
cnn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))

# Part 3 - Training the CNN
# Compiling the CNN
cnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# NOTE(review): the ImportError in the pasted traceback ("Could not import
# PIL.Image") means Pillow is not installed in the environment — it is not a
# TensorFlow-version problem. `flow_from_directory` needs PIL to decode images.
# Fix: `conda install pillow` (or `pip install pillow`) in the same env.
cnn.fit(x=training_set, validation_data=test_set, epochs=25)

# Part 4 - Making a single prediction
import numpy as np
# FIX: the original `from keras.preprocessing.image import image` is a wrong
# path — `image` IS the module `keras.preprocessing.image`, not an attribute
# inside it, so that import raises ImportError. Import the module itself.
from keras.preprocessing import image
test_image = image.load_img('dataset/single_prediction/cat_or_dog1.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
# Model expects a batch dimension: (1, 64, 64, 3).
test_image = np.expand_dims(test_image, axis=0)
result = cnn.predict(test_image)
training_set.class_indices
# FIX: the sigmoid output is a float strictly between 0 and 1, so `== 1` is
# almost never true and the original code printed 'cat' for nearly every
# image. Threshold the probability at 0.5 instead.
if result[0][0] > 0.5:
    prediction = 'dog'
else:
    prediction = 'cat'
print(prediction)

Я предлагаю вам игнорировать большую часть кода, но когда дело доходит до той части, где вы тренируете CNN на обучающем наборе и оцениваете его на тестовом наборе, я получаю ошибку, которую я показал ниже:

---------------------------------------------------------------------------
ImportError                               Traceback (most recent call last)
<ipython-input-13-894200829115> in <module>
----> 1 cnn.fit(x = training_set, validation_data = test_set, epochs = 25)

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    817         max_queue_size=max_queue_size,
    818         workers=workers,
--> 819         use_multiprocessing=use_multiprocessing)
    820 
    821   def evaluate(self,

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    233           max_queue_size=max_queue_size,
    234           workers=workers,
--> 235           use_multiprocessing=use_multiprocessing)
    236 
    237       total_samples = _get_total_number_of_samples(training_data_adapter)

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_training_inputs(model, x, y, batch_size, epochs, sample_weights, class_weights, steps_per_epoch, validation_split, validation_data, validation_steps, shuffle, distribution_strategy, max_queue_size, workers, use_multiprocessing)
    591         max_queue_size=max_queue_size,
    592         workers=workers,
--> 593         use_multiprocessing=use_multiprocessing)
    594     val_adapter = None
    595     if validation_data:

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _process_inputs(model, mode, x, y, batch_size, epochs, sample_weights, class_weights, shuffle, steps, distribution_strategy, max_queue_size, workers, use_multiprocessing)
    704       max_queue_size=max_queue_size,
    705       workers=workers,
--> 706       use_multiprocessing=use_multiprocessing)
    707 
    708   return adapter

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, standardize_function, shuffle, workers, use_multiprocessing, max_queue_size, **kwargs)
    950         use_multiprocessing=use_multiprocessing,
    951         max_queue_size=max_queue_size,
--> 952         **kwargs)
    953 
    954   @staticmethod

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, standardize_function, workers, use_multiprocessing, max_queue_size, **kwargs)
    745     # Since we have to know the dtype of the python generator when we build the
    746     # dataset, we have to look at a batch to infer the structure.
--> 747     peek, x = self._peek_and_restore(x)
    748     assert_not_namedtuple(peek)
    749 

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py in _peek_and_restore(x)
    954   @staticmethod
    955   def _peek_and_restore(x):
--> 956     return x[0], x
    957 
    958   def _make_callable(self, x, workers, use_multiprocessing, max_queue_size):

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\keras_preprocessing\image\iterator.py in __getitem__(self, idx)
     63         index_array = self.index_array[self.batch_size * idx:
     64                                        self.batch_size * (idx + 1)]
---> 65         return self._get_batches_of_transformed_samples(index_array)
     66 
     67     def __len__(self):

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\keras_preprocessing\image\iterator.py in _get_batches_of_transformed_samples(self, index_array)
    228                            color_mode=self.color_mode,
    229                            target_size=self.target_size,
--> 230                            interpolation=self.interpolation)
    231             x = img_to_array(img, data_format=self.data_format)
    232             # Pillow images should be closed after `load_img`,

D:\Anaconda install\envs\tensorflow-sessions\lib\site-packages\keras_preprocessing\image\utils.py in load_img(path, grayscale, color_mode, target_size, interpolation)
    106         color_mode = 'grayscale'
    107     if pil_image is None:
--> 108         raise ImportError('Could not import PIL.Image. '
    109                           'The use of `load_img` requires PIL.')
    110     img = pil_image.open(path)

ImportError: Could not import PIL.Image. The use of `load_img` requires PIL.

Теперь вернёмся к части «Обновление TensorFlow»: этот код был написан для TensorFlow 2.2.0, а у меня версия 2.1.0. Я считаю, что получаю эту ошибку, потому что у меня TensorFlow 2.1.0, а этот код предназначен для TensorFlow 2.2.0. Я установил его с помощью Anaconda Navigator в виртуальной среде, и важный вопрос состоит в том, как обновить его тем же способом, каким я его установил.

Я НЕ ИСПОЛЬЗОВАЛ КОМАНДНУЮ СТРОКУ

...