I am trying to build and train a convolutional neural network on a Raspberry Pi with TensorFlow, and I am getting a strange error that doesn't seem to make sense: it just confirms that the directory exists, and I don't know how to resolve it. Here is the neural network itself (see the note on the expected input format right after the listing):
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# DataSet, base.Datasets and dense_to_one_hot are used below but were not
# imported; they come from the TF 1.x MNIST tutorial module this is based on.
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet, dense_to_one_hot
def _read32(bytestream):
    dt = np.dtype(np.uint32).newbyteorder('>')
    return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def read_data_sets(fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True,
                   validation_size=1440,
                   seed=None):
    if fake_data:
        def fake():
            return DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return base.Datasets(train=train, validation=validation, test=test)

    TRAIN_IMAGES = "/home/pi/Desktop/TrainData"
    TRAIN_LABELS = ["Gravel", "Water", "Road"]
    TEST_IMAGES = "/home/pi/Desktop/TestData"
    TEST_LABELS = ["Gravel", "Water", "Road"]

    with gfile.Open(TRAIN_IMAGES, 'rb') as f:
        train_images = extract_images(f)
    with gfile.Open(TRAIN_LABELS, 'rb') as f:
        train_labels = extract_labels(f, one_hot=one_hot)
    with gfile.Open(TEST_IMAGES, 'rb') as f:
        test_images = extract_images(f)
    with gfile.Open(TEST_LABELS, 'rb') as f:
        test_labels = extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        raise ValueError('Validation size should be between 0 and {}. Received: {}.'
                         .format(len(train_images), validation_size))

    validation_images = test_images
    validation_labels = test_labels
    train_images = train_images
    train_labels = train_labels

    options = dict(dtype=dtype, reshape=reshape, seed=seed)
    train = DataSet(train_images, train_labels, **options)
    validation = DataSet(validation_images, validation_labels, **options)
    test = DataSet(test_images, test_labels, **options)
    return train, validation
def extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

    Args:
        f: A file object that can be passed into a gzip reader.

    Returns:
        data: A 4D uint8 numpy array [index, y, x, depth].

    Raises:
        ValueError: If the bytestream does not start with 2051.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                             (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = np.frombuffer(buf, dtype=np.uint8)  # numpy is imported as np
        data = data.reshape(num_images, rows, cols, 1)
        return data
def extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index].

    Args:
        f: A file object that can be passed into a gzip reader.
        one_hot: Does one hot encoding for the result.
        num_classes: Number of classes for the one hot encoding.

    Returns:
        labels: a 1D uint8 numpy array.

    Raises:
        ValueError: If the bytestream doesn't start with 2049.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                             (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = np.frombuffer(buf, dtype=np.uint8)  # numpy is imported as np
        if one_hot:
            return dense_to_one_hot(labels, num_classes)
        return labels
train, test = read_data_sets(one_hot=True)

def weight_variable(shape):
    w = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(w)

def bias_variable(shape):
    b = tf.constant(0.1, shape=shape)
    return tf.Variable(b)

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def new_conv_layer(input, num_input_channels, filter_size, num_filters, use_pooling=True):
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = weight_variable(shape)
    biases = bias_variable([num_filters])
    layer = tf.nn.relu(tf.nn.conv2d(input=input, filter=weights,
                                    strides=[1, 1, 1, 1],
                                    padding='SAME') + biases)
    if use_pooling:
        return max_pool_2x2(layer), weights
    return layer, weights
def flatten_layer(layer):
    layer_shape = layer.get_shape()  # was `layer-get_shape()`, a typo
    num_features = layer_shape[1:4].num_elements()
    layer_flat = tf.reshape(layer, [-1, num_features])
    return layer_flat, num_features
def new_fc_layer(input, num_inputs, num_outputs, use_relu=True):
    weights = weight_variable([num_inputs, num_outputs])
    biases = bias_variable([num_outputs])
    layer = tf.matmul(input, weights) + biases
    if use_relu:
        layer = tf.nn.relu(layer)
    return layer
x = tf.placeholder(tf.float32, shape=[None, 1000 * 750], name='input_data')
x_image = tf.reshape(x, [-1, 1000, 750, 1])
y = tf.placeholder(tf.float32, shape=[None, 3], name='correct_labels')

convlayer1, w1 = new_conv_layer(x_image, 1, 5, 32)
convlayer2, w2 = new_conv_layer(convlayer1, 32, 4, 64)
flat_layer, num_features = flatten_layer(convlayer2)
fclayer = new_fc_layer(flat_layer, num_features, 1024)

keep_prob = tf.placeholder(tf.float32)
drop_layer = tf.nn.dropout(fclayer, keep_prob)

W_f = weight_variable([1024, 3])
b_f = bias_variable([3])
y_f = tf.matmul(drop_layer, W_f) + b_f
y_f_softmax = tf.nn.softmax(y_f)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_f))
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y_f_softmax, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()

num_steps = 1000
batch_size = 10  # renamed from bat_size: the training loop below reads batch_size
test_size = 1440
with tf.Session() as sess:
    sess.run(init)
    for step in range(num_steps):
        batch = train.next_batch(batch_size)
        if step % 50 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y: batch[1], keep_prob: 1.0})
            print('step %d, training accuracy %f' % (step, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})
    print("Done!")
    print("Evaluating...")
    test_accuracy = 0.0
    for i in xrange(test_size // 40):  # integer division: xrange needs an int under Python 3
        batch = test.next_batch(40)
        acc = accuracy.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
        if i % 10 == 0:
            print('%d: test accuracy %f' % (i, acc))
        test_accuracy += acc
    print("avg test accuracy: %f" % (test_accuracy / (test_size // 40)))
The full error traceback can be found below:
Traceback (most recent call last):
  File "/home/pi/newCNN.py", line 124, in <module>
    train, test = read_data_sets(one_hot = True)
  File "/home/pi/newCNN.py", line 47, in read_data_sets
    train_images = extract_images(f)
  File "/home/pi/newCNN.py", line 86, in extract_images
    magic = _read32(bytestream)
  File "/home/pi/newCNN.py", line 18, in _read32
    return np.frombuffer(bytestream.read(4), dtype = dt)[0]
  File "/usr/lib/python3.5/gzip.py", line 274, in read
    return self._buffer.read(size)
  File "/usr/lib/python3.5/_compression.py", line 68, in readinto
    data = self.read(len(byte_view))
  File "/usr/lib/python3.5/gzip.py", line 461, in read
    if not self._read_gzip_header():
  File "/usr/lib/python3.5/gzip.py", line 404, in _read_gzip_header
    magic = self._fp.read(2)
  File "/usr/lib/python3.5/gzip.py", line 91, in read
    self.file.read(size-self._length+read)
  File "/home/pi/.local/lib/python3.5/site-packages/tensorflow/python/lib/io/file_io.py", line 132, in read
    pywrap_tensorflow.ReadFromStream(self._read_buf, length, status))
  File "/home/pi/.local/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py", line 519, in __exit__
    c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.FailedPreconditionError: /home/pi/Desktop/TrainData; Is a directory
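The last line is consistent with a quick standard-library check: the path exists and is a directory, so gfile.Open is being handed a folder rather than a readable file:

import os

path = "/home/pi/Desktop/TrainData"
print(os.path.exists(path))  # True
print(os.path.isdir(path))   # True, matching the "Is a directory" error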
Thanks in advance.

P.S. If you notice any obvious errors in the CNN, please let me know in the comments.