Python Conv neural network problems
0 votes
/ May 31, 2018

I have never programmed in Python before, and I need to write an inference function for a class. Most of the code was written for us students; we just have to implement the inference function. I found a few examples online, but we have to use layers different from the ones used there. I'm confused about the shaping, and I'm not too sure about any of this code.

Here is what I have:

    import tensorflow as tf

    def _variable_with_weight_decay(name, shape, wd):
      with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape,
                              initializer=tf.contrib.layers.xavier_initializer())
      if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd,
                                   name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
      return var

    def inference(X, phase=False, dropout_rate=0.8, n_classes=10, weight_decay=1e-4):
      # logits should be of dimension (batch_size, n_classes)
      # X is a tensor with dimension (None, 32, 32, 3)

      # conv layer 1
      batchSize = tf.shape(X)[0]  # is this batch_size?

      with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], wd=weight_decay)
        conv = tf.nn.conv2d(X, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

      # batch normalization 1
      epsilon = 1e-3
      batch_mean1, batch_var1 = tf.nn.moments(pool1, [0])
      scale1 = tf.Variable(tf.ones([64]))
      beta1 = tf.Variable(tf.zeros([64]))
      batch1 = tf.nn.batch_normalization(pool1, batch_mean1, batch_var1, scale1, beta1, epsilon)

      # conv layer 2
      with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], wd=weight_decay)
        conv = tf.nn.conv2d(batch1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

      # batch normalization 2
      batch_mean2, batch_var2 = tf.nn.moments(pool2, [0])
      keep_prob = tf.placeholder(tf.float32)
      scale2 = tf.Variable(tf.ones([64]))
      beta2 = tf.Variable(tf.zeros([64]))
      batch2 = tf.nn.batch_normalization(pool2, batch_mean2, batch_var2, scale2, beta2, epsilon)

      # dropout 2
      drop2 = tf.nn.dropout(batch2, keep_prob)

      # local1 (fully connected)
      with tf.variable_scope('local1') as scope:
        reshape = tf.reshape(drop2, [batchSize, -1])
        #dim = reshape.get_shape()[1].value
        dim = reshape[-1].value  # this is the line that seems to break
        weights = _variable_with_weight_decay('weights', shape=[dim, 384], wd=0.004)
        biases = tf.get_variable('biases', [384], initializer=tf.constant_initializer(0.1))
        local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        #_activation_summary(local1)

      # local2
      with tf.variable_scope('local2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[384, 192], wd=0.004)
        biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))
        local2 = tf.nn.relu(tf.matmul(local1, weights) + biases, name=scope.name)

      # softmax linear
      with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, n_classes], wd=None)
        biases = tf.get_variable('biases', [n_classes], initializer=tf.constant_initializer(0.1))
        logits = tf.add(tf.matmul(local2, weights), biases, name=scope.name)

      return logits

I'm supposed to have 4 convolutional layers, but this has already confused me too much, and now it's breaking. It keeps saying something about rank 1 and rank 0. How do I change it so the logits come out as (batch_size, n_classes)? And I know the indentation is off, but that's just how it pasted in here, not how it is in my actual code.
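After staring at it, I suspect `reshape[-1].value` is slicing the tensor (a rank-3 piece of it) instead of reading its shape, which might explain the rank complaint. Here's a minimal sketch of what I'm considering instead (untested; it assumes the input really is 32x32x3 and that the two stride-2 pools take it down to 8x8x64):

    # Untested sketch for the local1 flattening. Assumes the input is
    # fixed at 32x32x3, so after two stride-2 'SAME' max pools the
    # feature map going into drop2 should be (None, 8, 8, 64).
    dim = 8 * 8 * 64                         # 4096, known when the graph is built
    reshape = tf.reshape(drop2, [-1, dim])   # -1 lets TF infer the batch size
    weights = _variable_with_weight_decay('weights', shape=[dim, 384], wd=0.004)

That way none of the weight shapes would depend on the batch size, but I'm not sure if that's the right approach.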

Thanks

EDIT --------------------------------------------------------

OK, so I've fixed some of it, but now there's a problem with this line:

    dim = reshape.get_shape()[1].value

My batch size is 32, except at the very end, where it drops to 16. For example, if there are 336 samples total, there would be 10 full batches of 32 and 1 batch of 16. The line above gives the error: "Shape of a new variable must be fully defined, but instead was (?, 256)".
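From what I can tell, `reshape.get_shape()[1]` comes out as None because the batch dimension is dynamic, and `tf.get_variable` refuses a shape that isn't fully defined. Since the input spatial size is fixed, I'm wondering if I should just compute the flattened width by hand instead (untested sketch; it assumes the four stride-2 pools take 32x32 down to 2x2 with 256 channels, matching the local1 layer in the new code below):

    # Untested sketch for local1 in the new code below: derive the
    # flattened width from the fixed 32x32 input instead of from the
    # dynamic tensor shape. Four stride-2 'SAME' pools: 32 -> 16 -> 8 -> 4 -> 2,
    # so batch4 should be (None, 2, 2, 256).
    dim = 2 * 2 * 256                        # 1024, fully defined
    reshape = tf.reshape(batch4, [-1, dim])  # -1 also handles the short final batch of 16
    weights = _variable_with_weight_decay('weights', shape=[dim, 256], wd=0.004)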

Here is all of my new code. I went ahead and tried to do the 4 convolution layers.

    import tensorflow as tf

    def _variable_with_weight_decay(name, shape, wd):
      with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape, initializer=tf.contrib.layers.xavier_initializer())
      if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
      return var

    def inference(X, phase=False, dropout_rate=0.8, n_classes=10, weight_decay=1e-4):
      # logits should be of dimension (batch_size, n_classes)

      # conv layer 1
      #batchSize = tf.shape(X)[0]
      batchSize = X.get_shape().as_list()[0]
      if batchSize is None:
        batchSize = 32

      with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], wd=weight_decay)
        conv = tf.nn.conv2d(X, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv1)

      # pool 1
      pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

      # batch normalization 1
      epsilon = 1e-3
      batch_mean1, batch_var1 = tf.nn.moments(pool1, [0])
      scale1 = tf.Variable(tf.ones([64]))
      beta1 = tf.Variable(tf.zeros([64]))
      batch1 = tf.nn.batch_normalization(pool1, batch_mean1, batch_var1, scale1, beta1, epsilon)

      # dropout 1
      #keep_prob = tf.placeholder(tf.float32, [batchSize, 32, 32, 3])
      #drop1 = tf.nn.dropout(batch1, keep_prob)

      # conv layer 2
      with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 128], wd=weight_decay)
        conv = tf.nn.conv2d(batch1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [128], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv2)

      # pool 2
      pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

      # batch normalization 2
      batch_mean2, batch_var2 = tf.nn.moments(pool2, [0])
      scale2 = tf.Variable(tf.ones([128]))
      beta2 = tf.Variable(tf.zeros([128]))
      batch2 = tf.nn.batch_normalization(pool2, batch_mean2, batch_var2, scale2, beta2, epsilon)

      # dropout 2
      #drop2 = tf.nn.dropout(batch2, keep_prob)

      # conv layer 3
      with tf.variable_scope('conv3') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 128, 256], wd=weight_decay)
        conv = tf.nn.conv2d(batch2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [256], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv3)

      # pool 3
      pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool3')

      # batch normalization 3
      batch_mean3, batch_var3 = tf.nn.moments(pool3, [0])
      scale3 = tf.Variable(tf.ones([256]))
      beta3 = tf.Variable(tf.zeros([256]))
      batch3 = tf.nn.batch_normalization(pool3, batch_mean3, batch_var3, scale3, beta3, epsilon)

      # dropout 3
      #drop3 = tf.nn.dropout(batch3, keep_prob)

      # conv layer 4
      with tf.variable_scope('conv4') as scope:
        kernel = _variable_with_weight_decay('weights', shape=[5, 5, 256, 256], wd=weight_decay)
        conv = tf.nn.conv2d(batch3, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', [256], initializer=tf.constant_initializer(0.0))
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name=scope.name)
        #_activation_summary(conv4)

      # pool 4
      pool4 = tf.nn.max_pool(conv4, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool4')

      # batch normalization 4
      batch_mean4, batch_var4 = tf.nn.moments(pool4, [0])
      scale4 = tf.Variable(tf.ones([256]))
      beta4 = tf.Variable(tf.zeros([256]))
      batch4 = tf.nn.batch_normalization(pool4, batch_mean4, batch_var4, scale4, beta4, epsilon)

      # dropout 4
      #drop4 = tf.nn.dropout(batch4, keep_prob)

      # local1 (fully connected)
      with tf.variable_scope('local1') as scope:
        reshape = tf.reshape(batch4, [batchSize, -1])
        dim = reshape.get_shape()[1].value  # this is the line that errors out
        #dim = batchSize*32
        weights = _variable_with_weight_decay('weights', shape=[dim, 256], wd=0.004)
        biases = tf.get_variable('biases', [256], initializer=tf.constant_initializer(0.1))
        local1 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
        #_activation_summary(local1)

      # local2
      with tf.variable_scope('local2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[256, batchSize], wd=0.004)
        biases = tf.get_variable('biases', [batchSize], initializer=tf.constant_initializer(0.1))
        local2 = tf.nn.relu(tf.matmul(local1, weights) + biases, name=scope.name)
        #_activation_summary(local2)

      # softmax linear
      with tf.variable_scope('softmax_linear') as scope:
        #reshape = tf.reshape(batch4, [batchSize, n_classes])
        weights = _variable_with_weight_decay('weights', [batchSize, n_classes], wd=None)
        biases = tf.get_variable('biases', [n_classes], initializer=tf.constant_initializer(0.1))
        logits = tf.add(tf.matmul(local2, weights), biases, name=scope.name)
        #_activation_summary(logits)

      return logits
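
One more thing I'm second-guessing: I used `batchSize` as a layer width in `local2` and `softmax_linear`, and I don't think the weight shapes are supposed to depend on the batch size at all. If that's right, I'd swap in fixed widths like my first version had (untested):

    # Untested sketch: fixed layer widths, so nothing depends on batchSize.
    with tf.variable_scope('local2') as scope:
        weights = _variable_with_weight_decay('weights', shape=[256, 192], wd=0.004)
        biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))
        local2 = tf.nn.relu(tf.matmul(local1, weights) + biases, name=scope.name)

    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, n_classes], wd=None)
        biases = tf.get_variable('biases', [n_classes], initializer=tf.constant_initializer(0.1))
        logits = tf.add(tf.matmul(local2, weights), biases, name=scope.name)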