ValueError: Dimensions must be equal, but are 49152 and 64 for 'Attention_0/add' (op: 'Add')

I want to try replacing the encoder and decoder in this github code (i.e., at line 83 of dcrnn_model.py) with an attention encoder and decoder.

Here is the code that precedes the encoder-decoder:

    max_diffusion_step = int(model_kwargs.get('max_diffusion_step', 2))
    cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000))
    filter_type = model_kwargs.get('filter_type', 'laplacian')
    horizon = int(model_kwargs.get('horizon', 1))
    max_grad_norm = float(model_kwargs.get('max_grad_norm', 5.0))
    num_nodes = int(model_kwargs.get('num_nodes', 1))
    num_rnn_layers = int(model_kwargs.get('num_rnn_layers', 1))
    rnn_units = int(model_kwargs.get('rnn_units'))
    seq_len = int(model_kwargs.get('seq_len'))
    use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False))
    input_dim = int(model_kwargs.get('input_dim', 1))
    output_dim = int(model_kwargs.get('output_dim', 1))
    aux_dim = input_dim - output_dim

    # Input (batch_size, timesteps, num_sensor, input_dim)
    self._inputs = tf.placeholder(tf.float32, shape=(batch_size, seq_len, num_nodes, input_dim), name='inputs')
    # Labels: (batch_size, timesteps, num_sensor, input_dim), same format with input except the temporal dimension.
    self._labels = tf.placeholder(tf.float32, shape=(batch_size, horizon, num_nodes, input_dim), name='labels')

    # GO_SYMBOL = tf.zeros(shape=(batch_size, num_nodes * input_dim))
    GO_SYMBOL = tf.zeros(shape=(batch_size, num_nodes * output_dim))

    cell = DCGRUCell(rnn_units, adj_mx, max_diffusion_step=max_diffusion_step, num_nodes=num_nodes,
                     filter_type=filter_type)

    cell_with_projection = DCGRUCell(rnn_units, adj_mx, max_diffusion_step=max_diffusion_step, num_nodes=num_nodes,
                                     num_proj=output_dim, filter_type=filter_type)
    encoding_cells = [cell] * num_rnn_layers
    decoding_cells = [cell] * (num_rnn_layers - 1) + [cell_with_projection]
    encoding_cells = tf.contrib.rnn.MultiRNNCell(encoding_cells, state_is_tuple=True)
    decoding_cells = tf.contrib.rnn.MultiRNNCell(decoding_cells, state_is_tuple=True)

    global_step = tf.train.get_or_create_global_step()
    # Outputs: (batch_size, timesteps, num_nodes, output_dim)
    with tf.variable_scope('DCRNN_SEQ'):
        inputs = tf.unstack(tf.reshape(self._inputs, (batch_size, seq_len, num_nodes * input_dim)), axis=1)
        labels = tf.unstack(
            tf.reshape(self._labels[..., :output_dim], (batch_size, horizon, num_nodes * output_dim)), axis=1)
        if aux_dim > 0:
            aux_info = tf.unstack(self._labels[..., output_dim:], axis=1)
            aux_info.insert(0, None)
        labels.insert(0, GO_SYMBOL)

        def _loop_function(prev, i):
            if is_training:
                # Return either the model's prediction or the previous ground truth in training.
                if use_curriculum_learning:
                    c = tf.random_uniform((), minval=0, maxval=1.)
                    threshold = self._compute_sampling_threshold(global_step, cl_decay_steps)
                    result = tf.cond(tf.less(c, threshold), lambda: labels[i], lambda: prev)
                else:
                    result = labels[i]
            else:
                # Return the prediction of the model in testing.
                result = prev
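            # NB: the `if False and` below permanently disables this auxiliary-input branch.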
            if False and aux_dim > 0: 
                result = tf.reshape(result, (batch_size, num_nodes, output_dim))
                result = tf.concat([result, aux_info[i]], axis=-1)
                result = tf.reshape(result, (batch_size, num_nodes * input_dim))
            return result
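
`_compute_sampling_threshold` is not shown in this excerpt; for context, the upstream dcrnn_model.py implements it as an inverse sigmoid decay, roughly as follows (a sketch reproduced from the repository, so double-check against the actual source):

    @staticmethod
    def _compute_sampling_threshold(global_step, k):
        # Inverse sigmoid decay: close to 1 early in training (feed the ground
        # truth) and decaying towards 0 (feed the model's own predictions).
        # A larger k gives a slower decay.
        return tf.cast(k / (k + tf.exp(global_step / k)), tf.float32)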

This is the original encoder-decoder code:

    # DCRNN encoder-decoder
    _, enc_state = tf.contrib.rnn.static_rnn(encoding_cells, inputs, dtype=tf.float32)  # encoder
    outputs, final_state = legacy_seq2seq.rnn_decoder(labels, enc_state, decoding_cells,
                                                      loop_function=_loop_function)  # decoder
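
As far as I understand, `static_rnn` returns a list of `seq_len` per-timestep outputs, each of shape `(batch_size, num_nodes * rnn_units)` for the encoder cell above, plus the final state tuple; `rnn_decoder` consumes only the state, so the encoder outputs are discarded here.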

My code is as follows:

    # Encoder and attention decoder
    encoder_outputs, enc_state = tf.contrib.rnn.static_rnn(encoding_cells, inputs, dtype=tf.float32)  # encoder
    # First calculate a concatenation of encoder outputs to put attention on.
    top_states = [tf.reshape(encoder_outputs, [-1, 1, decoding_cells.output_size])]
    attention_states = tf.concat(top_states, 1)
    outputs, final_state = legacy_seq2seq.attention_decoder(labels, enc_state, attention_states,
                                                            decoding_cells, loop_function=_loop_function)  # attention decoder

However, this raises the following dimension error:

    ValueError: Dimensions must be equal, but are 49152 and 64 for 'Train/DCRNN/DCRNN_SEQ/attention_decoder/Attention_0/add' (op: 'Add') with input shapes: [49152,1,1,207], [64,1,1,207].
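
The shapes in the error look consistent with the reshape going wrong: the encoder outputs have last dimension `num_nodes * rnn_units`, not `decoding_cells.output_size` (which is `num_nodes * output_dim = 207` because of the output projection), so reshaping the whole list of outputs at once folds the time and feature axes into the batch axis. Assuming the usual METR-LA settings (`seq_len = 12`, `batch_size = 64`, `rnn_units = 64`; the batch size is not shown above), `12 * 64 * 64 = 49152`, which matches the first input shape, while the decoder's query keeps the true batch of 64. A likely fix is the per-timestep reshape that `legacy_seq2seq.embedding_attention_seq2seq` itself uses, with the encoder cell's output size (a sketch, not tested against this model):

    # Encoder.
    encoder_outputs, enc_state = tf.contrib.rnn.static_rnn(encoding_cells, inputs, dtype=tf.float32)
    # Reshape each per-timestep output separately, using the *encoder* cell's
    # output size (num_nodes * rnn_units), then concatenate along the time axis
    # so that attention_states has shape (batch_size, seq_len, output size).
    top_states = [tf.reshape(e, [-1, 1, encoding_cells.output_size]) for e in encoder_outputs]
    attention_states = tf.concat(top_states, 1)
    # Attention decoder over the full sequence of encoder states.
    outputs, final_state = legacy_seq2seq.attention_decoder(labels, enc_state, attention_states,
                                                            decoding_cells, loop_function=_loop_function)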

...