TensorFlow error: ValueError: Cannot feed value of shape

Problem description

I wrote a simple neural network in TensorFlow and got this error: ValueError: Cannot feed value of shape (50,) for Tensor "y-output:0", which has shape "(?, 2)"
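For context, the (?, 2) in the message is the shape of the label placeholder defined in the code below (the tensor name in the error, y-output:0, is the :0 output of that placeholder):

import tensorflow as tf
# the label placeholder from the code below
y_ = tf.placeholder(tf.float32, [None, 2], name="y-output")
# feeding an array of shape (50,) (or (50, 1)) into y_ fails,
# because neither shape matches (?, 2)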

Background and what I have tried

I tried np.reshape, but it didn't work (I don't know if I used it incorrectly).
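A minimal sketch of what I mean, assuming a batch of 50 labels as in the code below: each sample has only one label value, and reshape just rearranges existing values, so the closest it gets is (50, 1), never (50, 2).

import numpy as np
ys = np.zeros(50)                # 50 labels, shape (50,)
print(ys.reshape(50, -1).shape)  # (50, 1) -- still not (50, 2)
# ys.reshape(50, 2) raises:
# ValueError: cannot reshape array of size 50 into shape (50,2)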

Related code

The code is modeled on the MNIST neural network example in the book TensorFlow: Practical Google Deep Learning Framework.
Each row of my data contains 629 values; the first 200 columns are the features, and the penultimate column is the label.
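A minimal sketch of that row layout, using the same np.loadtxt call as the code below (the column indices come from the description above):

import numpy as np
data = np.loadtxt("./coef_arr.txt")  # each row has 629 values
features = data[:, :200]             # the first 200 columns
labels = data[:, -2]                 # the penultimate column, 0 or 1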

#!/usr/bin/env python3
# coding: utf-8


import pandas as pd
import numpy as np
import os
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold

# input and output layer sizes
INPUT_NODE = 200
OUTPUT_NODE = 2

# hidden layer size and training hyperparameters
LAYER1_NODE = 500
BATCH_SIZE = 50
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
TOTAL_NUM = 1376


def load_data(file):
    """Load the data file, shuffle it, and split it 70/30 into train and test sets."""
    data = np.loadtxt(file)
    np.random.shuffle(data)
    global TOTAL_NUM
    TOTAL_NUM = len(data)
    train_num = int(TOTAL_NUM*0.7)
    test_data = data[train_num:, :]
    train_data = data[:train_num, :]
    # keep TOTAL_NUM in sync with the training-set size used for batching
    TOTAL_NUM = len(train_data)
    return train_data, test_data


def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass; avg_class, if given, supplies moving-average parameters."""
    if avg_class is None:
        # hidden layer with ReLU activation
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        # output logits (softmax is applied inside the loss)
        return tf.matmul(layer1, weights2) + biases2
    else:
        # read the shadow (moving-average) value of each parameter
        # through avg_class.average instead of the raw variable
        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) +
                            avg_class.average(biases1))
        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)


def train():
    """
    """
    -sharp 
    train_data, test_data = load_data("./coef_arr.txt")

    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, 2], name="y-output")

    # hidden layer parameters
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    # output layer parameters
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # forward pass without the moving-average class (avg_class=None)
    y = inference(x, None, weights1, biases1, weights2, biases2)

    # step counter; trainable=False keeps it out of the moving average
    global_step = tf.Variable(0, trainable=False)

    # moving-average class driven by global_step
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)

    # apply the moving average to all trainable variables;
    # tf.trainable_variables() returns the GraphKeys.TRAINABLE_VARIABLES
    # collection, i.e. every variable created without trainable=False
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # forward pass using the moving-average (shadow) parameters
    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)

    # cross entropy between the logits and the sparse class labels
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    # average cross entropy over the batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # L2 regularizer
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # regularization loss on the weights only, not the biases
    regularization = regularizer(weights1)+regularizer(weights2)
    # total loss
    loss = cross_entropy_mean+regularization
    # exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        TOTAL_NUM,
        LEARNING_RATE_DECAY)

    # gradient descent; minimize() also increments global_step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # run the gradient update and the moving-average update together
    train_op = tf.group(train_step, variables_averages_op)

    # compare the moving-average model's predictions with the labels
    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))
    # fraction of correct predictions
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # create a session, train, and evaluate
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # validation feed: all 592 held-out rows
        validate_feed = {x: np.reshape(test_data[:, :200], (592, -1)),
                         y_: np.reshape(test_data[:, -2].astype(np.uint8), (592, -1))}

        # training loop
        for i in range(TRAINING_STEPS):
            # report validation accuracy every 1000 steps
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After {0} training steps, validation accuracy using average model is {1}".format(i, validate_acc))

            # sample a random batch of BATCH_SIZE rows
            train_data_index = np.random.randint(TOTAL_NUM, size=BATCH_SIZE)
            xs = np.reshape(train_data[train_data_index, :200], (50, -1))
            ys = np.reshape(train_data[train_data_index, -2].astype(np.uint8), (50, -1))
            sess.run(train_op, feed_dict={x: xs, y_: ys})

        # final accuracy on the held-out data
        test_acc = sess.run(accuracy, feed_dict=validate_feed)
        print("After {0} training steps, test accuracy using average model is {1}".format(TRAINING_STEPS, test_acc))


if __name__ == "__main__":
    train()

The structure of the input data is as follows:

In [7]:     test_data = data[train_num:, :]
   ...:     train_data = data[:train_num, :]
   ...:
   ...:
# the 592 test samples
In [8]: x = test_data[:,:200]; x
Out[8]:
array([[ 8.91246164e+00,  1.01078152e+01,  8.80562559e+00, ...,
         0.00000000e+00,  2.68379727e-01, -0.00000000e+00],
       [ 2.24351527e+00,  2.21014256e+00,  3.24120606e-01, ...,
        -0.00000000e+00,  4.15543410e-01, -0.00000000e+00],
       [ 1.66988056e+01,  1.73381736e+01,  1.68301896e+01, ...,
         0.00000000e+00,  1.02455868e-02, -0.00000000e+00],
       ...,
       [ 2.35881337e+00,  4.92812666e+00,  2.67332157e+00, ...,
         1.41453446e-01,  0.00000000e+00, -1.59909463e-02],
       [-2.10887190e-01,  1.31097380e+00,  8.98165441e-01, ...,
         2.33660936e-02, -0.00000000e+00,  6.08660936e-02],
       [-7.28152009e+00, -6.08726298e+00, -4.23098025e+00, ...,
        -7.18160515e-02,  0.00000000e+00,  0.00000000e+00]])

In [9]: x.shape
Out[9]: (592, 200)

# the first 50 training samples
In [10]: x_ = train_data[:50, :200]; x_
Out[10]:
array([[ 9.56146318,  9.52391585,  9.43482532, ..., -0.        ,
         0.14322389,  0.        ],
       [13.28242346, 13.83013572, 14.28976303, ..., -0.        ,
        -0.16746366,  0.09310116],
       [ 1.3820352 ,  1.54098823,  3.45830433, ..., -0.        ,
        -0.06416255,  0.        ],
       ...,
       [10.98494526, 11.38627988, 11.44886119, ..., -0.        ,
        -0.05050572, -0.        ],
       [ 7.56331162,  7.9327677 ,  8.63404904, ..., -0.01920361,
        -0.        , -0.03519111],
       [ 4.60998919,  5.06155302,  7.19940497, ..., -0.27916831,
         0.        , -0.        ]])

In [11]: x_.shape
Out[11]: (50, 200)
# y_test: the penultimate (label) column
In [18]: y_test = test_data[:,-2]; y_test
Out[18]:
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
       0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
      ... ...
       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])

The error message is as follows:

ValueError: Cannot feed value of shape (592, 1) for Tensor "y-output:0", which has shape "(?, 2)"

I read online that reshape is the fix, but it still doesn't work after using reshape in the code. How should I change it? Thank you!
In addition, I have two classes here (the labels shown above are 1 and 0), and I set the number of output nodes of the network to 2, but the y I feed for training and testing is 1-dimensional, so I guess that is where the mismatch is? The code does run after I change the number of output nodes, but the reported accuracy is then always 1. How do I fix this? Thank you!
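Is something like the following the right direction? A minimal sketch of one way to make the feed match the (?, 2) placeholder while keeping two output nodes, assuming the labels really are only 0 and 1: one-hot encode the label column with np.eye before feeding it.

import numpy as np
labels = test_data[:, -2].astype(np.int64)  # shape (592,), values 0/1
labels_one_hot = np.eye(2)[labels]          # shape (592, 2), rows [1,0] or [0,1]
validate_feed = {x: test_data[:, :200], y_: labels_one_hot}

With one-hot rows, the tf.argmax(y_, 1) already used in the loss and accuracy ops recovers the original class index, so the rest of the graph would stay unchanged.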
