# tf v1 for undergoing linear regression
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
#Clear the default graph to default everything
tf.compat.v1.reset_default_graph()
# Placeholders are places data can be fed into the model which
# indicate a value that will be fed later when the network is run.
input_data = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
output_data = tf.compat.v1.placeholder(dtype=tf.float32, shape=None)
#Blindly pick 2 numbers to make up the y = mx + b part of the linear model
slope = tf.Variable(0.5, dtype = tf.float32)
intercept = tf.Variable(1, dtype = tf.float32)
model = slope*input_data + intercept
#finds the mean cost from the arrays
loss = tf.reduce_mean(tf.square(model - output_data))
#will find the smallest cost with a gradient step of 0.01 and will minimize the parameter
train = tf.optimizers.SGD(learning_rate=0.01).minimize(loss, var_list=None)
Я получаю ошибку
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
in
16
17 #will find the smallest cost with a gradient step of 0.01 and will minimize the parameter
---> 18 train = tf.optimizers.SGD(learning_rate=0.01).minimize(tf.convert_to_tensor(loss), var_list=None)
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in minimize(self, loss, var_list, grad_loss, name)
331
332
--> 333 grads_and_vars = self._compute_gradients(
334 loss, var_list=var_list, grad_loss=grad_loss)
335
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in _compute_gradients(self, loss, var_list, grad_loss)
385 with backprop.GradientTape() as tape:
386 if not callable(var_list):
--> 387 tape.watch(var_list)
388 loss_value = loss()
389 if callable(var_list):
/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/tensorflow/python/eager/backprop.py in watch(self, tensor)
878 for t in nest.flatten(tensor):
879 if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):
--> 880 raise ValueError("Passed in object of type {}, not tf.Tensor".format(
881 type(t)))
882 if not backprop_util.IsTrainable(t):
ValueError: Passed in object of type , not tf.Tensor
Не уверен, что не так, поскольку tf.reduce_mean все равно возвращает tf.Tensor. Я делал это раньше, когда работал над изучением тензорного потока, когда он был еще в версии 1.13, но теперь я не уверен, как исправить это с новым обновлением 2.0.