Liu/ne_ZL_TF2.py

#=======================================================================
'''
# Linear regression model: fit a linear function.
# A linear regression model fits the data with an expression of the form y = W * x + b.
'''
#=======================================================================
#import tensorflow as tf
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
import numpy as np
tf.compat.v1.disable_eager_execution() # run in graph mode so tf.Session can be used
# Prepare the training data
x_data = np.float32(np.random.rand(100)) # np.random.rand returns float64 by default, so cast to float32
y_data = x_data * 2 + 1.6
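# A noisier variant (an assumption, not part of the original script) makes the fit
# less trivial, while the training loop below still recovers weights ~2 and biases ~1.6:
#   noise = np.random.normal(0.0, 0.05, 100).astype(np.float32)
#   y_data = x_data * 2 + 1.6 + noise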
# Weights and biases are updated continually during training, so store them as TF variables
weights = tf.Variable(tf.zeros([1])) # tf.zeros defaults to float32
biases = tf.Variable(tf.zeros([1]))
# Define the linear model
y = weights * x_data + biases
# Loss function: mean squared error. tf.reduce_mean() takes the mean; tf.square() squares element-wise.
loss = tf.reduce_mean(tf.square(y - y_data))
# Minimize the loss with gradient descent.
# The numeric argument (0.5) is the learning rate; loss is the objective being minimized.
#optimizer = tf.train.GradientDescentOptimizer(0.5)
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
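# For reference (a sketch, not part of the original script): optimizer.minimize(loss)
# builds the gradient-descent updates W <- W - 0.5 * dL/dW and b <- b - 0.5 * dL/db,
# where for the MSE loss above the gradients are
#   dL/dW = mean(2 * (y - y_data) * x_data)
#   dL/db = mean(2 * (y - y_data))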
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Run 201 training steps and report progress every 20 steps;
    # weights should approach 2 and biases should approach 1.6.
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run(weights), sess.run(biases))
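
    # Optional visualization sketch (an addition, not part of the original script):
    # matplotlib is imported above but never used, so plot the data and the fitted
    # line while the session is still open to read the trained variables.
    w_fit, b_fit = sess.run(weights), sess.run(biases)
    plt.scatter(x_data, y_data, s=10, label='data')
    plt.plot(x_data, w_fit * x_data + b_fit, color='red', label='fitted line')
    plt.legend()
    plt.show()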