diff --git a/ANN_TF2.py b/ANN_TF2.py
new file mode 100644
index 0000000..8eeb77b
--- /dev/null
+++ b/ANN_TF2.py
@@ -0,0 +1,64 @@
+# try your best to do it!
+# @Time : 2022/9/24 21:55
+# @Author : LianghengZhang
+# @File : MNIST_Classification.py
+import tensorflow as tf
+from tensorflow.keras import datasets, layers, models
+
+import matplotlib
+matplotlib.use('TkAgg')  # use the TkAgg backend (set it before importing pyplot)
+import matplotlib.pyplot as plt
+
+if __name__ == '__main__':
+    (train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
+
+    train_images, test_images = train_images / 255.0, test_images / 255.0
+    # Inspect the shapes of the datasets
+    print("data_shape:", train_images.shape, test_images.shape, train_labels.shape, test_labels.shape)
+    """
+    Output: (60000, 28, 28) (10000, 28, 28) (60000,) (10000,)
+    """
+
+    plt.figure(figsize=(20, 10))
+    for i in range(20):
+        plt.subplot(2, 10, i + 1)
+        plt.xticks([])
+        plt.yticks([])
+        plt.grid(False)
+        plt.imshow(train_images[i], cmap=plt.cm.binary)
+        plt.xlabel(train_labels[i])
+    plt.show()
+
+    # Reshape the data to the format the CNN expects (add a channel dimension)
+    train_images = train_images.reshape((60000, 28, 28, 1))
+    test_images = test_images.reshape((10000, 28, 28, 1))
+
+    print("data_shape:", train_images.shape, test_images.shape, train_labels.shape, test_labels.shape)
+    """
+    Output: (60000, 28, 28, 1) (10000, 28, 28, 1) (60000,) (10000,)
+    """
+
+    model = models.Sequential([
+        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
+        layers.MaxPooling2D((2, 2)),
+        layers.Conv2D(64, (3, 3), activation='relu'),
+        layers.MaxPooling2D((2, 2)),
+
+        layers.Flatten(),
+        layers.Dense(64, activation='relu'),
+        layers.Dense(10)
+    ])
+    model.summary()
+
+    model.compile(
+        optimizer='adam',
+        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        metrics=['accuracy'])
+
+    history = model.fit(
+        train_images,
+        train_labels,
+        epochs=10,
+    )
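+
+    # Possible follow-up (hypothetical sketch, not in the original script): evaluate
+    # the trained CNN on the held-out test set and plot the training-accuracy curve.
+    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
+    print('test accuracy:', test_acc)
+
+    plt.plot(history.history['accuracy'], label='train accuracy')
+    plt.xlabel('epoch')
+    plt.ylabel('accuracy')
+    plt.legend()
+    plt.show()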
diff --git a/ANN_mnist.py b/ANN_mnist.py
new file mode 100644
index 0000000..50167f9
--- /dev/null
+++ b/ANN_mnist.py
@@ -0,0 +1,51 @@
+#===============================================
+# Train a simple neural network and report the running time
+# Dataset: MNIST
+#===============================================
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import datetime
+starttime = datetime.datetime.now()
+
+import tensorflow as tf
+import numpy as np
+
+# Import data
+from tensorflow.examples.tutorials.mnist import input_data
+flags = tf.app.flags
+FLAGS = flags.FLAGS
+flags.DEFINE_string('data_dir', '/learn/tensorflow/python/data/', 'Directory for storing data')  # directory holding the MNIST files
+mnist_data = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)  # load the dataset
+
+# Build the model
+x = tf.placeholder(tf.float32, [None, 784])  # placeholders for inputs and labels
+y = tf.placeholder(tf.float32, [None, 10])
+W = tf.Variable(tf.zeros([784, 10]))
+b = tf.Variable(tf.zeros([10]))
+a = tf.nn.softmax(tf.matmul(x, W) + b)
+
+# Define the loss function and the training step
+cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(a), reduction_indices=[1]))  # cross-entropy averaged over the batch; a learning rate around 0.3 suits this form
+#cross_entropy = -tf.reduce_sum(y * tf.log(a))  # summed (not averaged) cross-entropy; would need a learning rate around 0.001
+optimizer = tf.train.GradientDescentOptimizer(0.3)  # gradient descent with learning rate 0.3, matching the averaged loss above
+train_next = optimizer.minimize(cross_entropy)  # training objective: minimize the loss
+
+# Train
+sess = tf.InteractiveSession()  # create an interactive session
+# tf.global_variables_initializer().run()
+sess.run(tf.global_variables_initializer())
+for i in range(1000):
+    batch_xs, batch_ys = mnist_data.train.next_batch(100)  # draw a random batch of 100 examples
+#    train_next.run({x: batch_xs, y: batch_ys})
+    sess.run(train_next, feed_dict={x: batch_xs, y: batch_ys})
+
+# Test
+correct_prediction = tf.equal(tf.argmax(a, 1), tf.argmax(y, 1))
+# tf.cast converts the boolean results to float before averaging; use tf.float32 here, since a lower-precision dtype such as tf.float16 can make the mean inaccurate
+accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
+print(sess.run(accuracy, feed_dict={x: mnist_data.test.images, y: mnist_data.test.labels}))
+
+endtime = datetime.datetime.now()
+print('total time (endtime - starttime):', endtime - starttime)
\ No newline at end of file
diff --git a/IO.py b/IO.py
new file mode 100644
index 0000000..ef11238
--- /dev/null
+++ b/IO.py
@@ -0,0 +1,35 @@
+#====================================================================
+# Save and load data in MATLAB (.mat) format, and plot the result
+#
+#====================================================================
+import scipy.io as sio
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+
+# Create four scalar variables and save them (uncomment to regenerate the file)
+#sio.savemat('data/testpython.mat', {'a': 1, 'b': 5, 'c': 3, 'd': 4})
+# Create a variable x holding a 2x4 matrix
+#sio.savemat('data/testpython2.mat', {'x': [[1, 3, 5, 4], [5, 3, 2, 8]]})
+
+data = sio.loadmat('data/testpython.mat')
+data2 = sio.loadmat('data/testpython2.mat')
+x = np.zeros([1, 4])
+x[0][0] = data['a']
+x[0][1] = data['b']
+x[0][2] = data['c']
+x[0][3] = data['d']
+y = data2['x']
+print(x, x.dtype, y[0], y[0].dtype)
+
+fig = plt.figure()
+ax = fig.add_subplot(111, projection='3d')
+xs = x[0]
+ys = y[0]
+zs = y[1]
+ax.scatter(xs, ys, zs, c='b', marker='o')
+ax.set_xlabel('x')
+ax.set_ylabel('y')
+ax.set_zlabel('z')
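+
+# Hypothetical convenience (sketch, not in the original script): loadmat returns
+# each scalar as a (1, 1) array; squeeze_me=True strips the singleton dimensions,
+# so the values can be read without the [0][0] indexing used above.
+data_squeezed = sio.loadmat('data/testpython.mat', squeeze_me=True)
+print(data_squeezed['a'], data_squeezed['b'], data_squeezed['c'], data_squeezed['d'])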
+plt.show()
+plt.close()
\ No newline at end of file
diff --git a/ne_PM.py b/ne_PM.py
new file mode 100644
index 0000000..626c3a8
--- /dev/null
+++ b/ne_PM.py
@@ -0,0 +1,83 @@
+#====================================================================
+# Use the TensorFlow Python API to fit a plane: train, test, and plot the results
+# Small, but it has all the essential pieces!
+#====================================================================
+import tensorflow as tf
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+
+# Training data
+x_train = np.float32(np.random.rand(2, 100))  # random inputs
+y_train = np.dot([5.000, 7.000], x_train) + 2.000
+
+# Build a linear model
+b = tf.Variable(tf.zeros([1]))
+W = tf.Variable(tf.zeros([1, 2]))
+y = tf.matmul(W, x_train) + b
+
+# Minimize the mean squared error
+loss = tf.reduce_mean(tf.square(y - y_train))
+optimizer = tf.train.GradientDescentOptimizer(0.5)
+train = optimizer.minimize(loss)
+
+# Launch the graph
+sess = tf.Session()
+sess.run(tf.global_variables_initializer())
+
+# Training: fit the plane
+m = 101
+n = 0
+W_temp = np.zeros([m // 20 + 1, 2])
+b_temp = np.zeros([1, m // 20 + 1])
+for step in range(0, m):
+    sess.run(train)
+    if step % 20 == 0:
+        temp = sess.run(W)
+        W_temp[n] = temp[0]  # sess.run(W) returns a (1, 2) array; take its first row so the shapes match
+        b_temp[0][n] = sess.run(b)
+        print(step, sess.run(W), sess.run(b))
+        n = n + 1
+
+W_temp = np.transpose(W_temp)
+
+# Plot the parameter trajectories
+fig = plt.figure()
+step = np.arange(0, m, 20)
+plt.plot(step, W_temp[0], marker='o', mec='b', mfc='b', label='W1')
+plt.plot(step, W_temp[1], marker='*', ms=10, label='W2')
+plt.plot(step, b_temp[0], marker='^', ms=10, label='b')
+plt.legend()
+plt.xlabel('step')
+plt.ylabel('value')
+plt.title('Parameter')
+
+
+# Test data
+x_test = np.float32(np.random.rand(2, 100))
+y_test = np.dot([5.000, 7.000], x_test) + 2.000
+
+# Test the trained model
+y_ = sess.run(tf.matmul(W, x_test) + b)
+y = abs(y_ - y_test)
+y = np.where(y > 0.01, y, 1)  # keep errors larger than 0.01, replace the rest with 1
+accuracy = np.sum(y == 1) / len(y[0, :])  # fraction of test points with absolute error <= 0.01
+print('accuracy:', accuracy)
+
+sess.close()
+
+# Plot the test targets against the predictions
+fig = plt.figure()
+ax = fig.add_subplot(111, projection='3d')
+xs = x_test[0]
+ys = x_test[1]
+zs1 = y_test
+zs2 = y_
+ax.scatter(xs, ys, zs1, c='r', marker='v')
+ax.scatter(xs, ys, zs2, c='b', marker='^')
+ax.set_xlabel('x')
+ax.set_ylabel('y')
+ax.set_zlabel('z')
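+
+# Hypothetical cross-check (sketch, not in the original script): solve the same
+# plane analytically with ordinary least squares and compare it with the
+# coefficients learned by gradient descent (expected to be close to [5, 7, 2]).
+A = np.vstack([x_train, np.ones((1, x_train.shape[1]))]).T  # (100, 3) design matrix
+coef, _, _, _ = np.linalg.lstsq(A, y_train, rcond=None)
+print('least-squares W1, W2, b:', coef)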
+#=============================================================== +# plt.subplot(224) +from mpl_toolkits.mplot3d import Axes3D +import matplotlib.pyplot as plt + +# 定义函数 +def rand_range(n, vmin, vmax): + ''' + make an array of random numbers having shape (n, ) + with each number distributed Uniform(vmin, vmax). + ''' + return (vmax - vmin) * np.random.rand(n) + vmin + +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +# plot n random points in the box +# defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh]. +n = 5 +for c, m, zlow, zhigh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]: + xs = rand_range(n, 20, 30) + ys = rand_range(n, 30, 100) + zs = rand_range(n, zlow, zhigh) + ax.scatter(xs, ys, zs, c=c, marker=m) + +# 单独一个散点 +ax.scatter(25, 50, -25, c='g', marker="*") + +ax.set_xlabel('x') +ax.set_ylabel('y') +ax.set_zlabel('z') +plt.show() + +#=============================================================== +# 概率分布图,累计概率分布图 +#=============================================================== + +import matplotlib.pyplot as plt +import numpy as np +#概率分布直方图 +#高斯分布 + +mean = 0 #均值为0 +sigma = 1 #标准差为1,反应数据集中还是分散的值 +x=mean+sigma*np.random.randn(10000) + +#第二个参数是柱子宽一些还是窄一些,越大越窄越密 +fig,(ax0,ax1) = plt.subplots(nrows=2,figsize=(6,6)) + +##pdf概率分布图,一万个数落在某个区间内的数有多少个 +ax0.hist(x,bins=40,density=True,histtype='bar',facecolor='green',alpha=0.8,rwidth=0.8) # bins参数表示将数据分成几组 +ax0.set_title('pdf') + +#cdf累计概率函数,cumulative累计。比如需要统计小于5的数的概率 +# bins参数表示将数据分成几组 +# normed 是否对y轴数据进行标准化:True表是在本区间的点在所有的点中所占的概率,如果 normed 为False, 则是显示点的数量 +ax1.hist(x,bins=20,density=False,histtype='step',facecolor='blue',alpha=0.8,cumulative=True,rwidth=0.8) +ax1.set_title("cdf") +fig.subplots_adjust(hspace=0.4) +plt.show() + +#plt.draw() + +# 可用 help(function) 查看函数帮助,如:help(ax.plot_surface) +#=============================================================== \ No newline at end of file