-
Notifications
You must be signed in to change notification settings - Fork 0
/
mnist.py
116 lines (92 loc) · 3.95 KB
/
mnist.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
"""
@author: JiangZongKang
@contact: [email protected]
@file: mnist.py
@time: 2018/9/10 16:55
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import os
# Suppress TensorFlow C++ INFO/WARNING log output (show errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# TF1-style command-line flags. is_train=1 trains and saves a checkpoint;
# any other value (default 0) restores the checkpoint and runs predictions.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('is_train', 0, '指定程序是预测还是训练')
def full_connected():
    """Train or evaluate a single-layer softmax classifier on MNIST.

    Behavior is selected by the ``is_train`` flag: when it equals 1, runs
    2000 mini-batch SGD steps, logs TensorBoard summaries, and saves a
    checkpoint; otherwise restores the checkpoint and prints predictions
    for 100 individual test images.

    Returns:
        None.
    """
    # Load the MNIST dataset (downloaded into ./MNIST_data on first run).
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    # 1. Placeholders: features x [None, 784], one-hot labels y_true [None, 10].
    with tf.variable_scope('data'):
        x = tf.placeholder(tf.float32, [None, 784])
        print(x)
        y_true = tf.placeholder(tf.int32, [None, 10])

    # 2. Single fully-connected layer: w [784, 10], b [10].
    with tf.variable_scope('full_model'):
        # Randomly initialize weights; biases start at zero.
        # FIX: the name kwarg belongs on tf.Variable, not on the initializer
        # op — the original named the tf.random_normal op instead, leaving
        # the variable itself with an auto-generated name.
        weight = tf.Variable(tf.random_normal([784, 10]), name='weight')
        bias = tf.Variable(tf.constant(0.0, shape=[10]), name='bias')
        # Batch logits: [None, 784] @ [784, 10] + [10] -> [None, 10].
        y_predict = tf.matmul(x, weight) + bias

    # 3. Mean softmax cross-entropy loss over the batch.
    with tf.variable_scope('soft_cross'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            labels=y_true, logits=y_predict))

    # 4. Plain gradient descent on the loss (learning rate 0.1).
    with tf.variable_scope('optimizer'):
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # 5. Accuracy: fraction of samples whose predicted class matches the label.
    with tf.variable_scope('acc'):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))

    # Scalar summaries for TensorBoard.
    tf.summary.scalar('losses', loss)
    tf.summary.scalar('acc', accuracy)
    # Histogram summaries for high-dimensional variables.
    # FIX: corrected misspelled tag 'weigthes' -> 'weights'.
    tf.summary.histogram('weights', weight)
    tf.summary.histogram('biases', bias)

    # Variable-initialization op, merged-summary op, and checkpoint saver.
    init_op = tf.global_variables_initializer()
    merged = tf.summary.merge_all()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init_op)
        # Event-file writer so TensorBoard can display the graph and summaries.
        filewriter = tf.summary.FileWriter(
            './tmp/summary/test/', graph=sess.graph)
        if FLAGS.is_train == 1:
            # Training loop: 2000 steps of mini-batch (size 50) SGD.
            for i in range(2000):
                mnist_x, mnist_y = mnist.train.next_batch(50)
                sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})
                # Record this step's summaries.
                summary = sess.run(merged, feed_dict={
                    x: mnist_x, y_true: mnist_y})
                filewriter.add_summary(summary, i)
                print('训练第%d步,准确率为:%f' % (i, sess.run(
                    accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))
            # Persist the trained parameters.
            saver.save(sess, './tmp/summary/test/fc_model')
        else:
            # Prediction mode: restore the trained model from the checkpoint.
            saver.restore(sess, './tmp/summary/test/fc_model')
            for i in range(100):
                # Classify one test image at a time.
                x_test, y_test = mnist.test.next_batch(1)
                print('第%d张图片,手写数字的目标值是:%d,预测结果是:%d,准确率为:%f' % (
                    i,
                    tf.argmax(y_test, 1).eval(),
                    tf.argmax(sess.run(y_predict, feed_dict={
                        x: x_test, y_true: y_test}), 1).eval(),
                    sess.run(accuracy, feed_dict={x: x_test, y_true: y_test})
                ))
    return None
# Script entry point: builds and runs the graph; mode is chosen by --is_train.
if __name__ == '__main__':
    full_connected()