import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import numpy as np
# Path where model checkpoints are saved.
model_save_path = 'C:\\Users\\Administrator\\Desktop\\LZC\\model_save_path\\'
# Path where TensorBoard summaries are saved.
summary_save_path = 'C:\\Users\\Administrator\\Desktop\\LZC\\summary_save_path\\'
# Path of the TFRecord data file.
data_path = 'C:\\Users\\Administrator\\Desktop\\LZC\\path\\mnist.tfrecords'
# Number of training steps.
train_step = 10000
# Convert the MNIST training set into a single TFRecord file.
def mnist_to_TFRecord(mnist):
    images = mnist.train.images
    labels = mnist.train.labels
    num = mnist.train.num_examples
    writer = tf.python_io.TFRecordWriter(data_path)
    for index in range(num):
        # Serialize one image/label pair as raw bytes.
        image = images[index].tostring()
        label = labels[index].tostring()
        example = tf.train.Example(features=tf.train.Features(feature={
            'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
            'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label]))
        }))
        writer.write(example.SerializeToString())
    writer.close()
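
# A minimal sanity check for the conversion above (not called by the training
# script; the helper name _inspect_tfrecord is illustrative, not from the
# original code). It reads the first serialized record back and prints the
# decoded shapes, assuming the file at data_path was written by
# mnist_to_TFRecord.
def _inspect_tfrecord():
    record = next(tf.python_io.tf_record_iterator(data_path))
    example = tf.train.Example.FromString(record)
    image_bytes = example.features.feature['image'].bytes_list.value[0]
    label_bytes = example.features.feature['label'].bytes_list.value[0]
    # Images were written as float32; one-hot labels are float64 (numpy default).
    print(np.frombuffer(image_bytes, np.float32).shape)  # expected: (784,)
    print(np.frombuffer(label_bytes, np.float64).shape)  # expected: (10,)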
# Read the TFRecord file and assemble shuffled training batches.
def get_input(batch_size):
    file_queue = tf.train.string_input_producer([data_path])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(file_queue)
    features = tf.parse_single_example(serialized_example, features={
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.string)
    })
    # Images were written as float32, so decoding as float32 already yields
    # the right dtype; no further cast is needed.
    image = tf.decode_raw(features['image'], tf.float32)
    image = tf.reshape(image, [784])
    # The one-hot labels produced by input_data are float64 numpy arrays, so
    # they must be decoded as float64 to recover the correct values.
    label = tf.decode_raw(features['label'], tf.float64)
    label = tf.reshape(label, [10])
    image_batch, label_batch = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, capacity=10300,
        min_after_dequeue=10000)
    return image_batch, label_batch
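
# A minimal sketch (not called below) of pulling a batch from the pipeline
# above for debugging; the helper name _debug_one_batch is illustrative. It
# assumes the TFRecord file at data_path already exists.
def _debug_one_batch():
    images, labels = get_input(4)
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        print(sess.run(images).shape)  # expected: (4, 784)
        print(sess.run(labels).shape)  # expected: (4, 10)
        coord.request_stop()
        coord.join(threads)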
# Compute the total loss for one GPU tower. The name scope is needed because
# the regularization losses computed on every GPU are all added to the
# collection named 'loss'; without filtering the collection by scope, the
# losses from different GPUs would be mixed together.
def get_loss(x, y_, regularizer, scope, reuse_variables=None):
    # Reuse the function defined in Section 5.5 to compute the forward pass.
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        y = mnist_inference.inference(x, regularizer)
    # Cross-entropy loss; y_ is one-hot, so argmax recovers the integer class
    # index expected by the sparse version of the op.
    cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1)))
    # Regularization loss contributed by the current GPU tower only.
    regularization_loss = tf.add_n(tf.get_collection('loss', scope))
    # Final total loss for this tower.
    loss = cross_entropy + regularization_loss
    return loss
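
# An equivalent formulation (a sketch, not used below; the helper name
# get_loss_dense is illustrative, not from the original code): because y_ is
# already one-hot, the dense cross-entropy op avoids the argmax round-trip.
def get_loss_dense(x, y_, regularizer, scope, reuse_variables=None):
    with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
        y = mnist_inference.inference(x, regularizer)
    # softmax_cross_entropy_with_logits expects float labels with the same
    # shape as the logits; the one-hot labels are float64, so cast them.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=y, labels=tf.cast(y_, tf.float32)))
    return cross_entropy + tf.add_n(tf.get_collection('loss', scope))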
# Average the gradients collected from all GPU towers. tower_grads is a list
# of per-tower (gradient, variable) lists; zip(*tower_grads) groups together
# the entries that belong to the same variable.
def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for g, _ in grad_and_vars:
            # Add a leading "tower" dimension so the gradients can be stacked.
            grad = tf.expand_dims(g, 0)
            grads.append(grad)
        # Stack along the tower dimension and average over it.
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)
        # All towers share variables, so any tower's variable handle works.
        v = grad_and_vars[0][1]
        average_grads.append((grad, v))
    return average_grads
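
# A minimal demo of what average_gradients computes (not called below; the
# helper name _average_gradients_demo and the constants are illustrative).
# With two towers holding gradients g0 and g1 for the same variable v, the
# result is [((g0 + g1) / 2, v)].
def _average_gradients_demo():
    v = tf.Variable([0.0, 0.0], name='demo_v')
    g0 = tf.constant([1.0, 3.0])
    g1 = tf.constant([3.0, 5.0])
    averaged = average_gradients([[(g0, v)], [(g1, v)]])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(averaged[0][0]))  # expected: [2. 4.]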
def main(_):
    # Build the shared parts of the graph on the CPU; only each tower's
    # forward/backward computation is placed on a GPU below.
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        x, y_ = get_input(100)
        regularizer = tf.contrib.layers.l2_regularizer(0.0001)
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        # Continuously decayed learning rate: 0.8 * 0.99 ** (global_step / 600).
        learning_rate = tf.train.exponential_decay(0.8, global_step, decay_steps=600, decay_rate=0.99)
        opt = tf.train.GradientDescentOptimizer(learning_rate)
        tower_grads = []
        reuse_variables = False
        # Build one tower per GPU (two GPUs assumed). All towers share the
        # same variables, so reuse is turned on after the first tower.
        for i in range(2):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('GPU_%d' % i) as scope:
                    cur_loss = get_loss(x, y_, regularizer, scope=scope, reuse_variables=reuse_variables)
                    reuse_variables = True
                    grads = opt.compute_gradients(cur_loss)
                    tower_grads.append(grads)
                    tf.summary.scalar('loss', cur_loss)
        # Average the tower gradients and apply them in a single update.
        grads = average_gradients(tower_grads)
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name, grad)
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)
        # Maintain an exponential moving average of all trainable variables.
        variable_averages = tf.train.ExponentialMovingAverage(decay=0.99)
        variable_averages_op = variable_averages.apply(tf.trainable_variables())
        train_op = tf.group(apply_gradient_op, variable_averages_op)
        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        # allow_soft_placement lets ops without a GPU kernel fall back to CPU.
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
            sess.run(init)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            summary_writer = tf.summary.FileWriter(summary_save_path, tf.get_default_graph())
            for i in range(train_step):
                _, loss_value = sess.run([train_op, cur_loss])
                print(loss_value)
                if i != 0 and i % 10 == 0:
                    summary = sess.run(summary_op)
                    summary_writer.add_summary(summary, i)
                if i % 1000 == 0:
                    saver.save(sess, model_save_path + 'model.ckpt', global_step=i)
            coord.request_stop()
            coord.join(threads)
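
# A sketch (not called anywhere; the helper name _restore_averaged is
# illustrative) of how the moving-averaged weights saved above could be
# restored for evaluation: variables_to_restore maps each shadow-variable
# name to the corresponding trainable variable, assuming the same graph has
# already been built in the current session.
def _restore_averaged(sess, checkpoint_path):
    variable_averages = tf.train.ExponentialMovingAverage(decay=0.99)
    saver = tf.train.Saver(variable_averages.variables_to_restore())
    saver.restore(sess, checkpoint_path)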
if __name__ == '__main__':
    # Download MNIST (one-hot labels, float32 images) and convert it to a
    # TFRecord file before training starts.
    mnist = input_data.read_data_sets('data', one_hot=True, dtype=tf.float32)
    mnist_to_TFRecord(mnist)
    tf.app.run()