```python
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)

def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
  """Reusable code for making a simple neural net layer.

  It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.
  It also sets up name scoping so that the resultant graph is easy to read,
  and adds a number of summary ops.
  """
  # Adding a name scope ensures logical grouping of the layers in the graph.
  with tf.name_scope(layer_name):
    # This Variable will hold the state of the weights for the layer
    with tf.name_scope('weights'):
      weights = weight_variable([input_dim, output_dim])
      variable_summaries(weights)
    with tf.name_scope('biases'):
      biases = bias_variable([output_dim])
      variable_summaries(biases)
    with tf.name_scope('Wx_plus_b'):
      preactivate = tf.matmul(input_tensor, weights) + biases
      tf.summary.histogram('pre_activations', preactivate)
    activations = act(preactivate, name='activation')
    tf.summary.histogram('activations', activations)
    return activations
hidden1 = nn_layer(x, 784, 500, 'layer1')

with tf.name_scope('dropout'):
  keep_prob = tf.placeholder(tf.float32)
  tf.summary.scalar('dropout_keep_probability', keep_prob)
  dropped = tf.nn.dropout(hidden1, keep_prob)

# Do not apply softmax activation yet, see below.
y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)

with tf.name_scope('cross_entropy'):
  # The raw formulation of cross-entropy,
  #
  # tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
  #                               reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.losses.sparse_softmax_cross_entropy on the
  # raw logit outputs of the nn_layer above.
  with tf.name_scope('total'):
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(
        labels=y_, logits=y)
tf.summary.scalar('cross_entropy', cross_entropy)

with tf.name_scope('train'):
  train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
      cross_entropy)
with tf.name_scope('accuracy'):
  with tf.name_scope('correct_prediction'):
    # y_ holds sparse integer class labels (as required by
    # sparse_softmax_cross_entropy above), so compare it directly
    # to the predicted class index rather than taking its argmax.
    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
  with tf.name_scope('accuracy'):
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
# Merge all the summaries and write them out to /tmp/mnist_logs (by default).
# sess is assumed to be a tf.InteractiveSession created earlier in the script,
# which is why tf.global_variables_initializer().run() needs no explicit session.
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                     sess.graph)
test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')
tf.global_variables_initializer().run()
```
After initializing the FileWriters, we have to add summaries to them as we train and test the model.
```python
# Train the model, and also write summaries.
# Every 10th step, measure test-set accuracy, and write test summaries.
# All other steps, run train_step on training data, and add training summaries.

def feed_dict(train):
  """Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
  if train or FLAGS.fake_data:
    xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
    k = FLAGS.dropout
  else:
    xs, ys = mnist.test.images, mnist.test.labels
    k = 1.0
  return {x: xs, y_: ys, keep_prob: k}

for i in range(FLAGS.max_steps):
  if i % 10 == 0:  # Record summaries and test-set accuracy
    summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
    test_writer.add_summary(summary, i)
    print('Accuracy at step %s: %s' % (i, acc))
  else:  # Record train set summaries, and train
    summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
    train_writer.add_summary(summary, i)
```
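Once the loop finishes, it is worth closing the writers so any buffered events are flushed to disk. A minimal sketch using the standard TF 1.x `FileWriter.close()` call (where exactly you place this depends on how your script is structured):

```python
# Flush any pending events to disk and close the event files.
train_writer.close()
test_writer.close()
```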
You are now all set to visualize this data using TensorBoard.

Launching TensorBoard

To run TensorBoard, use the following command (alternatively `python -m tensorboard.main`):

```
tensorboard --logdir=path/to/log-directory
```

where logdir points to the directory where the FileWriter serialized its data. If this logdir directory contains subdirectories which contain serialized data from separate runs, then TensorBoard will visualize the data from all of those runs. Once TensorBoard is running, navigate your web browser to localhost:6006 to view TensorBoard.
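For the code above, for example, the train and test FileWriters write into sibling subdirectories of FLAGS.summaries_dir, so pointing TensorBoard at the parent directory shows both runs overlaid in the same charts. A sketch, assuming the default /tmp/mnist_logs mentioned in the code comments:

```
tensorboard --logdir=/tmp/mnist_logs
# /tmp/mnist_logs/train and /tmp/mnist_logs/test each contain an events file,
# so they appear in TensorBoard as two runs named "train" and "test".
```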
When looking at TensorBoard, you will see the navigation tabs in the top right corner. Each tab represents a set of serialized data that can be visualized.

For in-depth information on how to use the graph tab to visualize your graph, see TensorBoard: Graph Visualization.

For more usage information on TensorBoard in general, see the TensorBoard GitHub.