Advanced TensorBoard

TensorBoard Visualization of a Multilayer Perceptron

from __future__ import print_function

import tensorflow as tf

Import the dataset

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./data/", one_hot=True)

Extracting ./data/train-images-idx3-ubyte.gz
Extracting ./data/train-labels-idx1-ubyte.gz
Extracting ./data/t10k-images-idx3-ubyte.gz
Extracting ./data/t10k-labels-idx1-ubyte.gz
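Note that tensorflow.examples.tutorials.mnist is deprecated in later TensorFlow 1.x releases and no longer ships with TensorFlow 2. If it is unavailable in your build, a roughly equivalent loader is sketched below (my own variable names; the rest of the tutorial uses the mnist.train.next_batch API, so batching would need to be adapted accordingly):

# Sketch: load MNIST via tf.keras instead of the deprecated tutorials module.
# Images are flattened to 784 floats in [0, 1]; labels are one-hot encoded,
# matching the shapes the placeholders below expect.
import numpy as np

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0
y_train = np.eye(10)[y_train].astype('float32')   # one-hot labels
y_test = np.eye(10)[y_test].astype('float32')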

Set the parameters

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1
logs_path = './log/example/'

# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)

# tf Graph Input
# mnist data image of shape 28*28=784
x = tf.placeholder(tf.float32, [None, 784], name='InputData')
# 0-9 digits recognition => 10 classes
y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
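Because the placeholders are given explicit names, they appear as labeled nodes in TensorBoard's Graph tab. If you also want to inspect the raw inputs, a small image summary could be added at this point (a sketch, not part of the original example):

# Sketch: log a few input digits so they show up in TensorBoard's "Images" tab.
with tf.name_scope('InputImages'):
    tf.summary.image('input', tf.reshape(x, [-1, 28, 28, 1]), max_outputs=3)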

Create the multilayer perceptron function

# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with ReLU activation
    layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Create a summary to visualize the first layer ReLU activation
    tf.summary.histogram("relu1", layer_1)
    # Hidden layer with ReLU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Create another summary to visualize the second layer ReLU activation
    tf.summary.histogram("relu2", layer_2)
    # Output layer
    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
    return out_layer

# Store layers weight & bias
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='W1'),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='W2'),
    'w3': tf.Variable(tf.random_normal([n_hidden_2, n_classes]), name='W3')
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
    'b3': tf.Variable(tf.random_normal([n_classes]), name='b3')
}

Build the model and training ops (model + loss + optimizer + accuracy)

# Encapsulating all ops into scopes, making TensorBoard's Graph
# Visualization more convenient
with tf.name_scope('Model'):
    # Build model
    pred = multilayer_perceptron(x, weights, biases)

with tf.name_scope('Loss'):
    # Softmax cross entropy (cost function)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))

with tf.name_scope('SGD'):
    # Gradient descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Op to calculate every variable gradient
    grads = tf.gradients(loss, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)

with tf.name_scope('Accuracy'):
    # Accuracy
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))
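The gradients are computed and applied explicitly, rather than with optimizer.minimize(loss), only so that each gradient tensor is available for a histogram summary below. For training alone, the shorthand would be equivalent:

# Equivalent training op, but the per-variable gradients would not be
# exposed as tensors for tf.summary.histogram:
# apply_grads = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)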

Initialize variables and merge the summary ops

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Create a summary to monitor cost tensor
tf.summary.scalar("loss", loss)
# Create a summary to monitor accuracy tensor
tf.summary.scalar("accuracy", acc)
# Create summaries to visualize weights
for var in tf.trainable_variables():
    tf.summary.histogram(var.name, var)
# Summarize all gradients
for grad, var in grads:
    tf.summary.histogram(var.name + '/gradient', grad)
# Merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

INFO:tensorflow:Summary name W1:0 is illegal; using W1_0 instead.
INFO:tensorflow:Summary name W2:0 is illegal; using W2_0 instead.
INFO:tensorflow:Summary name W3:0 is illegal; using W3_0 instead.
INFO:tensorflow:Summary name b1:0 is illegal; using b1_0 instead.
INFO:tensorflow:Summary name b2:0 is illegal; using b2_0 instead.
INFO:tensorflow:Summary name b3:0 is illegal; using b3_0 instead.
INFO:tensorflow:Summary name W1:0/gradient is illegal; using W1_0/gradient instead.
INFO:tensorflow:Summary name W2:0/gradient is illegal; using W2_0/gradient instead.
INFO:tensorflow:Summary name W3:0/gradient is illegal; using W3_0/gradient instead.
INFO:tensorflow:Summary name b1:0/gradient is illegal; using b1_0/gradient instead.
INFO:tensorflow:Summary name b2:0/gradient is illegal; using b2_0/gradient instead.
INFO:tensorflow:Summary name b3:0/gradient is illegal; using b3_0/gradient instead.
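These warnings are harmless: var.name ends in ':0', and ':' is not allowed in a summary name, so TensorFlow rewrites it automatically (W1:0 becomes W1_0). To silence them you could strip the suffix yourself, for example (a small sketch, not part of the original code):

for var in tf.trainable_variables():
    clean_name = var.name.replace(':', '_')   # e.g. 'W1:0' -> 'W1_0'
    tf.summary.histogram(clean_name, var)
for grad, var in grads:
    tf.summary.histogram(var.name.replace(':', '_') + '/gradient', grad)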

Train and write the logs

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Op to write logs to TensorBoard
    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop), cost op (to get loss value)
            # and summary nodes
            _, c, summary = sess.run([apply_grads, loss, merged_summary_op],
                                     feed_dict={x: batch_xs, y: batch_ys})
            # Write logs at every iteration
            summary_writer.add_summary(summary, epoch * total_batch + i)
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))

    print("Optimization Finished!")

    # Test model
    # Calculate accuracy
    print("Accuracy:", acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Run the command line:\n" \
          "--> tensorboard --logdir=./log " \
          "\nThen open http://0.0.0.0:6006/ into your web browser")

Epoch: 0001 cost= 82.491150440
Epoch: 0002 cost= 11.219711702
Epoch: 0003 cost= 6.885841494
Epoch: 0004 cost= 4.898687713
Epoch: 0005 cost= 3.742709111
Epoch: 0006 cost= 2.969850923
Epoch: 0007 cost= 2.429568350
Epoch: 0008 cost= 2.024799560
Epoch: 0009 cost= 1.742192560
Epoch: 0010 cost= 1.494883727
Epoch: 0011 cost= 1.313867836
Epoch: 0012 cost= 1.153405372
Epoch: 0013 cost= 1.022956383
Epoch: 0014 cost= 0.917282970
Epoch: 0015 cost= 0.831443023
Epoch: 0016 cost= 0.739466778
Epoch: 0017 cost= 0.660427638
Epoch: 0018 cost= 0.606233582
Epoch: 0019 cost= 0.547995506
Epoch: 0020 cost= 0.506534999
Epoch: 0021 cost= 0.462353780
Epoch: 0022 cost= 0.424939641
Epoch: 0023 cost= 0.399291764
Epoch: 0024 cost= 0.364750651
Epoch: 0025 cost= 0.334185596
Optimization Finished!
Accuracy: 0.9215
Run the command line:
--> tensorboard --logdir=./log
Then open http://0.0.0.0:6006/ into your web browser
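Since logs_path points at a subdirectory of ./log, launching TensorBoard with --logdir=./log lets it discover every run beneath that directory. Writing each run to its own subdirectory, for instance as sketched below (a hypothetical variation, not in the code above), lets TensorBoard show multiple runs side by side in the same dashboard:

# Sketch: give every training run its own log subdirectory.
import time

run_path = './log/run_{}'.format(int(time.time()))
summary_writer = tf.summary.FileWriter(run_path, graph=tf.get_default_graph())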

Loss and accuracy curves

Loss and Accuracy Visualization

Computation graph visualization

Computation Graph——Model and SGD
Computation Graph——Loss and Accuracy

Histograms of weights and their gradients

Weights and Gradients Visualization

Histograms of biases and their gradients

Bias and Gradients Visualization

Feature map histograms

FeatureMap Visualization

