TensorFlow Tutorial






>>> import numpy as np
>>> a = np.zeros((2, 2)); b = np.ones((2, 2))
>>> np.sum(b, axis=1)
array([ 2., 2.])
>>> a.shape
(2, 2)
>>> np.reshape(a, (1, 4))
array([[ 0., 0., 0., 0.]])
>>> import tensorflow as tf
>>> sess = tf.Session()
>>> a = tf.zeros((2, 2)); b = tf.ones((2, 2))
>>> sess.run(tf.reduce_sum(b, axis=1))
[ 2., 2.]
>>> a.get_shape()
(2, 2)
>>> sess.run(tf.reshape(a, (1, 4)))
[[ 0., 0., 0., 0.]]
A TensorFlow tensor is only a symbolic handle; its value is computed when it is passed to sess.run():
>>> sess = tf.Session()
>>> a = np.zeros((2, 2)); ta = tf.zeros((2, 2))
>>> print(a)
[[ 0. 0.]
[ 0. 0.]]
>>> print(ta)
Tensor("zeros:0", shape=(2, 2), dtype=float32)
>>> print(sess.run(ta))
[[ 0. 0.]
[ 0. 0.]]






>>> a = tf.constant(5.0)
>>> b = tf.constant(6.0)
>>> c = a * b
>>> sess = tf.Session()
>>> print(sess.run(c))
30.0


All graph evaluation happens inside a tf.Session; it can be used explicitly or as a context manager:


>>> sess = tf.Session()
>>> print(sess.run(c))
>>> with tf.Session() as sess:
>>> print(sess.run(c))
>>> print(c.eval())


Inside a with tf.Session() block, c.eval() is shorthand for sess.run(c).


>>> w = tf.Variable(tf.zeros((2, 2)), name="weight")
>>> with tf.Session() as sess:
>>> print(sess.run(w))


Running this as-is fails: a tf.Variable must be initialized (for example with tf.global_variables_initializer()) before it can be read:
>>> w = tf.Variable(tf.random_normal([5, 2], stddev=0.1), name="weight")
>>> with tf.Session() as sess:
>>> sess.run(tf.global_variables_initializer())
>>> print(sess.run(w))

[[-0.10020355 -0.01114563]
[ 0.04050281 -0.15980773]
[-0.00628474 -0.02608337]
[ 0.16397022 0.02898547]
[ 0.04264377 0.04281621]]
tf.Variable creates stateful nodes whose values persist across calls to sess.run.

A tf.Variable can be updated inside the graph with tf.assign:

>>> state = tf.Variable(0, name="counter")
>>> new_value = tf.add(state, tf.constant(1))
>>> update = tf.assign(state, new_value)
>>> with tf.Session() as sess:
>>> sess.run(tf.global_variables_initializer())
>>> print(sess.run(state))
>>> for _ in range(3):
>>> sess.run(update)
>>> print(sess.run(state))
0
1
2
3
>>> x1 = tf.constant(1)
>>> x2 = tf.constant(2)
>>> x3 = tf.constant(3)
>>> temp = tf.add(x2, x3)
>>> mul = tf.multiply(x1, temp)  # tf.mul in older TensorFlow versions
>>> with tf.Session() as sess:
>>> result1, result2 = sess.run([mul, temp])
>>> print(result1, result2)
5 5
sess.run(var) fetches a single op or tensor; sess.run([var1, ..., ]) fetches several in one call, as in the example above.
tf.placeholder declares graph inputs whose values are supplied at run time through feed_dict:
>>> a = tf.placeholder(tf.int16)
>>> b = tf.placeholder(tf.int16)
>>> add = tf.add(a, b)
>>> mul = tf.multiply(a, b)  # tf.mul in older TensorFlow versions
>>> with tf.Session() as sess:
>>> print(sess.run(add, feed_dict={a: 2, b: 3}))
>>> print(sess.run(mul, feed_dict={a: 2, b: 3}))
5
6
A tf.placeholder holds no value of its own; every sess.run that depends on it must feed one through feed_dict. Placeholders can also be given an explicit shape, as in the next example.
# using tf.constant
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    result = sess.run(product)
    print(result)

# using placeholder
import numpy as np
matrix1 = tf.placeholder(tf.float32, [1, 2])
matrix2 = tf.placeholder(tf.float32, [2, 1])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    mv1 = np.array([[3., 3.]])
    mv2 = np.array([[2.], [2.]])
    result = sess.run(product, feed_dict={matrix1: mv1, matrix2: mv2})
    print(result)
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

learning_rate = 0.001
max_steps = 15000
batch_size = 128

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

def MLP(inputs):
    W_1 = tf.Variable(tf.random_normal([784, 256]))
    b_1 = tf.Variable(tf.zeros([256]))
    W_2 = tf.Variable(tf.random_normal([256, 256]))
    b_2 = tf.Variable(tf.zeros([256]))
    W_out = tf.Variable(tf.random_normal([256, 10]))
    b_out = tf.Variable(tf.zeros([10]))
    h_1 = tf.add(tf.matmul(inputs, W_1), b_1)
    h_1 = tf.nn.relu(h_1)
    h_2 = tf.add(tf.matmul(h_1, W_2), b_2)
    h_2 = tf.nn.relu(h_2)
    out = tf.add(tf.matmul(h_2, W_out), b_out)
    return out

net = MLP(x)

# define loss and optimizer
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=y))
opt = tf.train.AdamOptimizer(learning_rate).minimize(loss_op)

# initializing the variables
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)

# train model
for step in range(max_steps):
    batch_X, batch_y = mnist.train.next_batch(batch_size)
    _, loss = sess.run([opt, loss_op],
                       feed_dict={x: batch_X, y: batch_y})
    if (step+1) % 1000 == 0:
        print("[{}/{}] loss:{:.3f}".format(step+1, max_steps, loss))
print("Optimization Finished!")

# test model
correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
# calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Train accuracy: {:.3f}".format(sess.run(accuracy,
      feed_dict={x: mnist.train.images, y: mnist.train.labels})))
print("Test accuracy: {:.3f}".format(sess.run(accuracy,
      feed_dict={x: mnist.test.images, y: mnist.test.labels})))
tf.nn.softmax_cross_entropy_with_logits computes the per-example classification loss from unscaled logits, and tf.train.####Optimizer(...).minimize(loss_op) builds the training op (Adam above, but any optimizer works the same way).
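A minimal sketch of the same pattern with a different optimizer (plain SGD instead of the Adam used above; the 0.5 learning rate is an arbitrary value for illustration):

# Build the optimizer, then call minimize() on the loss tensor to get a training op.
sgd_train_op = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(loss_op)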
Variable sharing: tf.variable_scope() defines name scopes for variables, and tf.get_variable() creates or retrieves variables inside them.
var1 = tf.Variable([1], name="var")
with tf.variable_scope("foo"):
    with tf.variable_scope("bar"):
        var2 = tf.Variable([1], name="var")
        var3 = tf.Variable([1], name="var")
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var_1:0
Unlike tf.get_variable(), tf.Variable always creates a new variable, even after reuse is enabled in the scope:
var1 = tf.Variable([1], name="var")
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var2 = tf.Variable([1], name="var")
        scp.reuse_variables()  # allow reuse of variables
        var3 = tf.Variable([1], name="var")
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var_1:0
Note that var3 is foo/bar/var_1:0 rather than foo/bar/var:0: tf.Variable ignores reuse_variables(). To actually share a variable, create it with tf.get_variable():
var1 = tf.get_variable("var", [1])
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var2 = tf.get_variable("var", [1])
        scp.reuse_variables()  # allow reuse of variables
        var3 = tf.get_variable("var", [1])
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var:0
tf.get_variable() respects the scope's reuse flag: with reuse=False it raises a ValueError if the variable already exists, and with reuse=True it raises a ValueError if the variable does not exist yet:
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var1 = tf.get_variable("var", [1])
        scp.reuse_variables()
        var2 = tf.get_variable("var", [1])
    with tf.variable_scope("bar", reuse=True):
        var3 = tf.get_variable("var", [1])
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: foo/bar/var:0
var2: foo/bar/var:0
var3: foo/bar/var:0
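A small sketch of the two error cases just described (not from the slides; the scope name "err" is arbitrary):

with tf.variable_scope("err"):
    v = tf.get_variable("v", [1])
    # tf.get_variable("v", [1])    # ValueError: err/v already exists and reuse=False
with tf.variable_scope("err", reuse=True):
    v_again = tf.get_variable("v", [1])    # OK: reuses err/v
    # tf.get_variable("w", [1])    # ValueError: err/w does not exist and reuse=True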
In existing code you will see both, but prefer tf.get_variable over tf.Variable whenever a variable may need to be shared. Variables created this way can also be looked up later by name, e.g. tf.###.get_variables_by_name("my_var", "my_scope").
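For instance, with the scopes from the example above, the tf.contrib.framework helper (one concrete filling of the ### placeholder; a sketch, not from the slides) returns the shared variable:

# Collect every variable named "var" created under the foo/bar scope.
shared = tf.contrib.framework.get_variables_by_name("var", scope="foo/bar")
print(shared)   # e.g. [<tf.Variable 'foo/bar/var:0' shape=(1,) ...>]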


• tf.nn.conv2d(input, filter, strides, padding, name=None)
  1. input: 4-D input tensor, [batch, height, width, channels]
  2. filter: kernel tensor, [height, width, in_channels, out_channels]
  3. strides: stride of the sliding window for each dimension of input
  4. padding: "SAME" or "VALID"

• tf.nn.##_pool(value, ksize, strides, padding, name=None) (e.g. max_pool, avg_pool)
  1. value: 4-D input tensor
  2. ksize: size of the pooling window for each dimension of input
  3. strides: stride of the sliding window for each dimension of input
  4. padding: "SAME" or "VALID"
A minimal direct-call sketch of both ops follows the list.
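Direct-call sketch (the 28x28x1 input and the 3x3x1x16 kernel shapes are arbitrary choices for illustration):

img = tf.placeholder(tf.float32, [None, 28, 28, 1])
kernel = tf.Variable(tf.truncated_normal([3, 3, 1, 16], stddev=0.1))
feat = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding="SAME")
feat = tf.nn.max_pool(feat, ksize=[1, 2, 2, 1],
                      strides=[1, 2, 2, 1], padding="SAME")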
from tensorflow.contrib.layers import variance_scaling_initializer
he_init = variance_scaling_initializer()

def conv(bottom,
         num_filter, ksize=3, stride=1, padding="SAME",
         scope=None):
    bottom_shape = bottom.get_shape().as_list()[3]
    with tf.variable_scope(scope or "conv"):
        W = tf.get_variable("W",
                            [ksize, ksize, bottom_shape, num_filter],
                            initializer=he_init)
        b = tf.get_variable("b", [num_filter],
                            initializer=tf.constant_initializer(0))
        x = tf.nn.conv2d(bottom, W,
                         strides=[1, stride, stride, 1],
                         padding=padding)
        x = tf.nn.relu(tf.nn.bias_add(x, b))
    return x
def maxpool(bottom,
            ksize=2, stride=2, padding="SAME",
            scope=None):
    with tf.variable_scope(scope or "maxpool"):
        pool = tf.nn.max_pool(bottom, ksize=[1, ksize, ksize, 1],
                              strides=[1, stride, stride, 1],
                              padding=padding)
    return pool
from functools import reduce  # needed on Python 3

def fc(bottom, num_dims, scope=None):
    bottom_shape = bottom.get_shape().as_list()
    if len(bottom_shape) > 2:
        # flatten everything except the batch dimension
        bottom = tf.reshape(bottom,
                            [-1, reduce(lambda x, y: x*y, bottom_shape[1:])])
        bottom_shape = bottom.get_shape().as_list()
    with tf.variable_scope(scope or "fc"):
        W = tf.get_variable("W", [bottom_shape[1], num_dims],
                            initializer=he_init)
        b = tf.get_variable("b", [num_dims],
                            initializer=tf.constant_initializer(0))
        out = tf.nn.bias_add(tf.matmul(bottom, W), b)
    return out

def fc_relu(bottom, num_dims, scope=None):
    with tf.variable_scope(scope or "fc"):
        out = fc(bottom, num_dims, scope="fc")
        relu = tf.nn.relu(out)
    return relu
keep_prob = tf.placeholder(tf.float32, None)

def conv_net(x, keep_prob):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    conv1 = conv(x, 32, 5, scope="conv_1")
    conv1 = maxpool(conv1, scope="maxpool_1")
    conv2 = conv(conv1, 64, 5, scope="conv_2")
    conv2 = maxpool(conv2, scope="maxpool_2")
    fc1 = fc_relu(conv2, 1024, scope="fc_1")
    fc1 = tf.nn.dropout(fc1, keep_prob)
    out = fc(fc1, 10, scope="out")
    return out




# By default TensorFlow grabs nearly all GPU memory; allow_growth=True makes it
# allocate memory on demand instead.
config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(config=config)
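Another common option is to cap the process at a fixed fraction of GPU memory instead of growing on demand (0.4 here is just an example value):

config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.4))
sess = tf.Session(config=config)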


tf.contrib holds volatile, community-contributed code that has not yet been merged into core TensorFlow. TF-Slim (tf.contrib.slim) is one of these packages: a lightweight library for defining, training and evaluating models, conventionally imported under the alias slim.
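The conventional import used throughout the rest of the examples:

import tensorflow as tf
slim = tf.contrib.slim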
input = ...
with tf.name_scope('conv1_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128],
                                             dtype=tf.float32, stddev=1e-1),
                         name='weights')
    conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1],
                        padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128],
                                     dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope)
input = ...
net = slim.conv2d(input, 128, [3, 3],
                  padding='SAME', scope='conv1_1')
# 1. simple network generation with slim
net = ...
net = slim.conv2d(net, 256, [3, 3], scope='conv3_1')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')

# 1. cleaner by repeat operation:
net = ...
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')

# 2. Verbose way:
x = slim.fully_connected(x, 32, scope='fc/fc_1')
x = slim.fully_connected(x, 64, scope='fc/fc_2')
x = slim.fully_connected(x, 128, scope='fc/fc_3')

# 2. Equivalent, TF-Slim way using slim.stack:
slim.stack(x, slim.fully_connected, [32, 64, 128], scope='fc')
• tf.truncated_normal_initializer (core tf)
• slim.xavier_initializer
• slim.variance_scaling_initializer
• slim.l1_regularizer
• slim.l2_regularizer
• …
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='SAME',
                  weights_initializer=slim.xavier_initializer(),
                  weights_regularizer=slim.l2_regularizer(0.0005),
                  scope='conv1')


he_init = slim.variance_scaling_initializer()
xavier_init = slim.xavier_initializer()
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=he_init,
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
        net = slim.conv2d(inputs, 64, [11, 11], 4, scope='conv1')
        net = slim.conv2d(net, 256, [5, 5],
                          weights_initializer=xavier_init,
                          scope='conv2')
        net = slim.fully_connected(net, 1000,
                                   activation_fn=None, scope='fc')




# Define the loss functions and get the total loss.
loss1 = slim.losses.softmax_cross_entropy(pred1, label1)
loss2 = slim.losses.mean_squared_error(pred2, label2)
# The following two lines have the same effect:
total_loss = loss1 + loss2
slim.losses.get_total_loss(add_regularization_losses=False)
# If you want to add regularization loss
reg_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss = loss1 + loss2 + reg_loss
# or
total_loss = slim.losses.get_total_loss()
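The aggregated loss then plugs into an optimizer exactly as before; a short sketch:

total_loss = slim.losses.get_total_loss()   # classification + regularization losses
train_op = tf.train.AdamOptimizer(0.001).minimize(total_loss)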


# Saving checkpoints (method of a model class; assumes `import os` and that
# self.config holds the Session under "sess").
def save(self, ckpt_dir, global_step=None):
    if self.config.get("saver") is None:
        self.config["saver"] = tf.train.Saver(max_to_keep=30)
    saver = self.config["saver"]
    sess = self.config["sess"]
    dirname = os.path.join(ckpt_dir, self.name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    saver.save(sess, dirname, global_step)


saver.save writes the checkpoint under dirname; when global_step is given it is appended to the checkpoint filename, so successive steps produce distinct files.


def load_latest_checkpoint(self, ckpt_dir, exclude=None):
    path = tf.train.latest_checkpoint(ckpt_dir)
    if path is None:
        raise AssertionError("No ckpt exists in {0}.".format(ckpt_dir))
    print("Load {} save file".format(path))
    self._load(path, exclude)

def load_from_path(self, ckpt_path, exclude=None):
    self._load(ckpt_path, exclude)

def _load(self, ckpt_path, exclude):
    init_fn = slim.assign_from_checkpoint_fn(ckpt_path,
        slim.get_variables_to_restore(exclude=exclude),
        ignore_missing_vars=True)
    init_fn(self.config["sess"])




exclude lists variables that should not be restored (they are dropped from slim.get_variables_to_restore). ignore_missing_vars controls what happens when a variable to restore is absent from the checkpoint: False (the default) raises an error, while True skips it with a warning.
X = tf.placeholder(tf.float32, [None, 224, 224, 3], name="X")
y = tf.placeholder(tf.int32, [None, 8], name="y")
is_training = tf.placeholder(tf.bool, name="is_training")

with slim.arg_scope(vgg.vgg_arg_scope()):
    net, end_pts = vgg.vgg_16(X, is_training=is_training,
                              num_classes=8)  # new 8-class head to match y
with tf.variable_scope("losses"):
    cls_loss = slim.losses.softmax_cross_entropy(net, y)
    reg_loss = tf.add_n(slim.losses.get_regularization_losses())
    loss_op = cls_loss + reg_loss
with tf.variable_scope("opt"):
    opt = tf.train.AdamOptimizer(0.001).minimize(loss_op)

self.load_from_path(ckpt_path=VGG_PATH, exclude=["vgg_16/fc8"])
...
fc8 is excluded from restore because its shape depends on the number of classes, which differs from the pre-trained 1000-way checkpoint; layers you want to keep frozen during fine-tuning can instead be created with trainable=False.
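One way to fine-tune only the new head is to pass an explicit var_list to the optimizer; a sketch (not from the slides) assuming the vgg_16/fc8 scope name used above:

# Update only the re-initialized classifier layer; the restored VGG weights stay fixed.
fc8_vars = slim.get_variables(scope="vgg_16/fc8")
opt = tf.train.AdamOptimizer(0.001).minimize(loss_op, var_list=fc8_vars)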


To visualize training in TensorBoard: create a tf.summary.FileWriter, build summary ops with tf.summary.### (scalar, histogram, image, ...), evaluate them inside sess.run(...), and write the returned results with FileWriter's add_summary. Then launch the web UI with:
tensorboard --logdir=## --host=## --port=##
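For example, with the logs_path used in the code below and TensorBoard's default port, the command would be tensorboard --logdir=/tmp/tensorflow_logs/example --port=6006 (host and port can usually be left at their defaults).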
import tensorflow as tf
slim = tf.contrib.slim
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
max_steps = 10000
batch_size = 128
lr = 0.001
keep_prob = 0.5
weight_decay = 0.0004
logs_path = "/tmp/tensorflow_logs/example"
def my_arg_scope(is_training, weight_decay):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=slim.variance_scaling_initializer(),
                        biases_initializer=tf.zeros_initializer,
                        stride=1, padding="SAME"):
        with slim.arg_scope([slim.dropout],
                            is_training=is_training) as arg_sc:
            return arg_sc
def my_net(x, keep_prob, outputs_collections="my_net"):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=outputs_collections):
        net = slim.conv2d(x, 64, [3, 3], scope="conv1")
        net = slim.max_pool2d(net, [2, 2], scope="pool1")
        net = slim.conv2d(net, 128, [3, 3], scope="conv2")
        net = slim.max_pool2d(net, [2, 2], scope="pool2")
        net = slim.conv2d(net, 256, [3, 3], scope="conv3")
        # global average pooling
        net = tf.reduce_mean(net, [1, 2], name="pool3", keep_dims=True)
        net = slim.dropout(net, keep_prob, scope="dropout3")
        net = slim.conv2d(net, 1024, [1, 1], scope="fc4")
        net = slim.dropout(net, keep_prob, scope="dropout4")
        net = slim.conv2d(net, 10, [1, 1],
                          activation_fn=None, scope="fc5")
        end_points = slim.utils.convert_collection_to_dict(outputs_collections)
    return tf.reshape(net, [-1, 10]), end_points
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)

with slim.arg_scope(my_arg_scope(is_training, weight_decay)):
    net, end_pts = my_net(x, keep_prob)
    pred = slim.softmax(net, scope="prediction")
with tf.variable_scope("losses"):
    cls_loss = slim.losses.softmax_cross_entropy(net, y)
    reg_loss = tf.add_n(slim.losses.get_regularization_losses())
    loss_op = cls_loss + reg_loss
with tf.variable_scope("Adam"):
    opt = tf.train.AdamOptimizer(lr)
    # Op to calculate every variable gradient
    grads = tf.gradients(loss_op, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = opt.apply_gradients(grads_and_vars=grads)
with tf.variable_scope("accuracy"):
    correct_op = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
    acc_op = tf.reduce_mean(tf.cast(correct_op, tf.float32))
# Create a summary to monitor loss and accuracy
summ_loss = tf.summary.scalar("loss", loss_op)
summ_acc = tf.summary.scalar("accuracy_test", acc_op)
# Create summaries to visualize weights and grads
for var in tf.trainable_variables():
    tf.summary.histogram(var.name, var, collections=["my_summ"])
for grad, var in grads:
    tf.summary.histogram(var.name + "/gradient", grad,
                         collections=["my_summ"])
summ_wg = tf.summary.merge_all(key="my_summ")

sess = tf.Session()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(logs_path,
                                       graph=sess.graph)
for step in range(max_steps):
    batch_X, batch_y = mnist.train.next_batch(batch_size)
    _, loss, plot_loss, plot_wg = sess.run([apply_grads, loss_op,
                                            summ_loss, summ_wg],
                                           feed_dict={x: batch_X, y: batch_y,
                                                      is_training: True})
    summary_writer.add_summary(plot_loss, step)
    summary_writer.add_summary(plot_wg, step)
    if (step+1) % 100 == 0:
        plot_acc = sess.run(summ_acc, feed_dict={x: mnist.test.images,
                                                 y: mnist.test.labels,
                                                 is_training: False})
        summary_writer.add_summary(plot_acc, step)
print("Optimization Finished!")

test_acc = sess.run(acc_op, feed_dict={x: mnist.test.images,
                                       y: mnist.test.labels,
                                       is_training: False})
print("Test accuracy: {:.3f}".format(test_acc))

