Commit 78ce3a96 authored by Yuxin Wu

summary for all parameters

parent d8330092
@@ -72,7 +72,7 @@ def get_model(inputs, is_training):
                               name='regularize_loss')
     tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, wd_cost)
-    add_histogram_summary('.*/W')   # monitor histogram of all W
+    add_param_summary('.*/W')   # monitor histogram of all W
     return [prob, nr_wrong], tf.add_n([wd_cost, cost], name='cost')

 def get_config():
@@ -73,7 +73,7 @@ def get_model(inputs, is_training):
                               name='regularize_loss')
     tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, wd_cost)
-    add_histogram_summary('.*/W')   # monitor histogram of all W
+    add_param_summary('.*')   # monitor all variables
     return [prob, nr_wrong], tf.add_n([wd_cost, cost], name='cost')

 def get_config():
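Note on the two regexes used in these examples: '.*/W' selects only variables whose name contains '/W' (the weight tensors), while '.*' selects every trainable variable. A minimal sketch of the filtering in plain Python; the variable names below are made up for illustration, in the real code they come from tf.GraphKeys.TRAINABLE_VARIABLES:

import re

names = ['conv1/W:0', 'conv1/b:0', 'fc0/W:0', 'fc0/b:0', 'bn/beta:0']

# add_param_summary('.*/W'): only weight matrices/filters
only_weights = [n for n in names if re.search('.*/W', n)]
# add_param_summary('.*'): every trainable variable
everything = [n for n in names if re.search('.*', n)]

print(only_weights)   # ['conv1/W:0', 'fc0/W:0']
print(everything)     # all five names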
@@ -155,5 +155,5 @@ if __name__ == '__main__':
     if args.load:
         config.session_init = SaverRestore(args.load)
     if args.gpu:
-        config['nr_tower'] = len(args.gpu.split(','))
+        config.nr_tower = len(args.gpu.split(','))
     start_train(config)
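The fix above switches from dict indexing to attribute access on the config object, and derives the number of towers from the comma-separated --gpu argument. A minimal self-contained sketch of the same pattern; FakeConfig is a stand-in for the real train config object and is not part of the commit:

import argparse

class FakeConfig(object):
    """Stand-in for the real train config; only nr_tower matters here."""
    nr_tower = 1

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help="comma-separated GPU ids, e.g. '0,1'")
args = parser.parse_args(['--gpu', '0,1'])   # simulate command-line input

config = FakeConfig()
if args.gpu:
    # attribute access (config.nr_tower), not dict indexing (config['nr_tower'])
    config.nr_tower = len(args.gpu.split(','))
print(config.nr_tower)   # 2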
@@ -82,7 +82,7 @@ def get_model(inputs, is_training):
                               name='regularize_loss')
     tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, wd_cost)
-    add_histogram_summary('.*/W')   # monitor histogram of all W
+    add_param_summary('.*/W')   # monitor histogram of all W
     return [prob, nr_wrong], tf.add_n([wd_cost, cost], name='cost')

 def get_config():
@@ -140,15 +140,16 @@ def start_train(config):
     grads = []
     for i in range(config.nr_tower):
         with tf.device('/gpu:{}'.format(i)):
-            model_inputs = get_model_inputs()
-            output_vars, cost_var = config.get_model_func(model_inputs, is_training=True)
-            grads.append(
-                config.optimizer.compute_gradients(cost_var))
-            if i == 0:
-                tf.get_variable_scope().reuse_variables()
-                for k in coll_keys:
-                    kept_summaries[k] = copy.copy(tf.get_collection(k))
+            with tf.name_scope('tower{}'.format(i)) as scope:
+                model_inputs = get_model_inputs()
+                output_vars, cost_var = config.get_model_func(model_inputs, is_training=True)
+                grads.append(
+                    config.optimizer.compute_gradients(cost_var))
+                if i == 0:
+                    tf.get_variable_scope().reuse_variables()
+                    for k in coll_keys:
+                        kept_summaries[k] = copy.copy(tf.get_collection(k))
     for k in coll_keys:    # avoid repeating summary on multiple devices
         del tf.get_collection(k)[:]
         tf.get_collection(k).extend(kept_summaries[k])
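Two things happen in the rewritten tower loop: each tower's ops are now built under a distinct tf.name_scope('tower{i}'), and the summary collections are snapshotted after tower 0 and restored once all towers are built, so towers 1..N-1 do not add duplicate summaries. A minimal plain-Python sketch of the snapshot/restore trick; the dict below stands in for TensorFlow's graph collections and is illustrative only:

import copy

collections = {'summaries': []}      # stand-in for tf.get_collection(...)
coll_keys = ['summaries']
kept_summaries = {}
nr_tower = 3

for i in range(nr_tower):
    # building the model inside tower{i} appends its summary ops
    collections['summaries'].append('tower{}/cost_summary'.format(i))
    if i == 0:
        for k in coll_keys:
            kept_summaries[k] = copy.copy(collections[k])

for k in coll_keys:   # avoid repeating summary on multiple devices
    del collections[k][:]
    collections[k].extend(kept_summaries[k])

print(collections['summaries'])      # ['tower0/cost_summary']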
@@ -32,16 +32,19 @@ def add_activation_summary(x, name=None):
     tf.histogram_summary(name + '/activations', x)
     tf.scalar_summary(name + '/sparsity', tf.nn.zero_fraction(x))

-def add_histogram_summary(regex):
+def add_param_summary(regex):
     """
-    Add histogram summary for all trainable variables matching the regex
+    Add summary for all trainable variables matching the regex
     """
     import re
     params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
     for p in params:
         name = p.name
         if re.search(regex, name):
-            tf.histogram_summary(name, p)
+            if p.get_shape().ndims == 0:
+                tf.scalar_summary(name, p)
+            else:
+                tf.histogram_summary(name, p)

 def summary_moving_average(cost_var):
     """ Create a MovingAverage op and summary for all variables in
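The new branch in add_param_summary emits a scalar summary for rank-0 (scalar) parameters and a histogram for everything else. For reference, a sketch of how the same function might look against the post-1.0 tf.summary names; the commit itself uses the older tf.scalar_summary/tf.histogram_summary, so the renamed calls below are an assumption, not part of this change:

import re
import tensorflow as tf

def add_param_summary(regex):
    """Add a summary for every trainable variable whose name matches regex.

    Rank-0 (scalar) variables get a scalar summary; all others a histogram.
    Assumes TF 1.x graph mode and the tf.summary.* API.
    """
    params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for p in params:
        name = p.name
        if re.search(regex, name):
            if p.get_shape().ndims == 0:
                tf.summary.scalar(name, p)
            else:
                tf.summary.histogram(name, p)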