Commit 52664b97 authored by Yuxin Wu's avatar Yuxin Wu

support types for activation summary

parent 61d3f50a
...@@ -199,7 +199,7 @@ class CheckGradient(MapGradient): ...@@ -199,7 +199,7 @@ class CheckGradient(MapGradient):
def _mapper(self, grad, var): def _mapper(self, grad, var):
# this is very slow.... see #3649 # this is very slow.... see #3649
# op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100) # op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
grad = tf.check_numerics(grad, 'CheckGradient-' + var.op.name) grad = tf.check_numerics(grad, 'CheckGradient/' + var.op.name)
return grad return grad
......
...@@ -94,7 +94,7 @@ def add_tensor_summary(x, types, name=None, collections=None, ...@@ -94,7 +94,7 @@ def add_tensor_summary(x, types, name=None, collections=None,
Args: Args:
x (tf.Tensor): a tensor to summarize x (tf.Tensor): a tensor to summarize
types (list[str]): can be scalar/histogram/sparsity/mean/rms types (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms
name (str): summary name. Defaults to be the op name. name (str): summary name. Defaults to be the op name.
collections (list[str]): collections of the summary ops. collections (list[str]): collections of the summary ops.
main_tower_only (bool): Only run under main training tower. If main_tower_only (bool): Only run under main training tower. If
...@@ -132,33 +132,31 @@ def add_tensor_summary(x, types, name=None, collections=None, ...@@ -132,33 +132,31 @@ def add_tensor_summary(x, types, name=None, collections=None,
SUMMARY_TYPES_DIC[typ]() SUMMARY_TYPES_DIC[typ]()
def add_activation_summary(x, name=None, collections=None): def add_activation_summary(x, types=None, name=None, collections=None):
""" """
Add summary for an activation tensor x, including its sparsity, rms, and histogram. Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.
This function is a no-op if not calling from main training tower. This function is a no-op if not calling from main training tower.
Args: Args:
x (tf.Tensor): the tensor to summary. x (tf.Tensor): the tensor to summary.
types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.
name (str): if is None, use x.name. name (str): if is None, use x.name.
collections (list[str]): collections of the summary ops. collections (list[str]): collections of the summary ops.
""" """
ctx = get_current_tower_context()
if ctx is not None and not ctx.is_main_training_tower:
return
ndim = x.get_shape().ndims ndim = x.get_shape().ndims
if ndim < 2: if ndim < 2:
logger.warn("Cannot summarize scalar activation {}".format(x.name)) logger.warn("Cannot summarize scalar activation {}".format(x.name))
return return
if name is None: if types is None:
name = x.name types = ['sparsity', 'rms', 'histogram']
with cached_name_scope('activation-summary'): with cached_name_scope('activation-summary'):
add_tensor_summary(x, ['sparsity', 'rms', 'histogram'], add_tensor_summary(x, types, name=name, collections=collections)
name=name, collections=collections)
def add_param_summary(*summary_lists, **kwargs): def add_param_summary(*summary_lists, **kwargs):
""" """
Add summary Ops for all trainable variables matching the regex. Add summary ops for all trainable variables matching the regex, under a
reused 'param-summary' name scope.
This function is a no-op if not calling from main training tower. This function is a no-op if not calling from main training tower.
Args: Args:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment