Commit 88893e56 authored by Yuxin Wu

add and use `cached_name_scope` for internal summaries

parent 64a13448
tensorpack/tfutils/scope_utils.py
@@ -4,13 +4,12 @@
 # Author: Yuxin Wu <ppwwyyxxc@gmail.com>
 import tensorflow as tf
-import six
-if six.PY2:
-    import functools32 as functools
-else:
-    import functools
+import functools
+from contextlib import contextmanager
 
+from ..utils.argtools import graph_memoized
 
-__all__ = ['auto_reuse_variable_scope']
+__all__ = ['auto_reuse_variable_scope', 'cached_name_scope']
 
 
 def auto_reuse_variable_scope(func):
@@ -33,3 +32,25 @@ def auto_reuse_variable_scope(func):
             return func(*args, **kwargs)
 
     return wrapper
+
+
+@graph_memoized
+def _get_cached_ns(name):
+    with tf.name_scope(None):
+        with tf.name_scope(name) as scope:
+            return scope
+
+
+@contextmanager
+def cached_name_scope(name):
+    """
+    Return a context which either opens and caches a new top-level name scope,
+    or reenters an existing one.
+
+    Note:
+        The name scope will always be top-level. It will not be nested under
+        any existing name scope of the caller.
+    """
+    ns = _get_cached_ns(name)
+    with tf.name_scope(ns):
+        yield ns
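For reference, a minimal usage sketch of the new context manager (the import path and the scope name 'my-summaries' are illustrative, not from this commit): because `_get_cached_ns` is `graph_memoized` and resets to the root scope via `tf.name_scope(None)`, the cached scope string ends in '/' and can be reentered exactly, so repeated calls share one top-level scope instead of producing 'my-summaries_1', 'my-summaries_2', ...

    import tensorflow as tf
    from tensorpack.tfutils.scope_utils import cached_name_scope  # assumed import path

    with tf.name_scope('tower0'):
        with cached_name_scope('my-summaries'):
            a = tf.constant(1, name='a')   # -> 'my-summaries/a', not 'tower0/my-summaries/a'

    with cached_name_scope('my-summaries'):
        b = tf.constant(2, name='b')       # -> 'my-summaries/b': same scope reentered, no '_1' suffix

    print(a.op.name, b.op.name)            # my-summaries/a my-summaries/b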
tensorpack/tfutils/summary.py
@@ -17,12 +17,28 @@ from ..utils.argtools import graph_memoized
 from ..utils.naming import MOVING_SUMMARY_OPS_KEY
 from .tower import get_current_tower_context
 from .symbolic_functions import rms
+from .scope_utils import cached_name_scope
 
 __all__ = ['create_scalar_summary', 'create_image_summary',
            'add_tensor_summary', 'add_param_summary',
            'add_activation_summary', 'add_moving_summary']
 
+
+# some scope stuff to use internally...
+@graph_memoized
+def _get_cached_vs(name):
+    with tf.variable_scope(name) as scope:
+        return scope
+
+
+@contextmanager
+def _enter_vs_reuse_ns(name):
+    vs = _get_cached_vs(name)
+    with tf.variable_scope(vs):
+        with tf.name_scope(vs.original_name_scope):
+            yield vs
+
 
 def create_scalar_summary(name, v):
     """
     Args:
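A note on the private helper pair above, as a standalone sketch of the mechanism (a dict stands in for `@graph_memoized`, and the scope name 'EMA' is illustrative): `tf.variable_scope(vs)` with a cached scope object reopens the same variable scope, but by default it still opens a fresh, uniquified name scope; nesting `tf.name_scope(vs.original_name_scope)` (a string ending in '/') reenters the original name scope exactly, so ops land under one prefix on every call.

    import tensorflow as tf

    _vs_cache = {}  # stands in for @graph_memoized in this sketch

    def _get_cached_vs(name):
        if name not in _vs_cache:
            with tf.variable_scope(name) as scope:
                _vs_cache[name] = scope
        return _vs_cache[name]

    def demo():
        vs = _get_cached_vs('EMA')           # 'EMA' is an illustrative name
        with tf.variable_scope(vs):
            with tf.name_scope(vs.original_name_scope):
                return tf.constant(0, name='op')

    x = demo()   # -> 'EMA/op'
    y = demo()   # -> 'EMA/op_1': same 'EMA/' scope, only the op name is uniquified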
@@ -90,8 +106,9 @@ def add_tensor_summary(x, types, name=None, collections=None,
     .. code-block:: python
 
-        add_tensor_summary(
-            tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')
+        with tf.name_scope('mysummaries'):  # to not mess up tensorboard
+            add_tensor_summary(
+                tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')
     """
     types = set(types)
     if name is None:
@@ -134,7 +151,7 @@ def add_activation_summary(x, name=None, collections=None):
         return
     if name is None:
         name = x.name
-    with tf.name_scope('activation-summary'):
+    with cached_name_scope('activation-summary'):
         add_tensor_summary(x, ['sparsity', 'rms', 'histogram'],
                            name=name, collections=collections)
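Why `cached_name_scope` matters here, shown as a small sketch of plain TF 1.x scoping behavior (names illustrative): each `tf.name_scope('activation-summary')` entry gets a uniquified name, so summaries from repeated calls used to scatter across 'activation-summary', 'activation-summary_1', ... in TensorBoard; reentering the cached scope string keeps them together.

    import tensorflow as tf

    with tf.name_scope('activation-summary') as s1:
        pass
    with tf.name_scope('activation-summary') as s2:
        pass
    print(s1, s2)   # 'activation-summary/' vs. 'activation-summary_1/': uniquified per entry

    # Reentering the captured scope string (which ends in '/') avoids the suffix --
    # this is the behavior cached_name_scope relies on:
    with tf.name_scope(s1) as s3:
        pass
    print(s3)       # 'activation-summary/' again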
@@ -145,8 +162,8 @@ def add_param_summary(*summary_lists, **kwargs):
     This function is a no-op if not calling from main training tower.
 
     Args:
-        summary_lists (list): each is (regex, [list of summary type to perform]).
-            Summary type can be 'mean', 'scalar', 'histogram', 'sparsity', 'rms'
+        summary_lists (list): each is (regex, [list of summary type]).
+            Summary type is defined in :func:`add_tensor_summary`.
         kwargs: only ``collections`` is allowed.
 
     Examples:
@@ -165,7 +182,7 @@ def add_param_summary(*summary_lists, **kwargs):
         return
     params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
-    with tf.name_scope('param-summary'):
+    with cached_name_scope('param-summary'):
         for p in params:
             name = p.op.name
             for rgx, actions in summary_lists:
@@ -175,20 +192,6 @@ def add_param_summary(*summary_lists, **kwargs):
             add_tensor_summary(p, actions, name=name, collections=collections)
 
-
-@graph_memoized
-def _get_cached_vs(name):
-    with tf.variable_scope(name) as scope:
-        return scope
-
-
-@contextmanager
-def _enter_vs_reuse_ns(name):
-    vs = _get_cached_vs(name)
-    with tf.variable_scope(vs):
-        with tf.name_scope(vs.original_name_scope):
-            yield vs
-
 
 def add_moving_summary(*args, **kwargs):
     """
     Enable moving average summary for some tensors.
...