Commit 377bfd0f authored by Yuxin Wu

update docs

parent 92f159b6
@@ -363,6 +363,7 @@ _DEPRECATED_NAMES = set([
     'TFRecordData',
     'dump_dataflow_to_lmdb',
     'dump_dataflow_to_tfrecord',
+    'pyplot2img',

     # renamed stuff:
     'DumpTensor',
...
@@ -6,4 +6,5 @@ Sphinx>=1.6
 recommonmark==0.4.0
 sphinx_rtd_theme
 mock
+matplotlib
 tensorflow==1.4.0
@@ -28,7 +28,7 @@ def global_import(name):
 _CURR_DIR = os.path.dirname(__file__)
-_SKIP = []
+_SKIP = ['training', 'distributed']
 for _, module_name, _ in iter_modules(
         [_CURR_DIR]):
     srcpath = os.path.join(_CURR_DIR, module_name + '.py')
...
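For context, the `_SKIP` list above feeds the package's auto-import loop: every submodule in the directory is imported and re-exported unless it is skipped. The following is a minimal, standalone sketch of that pattern (simplified for illustration; it is meant to live in a package's `__init__.py`, and everything beyond the names visible in the diff is an assumption, not the repository's exact code):

    import os
    from pkgutil import iter_modules
    from importlib import import_module

    _CURR_DIR = os.path.dirname(__file__)
    _SKIP = ['training', 'distributed']

    def global_import(name):
        # import the submodule and re-export its public names (sketch only)
        mod = import_module('.' + name, __package__)
        for attr in getattr(mod, '__all__', []):
            globals()[attr] = getattr(mod, attr)

    for _, module_name, _ in iter_modules([_CURR_DIR]):
        if module_name not in _SKIP:
            global_import(module_name)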
@@ -40,6 +40,9 @@ class PlaceholderInput(InputSource):
     """
     Just produce placeholders as input tensors.
     """
+    def __init__(self):
+        pass
+
     def _setup(self, inputs):
         self._all_placehdrs = [v.build_placeholder_reuse() for v in inputs]
...
@@ -75,8 +75,8 @@ class InputSource(object):
         """
         Returns:
             list: A list of tensors corresponding to the inputs of the model,
                 used as input of :func:`build_graph`.
                 For non-placeholder tensors, should always create and return new tensors when called.
         """
         return self._get_input_tensors()
@@ -92,7 +92,7 @@ class InputSource(object):
         Returns:
             list[Callback]: extra callbacks needed by this InputSource.
             callbacks of InputSource cannot use any `trigger*()` method.
         """
         self._setup(inputs_desc)
         self._setup_done = True
...
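The two hunks above show the InputSource lifecycle: `setup(inputs_desc)` calls the subclass hook `_setup()`, and `get_input_tensors()` calls `_get_input_tensors()`. A rough sketch of a custom subclass, based only on the hooks visible in this diff (the import path and every other detail of the base class are assumptions):

    import tensorflow as tf
    from tensorpack.input_source import InputSource   # import path is an assumption

    class ZeroInput(InputSource):
        """Illustration only: produce fresh zero tensors as model inputs."""
        def _setup(self, inputs):
            # 'inputs' describes the model's inputs; remember it for later
            self._descs = inputs

        def _get_input_tensors(self):
            # Per the docstring above, non-placeholder tensors should be
            # newly created on every call.
            return [tf.zeros([1]) for _ in self._descs]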
@@ -18,7 +18,10 @@ __all__ = ['GradientProcessor',

 @six.add_metaclass(ABCMeta)
 class GradientProcessor(object):
-    """ Base class for all gradient processors.
+    """
+    Base class for all gradient processors.
+    Gradient processors can be applied to optimizers by
+    :func:`optimizer.apply_grad_processors`.

     Subclass should override the ``_process()`` method.
     """
...
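A hedged usage sketch of what the new docstring describes: wrapping an optimizer with gradient processors via `optimizer.apply_grad_processors`. `GlobalNormClip` and `SummaryGradient` are existing processors in `tensorpack.tfutils.gradproc`; the optimizer and its hyperparameters below are arbitrary choices for illustration.

    import tensorflow as tf
    from tensorpack.tfutils import optimizer, gradproc

    opt = tf.train.MomentumOptimizer(0.01, 0.9)
    opt = optimizer.apply_grad_processors(
        opt, [gradproc.GlobalNormClip(5), gradproc.SummaryGradient()])
    # 'opt' now runs every gradient through the processors before applying it.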
@@ -4,6 +4,8 @@
 import tensorflow as tf
 from contextlib import contextmanager

+from ..utils.develop import HIDE_DOC
 from .gradproc import FilterNoneGrad, GradientProcessor

 __all__ = ['apply_grad_processors', 'ProxyOptimizer',
@@ -20,15 +22,19 @@ class ProxyOptimizer(tf.train.Optimizer):
         super(ProxyOptimizer, self).__init__(False, name)
         self._opt = opt

+    @HIDE_DOC
     def compute_gradients(self, *args, **kwargs):
         return self._opt.compute_gradients(*args, **kwargs)

+    @HIDE_DOC
     def get_slot(self, *args, **kwargs):
         return self._opt.get_slot(*args, **kwargs)

+    @HIDE_DOC
     def get_slot_names(self, *args, **kwargs):
         return self._opt.get_slot_names(*args, **kwargs)

+    @HIDE_DOC
     def apply_gradients(self, *args, **kwargs):
         return self._opt.apply_gradients(*args, **kwargs)
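The `@HIDE_DOC` markers added here keep these pass-through methods out of the generated API docs. The following is only a generic sketch of how such a decorator can work together with Sphinx; the real `HIDE_DOC` in `tensorpack.utils.develop` may be implemented differently, and the attribute name used below is hypothetical.

    def HIDE_DOC(func):
        func.__HIDE_SPHINX_DOC__ = True   # hypothetical marker attribute
        return func

    # In docs/conf.py, an autodoc-skip-member handler can skip marked members:
    def autodoc_skip_member(app, what, name, obj, skip, options):
        if getattr(obj, '__HIDE_SPHINX_DOC__', False):
            return True   # skip this member in the generated docs
        return skip

    def setup(app):
        app.connect('autodoc-skip-member', autodoc_skip_member)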
@@ -85,6 +91,7 @@ class PostProcessOptimizer(ProxyOptimizer):
         self._func = func
         self._colocate = colocate

+    @HIDE_DOC
     def apply_gradients(self, grads_and_vars, global_step=None, name=None):
         update_op = super(PostProcessOptimizer, self).apply_gradients(
             grads_and_vars, global_step)
@@ -131,8 +138,8 @@ class VariableAssignmentOptimizer(PostProcessOptimizer):

 class AccumGradOptimizer(ProxyOptimizer):
     """
-    An optimizer which accumulates gradients across :math:`k` :meth:`minimize` calls,
-    and apply them together in every :math:`k`th :meth:`minimize` call.
+    An optimizer which accumulates gradients across :math:`k` :meth:`minimize` executions,
+    and apply them together in every :math:`k` th :meth:`minimize` execution.
     This is roughly the same as using a :math:`k` times larger batch size plus a
     :math:`k` times larger learning rate, but uses much less memory.

@@ -157,6 +164,7 @@ class AccumGradOptimizer(ProxyOptimizer):
             slots.append(s)
         return slots

+    @HIDE_DOC
     def apply_gradients(self, grads_and_vars, global_step=None, name=None):
         assert global_step is None, \
             "AccumGradOptimizer doesn't support the option global_step! " \
...
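A hedged usage sketch of `AccumGradOptimizer` as the docstring describes it: gradients from k consecutive `minimize()` calls are accumulated and applied once, which behaves roughly like a k-times-larger batch with a k-times-larger learning rate. The constructor signature `(opt, k)` is assumed from the docstring; check the class definition for the exact argument name.

    import tensorflow as tf
    from tensorpack.tfutils.optimizer import AccumGradOptimizer

    base_opt = tf.train.GradientDescentOptimizer(0.1)
    opt = AccumGradOptimizer(base_opt, 8)   # apply accumulated grads every 8 steps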
@@ -29,6 +29,9 @@ class BaseTowerContext(object):
     @HIDE_DOC
     def __init__(self, ns_name, vs_name=''):
         """
+        This is not supposed to be used by users.
+        You need to use :func:`TowerContext` to create a :class:`BaseTowerContext`.
+
         Args:
             ns_name (str): The name scope of the tower.
             vs_name (str): Open a new variable scope with this name.
@@ -214,7 +217,7 @@ class PredictTowerContext(BaseTowerContext):

 def get_current_tower_context():
     """
-    When called inside a TowerContext, return the TowerContext.
+    When called inside a TowerContext, returns the TowerContext.

     Returns:
         a :class:`BaseTowerContext` instance.
...
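A hedged sketch of the pattern the docstrings above describe: users create tower contexts through `TowerContext` rather than `BaseTowerContext`, and query the active one with `get_current_tower_context()` (which returns None when no tower context is open). The exact argument names of `TowerContext` below are assumptions.

    from tensorpack.tfutils.tower import TowerContext, get_current_tower_context

    assert get_current_tower_context() is None   # no tower context open yet

    with TowerContext('', is_training=True):
        ctx = get_current_tower_context()
        print(ctx.is_training)   # True inside this training tower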
@@ -7,7 +7,7 @@ GIT_ARG="--git-dir ../.git --work-tree .."

 # find out modified python files, so that we ignored unstaged files
 # exclude ../docs
 MOD=$(git $GIT_ARG status -s \
-    | grep -E '\.py$' | grep -v '../docs' \
+    | grep -E '\.py$' | grep -v '../docs' | grep -v '__init__' \
     | grep -E '^ *M|^ *A' | cut -c 4- )
 if [[ -n $MOD ]]; then
     flake8 $MOD
...