Commit 377bfd0f authored by Yuxin Wu

update docs

parent 92f159b6
@@ -363,6 +363,7 @@ _DEPRECATED_NAMES = set([
     'TFRecordData',
     'dump_dataflow_to_lmdb',
     'dump_dataflow_to_tfrecord',
+    'pyplot2img',

     # renamed stuff:
     'DumpTensor',
......
@@ -6,4 +6,5 @@ Sphinx>=1.6
 recommonmark==0.4.0
 sphinx_rtd_theme
 mock
+matplotlib
 tensorflow==1.4.0
@@ -28,7 +28,7 @@ def global_import(name):
 _CURR_DIR = os.path.dirname(__file__)
-_SKIP = []
+_SKIP = ['training', 'distributed']
 for _, module_name, _ in iter_modules(
         [_CURR_DIR]):
     srcpath = os.path.join(_CURR_DIR, module_name + '.py')
......
@@ -40,6 +40,9 @@ class PlaceholderInput(InputSource):
     """
     Just produce placeholders as input tensors.
     """
+    def __init__(self):
+        pass
+
     def _setup(self, inputs):
         self._all_placehdrs = [v.build_placeholder_reuse() for v in inputs]
......
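As an aside on the hunk above: `_setup` simply turns each declared model input into a `tf.placeholder`. Below is a minimal sketch of that idea in plain TensorFlow 1.x (matching the `tensorflow==1.4.0` pin in the requirements hunk earlier); the `InputSpec` tuple and `make_placeholders` helper are hypothetical stand-ins for tensorpack's own input descriptions, not its real API:

```python
import tensorflow as tf
from collections import namedtuple

# Hypothetical stand-in for tensorpack's input descriptions.
InputSpec = namedtuple('InputSpec', ['dtype', 'shape', 'name'])

def make_placeholders(specs):
    # One placeholder per declared model input, which is
    # essentially what PlaceholderInput._setup produces.
    return [tf.placeholder(s.dtype, s.shape, name=s.name) for s in specs]

specs = [InputSpec(tf.float32, [None, 28, 28], 'image'),
         InputSpec(tf.int32, [None], 'label')]
image, label = make_placeholders(specs)
```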
@@ -18,7 +18,10 @@ __all__ = ['GradientProcessor',
 @six.add_metaclass(ABCMeta)
 class GradientProcessor(object):
-    """ Base class for all gradient processors.
+    """
+    Base class for all gradient processors.
+    Gradient processors can be applied to optimizers by
+    :func:`optimizer.apply_grad_processors`.

     Subclass should override the ``_process()`` method.
     """
......
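To make the new docstring concrete, here is a hedged sketch of a custom processor: subclass `GradientProcessor`, override `_process()` (which in tensorpack receives and returns a list of `(gradient, variable)` pairs), and attach it with `optimizer.apply_grad_processors` as the docstring says. `ScaleGrad` and the 0.1 factor are invented for illustration, and the import paths are assumptions based on this commit's file layout:

```python
import tensorflow as tf
from tensorpack.tfutils.gradproc import GradientProcessor
from tensorpack.tfutils.optimizer import apply_grad_processors

class ScaleGrad(GradientProcessor):
    """Illustrative processor that scales every gradient by a constant."""
    def _process(self, grads):
        # grads is a list of (gradient, variable) pairs
        return [(g * 0.1, v) for g, v in grads]

opt = tf.train.GradientDescentOptimizer(0.01)
# Returns a new optimizer whose compute_gradients() output is
# passed through the given processors before being applied.
opt = apply_grad_processors(opt, [ScaleGrad()])
```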
@@ -4,6 +4,8 @@
 import tensorflow as tf
 from contextlib import contextmanager
+
+from ..utils.develop import HIDE_DOC
 from .gradproc import FilterNoneGrad, GradientProcessor

 __all__ = ['apply_grad_processors', 'ProxyOptimizer',
@@ -20,15 +22,19 @@ class ProxyOptimizer(tf.train.Optimizer):
         super(ProxyOptimizer, self).__init__(False, name)
         self._opt = opt

+    @HIDE_DOC
     def compute_gradients(self, *args, **kwargs):
         return self._opt.compute_gradients(*args, **kwargs)

+    @HIDE_DOC
     def get_slot(self, *args, **kwargs):
         return self._opt.get_slot(*args, **kwargs)

+    @HIDE_DOC
     def get_slot_names(self, *args, **kwargs):
         return self._opt.get_slot_names(*args, **kwargs)

+    @HIDE_DOC
     def apply_gradients(self, *args, **kwargs):
         return self._opt.apply_gradients(*args, **kwargs)
@@ -85,6 +91,7 @@ class PostProcessOptimizer(ProxyOptimizer):
         self._func = func
         self._colocate = colocate

+    @HIDE_DOC
     def apply_gradients(self, grads_and_vars, global_step=None, name=None):
         update_op = super(PostProcessOptimizer, self).apply_gradients(
             grads_and_vars, global_step)
@@ -131,8 +138,8 @@ class VariableAssignmentOptimizer(PostProcessOptimizer):
 class AccumGradOptimizer(ProxyOptimizer):
     """
-    An optimizer which accumulates gradients across :math:`k` :meth:`minimize` calls,
-    and apply them together in every :math:`k`th :meth:`minimize` call.
+    An optimizer which accumulates gradients across :math:`k` :meth:`minimize` executions,
+    and applies them together in every :math:`k` th :meth:`minimize` execution.

     This is roughly the same as using a :math:`k` times larger batch size plus a
     :math:`k` times larger learning rate, but uses much less memory.
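A usage sketch of the semantics described in that docstring, assuming the constructor signature `AccumGradOptimizer(opt, niter)` used by tensorpack at this point: with `niter=4`, variables only change on every 4th `minimize()` call, which emulates a 4x larger batch without holding 4x the activations in memory:

```python
import tensorflow as tf
from tensorpack.tfutils.optimizer import AccumGradOptimizer

x = tf.Variable(1.0)
loss = tf.square(x)

# Gradients from 4 consecutive minimize() calls are accumulated in
# slot variables; the accumulated sum is applied on the 4th call.
opt = AccumGradOptimizer(tf.train.GradientDescentOptimizer(0.1), 4)
train_op = opt.minimize(loss)
```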
@@ -157,6 +164,7 @@ class AccumGradOptimizer(ProxyOptimizer):
             slots.append(s)
         return slots

+    @HIDE_DOC
     def apply_gradients(self, grads_and_vars, global_step=None, name=None):
         assert global_step is None, \
             "AccumGradOptimizer doesn't support the option global_step! " \
......
@@ -29,6 +29,9 @@ class BaseTowerContext(object):
     @HIDE_DOC
     def __init__(self, ns_name, vs_name=''):
         """
+        This is not supposed to be used by users.
+        You need to use :func:`TowerContext` to create a :class:`BaseTowerContext`.
+
         Args:
             ns_name (str): The name scope of the tower.
             vs_name (str): Open a new variable scope with this name.
@@ -214,7 +217,7 @@ class PredictTowerContext(BaseTowerContext):
 def get_current_tower_context():
     """
-    When called inside a TowerContext, return the TowerContext.
+    When called inside a TowerContext, returns the TowerContext.

     Returns:
         a :class:`BaseTowerContext` instance.
......
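Putting the two tower-context hunks together, a sketch of the intended usage: build contexts through the :func:`TowerContext` factory (which picks the right `BaseTowerContext` subclass) instead of instantiating `BaseTowerContext` yourself, and query the active one with `get_current_tower_context()`. The `TowerContext('name', is_training=...)` signature and the `None` result outside any tower are assumptions about this version of tensorpack:

```python
from tensorpack.tfutils.tower import TowerContext, get_current_tower_context

with TowerContext('tower0', is_training=True):
    ctx = get_current_tower_context()
    print(ctx.name, ctx.is_training)    # inside: the active context

print(get_current_tower_context())      # outside any tower: None (assumed)
```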
@@ -7,7 +7,7 @@ GIT_ARG="--git-dir ../.git --work-tree .."
 # find out modified python files, so that we ignore unstaged files
 # exclude ../docs
 MOD=$(git $GIT_ARG status -s \
-    | grep -E '\.py$' | grep -v '../docs' \
+    | grep -E '\.py$' | grep -v '../docs' | grep -v '__init__' \
     | grep -E '^ *M|^ *A' | cut -c 4- )
 if [[ -n $MOD ]]; then
     flake8 $MOD
......