Commit 1bf2737f authored by Yuxin Wu

hide deprecated methods from documentation

parent a9cba3c6
@@ -32,6 +32,7 @@ MOCK_MODULES = ['scipy', 'tabulate',
                'gym', 'functools32']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock(name=mod_name)
sys.modules['cv2'].__version__ = '3.2.1' # fake version
import tensorpack
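For context (not part of this diff): mocking these modules lets Sphinx import tensorpack on a docs build machine that lacks them, and the fake `cv2.__version__` matters because the version string is parsed at import time (see the `int(cv2.__version__.split('.')[0])` line further down). A minimal sketch of the trick, assuming the same `mock` package conf.py already imports:

    import sys
    import mock

    # Install a stub before anything imports cv2.
    sys.modules['cv2'] = mock.Mock(name='cv2')
    sys.modules['cv2'].__version__ = '3.2.1'  # fake version string

    import cv2  # resolves to the stub
    assert int(cv2.__version__.split('.')[0]) == 3  # version parsing still works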
@@ -349,6 +350,18 @@ def process_signature(app, what, name, obj, options, signature,
    return signature, return_annotation
def autodoc_skip_member(app, what, name, obj, skip, options):
    for deprecate in [
            'DistributedReplicatedTrainer',
            'SingleCostFeedfreeTrainer',
            'SimpleFeedfreeTrainer',
            'FeedfreeTrainerBase',
            'FeedfreeInferenceRunner',
            'replace_get_variable',
            'remap_get_variable',
            'freeze_get_variable',
            'ParamRestore']:
        if deprecate in name:
            return True
    if name in ['get_data', 'size', 'reset_state']:
        # skip these methods with empty docstring
        if not obj.__doc__ and inspect.isfunction(obj):
......
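For context (not part of this diff): `autodoc_skip_member` only takes effect once it is registered for Sphinx's autodoc-skip-member event, which conf.py does in a `setup()` function outside the hunks shown. A minimal sketch of that registration, using the standard Sphinx API:

    def setup(app):
        # Returning True from the handler hides a member from the generated docs.
        app.connect('autodoc-skip-member', autodoc_skip_member)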
@@ -6,7 +6,7 @@ Tensorpack base trainer implements the logic of __running the iteration__.
Users or derived trainers should implement __what the iteration is__.
Most neural network training tasks are single-cost optimization.
Tensorpack provides some trainer implementations for such tasks:
Tensorpack provides some trainer implementations for such tasks.
These trainers will build the graph based on the given `ModelDesc`, and minimize `ModelDesc.cost`.
Existing trainers were implemented with a certain prefetch mechanism,
......
import os
# issue#7378 may happen with custom opencv. It doesn't hurt to disable opencl
os.environ['OPENCV_OPENCL_RUNTIME'] = ''
try:
    # issue#1924 may happen on old systems
    import cv2  # noqa
    if int(cv2.__version__.split('.')[0]) == 3:
        cv2.ocl.setUseOpenCL(False)
except ImportError:
    pass
import os
# issue#7378 may happen with custom opencv. It doesn't hurt to disable opencl
os.environ['OPENCV_OPENCL_RUNTIME'] = ''
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' # issue#9339
os.environ['TF_AUTOTUNE_THRESHOLD'] = '3' # use more warm-up
......
@@ -5,8 +5,6 @@
import tensorflow as tf
import six
from .common import get_tf_version_number
from ..utils.develop import deprecated
if six.PY2:
    import functools32 as functools
else:
@@ -15,22 +13,6 @@ else:
__all__ = ['auto_reuse_variable_scope']
@deprecated("Use tf.get_default_graph().get_name_scope() (available since 1.2.1).")
def get_name_scope_name():
"""
Returns:
str: the name of the current name scope, without the ending '/'.
"""
if get_tf_version_number() > 1.2:
return tf.get_default_graph().get_name_scope()
else:
g = tf.get_default_graph()
s = "RANDOM_STR_ABCDEFG"
unique = g.unique_name(s)
scope = unique[:-len(s)].rstrip('/')
return scope
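For context (not part of this diff): the deprecation message points at the graph method that replaces this helper. A minimal usage sketch of the replacement, assuming TF 1.2.1 or later:

    import tensorflow as tf

    with tf.name_scope('foo'):
        with tf.name_scope('bar'):
            # Prints 'foo/bar', without the trailing '/'.
            print(tf.get_default_graph().get_name_scope())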
def auto_reuse_variable_scope(func):
"""
A decorator which automatically reuse the current variable scope if the
......
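For context (not part of this diff): typical use of the decorator whose definition is truncated above. `myfunc` and the inputs are hypothetical names:

    @auto_reuse_variable_scope
    def myfunc(x):
        # The first call creates the variables; later calls under the
        # same variable scope reuse them instead of raising.
        return tf.layers.dense(x, 10)

    out1 = myfunc(input1)  # creates the dense layer's variables
    out2 = myfunc(input2)  # reuses the same variables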
@@ -13,7 +13,7 @@ from ..callbacks import RunOp
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.common import get_global_step_var, get_op_tensor_name
__all__ = ['DistributedReplicatedTrainer']
__all__ = ['DistributedReplicatedTrainer', 'DistributedTrainerReplicated']
class OverrideToLocalVariable(object):
@@ -34,7 +34,7 @@ class OverrideToLocalVariable(object):
        return getter(name, *args, **kwargs)
class DistributedReplicatedTrainer(MultiGPUTrainerBase):
class DistributedTrainerReplicated(MultiGPUTrainerBase):
"""
Distributed replicated training.
Each worker process builds the same model on one or more GPUs.
@@ -323,3 +323,8 @@ class DistributedReplicatedTrainer(MultiGPUTrainerBase):
    @property
    def vs_name_for_predictor(self):
        return "tower0"
def DistributedReplicatedTrainer(*args, **kwargs):
    logger.warn("DistributedReplicatedTrainer was renamed to DistributedTrainerReplicated!")
    return DistributedTrainerReplicated(*args, **kwargs)
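For context (not part of this diff): the shim above keeps old call sites working while logging the rename. From a caller's point of view (`config` is a hypothetical argument):

    trainer = DistributedReplicatedTrainer(config)  # warns about the rename
    assert isinstance(trainer, DistributedTrainerReplicated)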