Commit c4b38010 authored by Yuxin Wu

fix sphinx

parent dc782068

@@ -18,6 +18,7 @@ import sys, os, re
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 sys.path.insert(0, os.path.abspath('../'))
+os.environ['TENSORPACK_DOC_BUILDING'] = '1'
 import mock

@@ -34,8 +35,8 @@ import mock
 #+ ', '.join(["{}={}".format(k,v) for k,v in kwargs.items()]) + ')'
 MOCK_MODULES = ['numpy', 'scipy', 'tensorflow', 'scipy.misc', 'h5py', 'nltk',
-                'cv2', 'scipy.io', 'dill', 'zmq', 'subprocess32', 'lmdb',
-                'tornado', 'msgpack', 'msgpack_numpy']
+                'cv2', 'scipy.io', 'dill', 'zmq', 'subprocess32', 'lmdb', 'tornado.concurrent',
+                'tornado', 'msgpack', 'msgpack_numpy', 'ale_python_interface']
 for mod_name in MOCK_MODULES:
     sys.modules[mod_name] = mock.Mock(name=mod_name)
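
Mocking these modules is what lets autodoc import tensorpack on a docs-only machine. A minimal sketch of the same idea, not part of this commit (module names illustrative, and it assumes the `mock` package used by conf.py is installed):

.. code-block:: python

    import sys
    import mock  # conf.py uses this package; unittest.mock behaves the same on Python 3

    # Stub heavy dependencies before anything imports them.
    for mod_name in ['cv2', 'tensorflow']:
        sys.modules[mod_name] = mock.Mock(name=mod_name)

    import cv2                # resolves to the Mock, not the real library
    print(cv2.resize)         # <Mock name='cv2.resize' id='...'>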

@@ -320,17 +321,18 @@ def skip(app, what, name, obj, skip, options):
             return False
     return skip

-def get_rst(app, what, name, obj, options, signature,
+def process_signature(app, what, name, obj, options, signature,
                 return_annotation):
     if signature:
         # replace Mock function names
         signature = re.sub('<Mock name=\'([^\']+)\'.*>', '\g<1>', signature)
         signature = re.sub('tensorflow', 'tf', signature)
+        # signature: arg list
     return signature, return_annotation

 def setup(app):
     from recommonmark.transform import AutoStructify
-    app.connect('autodoc-process-signature', get_rst)
+    app.connect('autodoc-process-signature', process_signature)
     app.connect("autodoc-skip-member", skip)
     app.add_config_value(
         'recommonmark_config',
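
The two `re.sub` calls clean up signatures that autodoc extracts from mocked modules. A small illustration, not from the repo, of what they do to a default value that is a Mock:

.. code-block:: python

    import re

    sig = "(x, nl=<Mock name='tensorflow.identity' id='140'>)"
    sig = re.sub(r"<Mock name='([^']+)'.*>", r'\g<1>', sig)   # keep only the mocked name
    sig = re.sub('tensorflow', 'tf', sig)                     # shorten the module prefix
    print(sig)   # (x, nl=tf.identity)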

@@ -17,16 +17,16 @@ Contents:
   user/models

 .. toctree::
-  :maxdepth: 2
+  :maxdepth: 1

-  modules/tensorpack
-
-..
   modules/tensorpack.models
-  modules/tensorpack.tfutils
-  modules/tensorpack.train
+  modules/tensorpack.dataflow
   modules/tensorpack.callbacks
+  modules/tensorpack.train
   modules/tensorpack.utils
+  modules/tensorpack.tfutils
+  modules/tensorpack.RL

 Indices and tables

@@ -28,6 +28,14 @@ tensorpack.callbacks.dump module
     :undoc-members:
     :show-inheritance:

+tensorpack.callbacks.graph module
+---------------------------------
+
+.. automodule:: tensorpack.callbacks.graph
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 tensorpack.callbacks.group module
 ---------------------------------

@@ -4,10 +4,18 @@ tensorpack.dataflow.dataset package
 Submodules
 ----------

-tensorpack.dataflow.dataset.cifar10 module
+tensorpack.dataflow.dataset.bsds500 module
 ------------------------------------------

-.. automodule:: tensorpack.dataflow.dataset.cifar10
+.. automodule:: tensorpack.dataflow.dataset.bsds500
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tensorpack.dataflow.dataset.cifar module
+----------------------------------------
+
+.. automodule:: tensorpack.dataflow.dataset.cifar
     :members:
     :undoc-members:
     :show-inheritance:

@@ -28,6 +28,14 @@ tensorpack.dataflow.imgaug.deform module
     :undoc-members:
     :show-inheritance:

+tensorpack.dataflow.imgaug.geometry module
+------------------------------------------
+
+.. automodule:: tensorpack.dataflow.imgaug.geometry
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 tensorpack.dataflow.imgaug.imgproc module
 -----------------------------------------

@@ -36,6 +44,22 @@ tensorpack.dataflow.imgaug.imgproc module
     :undoc-members:
     :show-inheritance:

+tensorpack.dataflow.imgaug.meta module
+--------------------------------------
+
+.. automodule:: tensorpack.dataflow.imgaug.meta
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+tensorpack.dataflow.imgaug.noise module
+---------------------------------------
+
+.. automodule:: tensorpack.dataflow.imgaug.noise
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 tensorpack.dataflow.imgaug.noname module
 ----------------------------------------

@@ -60,6 +60,14 @@ tensorpack.dataflow.prefetch module
     :undoc-members:
     :show-inheritance:

+tensorpack.dataflow.raw module
+------------------------------
+
+.. automodule:: tensorpack.dataflow.raw
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 Module contents
 ---------------

@@ -68,6 +68,14 @@ tensorpack.models.regularize module
     :undoc-members:
     :show-inheritance:

+tensorpack.models.softmax module
+--------------------------------
+
+.. automodule:: tensorpack.models.softmax
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 Module contents
 ---------------

@@ -6,25 +6,15 @@ Subpackages
 .. toctree::

+    tensorpack.RL
     tensorpack.callbacks
     tensorpack.dataflow
     tensorpack.models
+    tensorpack.predict
     tensorpack.tfutils
     tensorpack.train
     tensorpack.utils

-Submodules
-----------
-
-tensorpack.predict module
--------------------------
-
-.. automodule:: tensorpack.predict
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 Module contents
 ---------------

@@ -20,6 +20,14 @@ tensorpack.train.config module
     :undoc-members:
     :show-inheritance:

+tensorpack.train.multigpu module
+--------------------------------
+
+.. automodule:: tensorpack.train.multigpu
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 tensorpack.train.trainer module
 -------------------------------

@@ -12,6 +12,14 @@ tensorpack.utils.concurrency module
     :undoc-members:
     :show-inheritance:

+tensorpack.utils.discretize module
+----------------------------------
+
+.. automodule:: tensorpack.utils.discretize
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 tensorpack.utils.fs module
 --------------------------

@@ -60,6 +68,14 @@ tensorpack.utils.rect module
     :undoc-members:
     :show-inheritance:

+tensorpack.utils.serialize module
+---------------------------------
+
+.. automodule:: tensorpack.utils.serialize
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 tensorpack.utils.stat module
 ----------------------------

@@ -68,6 +84,14 @@ tensorpack.utils.stat module
     :undoc-members:
     :show-inheritance:

+tensorpack.utils.timer module
+-----------------------------
+
+.. automodule:: tensorpack.utils.timer
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 tensorpack.utils.utils module
 -----------------------------

@@ -14,8 +14,10 @@ class HistoryFramePlayer(ProxyPlayer):
     Assume player will do auto-restart.
     """
     def __init__(self, player, hist_len):
-        """ :param hist_len: total length of the state, including the current
-        and `hist_len-1` history"""
+        """
+        :param hist_len: total length of the state, including the current
+        and `hist_len-1` history
+        """
         super(HistoryFramePlayer, self).__init__(player)
         self.history = deque(maxlen=hist_len)
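
The bounded `deque(maxlen=hist_len)` above is what implements the rolling frame history: appending beyond `maxlen` silently drops the oldest entry. A plain-Python illustration (not repo code):

.. code-block:: python

    from collections import deque

    history = deque(maxlen=4)
    for frame in range(6):          # pretend these are observed frames
        history.append(frame)

    print(list(history))            # [2, 3, 4, 5] -- only the 4 most recent are kept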

@@ -265,6 +265,9 @@ class ConcatData(DataFlow):
 class JoinData(DataFlow):
     """
     Join the components from each DataFlow.
+
+    .. code-block:: none
+
         e.g.: df1: [dp1, dp2]
               df2: [dp3, dp4]
               join: [dp1, dp2, dp3, dp4]
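
In plain Python terms, the join concatenates the component lists of corresponding datapoints. A toy illustration with hypothetical data, no tensorpack needed:

.. code-block:: python

    df1 = [[1, 'a'], [2, 'b']]         # two datapoints, two components each
    df2 = [[0.1, 0.2], [0.3, 0.4]]     # another dataflow, also two components each

    joined = [x + y for x, y in zip(df1, df2)]
    print(joined)   # [[1, 'a', 0.1, 0.2], [2, 'b', 0.3, 0.4]]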

@@ -28,15 +28,9 @@ def log_once(s): logger.warn(s)
 CAFFE_ILSVRC12_URL = "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz"
 CAFFE_PROTO_URL = "https://github.com/BVLC/caffe/raw/master/src/caffe/proto/caffe.proto"

-"""
-Preprocess training set like this:
-    cd train
-    for i in *.tar; do dir=${i%.tar}; echo $dir; mkdir -p $dir; tar xf $i -C $dir; done
-"""
-
 class ILSVRCMeta(object):
     """
-    Provide metadata for ILSVRC dataset.
+    Some metadata for ILSVRC dataset.
     """
     def __init__(self, dir=None):
         if dir is None:

@@ -68,7 +62,7 @@ class ILSVRCMeta(object):
     def get_image_list(self, name):
         """
         :param name: 'train' or 'val' or 'test'
-        :returns list of (image filename, cls)
+        :returns: list of (image filename, cls)
         """
         assert name in ['train', 'val', 'test']
         fname = os.path.join(self.dir, name + '.txt')

@@ -83,7 +77,7 @@ class ILSVRCMeta(object):
     def get_per_pixel_mean(self, size=None):
         """
         :param size: return image size in [h, w]. default to (256, 256)
-        :returns per-pixel mean as an array of shape (h, w, 3) in range [0, 255]
+        :returns: per-pixel mean as an array of shape (h, w, 3) in range [0, 255]
         """
         import imp
         caffepb = imp.load_source('caffepb', self.caffe_pb_file)

@@ -101,9 +95,36 @@ class ILSVRCMeta(object):
 class ILSVRC12(DataFlow):
     def __init__(self, dir, name, meta_dir=None, shuffle=True):
         """
-        :param name: 'train' or 'val' or 'test'
-        :param dir: A directory containing a subdir named `name`, inside which the
+        :param dir: A directory containing a subdir named `name`, where the
             original ILSVRC12_`name`.tar gets decompressed.
+        :param name: 'train' or 'val' or 'test'
+
+        Dir should have the following structure:
+
+        .. code-block:: none
+
+            dir/
+              train/
+                n02134418/
+                  n02134418_198.JPEG
+                  ...
+                ...
+              val/
+                ILSVRC2012_val_00000001.JPEG
+                ...
+              test/
+                ILSVRC2012_test_00000001.JPEG
+                ...
+
+        After decompress ILSVRC12_img_train.tar, you can use the following
+        command to build the above structure for `train/`:
+
+        .. code-block:: none
+
+            find -type f | parallel -P 10 'mkdir -p {/.} && tar xf {} -C {/.}'
+
+        Or:
+
+            for i in *.tar; do dir=${i%.tar}; echo $dir; mkdir -p $dir; tar xf $i -C $dir; done
         """
         assert name in ['train', 'test', 'val']
         self.full_dir = os.path.join(dir, name)
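
A hypothetical usage sketch based only on the constructor shown above; the import path and the DataFlow iteration methods are assumptions, and the paths are placeholders:

.. code-block:: python

    from tensorpack.dataflow.dataset import ILSVRC12   # import path assumed

    ds = ILSVRC12('/path/to/ILSVRC12', 'val', shuffle=False)
    ds.reset_state()                     # DataFlow protocol: call once before iterating
    for img, label in ds.get_data():     # each datapoint assumed to be (image, label)
        print(img.shape, label)
        break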

@@ -34,6 +34,7 @@ def dump_dataset_images(ds, dirname, max_count=None, index=0):
 def dataflow_to_process_queue(ds, size, nr_consumer):
     """
     Convert a `DataFlow` to a multiprocessing.Queue.
+
     :param ds: a `DataFlow`
     :param size: size of the queue
     :param nr_consumer: number of consumer of the queue.
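
For reference, the general producer/consumer pattern this helper wraps, as a self-contained sketch (not the repo implementation): one process fills a bounded queue and posts one end-of-stream marker per consumer.

.. code-block:: python

    import multiprocessing

    def producer(queue, nr_consumer):
        for idx, dp in enumerate(range(5)):     # stand-in for enumerate(ds.get_data())
            queue.put((idx, dp))
        for _ in range(nr_consumer):
            queue.put((None, None))             # one sentinel per consumer

    if __name__ == '__main__':
        q = multiprocessing.Queue(maxsize=3)
        multiprocessing.Process(target=producer, args=(q, 1)).start()
        while True:
            idx, dp = q.get()
            if idx is None:                     # sentinel: producer is done
                break
            print(idx, dp)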

@@ -70,9 +70,10 @@ def layer_register(summary_activation=False, log_shape=True):
         wrapped_func.f = func # attribute to access the underlining function object
         return wrapped_func

-    # need some special handling for RTD to work with the arguments
-    on_rtd = os.environ.get('READTHEDOCS') == 'True'
-    if on_rtd:
+    # need some special handling for sphinx to work with the arguments
+    on_doc = os.environ.get('READTHEDOCS') == 'True' \
+            or os.environ.get('TENSORPACK_DOC_BUILDING')
+    if on_doc:
         from decorator import decorator
         wrapper = decorator(wrapper)
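
Why the doc build needs this branch: a plain `*args, **kwargs` wrapper hides the real argument list from autodoc, while the `decorator` package produces a wrapper with the original signature. A standalone sketch (names illustrative, assumes `pip install decorator`):

.. code-block:: python

    import inspect
    from decorator import decorator

    def plain_wrap(func):
        def wrapped(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapped

    @decorator
    def doc_wrap(func, *args, **kwargs):
        return func(*args, **kwargs)

    def Conv2D(x, out_channel, kernel_shape, stride=1):
        pass

    print(inspect.signature(plain_wrap(Conv2D)))   # (*args, **kwargs)
    print(inspect.signature(doc_wrap(Conv2D)))     # (x, out_channel, kernel_shape, stride=1)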

@@ -60,4 +60,3 @@ def Conv2D(x, out_channel, kernel_shape,
         conv = tf.concat(3, outputs)
     return nl(tf.nn.bias_add(conv, b) if use_bias else conv, name='output')
-

@@ -48,12 +48,9 @@ class ModelDesc(object):
     def build_graph(self, model_inputs, is_training):
         """
-        setup the whole graph.
-        :param model_inputs: a list of input variable in the graph
-        e.g.: [image_var, label_var] with:
-            * image_var: bx28x28
-            * label_var: bx1 integer
+        Setup the whole graph.
+
+        :param model_inputs: a list of input variable in the graph.
         :param is_training: a boolean
         :returns: the cost to minimize. a scalar variable
         """

@@ -66,6 +66,7 @@ class PredictConfig(object):
 def get_predict_func(config):
     """
     Produce a simple predictor function run inside a new session.
+
     :param config: a `PredictConfig` instance.
     :returns: A prediction function that takes a list of input values, and return
         a list of output values defined in ``config.output_var_names``.
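
A rough call pattern for the function documented above. The PredictConfig fields used here are assumptions not shown in this hunk; treat them as placeholders:

.. code-block:: python

    import numpy as np
    from tensorpack.predict import PredictConfig, get_predict_func   # import path assumed

    config = PredictConfig(
        model=MyModel(),                   # a ModelDesc subclass (hypothetical)
        output_var_names=['output'])       # placeholder variable name
    predict = get_predict_func(config)

    images = np.zeros((4, 224, 224, 3), dtype='float32')   # dummy input batch
    outputs = predict([images])            # list of inputs in, list of outputs out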

@@ -66,9 +66,10 @@ class MultiProcessDatasetPredictor(DatasetPredictorBase):
     def __init__(self, config, dataset, nr_proc, use_gpu=True):
         """
         Run prediction in multiprocesses, on either CPU or GPU. Mix mode not supported.
+
         :param nr_proc: number of processes to use
         :param use_gpu: use GPU or CPU.
-            if GPU, then nr_proc cannot be larger than the total number of GPUs available
+            If GPU, then nr_proc cannot be larger than the total number of GPUs available
             in CUDA_VISIBLE_DEVICES or in the system.
         """
         assert config.return_input == False, "return_input not supported for MultiProcessDatasetPredictor"

@@ -34,9 +34,9 @@ def argscope(layers, **param):
         del _ArgScopeStack[-1]

 def get_arg_scope():
-    """ return the current argscope
-    an argscope is a dict of dict:
-        dict[layername] = {arg: val}
+    """
+    :returns: the current argscope.
+        An argscope is a dict of dict: dict[layername] = {arg: val}
     """
     if len(_ArgScopeStack) > 0:
         return _ArgScopeStack[-1]
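
A short usage sketch of the argscope stack documented above; import paths and argument names are assumptions:

.. code-block:: python

    from tensorpack.models import Conv2D                              # import paths assumed
    from tensorpack.tfutils.argscope import argscope, get_arg_scope

    with argscope(Conv2D, kernel_shape=3, use_bias=False):
        print(get_arg_scope())   # roughly {'Conv2D': {'kernel_shape': 3, 'use_bias': False}}
    print(get_arg_scope())       # empty again once the scope is exited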