Commit 6c4f2351 authored by Yuxin Wu

Show optional scope name in API document (#379)

parent 61305080
@@ -345,7 +345,9 @@ def process_signature(app, what, name, obj, options, signature,
# add scope name to layer signatures:
if hasattr(obj, 'use_scope') and hasattr(obj, 'symbolic_function'):
if obj.use_scope:
-signature = signature[0] + 'name, ' + signature[1:]
+signature = signature[0] + 'scope_name, ' + signature[1:]
+elif obj.use_scope is None:
+signature = signature[0] + '[scope_name,] ' + signature[1:]
# signature: arg list
return signature, return_annotation
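As an illustration of the hook above (a minimal sketch, not part of the commit): Sphinx hands the rendered argument list to the hook as a string that starts with '(', so the hook splices a scope-name placeholder right after the opening parenthesis. The signature string below is only an assumed example.

# Hypothetical sketch of the splice performed by process_signature above;
# 'sig' stands in for the signature string Sphinx passes to the hook.
sig = '(x, num_unit)'
print(sig[0] + 'scope_name, ' + sig[1:])    # use_scope is True -> (scope_name, x, num_unit)
print(sig[0] + '[scope_name,] ' + sig[1:])  # use_scope is None -> ([scope_name,] x, num_unit)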
@@ -135,7 +135,7 @@ def get_config(cifar_classnum):
if __name__ == '__main__':
parser = argparse.ArgumentParser()
-parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
+parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.', required=True)
parser.add_argument('--load', help='load model')
parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',
type=int, default=10)
@@ -65,7 +65,7 @@ def reshape_for_bn(param, ndims, chan, data_format):
return tf.reshape(param, shape)
-@layer_register(log_shape=False)
+@layer_register()
def BatchNorm(x, use_local_stat=None, decay=0.9, epsilon=1e-5,
use_scale=True, use_bias=True,
gamma_init=tf.constant_initializer(1.0), data_format='NHWC'):
@@ -157,7 +157,7 @@ def BatchNorm(x, use_local_stat=None, decay=0.9, epsilon=1e-5,
return ret
-@layer_register(log_shape=False)
+@layer_register()
def BatchRenorm(x, rmax, dmax, decay=0.9, epsilon=1e-5,
use_scale=True, use_bias=True, data_format='NHWC'):
"""
@@ -83,16 +83,19 @@ def disable_layer_logging():
def layer_register(
-log_shape=True,
+log_shape=False,
use_scope=True):
"""
Register a layer.
Args:
log_shape (bool): log input/output shape of this layer
-use_scope (bool): whether to call this layer with an extra first argument as scope.
-If set to False, will try to figure out whether the first argument
-is scope name or not.
+use_scope (bool or None):
+Whether to call this layer with an extra first argument as scope.
+When set to None, the layer can be called either with or without
+the scope name argument; it will figure this out by checking
+whether the first argument is a string.
"""
def wrapper(func):
@@ -103,7 +106,12 @@ def layer_register(
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
assert isinstance(name, six.string_types), name
-else:
+elif use_scope is False:
assert not log_shape
inputs = args[0]
name = None
+assert not isinstance(args[0], six.string_types), name
+else: # use_scope is None
+assert not log_shape
+if isinstance(args[0], six.string_types):
+name, inputs = args[0], args[1]
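A brief usage sketch of the resulting `use_scope` modes (not part of the commit; the layer choices, shapes, and variable names below are only assumed examples): with `use_scope=None`, the wrapper treats the first positional argument as the scope name only when it is a string, so such layers can be called either way.

# Hypothetical calls, assuming Maxout is registered with use_scope=None
# (as in this commit) and Conv2D keeps the default use_scope=True.
import tensorflow as tf
from tensorpack import Conv2D, Maxout

x = tf.placeholder(tf.float32, [None, 32, 32, 8])
y = Conv2D('conv0', x, out_channel=16, kernel_shape=3)  # scope name is mandatory
z1 = Maxout('maxout0', y, 2)  # first argument is a string -> used as the scope name
z2 = Maxout(y, 2)             # no string first argument   -> no scope is opened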
@@ -11,7 +11,7 @@ from .shape_utils import StaticDynamicAxis
__all__ = ['Conv2D', 'Deconv2D']
-@layer_register()
+@layer_register(log_shape=True)
def Conv2D(x, out_channel, kernel_shape,
padding='SAME', stride=1,
W_init=None, b_init=None,
@@ -79,7 +79,7 @@ def Conv2D(x, out_channel, kernel_shape,
return ret
-@layer_register()
+@layer_register(log_shape=True)
def Deconv2D(x, out_shape, kernel_shape,
stride, padding='SAME',
W_init=None, b_init=None,
@@ -11,7 +11,7 @@ from ..tfutils import symbolic_functions as symbf
__all__ = ['FullyConnected']
-@layer_register()
+@layer_register(log_shape=True)
def FullyConnected(x, out_dim,
W_init=None, b_init=None,
nl=tf.identity, use_bias=True):
@@ -9,7 +9,7 @@ from .common import layer_register
__all__ = ['LayerNorm', 'InstanceNorm']
-@layer_register(log_shape=False)
+@layer_register()
def LayerNorm(x, epsilon=1e-5, use_bias=True, use_scale=True, data_format='NHWC'):
"""
Layer Normalization layer, as described in the paper:
@@ -49,7 +49,7 @@ def LayerNorm(x, epsilon=1e-5, use_bias=True, use_scale=True, data_format='NHWC'
return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
-@layer_register(log_shape=False)
+@layer_register()
def InstanceNorm(x, epsilon=1e-5, data_format='NHWC', use_affine=True):
"""
Instance Normalization, as in the paper:
@@ -11,7 +11,7 @@ from .batch_norm import BatchNorm
__all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU']
-@layer_register(use_scope=False)
+@layer_register(use_scope=None)
def Maxout(x, num_unit):
"""
Maxout as in the paper `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.
@@ -35,7 +35,7 @@ def Maxout(x, num_unit):
return tf.reduce_max(x, ndim, name='output')
-@layer_register(log_shape=False)
+@layer_register()
def PReLU(x, init=0.001, name='output'):
"""
Parameterized ReLU as in the paper `Delving Deep into Rectifiers: Surpassing
@@ -60,7 +60,7 @@ def PReLU(x, init=0.001, name='output'):
return ret
-@layer_register(use_scope=False, log_shape=False)
+@layer_register(use_scope=None)
def LeakyReLU(x, alpha, name='output'):
"""
Leaky ReLU as in paper `Rectifier Nonlinearities Improve Neural Network Acoustic
@@ -74,7 +74,7 @@ def LeakyReLU(x, alpha, name='output'):
return tf.maximum(x, alpha * x, name=name)
-@layer_register(log_shape=False, use_scope=False)
+@layer_register(use_scope=None)
def BNReLU(x, name=None):
"""
A shorthand of BatchNormalization + ReLU.
@@ -29,7 +29,7 @@ def _Pooling(func, x, shape, stride, padding, data_format):
name='output')
-@layer_register()
+@layer_register(log_shape=True)
def MaxPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
"""
Max Pooling on 4D tensors.
@@ -47,7 +47,7 @@ def MaxPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
data_format=data_format)
-@layer_register()
+@layer_register(log_shape=True)
def AvgPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
"""
Average Pooling on 4D tensors.
@@ -65,7 +65,7 @@ def AvgPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
data_format=data_format)
-@layer_register()
+@layer_register(log_shape=True)
def GlobalAvgPooling(x, data_format='NHWC'):
"""
Global average pooling as in the paper `Network In Network
@@ -97,7 +97,7 @@ def UnPooling2x2ZeroFilled(x):
return ret
-@layer_register()
+@layer_register(log_shape=True)
def FixedUnPooling(x, shape, unpool_mat=None, data_format='NHWC'):
"""
Unpool the input with a fixed matrix to perform kronecker product with.
@@ -149,7 +149,7 @@ def FixedUnPooling(x, shape, unpool_mat=None, data_format='NHWC'):
return ret
-@layer_register()
+@layer_register(log_shape=True)
def BilinearUpSample(x, shape):
"""
Deterministic bilinearly-upsample the input images.
@@ -8,7 +8,7 @@ from .common import layer_register
__all__ = ['SoftMax']
-@layer_register()
+@layer_register(use_scope=None)
def SoftMax(x, use_temperature=False, temperature_init=1.0):
"""
A SoftMax layer (w/o linear projection) with optional temperature, as