Commit 6c4f2351, authored Aug 11, 2017 by Yuxin Wu
parent 61305080

Show optional scope name in API document (#379)

Changes: 10 changed files with 34 additions and 24 deletions (+34 / -24)
docs/conf.py                       +3   -1
examples/cifar-convnet.py          +1   -1
tensorpack/models/batch_norm.py    +2   -2
tensorpack/models/common.py        +13  -5
tensorpack/models/conv2d.py        +2   -2
tensorpack/models/fc.py            +1   -1
tensorpack/models/layer_norm.py    +2   -2
tensorpack/models/nonlin.py        +4   -4
tensorpack/models/pool.py          +5   -5
tensorpack/models/softmax.py       +1   -1
docs/conf.py  (+3 / -1)

@@ -345,7 +345,9 @@ def process_signature(app, what, name, obj, options, signature,
     # add scope name to layer signatures:
     if hasattr(obj, 'use_scope') and hasattr(obj, 'symbolic_function'):
         if obj.use_scope:
-            signature = signature[0] + 'name, ' + signature[1:]
+            signature = signature[0] + 'scope_name, ' + signature[1:]
+        elif obj.use_scope is None:
+            signature = signature[0] + '[scope_name,] ' + signature[1:]
     # signature: arg list
     return signature, return_annotation
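For context: process_signature is a handler for Sphinx's autodoc-process-signature event, which lets conf.py rewrite the signature string that autodoc renders for each documented object. A minimal sketch of the surrounding plumbing, assuming a standard Sphinx setup (the setup() wiring below is illustrative, this repo's conf.py may connect the handler elsewhere, and the "if signature" guard is an added assumption since autodoc can pass None):

    # sketch of the conf.py plumbing around process_signature
    def process_signature(app, what, name, obj, options, signature,
                          return_annotation):
        # signature is a string such as "(x, out_channel, kernel_shape, ...)";
        # signature[0] is the opening parenthesis, so splicing after it
        # prepends the scope-name argument.
        if signature and hasattr(obj, 'use_scope') and hasattr(obj, 'symbolic_function'):
            if obj.use_scope:
                signature = signature[0] + 'scope_name, ' + signature[1:]
            elif obj.use_scope is None:
                signature = signature[0] + '[scope_name,] ' + signature[1:]
        return signature, return_annotation

    def setup(app):
        # Sphinx invokes the handler once per documented object.
        app.connect('autodoc-process-signature', process_signature)

With this, a use_scope=None layer such as SoftMax below should render as SoftMax([scope_name,] x, use_temperature=False, temperature_init=1.0), while a use_scope=True layer renders with a mandatory scope_name.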
examples/cifar-convnet.py  (+1 / -1)

@@ -135,7 +135,7 @@ def get_config(cifar_classnum):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.', required=True)
     parser.add_argument('--load', help='load model')
     parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',
                         type=int, default=10)
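Besides the documentation work, this hunk makes --gpu mandatory for the CIFAR example. A quick standalone sketch of what required=True changes; this is stock argparse behavior, nothing repo-specific:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.',
                        required=True)

    # Supplying the flag works as before:
    print(parser.parse_args(['--gpu', '0,1']).gpu)   # prints: 0,1

    # Omitting it now aborts with exit code 2 and the message
    # "error: the following arguments are required: --gpu":
    # parser.parse_args([])   # raises SystemExit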
tensorpack/models/batch_norm.py  (+2 / -2)

@@ -65,7 +65,7 @@ def reshape_for_bn(param, ndims, chan, data_format):
     return tf.reshape(param, shape)


-@layer_register(log_shape=False)
+@layer_register()
 def BatchNorm(x, use_local_stat=None, decay=0.9, epsilon=1e-5,
               use_scale=True, use_bias=True,
               gamma_init=tf.constant_initializer(1.0), data_format='NHWC'):

@@ -157,7 +157,7 @@ def BatchNorm(x, use_local_stat=None, decay=0.9, epsilon=1e-5,
     return ret


-@layer_register(log_shape=False)
+@layer_register()
 def BatchRenorm(x, rmax, dmax, decay=0.9, epsilon=1e-5,
                 use_scale=True, use_bias=True, data_format='NHWC'):
     """
tensorpack/models/common.py  (+13 / -5)

@@ -83,16 +83,19 @@ def disable_layer_logging():
 def layer_register(
-        log_shape=True,
+        log_shape=False,
         use_scope=True):
     """
     Register a layer.

     Args:
         log_shape (bool): log input/output shape of this layer
-        use_scope (bool): whether to call this layer with an extra first argument as scope.
-            If set to False, will try to figure out whether the first argument
-            is scope name or not.
+        use_scope (bool or None):
+            Whether to call this layer with an extra first argument as scope.
+            When set to None, it can be called either with or without
+            the scope name argument.
+            It will try to figure out by checking if the first argument
+            is string or not.
     """
     def wrapper(func):

@@ -103,7 +106,12 @@ def layer_register(
             name, inputs = args[0], args[1]
             args = args[1:]  # actual positional args used to call func
             assert isinstance(name, six.string_types), name
-        else:
+        elif use_scope is False:
             assert not log_shape
+            inputs = args[0]
+            name = None
+            assert not isinstance(args[0], six.string_types), name
+        else:   # use_scope is None
+            assert not log_shape
             if isinstance(args[0], six.string_types):
                 name, inputs = args[0], args[1]
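Taken together, layer_register now distinguishes three use_scope modes. A minimal sketch of what each mode means at the call site, assuming tensorpack at this commit and TF 1.x (tensor shapes and scope names below are made up for illustration):

    import tensorflow as tf
    from tensorpack.models import Conv2D, Maxout   # layers touched by this diff

    x = tf.placeholder(tf.float32, [None, 32, 32, 3])

    # use_scope=True (e.g. Conv2D): the variable-scope name is a mandatory
    # first argument, and the wrapper asserts it is a string.
    y = Conv2D('conv0', x, out_channel=32, kernel_shape=3)

    # use_scope=None (e.g. Maxout after this commit): the scope name is
    # optional; the wrapper checks whether the first positional argument
    # is a string and dispatches accordingly.
    m1 = Maxout('maxout0', y, 2)   # scoped call
    m2 = Maxout(y, 2)              # plain symbolic call, no extra scope

    # use_scope=False: the layer never takes a scope name; the new
    # "assert not isinstance(args[0], six.string_types)" rejects one.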
tensorpack/models/conv2d.py  (+2 / -2)

@@ -11,7 +11,7 @@ from .shape_utils import StaticDynamicAxis
 __all__ = ['Conv2D', 'Deconv2D']


-@layer_register()
+@layer_register(log_shape=True)
 def Conv2D(x, out_channel, kernel_shape,
            padding='SAME', stride=1,
            W_init=None, b_init=None,

@@ -79,7 +79,7 @@ def Conv2D(x, out_channel, kernel_shape,
     return ret


-@layer_register()
+@layer_register(log_shape=True)
 def Deconv2D(x, out_shape, kernel_shape,
              stride, padding='SAME',
              W_init=None, b_init=None,
tensorpack/models/fc.py  (+1 / -1)

@@ -11,7 +11,7 @@ from ..tfutils import symbolic_functions as symbf
 __all__ = ['FullyConnected']


-@layer_register()
+@layer_register(log_shape=True)
 def FullyConnected(x, out_dim,
                    W_init=None, b_init=None,
                    nl=tf.identity, use_bias=True):
tensorpack/models/layer_norm.py  (+2 / -2)

@@ -9,7 +9,7 @@ from .common import layer_register
 __all__ = ['LayerNorm', 'InstanceNorm']


-@layer_register(log_shape=False)
+@layer_register()
 def LayerNorm(x, epsilon=1e-5, use_bias=True, use_scale=True, data_format='NHWC'):
     """
     Layer Normalization layer, as described in the paper:

@@ -49,7 +49,7 @@ def LayerNorm(x, epsilon=1e-5, use_bias=True, use_scale=True, data_format='NHWC'
     return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')


-@layer_register(log_shape=False)
+@layer_register()
 def InstanceNorm(x, epsilon=1e-5, data_format='NHWC', use_affine=True):
     """
     Instance Normalization, as in the paper:
tensorpack/models/nonlin.py  (+4 / -4)

@@ -11,7 +11,7 @@ from .batch_norm import BatchNorm
 __all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU']


-@layer_register(use_scope=False)
+@layer_register(use_scope=None)
 def Maxout(x, num_unit):
     """
     Maxout as in the paper `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.

@@ -35,7 +35,7 @@ def Maxout(x, num_unit):
     return tf.reduce_max(x, ndim, name='output')


-@layer_register(log_shape=False)
+@layer_register()
 def PReLU(x, init=0.001, name='output'):
     """
     Parameterized ReLU as in the paper `Delving Deep into Rectifiers: Surpassing

@@ -60,7 +60,7 @@ def PReLU(x, init=0.001, name='output'):
     return ret


-@layer_register(use_scope=False, log_shape=False)
+@layer_register(use_scope=None)
 def LeakyReLU(x, alpha, name='output'):
     """
     Leaky ReLU as in paper `Rectifier Nonlinearities Improve Neural Network Acoustic

@@ -74,7 +74,7 @@ def LeakyReLU(x, alpha, name='output'):
     return tf.maximum(x, alpha * x, name=name)


-@layer_register(log_shape=False, use_scope=False)
+@layer_register(use_scope=None)
 def BNReLU(x, name=None):
     """
     A shorthand of BatchNormalization + ReLU.
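Note that per the old docstring, use_scope=False already auto-detected a leading scope-name string, so for these activations the switch to use_scope=None mostly renames that behavior (strict use_scope=False now rejects a string first argument) and lets the generated docs show the optional [scope_name,]. A short sketch under the same assumptions as above:

    import tensorflow as tf
    from tensorpack.models import LeakyReLU

    x = tf.placeholder(tf.float32, [None, 64])

    # Both call styles are accepted for a use_scope=None layer:
    a = LeakyReLU(x, alpha=0.1)            # no scope argument
    b = LeakyReLU('act1', x, alpha=0.1)    # wrapped in variable scope 'act1'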
tensorpack/models/pool.py  (+5 / -5)

@@ -29,7 +29,7 @@ def _Pooling(func, x, shape, stride, padding, data_format):
                 name='output')


-@layer_register()
+@layer_register(log_shape=True)
 def MaxPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
     """
     Max Pooling on 4D tensors.

@@ -47,7 +47,7 @@ def MaxPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
                     data_format=data_format)


-@layer_register()
+@layer_register(log_shape=True)
 def AvgPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
     """
     Average Pooling on 4D tensors.

@@ -65,7 +65,7 @@ def AvgPooling(x, shape, stride=None, padding='VALID', data_format='NHWC'):
                     data_format=data_format)


-@layer_register()
+@layer_register(log_shape=True)
 def GlobalAvgPooling(x, data_format='NHWC'):
     """
     Global average pooling as in the paper `Network In Network

@@ -97,7 +97,7 @@ def UnPooling2x2ZeroFilled(x):
     return ret


-@layer_register()
+@layer_register(log_shape=True)
 def FixedUnPooling(x, shape, unpool_mat=None, data_format='NHWC'):
     """
     Unpool the input with a fixed matrix to perform kronecker product with.

@@ -149,7 +149,7 @@ def FixedUnPooling(x, shape, unpool_mat=None, data_format='NHWC'):
     return ret


-@layer_register()
+@layer_register(log_shape=True)
 def BilinearUpSample(x, shape):
     """
     Deterministic bilinearly-upsample the input images.
tensorpack/models/softmax.py  (+1 / -1)

@@ -8,7 +8,7 @@ from .common import layer_register
 __all__ = ['SoftMax']


-@layer_register()
+@layer_register(use_scope=None)
 def SoftMax(x, use_temperature=False, temperature_init=1.0):
     """
     A SoftMax layer (w/o linear projection) with optional temperature, as