Shashank Suhas / seminar-breakout / Commits

Commit faa6f17c, authored Aug 31, 2016 by Yuxin Wu
change name for .print

parent fc9e45b0

Showing 10 changed files with 69 additions and 45 deletions (+69 -45)
examples/DisturbLabel/mnist-disturb.py     +1 -3
examples/DoReFa-Net/alexnet-dorefa.py      +2 -2
examples/DoReFa-Net/svhn-digit-dorefa.py   +1 -1
examples/char-rnn/char-rnn.py              +1 -1
examples/load-alexnet.py                   +1 -3
examples/load-vgg16.py                     +6 -7
tensorpack/models/__init__.py              +9 -4
tensorpack/models/_common.py               +37 -22
tensorpack/models/nonlin.py                +1 -1
tensorpack/models/regularize.py            +10 -1
examples/DisturbLabel/mnist-disturb.py

@@ -25,9 +25,7 @@ mnist_example.get_data = get_data
 IMAGE_SIZE = 28

 class Model(mnist_example.Model):
-    def _build_graph(self, input_vars, is_training):
-        is_training = bool(is_training)
-        keep_prob = tf.constant(0.5 if is_training else 1.0)
+    def _build_graph(self, input_vars):
         image, label = input_vars
         image = tf.expand_dims(image, 3)    # add a single channel
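The deleted keep_prob plumbing is not gone; it moves into the new Dropout layer this commit adds in tensorpack/models/regularize.py (the last file below), which reads the training flag from the tower context instead of a _build_graph argument. A minimal sketch of the migrated idiom; the scope name 'drop' and the build_features helper are invented for illustration:

    # Sketch only: dropout without an is_training argument. The Dropout
    # layer (added in regularize.py below) resolves the training flag via
    # get_current_tower_context().is_training at graph-construction time.
    def _build_graph(self, input_vars):
        image, label = input_vars
        image = tf.expand_dims(image, 3)      # add a single channel
        feat = build_features(image)          # hypothetical preceding layers
        feat = Dropout('drop', feat, 0.5)     # keep_prob 0.5 in training, 1.0 at inference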
examples/DoReFa-Net/alexnet-dorefa.py

@@ -72,7 +72,7 @@ class Model(ModelDesc):
         return [InputVar(tf.float32, [None, 224, 224, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]

-    def _build_graph(self, input_vars, is_training):
+    def _build_graph(self, input_vars):
         image, label = input_vars
         image = image / 255.0

@@ -97,7 +97,7 @@ class Model(ModelDesc):
         def activate(x):
             return fa(nonlin(x))

-        with argscope(BatchNorm, decay=0.9, epsilon=1e-4, use_local_stat=is_training), \
+        with argscope(BatchNorm, decay=0.9, epsilon=1e-4), \
                 argscope([Conv2D, FullyConnected], use_bias=False, nl=tf.identity):
             logits = (LinearWrap(image)
                       .Conv2D('conv0', 96, 12, stride=4, padding='VALID')
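Dropping use_local_stat=is_training follows the same pattern: with no is_training in scope, BatchNorm presumably derives its train/inference behavior from the tower context, as the new Dropout layer in regularize.py does. That fallback is an assumption; this commit only removes the flag. The sketch below records the resulting call shape:

    # Assumption: with the flag omitted, BatchNorm decides use_local_stat
    # from the tower context, mirroring Dropout in regularize.py below.
    with argscope(BatchNorm, decay=0.9, epsilon=1e-4), \
            argscope([Conv2D, FullyConnected], use_bias=False, nl=tf.identity):
        logits = LinearWrap(image).Conv2D('conv0', 96, 12, stride=4, padding='VALID')()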
examples/DoReFa-Net/svhn-digit-dorefa.py

@@ -69,7 +69,7 @@ class Model(ModelDesc):
         image = image / 256.0

-        with argscope(BatchNorm, decay=0.9, epsilon=1e-4, use_local_stat=is_training), \
+        with argscope(BatchNorm, decay=0.9, epsilon=1e-4), \
                 argscope(Conv2D, use_bias=False, nl=tf.identity):
             logits = (LinearWrap(image)
                       .Conv2D('conv0', 48, 5, padding='VALID', use_bias=True)
examples/char-rnn/char-rnn.py

@@ -74,7 +74,7 @@ class Model(ModelDesc):
         return [InputVar(tf.int32, (None, param.seq_len), 'input'),
                 InputVar(tf.int32, (None, param.seq_len), 'nextinput')]

-    def _build_graph(self, input_vars, is_training):
+    def _build_graph(self, input_vars):
         input, nextinput = input_vars
         cell = rnn_cell.BasicLSTMCell(num_units=param.rnn_size)
examples/load-alexnet.py

@@ -28,10 +28,8 @@ class Model(ModelDesc):
         return [InputVar(tf.float32, (None, 227, 227, 3), 'input'),
                 InputVar(tf.int32, (None,), 'label')]

-    def _build_graph(self, inputs, is_training):
+    def _build_graph(self, inputs):
         # img: 227x227x3
-        is_training = bool(is_training)
-        keep_prob = tf.constant(0.5 if is_training else 1.0)
         image, label = inputs
examples/load-vgg16.py

@@ -8,7 +8,7 @@ import tensorflow as tf
 import numpy as np
 import os
 import argparse
-import cPickle as pkl
+import pickle as pkl
 from tensorpack.train import TrainConfig
 from tensorpack.predict import PredictConfig, get_predict_func

@@ -33,8 +33,6 @@ class Model(ModelDesc):
                 InputVar(tf.int32, (None,), 'label')]

     def _build_graph(self, inputs, is_training):
-        is_training = bool(is_training)
-        keep_prob = tf.constant(0.5 if is_training else 1.0)
         image, label = inputs

@@ -65,9 +63,10 @@ class Model(ModelDesc):
                       .MaxPooling('pool5', 2)
                       # 7
                       .FullyConnected('fc6', 4096)
-                      .tf.nn.dropout(keep_prob)
+                      .Dropout('drop0', 0.5)
+                      .print_tensor()
                       .FullyConnected('fc7', 4096)
-                      .tf.nn.dropout(keep_prob)
+                      .Dropout('drop1', 0.5)
                       .FullyConnected('fc8', out_dim=1000, nl=tf.identity)())
         prob = tf.nn.softmax(logits, name='output')

@@ -93,10 +92,10 @@ def run_test(path, input):
     outputs = predict_func([im])[0]
     prob = outputs[0]
     ret = prob.argsort()[-10:][::-1]
-    print ret
+    print(ret)
     meta = ILSVRCMeta().get_synset_words_1000()
-    print [meta[k] for k in ret]
+    print([meta[k] for k in ret])

 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
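This file shows the rename that gives the commit its title: inside a LinearWrap chain, .print() becomes .print_tensor(), a pass-through that prints the intermediate tensor and keeps chaining. A hedged sketch of the idiom in isolation; the layer names 'fc_a', 'drop_a', 'fc_b' are invented:

    # Pass-through debugging inside a LinearWrap chain; hypothetical names.
    logits = (LinearWrap(image)
              .FullyConnected('fc_a', 4096)
              .print_tensor()              # prints the tensor, returns the chain
              .Dropout('drop_a', 0.5)      # keep_prob handled by the tower context
              .FullyConnected('fc_b', out_dim=1000, nl=tf.identity)())   # trailing () unwraps to the tensor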
tensorpack/models/__init__.py

@@ -49,9 +49,14 @@ class LinearWrap(object):
             layer = eval(layer_name)
             if hasattr(layer, 'f'):
                 # this is a registered tensorpack layer
-                def f(name, *args, **kwargs):
-                    ret = layer(name, self._t, *args, **kwargs)
-                    return LinearWrap(ret)
+                if layer.use_scope:
+                    def f(name, *args, **kwargs):
+                        ret = layer(name, self._t, *args, **kwargs)
+                        return LinearWrap(ret)
+                else:
+                    def f(*args, **kwargs):
+                        ret = layer(self._t, *args, **kwargs)
+                        return LinearWrap(ret)
                 return f
             else:
                 if layer_name != 'tf':

@@ -70,7 +75,7 @@ class LinearWrap(object):
     def tensor(self):
         return self._t

-    def print(self):
+    def print_tensor(self):
         print(self._t)
         return self
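After this change, how a chained call is forwarded depends on the layer's registration: use_scope=True layers (the default) expect a scope name as the first argument, and LinearWrap slips the wrapped tensor in as the second; use_scope=False layers get the tensor directly. A small sketch of the two call forms; ScaleBy is a hypothetical scope-less layer, sketched after _common.py below:

    w = LinearWrap(image)
    # use_scope=True layer: name first, wrapped tensor injected second
    w = w.Conv2D('conv_demo', 64, 3)    # -> Conv2D('conv_demo', <tensor>, 64, 3)
    # use_scope=False layer (hypothetical): tensor injected first, no name
    w = w.ScaleBy(2.0)                  # -> ScaleBy(<tensor>, 2.0)
    # renamed debug helper: prints the underlying tensor, returns self
    w = w.print_tensor()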
tensorpack/models/_common.py

@@ -22,7 +22,10 @@ def disable_layer_logging():
         # can use nonlocal in python3, but how
     globals()['_layer_logged'] = ContainEverything()

-def layer_register(summary_activation=False, log_shape=True):
+def layer_register(
+        summary_activation=False,
+        log_shape=True,
+        use_scope=True):
     """
     Register a layer.
     :param summary_activation: Define the default behavior of whether to

@@ -33,40 +36,52 @@ def layer_register(summary_activation=False, log_shape=True):
     def wrapper(func):
         @wraps(func)
-        def wrapped_func(name, inputs, *args, **kwargs):
-            assert isinstance(name, six.string_types), name
+        def wrapped_func(*args, **kwargs):
+            if use_scope:
+                name, inputs = args[0], args[1]
+                args = args[1:]  # actual positional args used to call func
+                assert isinstance(name, six.string_types), name
+            else:
+                assert not log_shape and not summary_activation
+                inputs = args[0]
+                name = None
             do_summary = kwargs.pop(
                 'summary_activation', summary_activation)
-            args = (inputs,) + args
             # TODO use inspect.getcallargs to enhance?
             # update from current argument scope
             actual_args = copy.copy(get_arg_scope()[func.__name__])
             actual_args.update(kwargs)
-            with tf.variable_scope(name) as scope:
-                do_log_shape = log_shape and scope.name not in _layer_logged
-                do_summary = do_summary and scope.name not in _layer_logged
-                if do_log_shape:
-                    logger.info("{} input: {}".format(scope.name, get_shape_str(inputs)))
-                # run the actual function
-                outputs = func(*args, **actual_args)
-                if do_log_shape:
-                    # log shape info and add activation
-                    logger.info("{} output: {}".format(
-                        scope.name, get_shape_str(outputs)))
-                    _layer_logged.add(scope.name)
-                if do_summary:
-                    if isinstance(outputs, list):
-                        for x in outputs:
-                            add_activation_summary(x, scope.name)
-                    else:
-                        add_activation_summary(outputs, scope.name)
+            if name is not None:
+                with tf.variable_scope(name) as scope:
+                    do_log_shape = log_shape and scope.name not in _layer_logged
+                    do_summary = do_summary and scope.name not in _layer_logged
+                    if do_log_shape:
+                        logger.info("{} input: {}".format(scope.name, get_shape_str(inputs)))
+                    # run the actual function
+                    outputs = func(*args, **actual_args)
+                    if do_log_shape:
+                        # log shape info and add activation
+                        logger.info("{} output: {}".format(
+                            scope.name, get_shape_str(outputs)))
+                        _layer_logged.add(scope.name)
+                    if do_summary:
+                        if isinstance(outputs, list):
+                            for x in outputs:
+                                add_activation_summary(x, scope.name)
+                        else:
+                            add_activation_summary(outputs, scope.name)
+            else:
+                # run the actual function
+                outputs = func(*args, **actual_args)
             return outputs
         wrapped_func.f = func  # attribute to access the underlining function object
+        wrapped_func.use_scope = use_scope
         return wrapped_func

 # need some special handling for sphinx to work with the arguments
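The new use_scope=False mode is for layers that create no variables and so need no variable scope; the assert makes them give up shape logging and activation summaries. A minimal sketch of registering and calling one; the ScaleBy layer is invented for illustration:

    # Hypothetical scope-less layer using the registration mode added above.
    # log_shape and summary_activation must be disabled, per the assert.
    @layer_register(use_scope=False, log_shape=False, summary_activation=False)
    def ScaleBy(x, factor):
        # element-wise op that creates no variables, so no scope is needed
        return x * factor

    y = ScaleBy(image, 2.0)    # called without a name; the input tensor comes first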
tensorpack/models/nonlin.py

@@ -11,7 +11,7 @@ from .batch_norm import BatchNorm
 __all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU']

-@layer_register(log_shape=False)
+@layer_register()
 def Maxout(x, num_unit):
     """
     Maxout as in `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.
tensorpack/models/regularize.py

@@ -7,8 +7,10 @@ import re
 from ..utils import logger
 from ..utils.utils import *
+from .model_desc import get_current_tower_context
+from ._common import layer_register

-__all__ = ['regularize_cost', 'l2_regularizer', 'l1_regularizer']
+__all__ = ['regularize_cost', 'l2_regularizer', 'l1_regularizer', 'Dropout']

 @memoized
 def _log_regularizer(name):

@@ -36,3 +38,10 @@ def regularize_cost(regex, func, name=None):
         return 0
     return tf.add_n(costs, name=name)
+
+@layer_register(log_shape=False)
+def Dropout(x, prob=0.5):
+    is_training = get_current_tower_context().is_training
+    keep_prob = tf.constant(prob if is_training else 1.0)
+    return tf.nn.dropout(x, keep_prob)
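This layer is what lets the example scripts above drop their is_training argument: the flag is read from the current tower context when the graph is built, so training and inference graphs get different keep probabilities from the same model code. A short usage sketch; the scope name 'drop_fc' and the preceding layer are invented:

    # Usage sketch for the Dropout layer added above. Registered with the
    # default use_scope=True, so the first argument is a scope name.
    x = FullyConnected('fc_demo', x, 1024)    # hypothetical preceding layer
    x = Dropout('drop_fc', x, prob=0.5)       # keep_prob 0.5 in training, 1.0 at inference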