Shashank Suhas / seminar-breakout / Commits / 916d9a19

Commit 916d9a19
Authored Jun 25, 2016 by Yuxin Wu
Parent: 4f352b29

    linearwrap for mnist & cifar10
Changes: 3 changed files with 71 additions and 29 deletions (+71 −29)

    examples/cifar-convnet.py        +15 −16
    examples/mnist-convnet.py        +11 −13
    tensorpack/models/__init__.py    +45  −0
examples/cifar-convnet.py

@@ -42,22 +42,21 @@ class Model(ModelDesc):
         image = image / 4.0     # just to make range smaller
         with argscope(Conv2D, nl=BNReLU(is_training), use_bias=False, kernel_shape=3):
-            l = Conv2D('conv1.1', image, out_channel=64)
-            l = Conv2D('conv1.2', l, out_channel=64)
-            l = MaxPooling('pool1', l, 3, stride=2, padding='SAME')
-            l = Conv2D('conv2.1', l, out_channel=128)
-            l = Conv2D('conv2.2', l, out_channel=128)
-            l = MaxPooling('pool2', l, 3, stride=2, padding='SAME')
-            l = Conv2D('conv3.1', l, out_channel=128, padding='VALID')
-            l = Conv2D('conv3.2', l, out_channel=128, padding='VALID')
-        l = FullyConnected('fc0', l, 1024 + 512,
-                           b_init=tf.constant_initializer(0.1))
-        l = tf.nn.dropout(l, keep_prob)
-        l = FullyConnected('fc1', l, 512,
-                           b_init=tf.constant_initializer(0.1))
-        logits = FullyConnected('linear', l, out_dim=self.cifar_classnum, nl=tf.identity)
+            logits = LinearWrap(image) \
+                .Conv2D('conv1.1', out_channel=64) \
+                .Conv2D('conv1.2', out_channel=64) \
+                .MaxPooling('pool1', 3, stride=2, padding='SAME') \
+                .Conv2D('conv2.1', out_channel=128) \
+                .Conv2D('conv2.2', out_channel=128) \
+                .MaxPooling('pool2', 3, stride=2, padding='SAME') \
+                .Conv2D('conv3.1', out_channel=128, padding='VALID') \
+                .Conv2D('conv3.2', out_channel=128, padding='VALID') \
+                .FullyConnected('fc0', 1024 + 512,
+                                b_init=tf.constant_initializer(0.1)) \
+                .tf.nn.dropout(keep_prob) \
+                .FullyConnected('fc1', 512,
+                                b_init=tf.constant_initializer(0.1)) \
+                .FullyConnected('linear', out_dim=self.cifar_classnum, nl=tf.identity)()
         cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
         cost = tf.reduce_mean(cost, name='cross_entropy_loss')
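
The hunk above replaces manual threading of the intermediate tensor l through each layer with a single fluent chain. A minimal sketch of the equivalence, reusing the identifiers from this commit (the trailing () that unwraps the tensor is defined in tensorpack/models/__init__.py below):

    # Before: the running tensor is passed explicitly to every layer.
    l = Conv2D('conv1.1', image, out_channel=64)
    l = Conv2D('conv1.2', l, out_channel=64)

    # After: LinearWrap(image) carries the tensor through the chain and
    # splices it in as each layer's input; the trailing () returns the
    # underlying tensor.
    logits = LinearWrap(image) \
        .Conv2D('conv1.1', out_channel=64) \
        .Conv2D('conv1.2', out_channel=64)()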
examples/mnist-convnet.py

@@ -33,19 +33,17 @@ class Model(ModelDesc):
         nl = PReLU.f
         image = image * 2 - 1
-        l = Conv2D('conv0', image, out_channel=32, kernel_shape=3, nl=nl,
-                   padding='VALID')
-        l = MaxPooling('pool0', l, 2)
-        l = Conv2D('conv1', l, out_channel=32, kernel_shape=3, nl=nl, padding='SAME')
-        l = Conv2D('conv2', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
-        l = MaxPooling('pool1', l, 2)
-        l = Conv2D('conv3', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
-        l = FullyConnected('fc0', l, 512)
-        l = tf.nn.dropout(l, keep_prob)
-        # fc will have activation summary by default. disable this for the output layer
-        logits = FullyConnected('fc1', l, out_dim=10, nl=tf.identity)
+        with argscope(Conv2D, kernel_shape=3, nl=nl, out_channel=32):
+            logits = LinearWrap(image) \
+                .Conv2D('conv0', padding='VALID') \
+                .MaxPooling('pool0', 2) \
+                .Conv2D('conv1', padding='SAME') \
+                .Conv2D('conv2', padding='VALID') \
+                .MaxPooling('pool1', 2) \
+                .Conv2D('conv3', padding='VALID') \
+                .FullyConnected('fc0', 512) \
+                .tf.nn.dropout(keep_prob) \
+                .FullyConnected('fc1', out_dim=10, nl=tf.identity)()
         prob = tf.nn.softmax(logits, name='prob')
         cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
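
This hunk pairs LinearWrap with argscope: the arguments shared by every convolution (kernel_shape=3, nl=nl, out_channel=32) are lifted into the argscope, so each chained Conv2D states only what differs. A sketch of the equivalence implied by the diff, with the same identifiers as above:

    # Under the argscope defaults, the chained form...
    with argscope(Conv2D, kernel_shape=3, nl=nl, out_channel=32):
        l = LinearWrap(image).Conv2D('conv0', padding='VALID')()

    # ...builds the same layer as the old explicit call:
    l = Conv2D('conv0', image, out_channel=32, kernel_shape=3, nl=nl,
               padding='VALID')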
tensorpack/models/__init__.py

@@ -3,8 +3,11 @@
 # Author: Yuxin Wu <ppwwyyxx@gmail.com>

 from pkgutil import walk_packages
+from types import ModuleType
+import tensorflow as tf
 import os
 import os.path
+from ..utils import logger

 def _global_import(name):
     p = __import__(name, globals(), locals(), level=1)

@@ -17,3 +20,45 @@ for _, module_name, _ in walk_packages(
     if not module_name.startswith('_'):
         _global_import(module_name)
+
+class LinearWrap(object):
+    """ A simple wrapper to easily create linear graph,
+        for layers with one input&output, or tf function with one input&output
+    """
+    class TFModuleFunc(object):
+        def __init__(self, mod, tensor):
+            self._mod = mod
+            self._t = tensor
+
+        def __getattr__(self, name):
+            ret = getattr(self._mod, name)
+            if isinstance(ret, ModuleType):
+                return LinearWrap.TFModuleFunc(ret, self._t)
+            else:
+                # assume to be a tf function
+                def f(*args, **kwargs):
+                    o = ret(self._t, *args, **kwargs)
+                    return LinearWrap(o)
+                return f
+
+    def __init__(self, tensor):
+        self._t = tensor
+
+    def __getattr__(self, layer_name):
+        layer = eval(layer_name)
+        if hasattr(layer, 'f'):
+            # a registered tensorpack layer
+            def f(name, *args, **kwargs):
+                ret = layer(name, self._t, *args, **kwargs)
+                return LinearWrap(ret)
+            return f
+        else:
+            if layer_name != 'tf':
+                logger.warn("You're calling LinearWrap with something neither a layer nor 'tf'. Not officially supported yet!")
+            assert isinstance(layer, ModuleType)
+            return LinearWrap.TFModuleFunc(layer, self._t)
+
+    def __call__(self):
+        return self._t
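
How the chain resolves: LinearWrap.__getattr__ looks the attribute up with eval (the _global_import loop above ensures registered layers are visible in this module's namespace) and returns a function that calls the layer with the wrapped tensor spliced in as its input, re-wrapping the result. Attribute access on 'tf' instead returns TFModuleFunc, which walks submodules (e.g. tf.nn) until a plain function is reached and then calls it with the tensor as first argument. Below is a self-contained toy version of the same chaining idea; Chain, add, and mul are invented for illustration and are not part of this commit:

    class Chain(object):
        # Toy analogue of LinearWrap: wrap a value, thread it through calls.
        def __init__(self, value):
            self._v = value

        def __getattr__(self, name):
            fn = globals()[name]    # resolve by name, like eval(layer_name) above
            def call(*args, **kwargs):
                # splice the wrapped value in as the first argument, re-wrap the result
                return Chain(fn(self._v, *args, **kwargs))
            return call

        def __call__(self):
            return self._v          # unwrap, mirroring LinearWrap.__call__

    def add(x, k):
        return x + k

    def mul(x, k):
        return x * k

    print(Chain(3).add(2).mul(10)())   # (3 + 2) * 10 -> 50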