Shashank Suhas / seminar-breakout / Commits / fc7f0aac

Commit fc7f0aac, authored Feb 15, 2016 by Yuxin Wu
parent 5bc36930

    example

Showing 2 changed files with 38 additions and 29 deletions (+38 / -29):

    example_cifar10.py    +22 / -22
    example_mnist.py      +16 / -7

example_cifar10.py  (view file @ fc7f0aac)
@@ -18,9 +18,7 @@ from tensorpack.dataflow import *
 from tensorpack.dataflow import imgaug

 """
-This config follows the same preprocessing/model/hyperparemeters as in
-tensorflow cifar10 examples. (https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/image/cifar10/)
-86% accuracy. faster.
+CIFAR10 89% test accuracy after 200 epochs.
 """

 BATCH_SIZE = 128
@@ -31,7 +29,7 @@ class Model(ModelDesc):

     def _get_input_vars(self):
-        return [tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='input'),
+        return [tf.placeholder(tf.float32, shape=[None, 30, 30, 3], name='input'),
                 tf.placeholder(tf.int32, shape=[None], name='label')
                ]
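Note: the declared input resolution moves from 24x24 to 30x30, so whatever the dataflow feeds must now be 30x30 crops (see the matching RandomCrop/CenterCrop changes further down). A standalone TF 1.x-era sketch of that contract, purely illustrative since tensorpack's ModelDesc creates and feeds these placeholders itself:

    import numpy as np
    import tensorflow as tf  # TF 1.x-era API, matching the code above

    x = tf.placeholder(tf.float32, shape=[None, 30, 30, 3], name='input')
    y = tf.placeholder(tf.int32, shape=[None], name='label')
    # each batch entry must now be a 30x30 crop, not 24x24:
    feed = {x: np.zeros((128, 30, 30, 3), np.float32),
            y: np.zeros((128,), np.int32)}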
@@ -45,30 +43,32 @@ class Model(ModelDesc):
             num_threads=6, enqueue_many=True)
         tf.image_summary("train_image", image, 10)

-        l = Conv2D('conv1', image, out_channel=64, kernel_shape=5, padding='SAME',
-                   W_init=tf.truncated_normal_initializer(stddev=1e-4))
-        #l = BatchNorm('bn0', l, is_training)
+        l = Conv2D('conv1.1', image, out_channel=64, kernel_shape=3, padding='SAME')
+        l = Conv2D('conv1.2', l, out_channel=64, kernel_shape=3, nl=tf.identity)
+        l = BatchNorm('bn1', l, is_training)
+        l = tf.nn.relu(l)
         l = MaxPooling('pool1', l, 3, stride=2, padding='SAME')
-        l = tf.nn.lrn(l, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

-        l = Conv2D('conv2', l, out_channel=64, kernel_shape=5, padding='SAME',
-                   W_init=tf.truncated_normal_initializer(stddev=1e-4),
-                   b_init=tf.constant_initializer(0.1))
-        #l = BatchNorm('bn1', l, is_training)
-        l = tf.nn.lrn(l, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
+        l = Conv2D('conv2.1', l, out_channel=128, kernel_shape=3)
+        l = Conv2D('conv2.2', l, out_channel=128, kernel_shape=3, nl=tf.identity)
+        l = BatchNorm('bn2', l, is_training)
+        l = tf.nn.relu(l)
         l = MaxPooling('pool2', l, 3, stride=2, padding='SAME')

-        l = FullyConnected('fc0', l, 384,
+        l = Conv2D('conv3.1', l, out_channel=128, kernel_shape=3, padding='VALID')
+        l = Conv2D('conv3.2', l, out_channel=128, kernel_shape=3, padding='VALID', nl=tf.identity)
+        l = BatchNorm('bn3', l, is_training)
+        l = tf.nn.relu(l)
+
+        l = FullyConnected('fc0', l, 512,
                            W_init=tf.truncated_normal_initializer(stddev=0.04),
                            b_init=tf.constant_initializer(0.1))
-        l = FullyConnected('fc1', l, out_dim=192,
+        l = FullyConnected('fc1', l, out_dim=512,
                            W_init=tf.truncated_normal_initializer(stddev=0.04),
                            b_init=tf.constant_initializer(0.1))
         # fc will have activation summary by default. disable for the output layer
         logits = FullyConnected('linear', l, out_dim=10, summary_activation=False,
                                 nl=tf.identity,
                                 W_init=tf.truncated_normal_initializer(stddev=1.0 / 192))
         prob = tf.nn.softmax(logits, name='output')

         y = one_hot(label, 10)
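Note: the rewrite replaces each 5x5-conv + LRN stage with a pair of 3x3 convolutions followed by BatchNorm and ReLU, and adds a third conv stage before wider fully-connected layers. A quick, illustrative check of the feature-map sizes this implies for the new 30x30 input (assuming Conv2D defaults to SAME padding when none is given, and that SAME pooling rounds up as in TensorFlow):

    import math

    def same_conv(s):  return s                 # 3x3 conv, SAME padding
    def valid_conv(s): return s - 2             # 3x3 conv, VALID padding
    def same_pool(s):  return math.ceil(s / 2)  # 3x3 max-pool, stride 2, SAME

    s = 30                            # the new input resolution
    s = same_conv(same_conv(s))       # conv1.1, conv1.2 -> 30
    s = same_pool(s)                  # pool1            -> 15
    s = same_conv(same_conv(s))       # conv2.1, conv2.2 -> 15
    s = same_pool(s)                  # pool2            -> 8
    s = valid_conv(valid_conv(s))     # conv3.1, conv3.2 -> 4
    print(s)                          # 4: fc0 flattens 4*4*128 = 2048 features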
@@ -102,7 +102,7 @@ def get_config():
     # prepare dataset
     dataset_train = dataset.Cifar10('train')
     augmentors = [
-        imgaug.RandomCrop((24, 24)),
+        imgaug.RandomCrop((30, 30)),
         imgaug.Flip(horiz=True),
         imgaug.BrightnessAdd(63),
         imgaug.Contrast((0.2, 1.8)),
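Note: RandomCrop((30, 30)) has to agree with the [None, 30, 30, 3] placeholder declared above. A minimal numpy equivalent of such a crop, shown only to make the shape contract concrete (not tensorpack's actual implementation):

    import numpy as np

    def random_crop(img, h, w, rng=np.random):
        # pick a top-left corner so the h x w window fits inside img
        y = rng.randint(0, img.shape[0] - h + 1)
        x = rng.randint(0, img.shape[1] - w + 1)
        return img[y:y + h, x:x + w]

    img = np.zeros((32, 32, 3), np.float32)   # a CIFAR10-sized image
    print(random_crop(img, 30, 30).shape)     # (30, 30, 3)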
@@ -113,7 +113,7 @@ def get_config():
     step_per_epoch = dataset_train.size()

     augmentors = [
-        imgaug.CenterCrop((24, 24)),
+        imgaug.CenterCrop((30, 30)),
         imgaug.MeanVarianceNormalize(all_channel=True)
     ]
     dataset_test = dataset.Cifar10('test')
@@ -124,15 +124,15 @@ def get_config():
     sess_config.gpu_options.per_process_gpu_memory_fraction = 0.5

     lr = tf.train.exponential_decay(
-        learning_rate=1e-1,
+        learning_rate=1e-2,
         global_step=get_global_step_var(),
-        decay_steps=dataset_train.size() * 200,
-        decay_rate=0.1, staircase=True, name='learning_rate')
+        decay_steps=dataset_train.size() * 30,
+        decay_rate=0.5, staircase=True, name='learning_rate')
     tf.scalar_summary('learning_rate', lr)

     return TrainConfig(
         dataset=dataset_train,
-        optimizer=tf.train.GradientDescentOptimizer(lr),
+        optimizer=tf.train.AdamOptimizer(lr),
         callbacks=Callbacks([
             SummaryWriter(print_tag=['train_cost', 'train_error']),
             PeriodicSaver(),
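Note: both the schedule and the optimizer change: SGD at 1e-1 decayed by 0.1 every 200 epochs becomes Adam at 1e-2 decayed by 0.5 every 30 epochs. With staircase=True, exponential_decay computes lr * rate ** floor(step / decay_steps); an illustrative comparison in plain Python (epoch_size stands in for dataset_train.size(), assumed here to be the steps per epoch, about 50000 images / batch 128):

    def staircase_lr(base, rate, decay_steps, step):
        # tf.train.exponential_decay with staircase=True
        return base * rate ** (step // decay_steps)

    epoch_size = 390
    for epoch in (0, 30, 60, 90, 200):
        step = epoch * epoch_size
        old = staircase_lr(1e-1, 0.1, epoch_size * 200, step)
        new = staircase_lr(1e-2, 0.5, epoch_size * 30, step)
        print(epoch, old, new)
    # old: flat 0.1 until epoch 200; new: halves every 30 epochs (0.01, 0.005, ...)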
example_mnist.py  (view file @ fc7f0aac)

@@ -18,6 +18,11 @@ from tensorpack.utils.summary import *
 from tensorpack.callbacks import *
 from tensorpack.dataflow import *

+"""
+MNIST ConvNet example.
+99.33% test accuracy after 50 epochs.
+"""
+
 BATCH_SIZE = 128
 IMAGE_SIZE = 28
@@ -37,13 +42,17 @@ class Model(ModelDesc):
         image, label = input_vars
         image = tf.expand_dims(image, 3)    # add a single channel

-        l = Conv2D('conv0', image, out_channel=32, kernel_shape=3)
-        l = Conv2D('conv1', l, out_channel=32, kernel_shape=3)
+        nl = tf.nn.relu
+        image = image * 2 - 1
+        l = Conv2D('conv0', image, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
         l = MaxPooling('pool0', l, 2)
-        l = Conv2D('conv2', l, out_channel=40, kernel_shape=3)
+        l = Conv2D('conv1', l, out_channel=32, kernel_shape=3, nl=nl, padding='SAME')
+        l = Conv2D('conv2', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
         l = MaxPooling('pool1', l, 2)
+        l = Conv2D('conv3', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')

-        l = FullyConnected('fc0', l, 1024)
+        l = FullyConnected('fc0', l, 512)
         l = tf.nn.dropout(l, keep_prob)

         # fc will have activation summary by default. disable this for the output layer
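Note: the new image = image * 2 - 1 line rescales the input; assuming the dataflow yields pixel values in [0, 1], this maps them to [-1, 1] so the network sees zero-centered inputs. A one-line numpy check:

    import numpy as np

    img = np.random.rand(28, 28).astype('float32')   # pixel values in [0, 1)
    out = img * 2 - 1
    print(out.min() >= -1, out.max() <= 1)           # True True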
@@ -66,7 +75,7 @@ class Model(ModelDesc):
             MOVING_SUMMARY_VARS_KEY, tf.reduce_mean(wrong, name='train_error'))

         # weight decay on all W of fc layers
-        wd_cost = tf.mul(1e-4,
+        wd_cost = tf.mul(1e-5,
                          regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
         tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, wd_cost)
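Note: only the coefficient changes here, from 1e-4 to 1e-5. For reference, tf.nn.l2_loss(W) is sum(W ** 2) / 2, so the penalty is 1e-5 times that sum over the fc-layer weights matched by 'fc.*/W'. A numpy sketch of the same quantity (the weight shapes below are made up for illustration):

    import numpy as np

    def l2_loss(w):                 # mirrors tf.nn.l2_loss: sum(w ** 2) / 2
        return np.sum(w ** 2) / 2

    fc_weights = [np.random.randn(2048, 512), np.random.randn(512, 512)]
    wd_cost = 1e-5 * sum(l2_loss(w) for w in fc_weights)
    print(wd_cost)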
@@ -91,7 +100,7 @@ def get_config():
     lr = tf.train.exponential_decay(
         learning_rate=1e-3,
         global_step=get_global_step_var(),
-        decay_steps=dataset_train.size() * 10,
+        decay_steps=dataset_train.size() * 20,
         decay_rate=0.1, staircase=True, name='learning_rate')
     tf.scalar_summary('learning_rate', lr)
@@ -101,7 +110,7 @@ def get_config():
         callbacks=Callbacks([
             SummaryWriter(print_tag=['train_cost', 'train_error']),
             PeriodicSaver(),
-            ValidationError(dataset_test, prefix='test'),
+            ValidationError(dataset_test, prefix='validation'),
         ]),
         session_config=sess_config,
         model=Model(),