Shashank Suhas / seminar-breakout / Commits

Commit dcbd4696, authored Apr 11, 2016 by Yuxin Wu
bnrelu, classificationerror

parent fec3a4a5
Showing 5 changed files, with 16 additions and 17 deletions:

  +8  -8   examples/cifar10_convnet.py
  +1  -1   examples/cifar10_resnet.py
  +2  -2   examples/mnist_convnet.py
  +2  -2   examples/svhn_digit_convnet.py
  +3  -4   tensorpack/callbacks/validation_callback.py
examples/cifar10_convnet.py

@@ -20,8 +20,6 @@ from tensorpack.dataflow import imgaug
 """
 CIFAR10 90% validation accuracy after 70k step.
 91% validation accuracy after 36k step with 3 GPU.
 """
 BATCH_SIZE = 128
@@ -46,15 +44,15 @@ class Model(ModelDesc):
         image = image / 4.0     # just to make range smaller
         l = Conv2D('conv1.1', image, out_channel=64, kernel_shape=3)
-        l = Conv2D('conv1.2', l, out_channel=64, kernel_shape=3, nl=BNReLU(is_training))
+        l = Conv2D('conv1.2', l, out_channel=64, kernel_shape=3, nl=BNReLU(is_training), use_bias=False)
         l = MaxPooling('pool1', l, 3, stride=2, padding='SAME')
         l = Conv2D('conv2.1', l, out_channel=128, kernel_shape=3)
-        l = Conv2D('conv2.2', l, out_channel=128, kernel_shape=3, nl=BNReLU(is_training))
+        l = Conv2D('conv2.2', l, out_channel=128, kernel_shape=3, nl=BNReLU(is_training), use_bias=False)
         l = MaxPooling('pool2', l, 3, stride=2, padding='SAME')
         l = Conv2D('conv3.1', l, out_channel=128, kernel_shape=3, padding='VALID')
-        l = Conv2D('conv3.2', l, out_channel=128, kernel_shape=3, padding='VALID', nl=BNReLU(is_training))
+        l = Conv2D('conv3.2', l, out_channel=128, kernel_shape=3, padding='VALID', nl=BNReLU(is_training), use_bias=False)
         l = FullyConnected('fc0', l, 1024 + 512, b_init=tf.constant_initializer(0.1))
         l = tf.nn.dropout(l, keep_prob)
@@ -69,7 +67,7 @@ class Model(ModelDesc):
         cost = tf.reduce_mean(cost, name='cross_entropy_loss')
         tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, cost)

-        # compute the number of failed samples, for ValidationError to use at test time
+        # compute the number of failed samples, for ClassificationError to use at test time
         wrong = prediction_incorrect(logits, label)
         nr_wrong = tf.reduce_sum(wrong, name='wrong')
         # monitor training error
@@ -125,7 +123,7 @@ def get_config():
     lr = tf.train.exponential_decay(
         learning_rate=1e-2,
         global_step=get_global_step_var(),
-        decay_steps=dataset_train.size() * 30 if nr_gpu == 1 else 15,
+        decay_steps=dataset_train.size() * 30 if nr_gpu == 1 else 20,
         decay_rate=0.5, staircase=True, name='learning_rate')
     tf.scalar_summary('learning_rate', lr)
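For reference, a hedged sketch of what the staircase schedule configured above works out to: with staircase=True, tf.train.exponential_decay multiplies the base rate by decay_rate once per completed decay_steps interval (integer division), so this config halves the learning rate on a fixed step period. The helper below is illustrative only, not part of the commit.

    def staircase_lr(global_step, decay_steps, base_lr=1e-2, decay_rate=0.5):
        # staircase exponential decay: base_lr * decay_rate ** floor(step / decay_steps)
        return base_lr * decay_rate ** (global_step // decay_steps)

    # e.g. with decay_steps = dataset_train.size() * 30 (single GPU), the rate stays
    # at 1e-2 for 30 epochs' worth of steps, then drops to 5e-3, 2.5e-3, ...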
@@ -135,7 +133,7 @@ def get_config():
         callbacks=Callbacks([
             StatPrinter(), PeriodicSaver(),
-            ValidationError(dataset_test, prefix='test'),
+            ClassificationError(dataset_test, prefix='test'),
         ]),
         session_config=sess_config,
         model=Model(),
@@ -155,6 +153,8 @@ if __name__ == '__main__':
     if args.gpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+    else:
+        os.environ['CUDA_VISIBLE_DEVICES'] = '0'

     with tf.Graph().as_default():
         config = get_config()
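The conv-layer changes above pass a BNReLU nonlinearity and set use_bias=False: batch normalization already applies a learned per-channel shift, so a bias in the preceding convolution would be redundant. Below is a minimal, TF 1.x-style sketch of such a batch-norm-then-ReLU activation; it is illustrative only and not tensorpack's BNReLU implementation.

    import tensorflow as tf

    def bn_relu_sketch(x, eps=1e-5):
        # Normalize with batch statistics over N, H, W, then apply ReLU.
        # (Illustrative: no moving averages or learned scale/offset here.)
        mean, var = tf.nn.moments(x, axes=[0, 1, 2], keep_dims=True)
        x = tf.nn.batch_normalization(x, mean, var, offset=None, scale=None,
                                      variance_epsilon=eps)
        return tf.nn.relu(x)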
examples/cifar10_resnet.py

@@ -167,7 +167,7 @@ def get_config():
         callbacks=Callbacks([
             StatPrinter(), PeriodicSaver(),
-            ValidationError(dataset_test, prefix='test'),
+            ClassificationError(dataset_test, prefix='test'),
             ScheduledHyperParamSetter('learning_rate',
                                       [(1, 0.1), (82, 0.01), (123, 0.001), (300, 0.0002)])
         ]),
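For context, the schedule passed to ScheduledHyperParamSetter above is a list of (epoch, value) pairs; the hyper-parameter takes the paired value from that epoch on. A hedged, library-free sketch of that lookup (not tensorpack code; the starting value is a hypothetical default):

    def value_at_epoch(schedule, epoch, default=0.1):
        # schedule: list of (epoch, value); the last entry whose epoch has been reached wins.
        value = default
        for e, v in sorted(schedule):
            if epoch >= e:
                value = v
        return value

    # value_at_epoch([(1, 0.1), (82, 0.01), (123, 0.001), (300, 0.0002)], 100) -> 0.01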
examples/mnist_convnet.py

@@ -62,7 +62,7 @@ class Model(ModelDesc):
         cost = tf.reduce_mean(cost, name='cross_entropy_loss')
         tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, cost)

-        # compute the number of failed samples, for ValidationError to use at test time
+        # compute the number of failed samples, for ClassificationError to use at test time
         wrong = prediction_incorrect(logits, label)
         nr_wrong = tf.reduce_sum(wrong, name='wrong')
         # monitor training error

@@ -106,7 +106,7 @@ def get_config():
             StatPrinter(), PeriodicSaver(),
             ValidationStatPrinter(dataset_test, ['cost:0']),
-            ValidationError(dataset_test, prefix='validation'),
+            ClassificationError(dataset_test, prefix='validation'),
         ]),
         session_config=sess_config,
         model=Model(),
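The `wrong` / `nr_wrong` pair that the renamed comment refers to is just a per-sample misclassification indicator and its sum. A hedged TF 1.x-style sketch of such a helper (the real prediction_incorrect lives in tensorpack and may differ):

    import tensorflow as tf

    def prediction_incorrect_sketch(logits, label):
        # 1.0 where the arg-max prediction disagrees with the integer label, else 0.0.
        wrong = tf.not_equal(tf.argmax(logits, 1), tf.cast(label, tf.int64))
        return tf.cast(wrong, tf.float32)

    # nr_wrong = tf.reduce_sum(prediction_incorrect_sketch(logits, label), name='wrong')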
examples/svhn_digit_convnet.py

@@ -53,7 +53,7 @@ class Model(ModelDesc):
         cost = tf.reduce_mean(cost, name='cross_entropy_loss')
         tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, cost)

-        # compute the number of failed samples, for ValidationError to use at test time
+        # compute the number of failed samples, for ClassificationError to use at test time
         wrong = prediction_incorrect(logits, label)
         nr_wrong = tf.reduce_sum(wrong, name='wrong')
         # monitor training error

@@ -110,7 +110,7 @@ def get_config():
         callbacks=Callbacks([
             StatPrinter(), PeriodicSaver(),
-            ValidationError(test, prefix='test'),
+            ClassificationError(test, prefix='test'),
         ]),
         session_config=sess_config,
         model=Model(),
tensorpack/callbacks/validation_callback.py

@@ -12,7 +12,7 @@ from ..utils.stat import *
 from ..tfutils.summary import *
 from .base import PeriodicCallback, Callback, TestCallbackType

-__all__ = ['ValidationError', 'ValidationCallback', 'ValidationStatPrinter']
+__all__ = ['ClassificationError', 'ValidationCallback', 'ValidationStatPrinter']

 class ValidationCallback(PeriodicCallback):
     """

@@ -100,8 +100,7 @@ class ValidationStatPrinter(ValidationCallback):
                 '{}_{}'.format(self.prefix, name), stat), self.global_step)
             self.trainer.stat_holder.add_stat("{}_{}".format(self.prefix, name), stat)

-class ValidationError(ValidationCallback):
+class ClassificationError(ValidationCallback):
     """
     Validate the accuracy from a `wrong` variable

@@ -119,7 +118,7 @@ class ValidationError(ValidationCallback):
         :param ds: a batched `DataFlow` instance
         :param wrong_var_name: name of the `wrong` variable
         """
-        super(ValidationError, self).__init__(ds, prefix, period)
+        super(ClassificationError, self).__init__(ds, prefix, period)
         self.wrong_var_name = wrong_var_name

     def _find_output_vars(self):
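The callback renamed here validates accuracy from the `wrong` tensor defined in the examples. A minimal, library-free sketch of the accumulation such a callback performs (sum the per-batch wrong counts, divide by the samples seen); the names below are illustrative, not tensorpack's API:

    def classification_error(batch_results):
        # batch_results: iterable of (nr_wrong, batch_size) pairs, e.g. obtained by
        # evaluating the 'wrong' sum over each validation batch.
        total_wrong, total_seen = 0, 0
        for nr_wrong, batch_size in batch_results:
            total_wrong += nr_wrong
            total_seen += batch_size
        return total_wrong / float(total_seen)

    # classification_error([(5, 128), (7, 128)]) == 12 / 256.0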