Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
S
seminar-breakout
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Shashank Suhas
seminar-breakout
Commits
00473316
Commit
00473316
authored
Oct 31, 2017
by
Yuxin Wu
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
minor refactor in ImageNetModel
parent
9672e503
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
38 additions
and
39 deletions
+38
-39
examples/ResNet/imagenet_utils.py
examples/ResNet/imagenet_utils.py
+36
-37
tensorpack/tfutils/symbolic_functions.py
tensorpack/tfutils/symbolic_functions.py
+1
-1
tensorpack/train/trainers.py
tensorpack/train/trainers.py
+1
-1
No files found.
examples/ResNet/imagenet_utils.py
View file @
00473316
...
@@ -129,42 +129,9 @@ def eval_on_ILSVRC12(model, sessinit, dataflow):
...
@@ -129,42 +129,9 @@ def eval_on_ILSVRC12(model, sessinit, dataflow):
print
(
"Top5 Error: {}"
.
format
(
acc5
.
ratio
))
print
(
"Top5 Error: {}"
.
format
(
acc5
.
ratio
))
def image_preprocess(image, bgr=True):
    """Normalize an image tensor for ImageNet models.

    Casts to float32 if needed, rescales pixel values to [0, 1], then
    subtracts the ImageNet per-channel mean and divides by the
    per-channel std.

    Args:
        image: image tensor, any numeric dtype; cast to float32 here.
        bgr (bool): if True, the input channels are BGR, so the
            RGB-ordered mean/std constants are reversed to match.

    Returns:
        The normalized float32 image tensor.
    """
    with tf.name_scope('image_preprocess'):
        if image.dtype.base_dtype != tf.float32:
            image = tf.cast(image, tf.float32)
        image = image * (1.0 / 255)
        # ImageNet channel statistics, listed in RGB order.
        channel_mean = [0.485, 0.456, 0.406]
        channel_std = [0.229, 0.224, 0.225]
        if bgr:
            channel_mean = channel_mean[::-1]
            channel_std = channel_std[::-1]
        offset = tf.constant(channel_mean, dtype=tf.float32)
        scale = tf.constant(channel_std, dtype=tf.float32)
        return (image - offset) / scale
def compute_loss_and_error(logits, label):
    """Build the classification loss and error-rate summaries.

    Computes mean softmax cross-entropy over the batch and registers
    moving-average summaries of the top-1 and top-5 error rates.

    Args:
        logits: unnormalized class scores, shape [batch, num_classes].
        label: integer ground-truth labels, shape [batch].

    Returns:
        Scalar mean cross-entropy tensor named 'xentropy-loss'.
    """
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=label)
    loss = tf.reduce_mean(xent, name='xentropy-loss')

    def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
        # 1.0 where the true label is NOT among the top-k predictions.
        with tf.name_scope('prediction_incorrect'):
            wrong_mask = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
        return tf.cast(wrong_mask, tf.float32, name=name)

    wrong_top1 = prediction_incorrect(logits, label, 1, name='wrong-top1')
    add_moving_summary(tf.reduce_mean(wrong_top1, name='train-error-top1'))

    wrong_top5 = prediction_incorrect(logits, label, 5, name='wrong-top5')
    add_moving_summary(tf.reduce_mean(wrong_top5, name='train-error-top5'))
    return loss
class
ImageNetModel
(
ModelDesc
):
class
ImageNetModel
(
ModelDesc
):
weight_decay
=
1e-4
weight_decay
=
1e-4
image_shape
=
224
"""
"""
uint8 instead of float32 is used as input type to reduce copy overhead.
uint8 instead of float32 is used as input type to reduce copy overhead.
...
@@ -179,17 +146,17 @@ class ImageNetModel(ModelDesc):
...
@@ -179,17 +146,17 @@ class ImageNetModel(ModelDesc):
self
.
data_format
=
data_format
self
.
data_format
=
data_format
def _get_inputs(self):
    """Declare the model's inputs.

    Returns:
        A list of two InputDesc: an image batch of shape
        [None, image_shape, image_shape, 3] with dtype
        ``self.image_dtype`` named 'input', and an int32 label
        vector named 'label'.
    """
    side = self.image_shape
    return [
        InputDesc(self.image_dtype, [None, side, side, 3], 'input'),
        InputDesc(tf.int32, [None], 'label'),
    ]
def
_build_graph
(
self
,
inputs
):
def
_build_graph
(
self
,
inputs
):
image
,
label
=
inputs
image
,
label
=
inputs
image
=
image_preprocess
(
image
,
bgr
=
True
)
image
=
self
.
image_preprocess
(
image
,
bgr
=
True
)
if
self
.
data_format
==
'NCHW'
:
if
self
.
data_format
==
'NCHW'
:
image
=
tf
.
transpose
(
image
,
[
0
,
3
,
1
,
2
])
image
=
tf
.
transpose
(
image
,
[
0
,
3
,
1
,
2
])
logits
=
self
.
get_logits
(
image
)
logits
=
self
.
get_logits
(
image
)
loss
=
compute_loss_and_error
(
logits
,
label
)
loss
=
self
.
compute_loss_and_error
(
logits
,
label
)
wd_loss
=
regularize_cost
(
'.*/W'
,
tf
.
contrib
.
layers
.
l2_regularizer
(
self
.
weight_decay
),
wd_loss
=
regularize_cost
(
'.*/W'
,
tf
.
contrib
.
layers
.
l2_regularizer
(
self
.
weight_decay
),
name
=
'l2_regularize_loss'
)
name
=
'l2_regularize_loss'
)
add_moving_summary
(
loss
,
wd_loss
)
add_moving_summary
(
loss
,
wd_loss
)
...
@@ -209,3 +176,35 @@ class ImageNetModel(ModelDesc):
...
@@ -209,3 +176,35 @@ class ImageNetModel(ModelDesc):
lr
=
tf
.
get_variable
(
'learning_rate'
,
initializer
=
0.1
,
trainable
=
False
)
lr
=
tf
.
get_variable
(
'learning_rate'
,
initializer
=
0.1
,
trainable
=
False
)
tf
.
summary
.
scalar
(
'learning_rate'
,
lr
)
tf
.
summary
.
scalar
(
'learning_rate'
,
lr
)
return
tf
.
train
.
MomentumOptimizer
(
lr
,
0.9
,
use_nesterov
=
True
)
return
tf
.
train
.
MomentumOptimizer
(
lr
,
0.9
,
use_nesterov
=
True
)
def image_preprocess(self, image, bgr=True):
    """Normalize an image batch for this model.

    Casts to float32 if needed, rescales pixels to [0, 1], then applies
    the standard ImageNet per-channel mean/std normalization.

    Args:
        image: image tensor; cast to float32 here if not already.
        bgr (bool): if True, the input channels are BGR, so the
            RGB-ordered mean/std constants are reversed to match.

    Returns:
        The normalized float32 image tensor.
    """
    with tf.name_scope('image_preprocess'):
        if image.dtype.base_dtype != tf.float32:
            image = tf.cast(image, tf.float32)
        image = image * (1.0 / 255)
        # ImageNet channel statistics, listed in RGB order.
        channel_mean = [0.485, 0.456, 0.406]
        channel_std = [0.229, 0.224, 0.225]
        if bgr:
            channel_mean = channel_mean[::-1]
            channel_std = channel_std[::-1]
        offset = tf.constant(channel_mean, dtype=tf.float32)
        scale = tf.constant(channel_std, dtype=tf.float32)
        return (image - offset) / scale
def compute_loss_and_error(self, logits, label):
    """Build the classification loss and error-rate summaries.

    Computes mean softmax cross-entropy over the batch and registers
    moving-average summaries of the top-1 and top-5 error rates.
    Note: does not read any instance state.

    Args:
        logits: unnormalized class scores, shape [batch, num_classes].
        label: integer ground-truth labels, shape [batch].

    Returns:
        Scalar mean cross-entropy tensor named 'xentropy-loss'.
    """
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=label)
    loss = tf.reduce_mean(xent, name='xentropy-loss')

    def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
        # 1.0 where the true label is NOT among the top-k predictions.
        with tf.name_scope('prediction_incorrect'):
            wrong_mask = tf.logical_not(tf.nn.in_top_k(logits, label, topk))
        return tf.cast(wrong_mask, tf.float32, name=name)

    wrong_top1 = prediction_incorrect(logits, label, 1, name='wrong-top1')
    add_moving_summary(tf.reduce_mean(wrong_top1, name='train-error-top1'))

    wrong_top5 = prediction_incorrect(logits, label, 5, name='wrong-top5')
    add_moving_summary(tf.reduce_mean(wrong_top5, name='train-error-top5'))
    return loss
tensorpack/tfutils/symbolic_functions.py
View file @
00473316
...
@@ -164,7 +164,7 @@ def get_scalar_var(name, init_value, summary=False, trainable=False):
...
@@ -164,7 +164,7 @@ def get_scalar_var(name, init_value, summary=False, trainable=False):
trainable
=
trainable
)
trainable
=
trainable
)
if
summary
:
if
summary
:
# this is recognized in callbacks.StatHolder
# this is recognized in callbacks.StatHolder
tf
.
summary
.
scalar
(
name
,
ret
)
tf
.
summary
.
scalar
(
name
+
'-summary'
,
ret
)
return
ret
return
ret
...
...
tensorpack/train/trainers.py
View file @
00473316
...
@@ -248,5 +248,5 @@ class HorovodTrainer(SingleCostTrainer):
...
@@ -248,5 +248,5 @@ class HorovodTrainer(SingleCostTrainer):
from ..utils.develop import create_dummy_class  # noqa

try:
    import horovod.tensorflow as hvd
except Exception:
    # The import can fail with errors other than ImportError,
    # e.g. when NCCL is not found, so catch Exception broadly
    # and substitute a dummy class that raises on use.
    # Fix: the dummy class name was misspelled as 'HovorodTrainer';
    # it must match the real class name for a sensible error message.
    HorovodTrainer = create_dummy_class('HorovodTrainer', 'horovod')  # noqa
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment