Shashank Suhas / seminar-breakout · Commits

Commit 02e94b63, authored Feb 16, 2016 by Yuxin Wu

gamma_init in batch_norm

parent fd635774
Showing 3 changed files with 7 additions and 4 deletions (+7 -4):

  scripts/dump_train_config.py      +1 -1
  tensorpack/models/batch_norm.py   +5 -3
  tensorpack/models/fc.py           +1 -0
scripts/dump_train_config.py  (view file @ 02e94b63)

@@ -39,7 +39,7 @@ if args.output:
         for bi, img in enumerate(imgbatch):
             cnt += 1
             fname = os.path.join(args.output, '{:03d}-{}.png'.format(cnt, bi))
-            cv2.imwrite(fname, img)
+            cv2.imwrite(fname, img * 255)
 
 NR_DP_TEST = 100
 logger.info("Testing dataflow speed:")
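The scaling by 255 is presumably needed because the dataflow yields float images normalized to [0, 1], while cv2.imwrite stores 8-bit pixel values; writing the raw floats produces a nearly all-black PNG. A minimal sketch of the difference (file names are illustrative only):

import cv2
import numpy as np

img = np.random.rand(32, 32, 3)        # float image in [0, 1], as a dataflow might yield
cv2.imwrite('raw.png', img)            # values round to 0/1 -> essentially black output
cv2.imwrite('scaled.png', img * 255)   # values span 0..255 -> visible image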
tensorpack/models/batch_norm.py  (view file @ 02e94b63)

@@ -13,7 +13,7 @@ __all__ = ['BatchNorm']
 # http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
 # TF batch_norm only works for 4D tensor right now: #804
 @layer_register()
-def BatchNorm(x, is_training):
+def BatchNorm(x, is_training, gamma_init=1.0):
     """
     Batch normalization layer as described in:
     Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
@@ -35,7 +35,8 @@ def BatchNorm(x, is_training):
     n_out = shape[-1]  # channel
     beta = tf.get_variable('beta', [n_out])
-    gamma = tf.get_variable('gamma', [n_out], initializer=tf.constant_initializer(1.0))
+    gamma = tf.get_variable('gamma', [n_out],
+                            initializer=tf.constant_initializer(gamma_init))
     batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
     ema = tf.train.ExponentialMovingAverage(decay=0.999)
@@ -49,6 +50,7 @@ def BatchNorm(x, is_training):
         batch = tf.cast(tf.shape(x)[0], tf.float32)
         mean, var = ema_mean, ema_var * batch / (batch - 1)   # unbiased variance estimator
-    normed = tf.nn.batch_norm_with_global_normalization(x, mean, var, beta, gamma, EPS, True)
+    normed = tf.nn.batch_norm_with_global_normalization(
+        x, mean, var, beta, gamma, EPS, True)
     return normed
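With the new keyword argument, callers can choose the constant used to initialize the per-channel scale gamma instead of the previously hard-coded 1.0; the default keeps the old behaviour. A hedged usage sketch, assuming the usual layer_register() convention of passing the variable-scope name as the first argument (layer names and the 0.1 value are illustrative):

# unchanged default: gamma starts at 1.0
l = BatchNorm('bn1', l, is_training)
# new in this commit: start gamma at another constant
l = BatchNorm('bn2', l, is_training, gamma_init=0.1)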
tensorpack/models/fc.py  (view file @ 02e94b63)

@@ -18,6 +18,7 @@ def FullyConnected(x, out_dim, W_init=None, b_init=None, nl=tf.nn.relu):
     if W_init is None:
         W_init = tf.truncated_normal_initializer(stddev=1 / math.sqrt(float(in_dim)))
+        #W_init = tf.uniform_unit_scaling_initializer()
     if b_init is None:
         b_init = tf.constant_initializer(0.0)
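The only change here is a commented-out reminder of an alternative default initializer; both choices aim for weight variance on the order of 1/in_dim. A rough sketch of the two (TF 0.x/1.x API, not the tensorpack code itself; in_dim is a stand-in value):

import math
import tensorflow as tf

in_dim = 256
# truncated normal with stddev 1/sqrt(in_dim) -> variance ~ 1/in_dim
w_default = tf.truncated_normal_initializer(stddev=1 / math.sqrt(float(in_dim)))
# uniform scaled by the input dimension -> also roughly 1/in_dim variance
w_alternative = tf.uniform_unit_scaling_initializer()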