Shashank Suhas / seminar-breakout · Commits
Commit 0f90d4c2 authored Oct 19, 2017 by Yuxin Wu

clean-up '-summary' in stats name. clean-up some use of get_scalar_var

Parent: a55d81ca
Showing 12 changed files with 19 additions and 12 deletions (+19 -12):
examples/A3C-Gym/train-atari.py           +1 -1
examples/FasterRCNN/train.py              +2 -1
examples/GAN/BEGAN.py                     +1 -1
examples/GAN/CycleGAN.py                  +1 -1
examples/GAN/DCGAN.py                     +1 -1
examples/HED/hed.py                       +1 -1
examples/ResNet/imagenet_utils.py         +1 -1
examples/boilerplate.py                   +1 -1
examples/cifar-convnet.py                 +2 -1
tensorpack/callbacks/monitor.py           +3 -0
tensorpack/tfutils/summary.py             +1 -1
tensorpack/tfutils/symbolic_functions.py  +4 -2
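Most of the example-script changes follow a single pattern: the deprecated tensorpack helper symbf.get_scalar_var(name, init, summary=True) is replaced by a plain tf.get_variable call, and where a scalar summary is still wanted it is written explicitly under the bare stat name, without the legacy '-summary' suffix. A minimal before/after sketch of the pattern (TF 1.x API; the value is illustrative):

    import tensorflow as tf

    # Before (deprecated helper; also wrote the summary as 'learning_rate-summary'):
    #   lr = symbf.get_scalar_var('learning_rate', 0.001, summary=True)

    # After: a plain, non-trainable scalar variable...
    lr = tf.get_variable('learning_rate', initializer=0.001, trainable=False)
    # ...with the summary, when wanted, written under the bare name.
    tf.summary.scalar('learning_rate', lr)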
examples/A3C-Gym/train-atari.py
@@ -135,7 +135,7 @@ class Model(ModelDesc):
                                    self.cost, tf.reduce_mean(importance, name='importance'))

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 0.001, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.001, trainable=False)
         opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
         gradprocs = [MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1)),
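trainable=False keeps the learning-rate variable out of gradient updates but leaves it assignable, which is how tensorpack's hyperparameter callbacks drive it. A hedged sketch of that usage (ScheduledHyperParamSetter is tensorpack's scheduling callback; the schedule values below are illustrative, not from this repository):

    from tensorpack.callbacks import ScheduledHyperParamSetter

    # Assigns new values to the non-trainable 'learning_rate' variable
    # at the given epochs; the numbers are made up for illustration.
    lr_schedule = ScheduledHyperParamSetter(
        'learning_rate', [(0, 1e-3), (80, 1e-4), (120, 1e-5)])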
examples/FasterRCNN/train.py
@@ -126,7 +126,8 @@ class Model(ModelDesc):
         decoded_boxes = tf.identity(decoded_boxes, name='fastrcnn_fg_boxes')

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 0.003, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
+        tf.summary.scalar('learning_rate', lr)
         factor = get_batch_factor()
         if factor != 1:
examples/GAN/BEGAN.py
@@ -126,7 +126,7 @@ class Model(GANModelDesc):
         self.g_loss = L_neg

         add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
-        tf.summary.scalar('kt-summary', kt)
+        tf.summary.scalar('kt', kt)

         self.collect_variables()
examples/GAN/CycleGAN.py
@@ -157,7 +157,7 @@ class Model(GANModelDesc):
         add_moving_summary(recon_loss_A, recon_loss_B, self.g_loss, self.d_loss)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
         return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
examples/GAN/DCGAN.py
@@ -95,7 +95,7 @@ class Model(GANModelDesc):
         self.collect_variables()

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
         return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
examples/HED/hed.py
@@ -96,7 +96,7 @@ class Model(ModelDesc):
         add_moving_summary(costs + [wrong, self.cost])

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 3e-5, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=3e-5, trainable=False)
         opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
         return optimizer.apply_grad_processors(
             opt, [gradproc.ScaleGradient(
examples/ResNet/imagenet_utils.py
@@ -207,5 +207,5 @@ class ImageNetModel(ModelDesc):
     def _get_optimizer(self):
         lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
-        tf.summary.scalar('learning_rate-summary', lr)
+        tf.summary.scalar('learning_rate', lr)
         return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
examples/boilerplate.py
@@ -30,7 +30,7 @@ class Model(ModelDesc):
         summary.add_moving_summary(self.cost)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 5e-3, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
         return tf.train.AdamOptimizer(lr)
examples/cifar-convnet.py
@@ -78,7 +78,8 @@ class Model(ModelDesc):
         self.cost = tf.add_n([cost, wd_cost], name='cost')

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 1e-2, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=1e-2, trainable=False)
+        tf.summary.scalar('lr', lr)
         return tf.train.AdamOptimizer(lr, epsilon=1e-3)
tensorpack/callbacks/monitor.py
@@ -129,9 +129,12 @@ class Monitors(Callback):
         for val in summary.value:
             if val.WhichOneof('value') == 'simple_value':
                 val.tag = re.sub('tower[0-9]+/', '', val.tag)   # TODO move to subclasses
+
+                # TODO This hack not needed any more, can remove this in the future
                 suffix = '-summary'  # tensorflow#6150, tensorboard#59
                 if val.tag.endswith(suffix):
                     val.tag = val.tag[:-len(suffix)]
+
                 self._dispatch(lambda m: m.process_scalar(val.tag, val.simple_value))
         self._dispatch(lambda m: m.process_summary(summary))
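The shim above keeps dispatching consistent while old '-summary' tags are still in circulation: the suffix is stripped before the scalar reaches the monitors. A tiny self-contained illustration of that renaming (the helper name is hypothetical, not part of tensorpack):

    def strip_summary_suffix(tag, suffix='-summary'):
        # Drop the legacy suffix so 'learning_rate-summary' and
        # 'learning_rate' land under the same stat name.
        return tag[:-len(suffix)] if tag.endswith(suffix) else tag

    assert strip_summary_suffix('learning_rate-summary') == 'learning_rate'
    assert strip_summary_suffix('learning_rate') == 'learning_rate'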
tensorpack/tfutils/summary.py
@@ -246,7 +246,7 @@ def add_moving_summary(*args, **kwargs):
             ema_ops.append(ema_op)
         with tf.name_scope(None):
             # cannot add it into colocate group -- will force everything to cpus
-            tf.summary.scalar(name + '-summary', ema_op)    # write the EMA value as a summary
+            tf.summary.scalar(name, ema_op)    # write the EMA value as a summary
     if coll is not None:
         for op in ema_ops:
             # TODO a new collection to summary every step?
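After this change the EMA summary tag equals the stat name itself, so what add_moving_summary writes lines up with what the monitors report. Usage is unchanged; a minimal sketch (TF 1.x; the tensor and its name are illustrative):

    import tensorflow as tf
    from tensorpack.tfutils.summary import add_moving_summary

    cost = tf.reduce_mean(tf.square(tf.random_normal([8])), name='cost')
    # The moving average of `cost` is now summarized under the tag
    # 'cost' rather than 'cost-summary'.
    add_moving_summary(cost)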
tensorpack/tfutils/symbolic_functions.py
@@ -154,9 +154,11 @@ def huber_loss(x, delta=1, name='huber_loss'):
     return tf.where(cond, l2, l1, name=name)


+# TODO remove this in the future
 def get_scalar_var(name, init_value, summary=False, trainable=False):
     """
-    Get a scalar float variable with certain initial value
+    Get a scalar float variable with certain initial value.
+    You can just call `tf.get_variable(name, initializer=init_value, trainable=False)` instead.

     Args:
         name (str): name of the variable.
@@ -170,7 +172,7 @@ def get_scalar_var(name, init_value, summary=False, trainable=False):
         trainable=trainable)
     if summary:
         # this is recognized in callbacks.StatHolder
-        tf.summary.scalar(name + '-summary', ret)
+        tf.summary.scalar(name, ret)
     return ret
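The updated docstring points at the direct replacement. Because the variable is non-trainable yet still assignable, callers who scheduled it by hand can keep doing so; a runnable TF 1.x sketch of that mechanic (names and values illustrative):

    import tensorflow as tf

    lr = tf.get_variable('learning_rate', initializer=0.001, trainable=False)
    new_lr = tf.placeholder(tf.float32, shape=(), name='new_lr')
    assign_lr = lr.assign(new_lr)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(assign_lr, feed_dict={new_lr: 1e-4})  # e.g. from a callback
        print(sess.run(lr))  # -> 1e-4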