Shashank Suhas / seminar-breakout

Commit f47923c7, authored Oct 31, 2017 by Yuxin Wu
Parent: f2e028d2

prefer get_variable over get_scalar_var
Showing 18 changed files with 19 additions and 23 deletions (+19 −23).
examples/CTC-TIMIT/train-timit.py                   +1 −1
examples/Char-RNN/char-rnn.py                       +1 −1
examples/DoReFa-Net/alexnet-dorefa.py               +1 −1
examples/DynamicFilterNetwork/steering-filter.py    +1 −2
examples/GAN/BEGAN.py                               +1 −1
examples/GAN/ConditionalGAN-mnist.py                +1 −2
examples/GAN/DiscoGAN-CelebA.py                     +1 −2
examples/GAN/Image2Image.py                         +1 −1
examples/GAN/Improved-WGAN.py                       +1 −2
examples/GAN/WGAN.py                                +1 −2
examples/Inception/inception-bn.py                  +1 −1
examples/Inception/inceptionv3.py                   +1 −1
examples/PennTreebank/PTB-LSTM.py                   +1 −1
examples/ResNet/cifar10-resnet.py                   +1 −1
examples/Saliency/CAM-resnet.py                     +1 −1
examples/SimilarityLearning/mnist-embeddings.py     +1 −1
examples/SpatialTransformer/mnist-addition.py       +1 −1
tensorpack/tfutils/symbolic_functions.py            +2 −1
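Every hunk below applies one of two mechanical rewrites. Where the learning-rate variable survives (presumably because a schedule or callback still writes to it at runtime), the tensorpack helper is replaced by a plain non-trainable TensorFlow variable; where the value was only ever read once, the variable is removed and the constant is passed straight to the optimizer. A minimal sketch of the two patterns, lifted from the hunks themselves (the numeric values vary per file):

# Pattern 1: keep a runtime-adjustable scalar, but create it directly.
lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
# replaces:
# lr = symbolic_functions.get_scalar_var('learning_rate', 1e-4, summary=True)

# Pattern 2: the variable was never updated after creation, so inline it.
return tf.train.AdamOptimizer(2e-4, beta1=0.5, epsilon=1e-3)
# replaces:
# lr = symbf.get_scalar_var('learning_rate', 2e-4, summary=True)
# return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)

Note that summary=True has no counterpart on the new lines: whatever scalar summary the helper registered for 'learning_rate' is no longer emitted by this code.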
examples/CTC-TIMIT/train-timit.py

@@ -77,7 +77,7 @@ class Model(ModelDesc):
         summary.add_moving_summary(err, self.cost)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 5e-3, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
         opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
         return optimizer.apply_grad_processors(
             opt, [GlobalNormClip(5), SummaryGradient()])
examples/Char-RNN/char-rnn.py

@@ -105,7 +105,7 @@ class Model(ModelDesc):
         summary.add_moving_summary(self.cost)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 2e-3, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=2e-3, trainable=False)
         opt = tf.train.AdamOptimizer(lr)
         return optimizer.apply_grad_processors(opt, [GlobalNormClip(5)])
examples/DoReFa-Net/alexnet-dorefa.py

@@ -167,7 +167,7 @@ class Model(ModelDesc):
         add_moving_summary(cost, wd_cost, self.cost)

     def _get_optimizer(self):
-        lr = get_scalar_var('learning_rate', 1e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
         return tf.train.AdamOptimizer(lr, epsilon=1e-5)
examples/DynamicFilterNetwork/steering-filter.py

@@ -149,8 +149,7 @@ class Model(ModelDesc):
         summary.add_moving_summary(self.cost)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 1e-3, summary=True)
-        return tf.train.AdamOptimizer(lr)
+        return tf.train.AdamOptimizer(1e-3)


 class ThetaImages(ProxyDataFlow, RNGDataFlow):
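Files like this one inline the constant because nothing ever reassigns the rate; the files that keep lr as a graph variable leave the door open for runtime updates. A hypothetical sketch of how such a variable is typically driven in tensorpack (ScheduledHyperParamSetter is tensorpack's scheduling callback, and the schedule values here are illustrative assumptions, not part of this commit):

from tensorpack.callbacks import ScheduledHyperParamSetter

# Hypothetical schedule: the callback locates the graph variable named
# 'learning_rate' and assigns it a new value at the given epochs. This
# only works because _get_optimizer creates a real (non-trainable)
# variable instead of passing a Python float to the optimizer.
callbacks = [
    ScheduledHyperParamSetter('learning_rate',
                              [(1, 1e-3), (50, 1e-4), (100, 1e-5)]),
]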
examples/GAN/BEGAN.py

@@ -132,7 +132,7 @@ class Model(GANModelDesc):
         self.collect_variables()

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 1e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
         opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
         return opt
examples/GAN/ConditionalGAN-mnist.py

@@ -96,8 +96,7 @@ class Model(GANModelDesc):
         self.collect_variables()

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 2e-4, summary=True)
-        return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
+        return tf.train.AdamOptimizer(2e-4, beta1=0.5, epsilon=1e-3)


 def get_data():
examples/GAN/DiscoGAN-CelebA.py

@@ -159,8 +159,7 @@ class Model(GANModelDesc):
         add_moving_summary(recon_loss_A, recon_loss_B, rate, g_loss, d_loss, wd_g, wd_d)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
-        return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
+        return tf.train.AdamOptimizer(2e-4, beta1=0.5, epsilon=1e-3)


 def get_celebA_data(datadir, styleA, styleB=None):
examples/GAN/Image2Image.py

@@ -134,7 +134,7 @@ class Model(GANModelDesc):
         self.collect_variables()

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
         return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
examples/GAN/Improved-WGAN.py

@@ -83,8 +83,7 @@ class Model(DCGAN.Model):
         self.collect_variables()

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 1e-4, summary=True)
-        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
+        opt = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.9)
         return opt
examples/GAN/WGAN.py

@@ -36,8 +36,7 @@ class Model(DCGAN.Model):
         add_moving_summary(self.d_loss, self.g_loss)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 1e-4, summary=True)
-        opt = tf.train.RMSPropOptimizer(lr)
+        opt = tf.train.RMSPropOptimizer(1e-4)
         return opt

 # An alternative way to implement the clipping:
examples/Inception/inception-bn.py

@@ -123,7 +123,7 @@ class Model(ModelDesc):
         add_moving_summary(wd_cost, self.cost)

     def _get_optimizer(self):
-        lr = get_scalar_var('learning_rate', 0.045, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.045, trainable=False)
         return tf.train.MomentumOptimizer(lr, 0.9)
examples/Inception/inceptionv3.py

@@ -195,7 +195,7 @@ class Model(ModelDesc):
         add_moving_summary(loss1, loss2, wd_cost, self.cost)

     def _get_optimizer(self):
-        lr = get_scalar_var('learning_rate', 0.045, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.045, trainable=False)
         return tf.train.AdamOptimizer(lr, epsilon=1e-3)
examples/PennTreebank/PTB-LSTM.py

@@ -109,7 +109,7 @@ class Model(ModelDesc):
                       s[1].h.assign(z), name='reset_lstm_state')

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 1, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=1.0, trainable=False)
         opt = tf.train.GradientDescentOptimizer(lr)
         return optimizer.apply_grad_processors(
             opt, [gradproc.GlobalNormClip(5)])
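One detail in the PTB-LSTM hunk deserves a comment: the initial value changes from 1 to 1.0. tf.get_variable infers the variable's dtype from its initializer, so an integer literal would have produced an int32 learning rate; get_scalar_var presumably coerced the value to float internally, which is why the old call could pass a bare 1. A minimal TF1-style demonstration of the inference (variable names here are illustrative):

import tensorflow as tf

with tf.variable_scope('demo'):
    lr_int = tf.get_variable('lr_int', initializer=1)        # dtype inferred as int32
    lr_float = tf.get_variable('lr_float', initializer=1.0)  # dtype inferred as float32

print(lr_int.dtype)    # <dtype: 'int32_ref'>
print(lr_float.dtype)  # <dtype: 'float32_ref'>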
examples/ResNet/cifar10-resnet.py

@@ -116,7 +116,7 @@ class Model(ModelDesc):
         self.cost = tf.add_n([cost, wd_cost], name='cost')

     def _get_optimizer(self):
-        lr = get_scalar_var('learning_rate', 0.01, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.01, trainable=False)
         opt = tf.train.MomentumOptimizer(lr, 0.9)
         return opt
examples/Saliency/CAM-resnet.py

@@ -68,7 +68,7 @@ class Model(ModelDesc):
         self.cost = tf.add_n([loss, wd_cost], name='cost')

     def _get_optimizer(self):
-        lr = get_scalar_var('learning_rate', 0.1, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
         opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
         gradprocs = [gradproc.ScaleGradient(
             [('conv0.*', 0.1), ('group[0-2].*', 0.1)])]
examples/SimilarityLearning/mnist-embeddings.py

@@ -229,7 +229,7 @@ class EmbeddingModel(ModelDesc):
         return embeddings

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 1e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
         return tf.train.GradientDescentOptimizer(lr)
examples/SpatialTransformer/mnist-addition.py

@@ -90,7 +90,7 @@ class Model(ModelDesc):
         self.cost = tf.add_n([wd_cost, cost], name='cost')

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 5e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False)
         opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
         return optimizer.apply_grad_processors(
             opt, [
tensorpack/tfutils/symbolic_functions.py

@@ -145,7 +145,8 @@ def huber_loss(x, delta=1, name='huber_loss'):
     return tf.where(cond, l2, l1, name=name)


-# TODO remove this in the future
+# TODO deprecate this in the future
+# doesn't hurt to keep it here for now
 def get_scalar_var(name, init_value, summary=False, trainable=False):
     """
     Get a scalar float variable with certain initial value.
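The hunk above shows only the helper's signature and the first line of its docstring; the body is elided. For context, a plausible reconstruction of what a helper with this signature does (an assumption based on the signature and on what the commit replaces it with, not the verbatim tensorpack source):

import tensorflow as tf

def get_scalar_var(name, init_value, summary=False, trainable=False):
    """Get a scalar float variable with certain initial value."""
    # Assumed body: the same tf.get_variable call the commit now inlines,
    # with the initial value coerced to float.
    ret = tf.get_variable(name, initializer=float(init_value),
                          trainable=trainable)
    if summary:
        # Assumed: optionally mirror the variable into a scalar summary,
        # which the inlined tf.get_variable call no longer does.
        tf.summary.scalar(name + '-summary', ret)
    return ret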