Shashank Suhas / seminar-breakout · Commits

Commit fdc90767, authored Jan 08, 2017 by Yuxin Wu
fix use of deprecated TF functions.
parent 453b7c63
Showing 16 changed files with 26 additions and 24 deletions (+26 / -24)
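Most of the edits below are mechanical updates for the TensorFlow 1.0 API: tf.mul and tf.sub were renamed to tf.multiply and tf.subtract, and the *_cross_entropy_with_logits helpers switched to keyword arguments. A minimal before/after sketch of the rename (the tensors here are illustrative, not from the repo):

    import tensorflow as tf

    x = tf.constant([1.0, 2.0])
    w = tf.constant([0.5, 0.5])

    # Pre-1.0 spellings, removed in TF 1.0:
    #   y = tf.mul(x, w)
    #   d = tf.sub(x, w)
    # Their replacements, used throughout this commit:
    y = tf.multiply(x, w)
    d = tf.subtract(x, w)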
examples/DisturbLabel/mnist-disturb.py          +2  -2
examples/DoReFa-Net/resnet-dorefa.py            +1  -1
examples/GAN/GAN.py                             +3  -3
examples/GAN/InfoGAN-mnist.py                   +1  -1
examples/HED/hed.py                             +1  -1
examples/Inception/inception-bn.py              +1  -1
examples/Inception/inceptionv3.py               +1  -1
examples/OpenAIGym/train-atari.py               +1  -1
examples/ResNet/cifar10-resnet.py               +1  -1
examples/ResNet/imagenet-resnet.py              +1  -1
examples/SpatialTransformer/mnist-addition.py   +2  -2
examples/cifar-convnet.py                       +1  -3
examples/mnist-convnet.py                       +3  -3
tensorpack/dataflow/dataset/__init__.py         +5  -1
tensorpack/models/nonlin.py                     +1  -1
tensorpack/tfutils/symbolic_functions.py        +1  -1
examples/DisturbLabel/mnist-disturb.py

@@ -49,7 +49,7 @@ class Model(mnist_example.Model):
         cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
         cost = tf.reduce_mean(cost, name='cross_entropy_loss')
-        wd_cost = tf.mul(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
-                         name='regularize_loss')
+        wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
+                              name='regularize_loss')
         self.cost = tf.add_n([wd_cost, cost], name='cost')
examples/DoReFa-Net/resnet-dorefa.py

@@ -107,7 +107,7 @@ class Model(ModelDesc):
                       .BatchNorm('lastbn')
                       .apply(nonlin)
                       .GlobalAvgPooling('gap')
-                      .tf.mul(49)  # this is due to a bug in our model design
+                      .tf.multiply(49)  # this is due to a bug in our model design
                       .FullyConnected('fct', 1000)())
         prob = tf.nn.softmax(logits, name='output')
         wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
examples/GAN/GAN.py

@@ -59,15 +59,15 @@ def build_GAN_losses(vecpos, vecneg):
     tf.summary.histogram('sigmoid-neg', sigmneg)
     d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
-        vecpos, tf.ones_like(vecpos)), name='d_CE_loss_pos')
+        logits=vecpos, labels=tf.ones_like(vecpos)), name='d_CE_loss_pos')
     d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
-        vecneg, tf.zeros_like(vecneg)), name='d_CE_loss_neg')
+        logits=vecneg, labels=tf.zeros_like(vecneg)), name='d_CE_loss_neg')
     d_pos_acc = tf.reduce_mean(tf.cast(sigmpos > 0.5, tf.float32), name='pos_acc')
     d_neg_acc = tf.reduce_mean(tf.cast(sigmneg < 0.5, tf.float32), name='neg_acc')
     g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
-        vecneg, tf.ones_like(vecneg)), name='g_CE_loss')
+        logits=vecneg, labels=tf.ones_like(vecneg)), name='g_CE_loss')
     d_loss = tf.add(d_loss_pos, d_loss_neg, name='d_CE_loss')
     add_moving_summary(d_loss_pos, d_loss_neg, g_loss, d_loss,
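The GAN.py change is more than a rename: TensorFlow 1.0 made the arguments of tf.nn.sigmoid_cross_entropy_with_logits keyword-only and reordered labels/logits relative to 0.x, so the old positional calls had to be rewritten. A standalone sketch of the updated call (the logits values are made up):

    import tensorflow as tf

    vec = tf.constant([0.3, -1.2, 2.0])  # discriminator logits, illustrative
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=vec, labels=tf.ones_like(vec)))  # keywords required in TF 1.0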
examples/GAN/InfoGAN-mnist.py

@@ -81,7 +81,7 @@ class Model(ModelDesc):
         log_qc = tf.reduce_sum(prior_prob * zc, 1, name='logQc')
         Elog_qc_given_x = tf.reduce_mean(log_qc_given_x, name='ElogQc_x')
         Hc = tf.reduce_mean(-log_qc, name='Hc')
-        MIloss = tf.mul(Hc + Elog_qc_given_x, -1.0, name='neg_MI')
+        MIloss = tf.multiply(Hc + Elog_qc_given_x, -1.0, name='neg_MI')
         self.g_loss, self.d_loss = build_GAN_losses(vecpos, vecneg)
         self.g_loss = tf.add(self.g_loss, MIloss, name='total_g_loss')
examples/HED/hed.py

@@ -86,7 +86,7 @@ class Model(ModelDesc):
         if get_current_tower_context().is_training:
             wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(), 80000, 0.7, True)
-            wd_cost = tf.mul(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
+            wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
             costs.append(wd_cost)
             add_param_summary(('.*/W', ['histogram']))   # monitor W
examples/Inception/inception-bn.py

@@ -113,7 +113,7 @@ class Model(ModelDesc):
         # weight decay on all W of fc layers
         wd_w = tf.train.exponential_decay(0.0002, get_global_step_var(), 80000, 0.7, True)
-        wd_cost = tf.mul(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
+        wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
         add_param_summary(('.*/W', ['histogram']))   # monitor W
         self.cost = tf.add_n([cost, wd_cost], name='cost')
examples/Inception/inceptionv3.py

@@ -192,7 +192,7 @@ class Model(ModelDesc):
         # weight decay on all W of fc layers
         wd_w = tf.train.exponential_decay(0.00004, get_global_step_var(), 80000, 0.7, True)
-        wd_cost = tf.mul(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
+        wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
         self.cost = tf.add_n([0.4 * loss1, loss2, wd_cost], name='cost')
         add_moving_summary(loss1, loss2, wd_cost, self.cost)
examples/OpenAIGym/train-atari.py

@@ -115,7 +115,7 @@ class Model(ModelDesc):
         log_pi_a_given_s = tf.reduce_sum(log_probs * tf.one_hot(action, NUM_ACTIONS), 1)
-        advantage = tf.sub(tf.stop_gradient(self.value), futurereward, name='advantage')
+        advantage = tf.subtract(tf.stop_gradient(self.value), futurereward, name='advantage')
         policy_loss = tf.reduce_sum(log_pi_a_given_s * advantage, name='policy_loss')
         xentropy_loss = tf.reduce_sum(self.logits * log_probs, name='xentropy_loss')
examples/ResNet/cifar10-resnet.py

@@ -100,7 +100,7 @@ class Model(ModelDesc):
         # weight decay on all W of fc layers
         wd_w = tf.train.exponential_decay(0.0002, get_global_step_var(), 480000, 0.2, True)
-        wd_cost = tf.mul(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
+        wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
         add_moving_summary(cost, wd_cost)
         add_param_summary(('.*/W', ['histogram']))   # monitor W
examples/ResNet/imagenet-resnet.py

@@ -111,7 +111,7 @@ class Model(ModelDesc):
         wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
         add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
-        wd_cost = tf.mul(1e-4, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
+        wd_cost = regularize_cost('.*/W', l2_regularizer(1e-4), name='l2_regularize_loss')
         add_moving_summary(loss, wd_cost)
         self.cost = tf.add_n([loss, wd_cost], name='cost')
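Here the decay coefficient is folded into the regularizer instead of an explicit multiply, which removes the deprecated tf.mul call entirely. Assuming l2_regularizer here is tf.contrib.layers.l2_regularizer (the import is not shown in this hunk), the two forms compute the same value, since l2_regularizer(scale)(W) is scale * tf.nn.l2_loss(W):

    import tensorflow as tf
    from tensorflow.contrib.layers import l2_regularizer  # assumed source of l2_regularizer

    W = tf.get_variable('W', shape=[3, 3])
    old = tf.multiply(1e-4, tf.nn.l2_loss(W))  # old pattern, spelled with the new op name
    new = l2_regularizer(1e-4)(W)              # equivalent: 1e-4 * tf.nn.l2_loss(W)

The same rewrite, with coefficient 4e-4, appears in examples/cifar-convnet.py below.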
examples/SpatialTransformer/mnist-addition.py

@@ -81,7 +81,7 @@ class Model(ModelDesc):
         wrong = symbolic_functions.prediction_incorrect(logits, label)
         summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
-        wd_cost = tf.mul(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
-                         name='regularize_loss')
+        wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
+                              name='regularize_loss')
         summary.add_moving_summary(cost, wd_cost)
         self.cost = tf.add_n([wd_cost, cost], name='cost')
examples/cifar-convnet.py

@@ -66,9 +66,7 @@ class Model(ModelDesc):
         add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
         # weight decay on all W of fc layers
-        wd_cost = tf.mul(0.0004,
-                         regularize_cost('fc.*/W', tf.nn.l2_loss),
-                         name='regularize_loss')
+        wd_cost = regularize_cost('fc.*/W', l2_regularizer(4e-4), name='regularize_loss')
         add_moving_summary(cost, wd_cost)
         add_param_summary(('.*/W', ['histogram']))   # monitor W
examples/mnist-convnet.py

@@ -96,7 +96,7 @@ class Model(ModelDesc):
         if not USE_SLIM:
             # Use a regex to find parameters to apply weight decay.
             # Here we apply a weight decay on all W (weight matrix) of all fc layers
-            wd_cost = tf.mul(1e-5,
-                             regularize_cost('fc.*/W', tf.nn.l2_loss),
-                             name='regularize_loss')
+            wd_cost = tf.multiply(1e-5,
+                                  regularize_cost('fc.*/W', tf.nn.l2_loss),
+                                  name='regularize_loss')
             self.cost = tf.add_n([wd_cost, cost], name='total_cost')
tensorpack/dataflow/dataset/__init__.py

@@ -18,7 +18,11 @@ def global_import(name):
             __all__.append(k)

+_CURR_DIR = os.path.dirname(__file__)
 for _, module_name, _ in walk_packages(
-        [os.path.dirname(__file__)]):
+        [_CURR_DIR]):
+    srcpath = os.path.join(_CURR_DIR, module_name + '.py')
+    if not os.path.isfile(srcpath):
+        continue
     if not module_name.startswith('_'):
         global_import(module_name)
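A plausible reading of this change: walk_packages() can yield module names that have no matching .py source next to this __init__.py (stale compiled files, nested packages), and such entries are now skipped before global_import() runs; hoisting os.path.dirname(__file__) into _CURR_DIR simply avoids computing the directory twice.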
tensorpack/models/nonlin.py

@@ -50,7 +50,7 @@ def PReLU(x, init=0.001, name='output'):
     init = tf.constant_initializer(init)
     alpha = tf.get_variable('alpha', [], initializer=init)
     x = ((1 + alpha) * x + (1 - alpha) * tf.abs(x))
-    return tf.mul(x, 0.5, name=name)
+    return tf.multiply(x, 0.5, name=name)


 @layer_register(use_scope=False, log_shape=False)
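For reference, the halved expression is the standard PReLU identity: 0.5 * ((1 + alpha) * x + (1 - alpha) * abs(x)) reduces to x when x >= 0 (where abs(x) = x) and to alpha * x when x < 0 (where abs(x) = -x), i.e. max(x, 0) + alpha * min(x, 0). A quick numeric check in plain Python (values arbitrary):

    alpha = 0.001
    for x in (2.0, -2.0):
        prelu = 0.5 * ((1 + alpha) * x + (1 - alpha) * abs(x))
        assert abs(prelu - (x if x >= 0 else alpha * x)) < 1e-9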
tensorpack/tfutils/symbolic_functions.py

@@ -59,7 +59,7 @@ def class_balanced_cross_entropy(pred, label, name='cross_entropy_loss'):
     eps = 1e-12
     loss_pos = -beta * tf.reduce_mean(y * tf.log(z + eps))
     loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(1. - z + eps))
-    cost = tf.sub(loss_pos, loss_neg, name=name)
+    cost = tf.subtract(loss_pos, loss_neg, name=name)
     return cost
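Written out from the definitions above, the subtraction yields the usual class-balanced cross-entropy: cost = -beta * mean(y * log(z + eps)) - (1 - beta) * mean((1 - y) * log(1 - z + eps)); tf.subtract is simply the TF 1.0 spelling of that final minus, with the op name attached to the result.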