seminar-breakout · Shashank Suhas · Commits

Commit 62ea40c8, authored Jul 11, 2020 by Yuxin Wu

    v1 compat in optimizers

parent 7dacad08
Showing 2 changed files with 8 additions and 8 deletions:

    tensorpack/graph_builder/training.py    +1 / -1
    tensorpack/tfutils/optimizer.py         +7 / -7
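All eight changes follow one pattern: graph-construction symbols that TensorFlow 2 moved under tf.compat.v1 are now called through a tfv1 alias instead of tf directly. The alias itself is not part of this diff; a minimal sketch of what the changed files presumably rely on (tensorpack binds it in its own compat helpers elsewhere):

    # Hypothetical minimal equivalent of the alias assumed by the changed code;
    # the real binding lives elsewhere in tensorpack and is not shown in this diff.
    import tensorflow as tf

    try:
        tfv1 = tf.compat.v1   # available in TF 1.13+ and all of TF 2.x
    except AttributeError:
        tfv1 = tf             # older TF 1.x: tf itself is the v1 API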
tensorpack/graph_builder/training.py

@@ -32,7 +32,7 @@ class GraphBuilder(object):
 @contextmanager
 def _maybe_reuse_vs(reuse):
     if reuse:
-        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
+        with tfv1.variable_scope(tfv1.get_variable_scope(), reuse=True):
             yield
     else:
         yield
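For reference, _maybe_reuse_vs re-enters the current variable scope with reuse=True, so that variables requested again via get_variable are shared rather than recreated. A standalone sketch of that graph-mode behaviour (the scope name 'demo' is illustrative only):

    import tensorflow as tf
    tfv1 = tf.compat.v1
    tfv1.disable_eager_execution()   # variable scopes require graph mode in TF2

    with tfv1.variable_scope('demo'):
        w1 = tfv1.get_variable('w', shape=[3])

    # Re-entering the scope with reuse=True returns the existing variable.
    with tfv1.variable_scope('demo', reuse=True):
        w2 = tfv1.get_variable('w', shape=[3])

    assert w1 is w2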
tensorpack/tfutils/optimizer.py

@@ -134,7 +134,7 @@ class VariableAssignmentOptimizer(PostProcessOptimizer):
             t = func(v)
             if t is None:
                 return t
-            return tf.assign(v, t, use_locking=False).op
+            return tfv1.assign(v, t, use_locking=False).op
         super(VariableAssignmentOptimizer, self).__init__(opt, f)
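The changed line is the post-processing hook of VariableAssignmentOptimizer: it assigns a transformed value back to the variable and returns the underlying Operation via .op, so the caller can group it into the train op. A standalone sketch of that pattern, with an illustrative clipping function standing in for func:

    import tensorflow as tf
    tfv1 = tf.compat.v1
    tfv1.disable_eager_execution()

    v = tfv1.get_variable('v_demo', shape=[2], initializer=tf.ones_initializer())
    t = tf.clip_by_value(v, 0.0, 0.5)                     # stand-in for func(v)
    assign_op = tfv1.assign(v, t, use_locking=False).op   # the Operation, not the Tensor

    with tfv1.Session() as sess:
        sess.run(tfv1.global_variables_initializer())
        sess.run(assign_op)
        print(sess.run(v))   # [0.5 0.5]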
@@ -189,7 +189,7 @@ class AccumGradOptimizer(ProxyOptimizer):
         slots = self._create_accum_slots(vs)
         slots_and_vars = [(s, gv[1]) for s, gv in zip(slots, grads_and_vars)]
-        with tf.variable_scope(self._name), tf.device('/cpu:0'):
+        with tfv1.variable_scope(self._name), tf.device('/cpu:0'):
             counter = tf.Variable(0, name="counter", trainable=False, dtype=tf.int32)
@@ -198,16 +198,16 @@ class AccumGradOptimizer(ProxyOptimizer):
         for s, gv in zip(slots, grads_and_vars):
             g, v = gv
             ops.append(s.assign_add(g))
-        update_counter = tf.assign_add(counter, 1, name='update_counter')
+        update_counter = tfv1.assign_add(counter, 1, name='update_counter')
         update_slot_op = tf.group(update_counter, *ops, name='update_slot')

         def update_grad():
             update_op = self._opt.apply_gradients(slots_and_vars)
             with tf.control_dependencies([update_op]):
-                clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
+                clear_ops = [tfv1.assign(s, tf.zeros_like(s)) for s in slots]
             return tf.group(*clear_ops, name='update_grad')

-        pred = tf.equal(tf.mod(counter, self._niter), 0)
+        pred = tf.equal(tfv1.mod(counter, self._niter), 0)
         with tf.control_dependencies([update_slot_op]):
             if name is None:
                 name = 'cond_update_grad'
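This hunk is the heart of AccumGradOptimizer: every call adds the current gradients into the slot variables and bumps a counter, and pred is true only on every _niter-th step, gating the real weight update. A standalone numeric sketch of that gating condition (names such as counter_demo are illustrative):

    import tensorflow as tf
    tfv1 = tf.compat.v1
    tfv1.disable_eager_execution()

    niter = 5
    counter = tf.Variable(0, name='counter_demo', trainable=False, dtype=tf.int32)
    update_counter = tfv1.assign_add(counter, 1, name='update_counter_demo')
    with tf.control_dependencies([update_counter]):
        # True on calls 5, 10, 15, ... -- when accumulated grads should be applied.
        pred = tf.equal(tfv1.mod(counter, niter), 0)

    with tfv1.Session() as sess:
        sess.run(tfv1.global_variables_initializer())
        print([bool(sess.run(pred)) for _ in range(6)])
        # [False, False, False, False, True, False]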
@@ -217,7 +217,7 @@ class AccumGradOptimizer(ProxyOptimizer):
             # Tensorpack maintains global_step by other means,
             # so this option is useless in tensorpack trainers.
             # But we include the implementation here for completeness
-            global_step_increment = tf.assign_add(global_step, 1)
+            global_step_increment = tfv1.assign_add(global_step, 1)
             op = tf.group(op, global_step_increment, name=name)
         else:
             op = tf.identity(op, name=name).op
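The comment explains why this branch exists at all: tensorpack updates global_step by other means, so incrementing it here is only for API completeness. The v1 idiom being preserved looks like this in isolation:

    import tensorflow as tf
    tfv1 = tf.compat.v1
    tfv1.disable_eager_execution()

    global_step = tfv1.train.get_or_create_global_step()
    increment = tfv1.assign_add(global_step, 1)   # in-place increment, v1 style

    with tfv1.Session() as sess:
        sess.run(tfv1.global_variables_initializer())
        sess.run(increment)
        print(sess.run(global_step))   # 1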
@@ -227,7 +227,7 @@ class AccumGradOptimizer(ProxyOptimizer):

 if __name__ == '__main__':
     # run it with "python -m tensorpack.tfutils.optimizer"
-    x = tf.get_variable('x', shape=[6])
+    x = tfv1.get_variable('x', shape=[6])
     cost = tf.reduce_sum(tf.abs(x), name='cost')
     opt = tf.train.GradientDescentOptimizer(0.01)
     opt = AccumGradOptimizer(opt, 5)
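Taken together, the self-test at the bottom of the file shows the intended usage: wrap a base optimizer so gradients are accumulated for 5 steps before one real update. A hedged, runnable expansion of that snippet, assuming tensorpack is installed (the training loop below is my own addition, not part of the file):

    import tensorflow as tf
    tfv1 = tf.compat.v1
    tfv1.disable_eager_execution()

    from tensorpack.tfutils.optimizer import AccumGradOptimizer

    x = tfv1.get_variable('x', shape=[6])
    cost = tf.reduce_sum(tf.abs(x), name='cost')
    opt = tfv1.train.GradientDescentOptimizer(0.01)
    opt = AccumGradOptimizer(opt, 5)          # one real update every 5 steps
    min_op = opt.minimize(cost)

    with tfv1.Session() as sess:
        sess.run(tfv1.global_variables_initializer())
        for _ in range(10):
            sess.run(min_op)                  # x only changes on steps 5 and 10
        print(sess.run(x))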