Commit 05ae7b5d authored Jun 20, 2017 by Yuxin Wu
gradprocessor reuse name scope
parent e7aaaf13
Showing 2 changed files with 15 additions and 2 deletions (+15 -2):

    tensorpack/tfutils/gradproc.py    +14  -2
    tensorpack/train/multigpu.py       +1  -0
tensorpack/tfutils/gradproc.py

@@ -23,6 +23,8 @@ class GradientProcessor(object):
     Subclass should override the ``_process()`` method.
     """
+    _name_scope = None
+
     def process(self, grads):
         """
         Process the symbolic gradients.
@@ -32,8 +34,16 @@ class GradientProcessor(object):
         Returns:
             list: processed gradients, with the same type as input.
         """
-        with tf.name_scope(type(self).__name__):
-            return self._process(grads)
+        # reuse the old name_scope, if process() is called multiple times
+        if self._name_scope is None:
+            with tf.name_scope(type(self).__name__) as scope:
+                self._name_scope = scope
+                return self._process(grads)
+        else:
+            with tf.name_scope(self._name_scope):
+                return self._process(grads)

     @abstractmethod
     def _process(self, grads):
@@ -67,6 +77,7 @@ class GlobalNormClip(GradientProcessor):
         Args:
             global_norm(float): the threshold to clip with.
         """
+        super(GlobalNormClip, self).__init__()
         self._norm = float(global_norm)

     def _process(self, grads):
@@ -101,6 +112,7 @@ class MapGradient(GradientProcessor):
         if not regex.endswith('$'):
             regex = regex + '$'
         self.regex = regex
+        super(MapGradient, self).__init__()

     def _process(self, grads):
         ret = []
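The reuse trick above rests on how tf.name_scope behaved in TF1 graph mode: entering a scope by plain name gets uniquified on every call ("GradientProcessor", "GradientProcessor_1", ...), while re-entering with the captured scope string (which ends in '/') reuses the original scope verbatim. A minimal sketch of that behavior, using an illustrative scope name rather than anything taken from this commit:

import tensorflow as tf   # TF1-style graph API, as tensorpack used at the time

with tf.name_scope("GlobalNormClip") as scope:
    a = tf.constant(1.0, name="a")
print(scope)        # "GlobalNormClip/" -- captured scope strings end with '/'
print(a.op.name)    # "GlobalNormClip/a"

with tf.name_scope("GlobalNormClip"):   # a plain name is uniquified again
    b = tf.constant(1.0, name="b")
print(b.op.name)    # "GlobalNormClip_1/b"

with tf.name_scope(scope):              # the captured string reuses the old scope
    c = tf.constant(1.0, name="c")
print(c.op.name)    # "GlobalNormClip/c"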
tensorpack/train/multigpu.py

@@ -330,6 +330,7 @@ class AsyncMultiGPUTrainer(MultiGPUTrainerBase, SingleCostFeedfreeTrainer):
             # Ngpu x 2
             v = grad_and_vars[0][1]
             with tf.device(v.device):
+                # will call apply_gradients (therefore gradproc) multiple times
                 train_ops.append(opt.apply_gradients(
                     grad_and_vars, name='apply_grad_{}'.format(i)))
         self.train_op = tf.group(*train_ops, name='train_op')
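The added comment states the motivation for the gradproc.py change: AsyncMultiGPUTrainer calls opt.apply_gradients() once per GPU tower, so a GradientProcessor attached to the optimizer has its process() invoked several times on the same graph. A hedged usage sketch of the effect, with two hand-built gradient lists standing in for per-tower gradients (illustrative values, not tensorpack internals):

import tensorflow as tf
from tensorpack.tfutils.gradproc import GlobalNormClip

v = tf.Variable(1.0, name="w")
proc = GlobalNormClip(5.0)   # clip threshold, per the Args doc in the diff

# one process() call per tower, mimicking the loop in AsyncMultiGPUTrainer
for tower_grads in [[(tf.constant(3.0), v)], [(tf.constant(4.0), v)]]:
    (g, _), = proc.process(tower_grads)
    print(g.op.name)
# Before this commit the second call created a fresh "GlobalNormClip_1/" scope;
# after it, ops from every call stay under the single "GlobalNormClip/" scope.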