Commit 6b85a1f1, authored Nov 30, 2016 by Yuxin Wu

    global norm clip

Parent: 68cb6994

Showing 4 changed files with 19 additions and 4 deletions:

    examples/char-rnn/char-rnn.py     +1   -2
    tensorpack/tfutils/gradproc.py    +16  -1
    tensorpack/train/base.py          +1   -0
    tensorpack/train/trainer.py       +1   -1

examples/char-rnn/char-rnn.py
@@ -90,8 +90,7 @@ class Model(ModelDesc):
         summary.add_param_summary([('.*/W', ['histogram'])])  # monitor histogram of all W
 
     def get_gradient_processor(self):
-        return [MapGradient(lambda grad: tf.clip_by_global_norm(
-            [grad], param.grad_clip)[0][0])]
+        return [GlobalNormClip(5)]
 
 def get_config():
     logger.auto_set_dir()
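
The effect of this change: the old lambda passed a single-element list to tf.clip_by_global_norm, so each gradient was clipped against its own norm, whereas the new GlobalNormClip(5) clips every gradient against one shared norm. A small NumPy sketch of the two rules for comparison; the helper names and sample values below are illustrative only and not part of the commit:

import numpy as np

def per_tensor_clip(grads, clip_norm):
    # old behaviour: each gradient is rescaled against its own norm
    return [g * clip_norm / max(np.linalg.norm(g), clip_norm) for g in grads]

def global_norm_clip(grads, clip_norm):
    # new behaviour: one norm over *all* gradients, everything rescaled together
    global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    scale = clip_norm / max(global_norm, clip_norm)
    return [g * scale for g in grads]

grads = [np.array([3.0, 4.0]), np.array([12.0])]   # norms 5 and 12
print(per_tensor_clip(grads, 5.0))    # only the second gradient shrinks (to norm 5)
print(global_norm_clip(grads, 5.0))   # global norm is 13, both shrink by 5/13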

tensorpack/tfutils/gradproc.py
@@ -12,7 +12,8 @@ from .symbolic_functions import rms
 from .summary import add_moving_summary
 
 __all__ = ['GradientProcessor', 'SummaryGradient', 'CheckGradient',
-           'ScaleGradient', 'MapGradient', 'apply_grad_processors']
+           'ScaleGradient', 'MapGradient', 'apply_grad_processors',
+           'GlobalNormClip']
 
 def apply_grad_processors(grads, gradprocs):
     """
@@ -47,6 +48,20 @@ class GradientProcessor(object):
     def _process(self, grads):
         pass
 
+class GlobalNormClip(GradientProcessor):
+    def __init__(self, global_norm):
+        """ Clip by global norm
+            Note that the global norm is the sum of norm for **all** gradients
+        """
+        self._norm = global_norm
+
+    def _process(self, grads):
+        g = [k[0] for k in grads]
+        v = [k[1] for k in grads]
+        g, _ = tf.clip_by_global_norm(g, self._norm, name='clip_by_global_norm')
+        return list(zip(g, v))
+
+
 class MapGradient(GradientProcessor):
     """
     Apply a function on all gradient if the name matches regex.
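
GlobalNormClip._process above simply unzips the (gradient, variable) pairs, hands the gradients to tf.clip_by_global_norm, and zips them back together. The op itself can be exercised directly; a minimal sketch under the graph-mode TensorFlow 1.x API of that era, with constant tensors standing in for real gradients (values chosen to match the NumPy sketch above):

import tensorflow as tf

g = [tf.constant([3.0, 4.0]), tf.constant([12.0])]      # stand-ins for gradients
clipped, global_norm = tf.clip_by_global_norm(g, 5.0)    # one norm over all tensors
with tf.Session() as sess:
    print(sess.run(global_norm))   # 13.0
    print(sess.run(clipped))       # every tensor scaled by 5/13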

tensorpack/train/base.py
@@ -98,6 +98,7 @@ class Trainer(object):
     def setup(self):
         self._setup()
         describe_model()
+        get_global_step_var()
         # some final operations that might modify the graph
         logger.info("Setup callbacks ...")
         self.config.callbacks.setup_graph(weakref.proxy(self))
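
This one-line addition to Trainer.setup() creates the global-step variable right after the model is set up and before the callbacks' setup_graph runs, presumably so that graph-modifying callbacks can already refer to it. For illustration only, a hypothetical get-or-create helper in the spirit of such a function; this is not tensorpack's actual get_global_step_var implementation:

import tensorflow as tf

_global_step = None   # module-level cache (hypothetical)

def get_global_step_var_sketch():
    """Create the global-step variable on first call, return the cached one afterwards."""
    global _global_step
    if _global_step is None:
        _global_step = tf.Variable(0, trainable=False, name='global_step')
    return _global_step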

tensorpack/train/trainer.py
@@ -126,7 +126,7 @@ class FeedlessTrainer(Trainer):
        Always return new tensors (for multi tower) if called mutliple times.
        """
 
-class SingleCostFeedlessTrainer(Trainer):
+class SingleCostFeedlessTrainer(FeedlessTrainer):
     def _get_cost_and_grad(self):
         """ get the cost and gradient on a new tower"""
         actual_inputs = self._get_input_tensors_noreuse()