Shashank Suhas / seminar-breakout / Commits

Commit 3f238a01
authored Feb 15, 2017 by Yuxin Wu
parent ebdcf469

    trigger rtfd (https://github.com/rtfd/readthedocs.org/issues/2640)

Showing 2 changed files with 19 additions and 8 deletions:

    tensorpack/callbacks/stats.py    +1  -1
    tensorpack/tfutils/gradproc.py   +18 -7

tensorpack/callbacks/stats.py @ 3f238a01
@@ -172,7 +172,7 @@ class SendStat(Triggerable):
             stats = [stats]
         self.stats = stats
 
-    def _trigger_epoch(self):
+    def _trigger(self):
         holder = self.trainer.stat_holder
         v = {k: holder.get_stat_now(k) for k in self.stats}
         cmd = self.command.format(**v)
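The hunk above retires SendStat's epoch-bound `_trigger_epoch` hook in favor of the generic `_trigger` hook. A minimal sketch of that callback pattern; `Triggerable` here is a simplified stand-in rather than tensorpack's actual base class, and the dummy statistic values are made up so the sketch runs:

# Sketch of the hook pattern (assumption: simplified stand-in for
# tensorpack's Triggerable; the real framework decides *when* trigger()
# fires, e.g. every epoch or every N steps).
class Triggerable(object):
    def trigger(self):
        self._trigger()

    def _trigger(self):
        pass


class SendStat(Triggerable):
    def __init__(self, command, stats):
        if not isinstance(stats, list):
            stats = [stats]
        self.stats = stats
        self.command = command

    def _trigger(self):
        # The real callback formats a shell command with current training
        # statistics; dummy values are used here so the sketch runs.
        print(self.command.format(**{k: 0.0 for k in self.stats}))


SendStat("send {loss}", "loss").trigger()  # prints: send 0.0

Overriding `_trigger` instead of `_trigger_epoch` leaves the firing schedule to the framework, which is presumably the point of the rename.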
tensorpack/tfutils/gradproc.py @ 3f238a01
@@ -12,7 +12,7 @@ from ..utils import logger
 from .symbolic_functions import rms
 from .summary import add_moving_summary
 
-__all__ = ['GradientProcessor', 'GlobalNormClip', 'MapGradient', 'SummaryGradient', 'CheckGradient',
+__all__ = ['GradientProcessor', 'FilterNoneGrad', 'GlobalNormClip', 'MapGradient', 'SummaryGradient', 'CheckGradient',
            'ScaleGradient', 'apply_grad_processors']
@@ -24,12 +24,8 @@ def apply_grad_processors(grads, gradprocs):
     Returns:
         list: list of (grad, var) went through the processors.
     """
-    g = []
-    for grad, var in grads:
-        if grad is None:
-            logger.warn("No Gradient w.r.t {}".format(var.op.name))
-        else:
-            g.append((grad, var))
+    gradprocs.insert(0, FilterNoneGrad())
+    g = grads
     for proc in gradprocs:
         g = proc.process(g)
     return g
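This hunk is behavior-preserving: the inline `None`-filtering loop becomes a `FilterNoneGrad` processor prepended to the chain, so it still runs before any other processor sees the gradients. The ordering matters, because a downstream processor doing arithmetic on each gradient would choke on a `None` entry; a toy illustration, with plain floats standing in for tensors:

# Why the None-filter must run first: arithmetic on None raises TypeError.
grads = [(1.0, "W"), (None, "b")]
try:
    scaled = [(0.5 * g, v) for g, v in grads]  # 0.5 * None
except TypeError as e:
    print("fails without filtering:", e)

One side effect worth noting: `gradprocs.insert(0, ...)` mutates the caller's processor list in place rather than copying it.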
@@ -58,6 +54,21 @@ class GradientProcessor(object):
     pass
 
 
+class FilterNoneGrad(GradientProcessor):
+    """
+    Skip the update and print a warning (instead of crashing),
+    when the gradient of certain variable is None.
+    """
+    def _process(self, grads):
+        g = []
+        for grad, var in grads:
+            if grad is None:
+                logger.warn("No Gradient w.r.t {}".format(var.op.name))
+            else:
+                g.append((grad, var))
+        return g
+
+
 class GlobalNormClip(GradientProcessor):
     """ Clip by global norm.
     The global norm is the sum of norm for **all** gradients.
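Putting the pieces together, here is a self-contained sketch of the mechanism after this commit. Plain floats stand in for TensorFlow tensors, `GradientProcessor.process` is reduced to a bare stub (the real one also opens a TF name scope), and `ScaleAll` is a made-up toy processor, not tensorpack's actual `ScaleGradient`:

import logging

logging.basicConfig()
logger = logging.getLogger("gradproc-sketch")


class GradientProcessor(object):
    # Bare stub: the real base class wraps _process in a TF name scope.
    def process(self, grads):
        return self._process(grads)

    def _process(self, grads):
        raise NotImplementedError


class FilterNoneGrad(GradientProcessor):
    """Drop (grad, var) pairs whose gradient is None, with a warning."""
    def _process(self, grads):
        kept = []
        for grad, var in grads:
            if grad is None:
                logger.warning("No gradient w.r.t %s", var)
            else:
                kept.append((grad, var))
        return kept


class ScaleAll(GradientProcessor):
    """Made-up toy processor: scales every gradient by a constant."""
    def __init__(self, multiplier):
        self.multiplier = multiplier

    def _process(self, grads):
        return [(self.multiplier * grad, var) for grad, var in grads]


def apply_grad_processors(grads, gradprocs):
    gradprocs.insert(0, FilterNoneGrad())  # None-filtering always runs first
    g = grads
    for proc in gradprocs:
        g = proc.process(g)
    return g


# Toy data: floats stand in for tensors, strings for variables.
grads = [(1.0, "W"), (None, "b"), (0.5, "scale")]
print(apply_grad_processors(grads, [ScaleAll(2.0)]))
# -> [(2.0, 'W'), (1.0, 'scale')]  (the None gradient for 'b' was dropped)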