seminar-breakout / Commits

Commit 3898d354 authored May 29, 2017 by Yuxin Wu
add some small assertions

Parent: b48cd060
Showing 3 changed files with 6 additions and 3 deletions:
tensorpack/tfutils/gradproc.py    +2 -1
tensorpack/tfutils/optimizer.py   +2 -0
tensorpack/train/multigpu.py      +2 -2
tensorpack/tfutils/gradproc.py

@@ -67,7 +67,7 @@ class GlobalNormClip(GradientProcessor):
         Args:
             global_norm(float): the threshold to clip with.
         """
-        self._norm = global_norm
+        self._norm = float(global_norm)
 
     def _process(self, grads):
         g = [k[0] for k in grads]
@@ -176,6 +176,7 @@ class ScaleGradient(MapGradient):
         if not isinstance(multipliers, list):
             multipliers = [multipliers]
         self.multipliers = multipliers
+        assert log in [True, False], log
         self._log = log
         super(ScaleGradient, self).__init__(self._mapper)
 
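Both hunks tighten `__init__` argument handling: the clip threshold is coerced to float, and `log` must be a genuine bool. A quick usage sketch (the regex and scale values below are made up for illustration; the signatures follow the hunks above):

    from tensorpack.tfutils.gradproc import GlobalNormClip, ScaleGradient

    # The threshold is now coerced, so passing an int is safe:
    clip = GlobalNormClip(5)          # self._norm is stored as 5.0

    # A single (regex, multiplier) pair is wrapped into a list by __init__;
    # `log` must now be exactly True or False:
    scale = ScaleGradient(('fc.*/W', 0.1), log=True)      # OK
    # ScaleGradient(('fc.*/W', 0.1), log='yes')  ->  AssertionError: yes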
tensorpack/tfutils/optimizer.py

@@ -16,6 +16,7 @@ class ProxyOptimizer(tf.train.Optimizer):
     A transparent proxy which delegates all methods of :class:`tf.train.Optimizer`
     """
     def __init__(self, opt, name='ProxyOptimizer'):
+        assert isinstance(opt, tf.train.Optimizer), opt
         super(ProxyOptimizer, self).__init__(False, name)
         self._opt = opt
 
@@ -44,6 +45,7 @@ def apply_grad_processors(opt, gradprocs):
         a :class:`tf.train.Optimizer` instance which runs the gradient
         processors before updating the variables.
     """
+    assert isinstance(gradprocs, (list, tuple)), gradprocs
 
     class _ApplyGradientProcessor(ProxyOptimizer):
         def __init__(self, opt, gradprocs):
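Together with `ProxyOptimizer`'s new type check, the `apply_grad_processors` assertion makes misuse fail at construction time rather than deep inside graph building. A minimal sketch of the intended call pattern (the optimizer choice and clip threshold here are arbitrary examples):

    import tensorflow as tf
    from tensorpack.tfutils.gradproc import GlobalNormClip
    from tensorpack.tfutils.optimizer import apply_grad_processors

    opt = tf.train.AdamOptimizer(1e-3)

    # gradprocs must be a list or tuple of processors:
    opt = apply_grad_processors(opt, [GlobalNormClip(5)])    # OK
    # apply_grad_processors(opt, GlobalNormClip(5))   -> AssertionError
    # ProxyOptimizer('adam')                          -> AssertionError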
tensorpack/train/multigpu.py

@@ -82,7 +82,7 @@ class MultiGPUTrainerBase(Trainer):
             if idx == t:
                 logger.info("Building graph for training tower {}...".format(idx))
             else:
-                logger.info("Building graph for training tower {} on device {}...".format(idx, t))
+                logger.info("Building graph for training tower {} on device {}...".format(idx, device))
             ret.append(func())
@@ -264,7 +264,7 @@ class SyncMultiGPUTrainerReplicated(MultiGPUTrainerBase, SingleCostFeedfreeTrain
                 run_before=True, run_as_trigger=True))
 
-    # Copied from https://github.com/tensorflow/benchmarks/blob/master/scripts/tf_cnn_benchmarks/variable_mgr.py
+    # Adopt from https://github.com/tensorflow/benchmarks/blob/master/scripts/tf_cnn_benchmarks/variable_mgr.py
     @staticmethod
     def get_post_init_ops():
         # Copy initialized values for variables on GPU 0 to other GPUs.
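The cited `variable_mgr.py` trick: after variable initialization, overwrite every replica's variables with GPU 0's values so all towers start from identical weights. A minimal sketch of the idea, not tensorpack's or tf_cnn_benchmarks' exact code, assuming hypothetical per-tower name scopes like 'tower0/...', 'tower1/...':

    import tensorflow as tf

    def get_post_init_ops():
        # Sketch only: assumes replica variables are scoped 'towerN/...'
        # and GPU 0's copies live under 'tower0/...'.
        var_by_name = {v.name: v for v in tf.global_variables()}
        post_init_ops = []
        for name, v in var_by_name.items():
            if name.startswith('tower') and not name.startswith('tower0/'):
                # e.g. 'tower1/conv0/W:0' is copied from 'tower0/conv0/W:0'
                master = var_by_name.get('tower0/' + name.split('/', 1)[1])
                if master is not None:
                    post_init_ops.append(v.assign(master.read_value()))
        return tf.group(*post_init_ops, name='sync_variables_from_tower0')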