Commit ce1c507d authored Jan 05, 2018 by Yuxin Wu

notes on stagingarea.

parent ffa4ed10

Showing 2 changed files with 21 additions and 3 deletions.
tensorpack/graph_builder/training.py     +16 -1
tensorpack/input_source/input_source.py  +5 -2
tensorpack/graph_builder/training.py

@@ -4,7 +4,10 @@
 from abc import ABCMeta, abstractmethod
 import tensorflow as tf
+import copy
 import six
+import re
+import pprint
 from six.moves import zip, range

 from ..utils import logger
@@ -53,7 +56,19 @@ class DataParallelBuilder(GraphBuilder):
             grad_list: list of list of tuples, shape is Ngpu x Nvar x 2
         """
         nvars = [len(k) for k in grad_list]
-        assert len(set(nvars)) == 1, "Number of gradients from each tower is different! " + str(nvars)
+
+        def basename(x):
+            return re.sub('tower[0-9]+/', '', x.op.name)
+
+        if len(set(nvars)) != 1:
+            names_per_gpu = [set([basename(k[1]) for k in grad_and_vars]) for grad_and_vars in grad_list]
+            inters = copy.copy(names_per_gpu[0])
+            for s in names_per_gpu:
+                inters &= s
+            for s in names_per_gpu:
+                s -= inters
+            logger.error("Unique variables on towers: " + pprint.pformat(names_per_gpu))
+            raise ValueError("Number of gradients from each tower is different! " + str(nvars))

     @staticmethod
     def build_on_towers(
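The new check is more than a reworded assertion: when the towers disagree, it names exactly which variables are unique to each tower before raising. A minimal standalone sketch of the same set logic, with hypothetical variable names standing in for the real (grad, var) tuples:

import copy
import pprint
import re

def basename(name):
    # strip the per-tower prefix, e.g. 'tower3/conv0/W' -> 'conv0/W'
    return re.sub('tower[0-9]+/', '', name)

# Hypothetical gradient variable names; tower 1 is missing 'fc/b'.
names_per_gpu = [
    {basename(n) for n in ['tower0/conv0/W', 'tower0/fc/W', 'tower0/fc/b']},
    {basename(n) for n in ['tower1/conv0/W', 'tower1/fc/W']},
]

# Intersect across towers to find the variables they all share...
inters = copy.copy(names_per_gpu[0])
for s in names_per_gpu:
    inters &= s
# ...then subtract the shared part, leaving each tower's odd ones out.
for s in names_per_gpu:
    s -= inters

pprint.pprint(names_per_gpu)   # -> [{'fc/b'}, set()]

Subtracting the intersection keeps the error message short: it lists only the offending variables instead of every gradient on every tower.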
tensorpack/input_source/input_source.py

@@ -499,11 +499,13 @@ class StagingInput(FeedfreeInput):
                 self._prefill()
             return self.fetches

-    def __init__(self, input, towers=None, nr_stage=5):
+    def __init__(self, input, towers=None, nr_stage=1):
         """
         Args:
             input (FeedfreeInput):
             nr_stage: number of elements to prefetch on each GPU.
+                Since enqueue and dequeue are synchronized, prefetching 1
+                element should be sufficient.
             towers: deprecated
         """
         assert isinstance(input, FeedfreeInput), input
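For users, the visible effect of this hunk is that StagingInput now prefetches one element per GPU by default instead of five. A hedged usage sketch (df stands for an existing DataFlow, and the top-level import path is assumed; the wrapping pattern is standard tensorpack, but this exact snippet is illustrative, not part of the commit):

from tensorpack import QueueInput, StagingInput

# df: an existing DataFlow yielding training batches (assumed defined).
input = QueueInput(df)        # host-side queue fed by the DataFlow
input = StagingInput(input)   # GPU-side staging; nr_stage now defaults to 1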
@@ -515,7 +517,6 @@ class StagingInput(FeedfreeInput):
         self._areas = []
         self._stage_ops = []
         self._unstage_ops = []
-        # self._size_ops = []

     def _setup(self, inputs):
         self._input.setup(inputs)
@@ -542,6 +543,8 @@ class StagingInput(FeedfreeInput):
                     inputs[idx] = tf.identity(inputs[idx])
                 dtypes.append(dtype.base_dtype)

+            # TODO tensorflow/benchmarks use static shapes here,
+            # though it doesn't seem to help. We can use it when it's known.
             stage = StagingArea(dtypes, shapes=None)
             self._stage_ops.append(stage.put(inputs))
             self._areas.append(stage)
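The TODO in this hunk refers to the raw StagingArea primitive underneath. A minimal sketch of its put/get pattern in TF 1.x graph mode (StagingArea lives in tensorflow.python.ops.data_flow_ops; the session loop is illustrative, not taken from tensorpack):

import tensorflow as tf
from tensorflow.python.ops.data_flow_ops import StagingArea

x = tf.random_uniform([4], dtype=tf.float32)    # stand-in input tensor
area = StagingArea([tf.float32], shapes=None)   # shapes=None, as in the diff
put_op = area.put([x])                          # enqueue one element
got = area.get()                                # dequeue the element staged earlier

with tf.Session() as sess:
    sess.run(put_op)                 # prefill once, analogous to _prefill()
    for _ in range(3):
        sess.run([got, put_op])      # each step dequeues one and enqueues the next

Because every training step runs one put for each get, the staging area never drains or grows, which is why the docstring added earlier in this file argues that staging a single element is enough.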