Shashank Suhas / seminar-breakout · Commits

Commit 75e8d9fe, authored May 15, 2016 by Yuxin Wu

    more generic parallel predict worker

Parent: 6e562112

Showing 1 changed file with 25 additions and 11 deletions.

tensorpack/predict.py  (+25 -11)
@@ -17,7 +17,8 @@ from .tfutils.modelutils import describe_model
 from .dataflow import DataFlow, BatchData
 from .dataflow.dftools import dataflow_to_process_queue
 
-__all__ = ['PredictConfig', 'DatasetPredictor', 'get_predict_func']
+__all__ = ['PredictConfig', 'DatasetPredictor', 'get_predict_func',
+           'ParallelPredictWorker']
 
 PredictResult = namedtuple('PredictResult', ['input', 'output'])
@@ -97,24 +98,19 @@ def get_predict_func(config):
         return sess.run(output_vars, feed_dict=feed)
     return run_input
 
-class PredictWorker(multiprocessing.Process):
-    """ A worker process to run predictor on one GPU """
-    def __init__(self, idx, gpuid, inqueue, outqueue, config):
+class ParallelPredictWorker(multiprocessing.Process):
+    def __init__(self, idx, gpuid, config):
         """
         :param idx: index of the worker. the 0th worker will print log.
         :param gpuid: id of the GPU to be used. set to -1 to use CPU.
-        :param inqueue: input queue to get data point
-        :param outqueue: output queue put result
         :param config: a `PredictConfig`
         """
-        super(PredictWorker, self).__init__()
+        super(ParallelPredictWorker, self).__init__()
         self.idx = idx
         self.gpuid = gpuid
-        self.inqueue = inqueue
-        self.outqueue = outqueue
         self.config = config
 
-    def run(self):
+    def _init_runtime(self):
         if self.gpuid >= 0:
             logger.info("Worker {} uses GPU {}".format(self.idx, self.gpuid))
             os.environ['CUDA_VISIBLE_DEVICES'] = self.gpuid
@@ -128,6 +124,24 @@ class PredictWorker(multiprocessing.Process):
             self.func = get_predict_func(self.config)
             if self.idx == 0:
                 describe_model()
+
+
+class QueuePredictWorker(ParallelPredictWorker):
+    """ A worker process to run predictor on one GPU """
+    def __init__(self, idx, gpuid, inqueue, outqueue, config):
+        """
+        :param idx: index of the worker. the 0th worker will print log.
+        :param gpuid: id of the GPU to be used. set to -1 to use CPU.
+        :param inqueue: input queue to get data point
+        :param outqueue: output queue put result
+        :param config: a `PredictConfig`
+        """
+        super(QueuePredictWorker, self).__init__(idx, gpuid, config)
+        self.inqueue = inqueue
+        self.outqueue = outqueue
+
+    def run(self):
+        self._init_runtime()
         while True:
             tid, dp = self.inqueue.get()
             if tid == DIE:
@@ -156,7 +170,7 @@ class DatasetPredictor(object):
             gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
         except KeyError:
             gpus = list(range(self.nr_gpu))
-        self.workers = [PredictWorker(i, gpus[i], self.inqueue, self.outqueue, config)
+        self.workers = [QueuePredictWorker(i, gpus[i], self.inqueue, self.outqueue, config)
                         for i in range(self.nr_gpu)]
         self.result_queue = OrderedResultGatherProc(self.outqueue)
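Note: the diff splits the old PredictWorker into a generic ParallelPredictWorker base class, which only knows how to pin a GPU and build its predictor in _init_runtime(), and a queue-driven QueuePredictWorker subclass that owns the get/predict/put loop. The following is a minimal, self-contained sketch of that pattern, not tensorpack's actual code: the names BasePredictWorker, QueueWorker, SENTINEL and fake_predict are hypothetical stand-ins, and the real classes additionally take a PredictConfig and build a TensorFlow graph per process.

# Minimal, self-contained sketch of the worker split above (hypothetical names,
# not tensorpack's API): the base class only prepares the per-process runtime,
# while the queue-driven subclass owns the fetch/predict/put loop.
import multiprocessing
import os

SENTINEL = -1  # plays the role of the DIE marker in the real code


def fake_predict(dp):
    # stand-in for the predictor that get_predict_func(config) would build
    return dp * 2


class BasePredictWorker(multiprocessing.Process):
    """Generic worker: knows how to set up its runtime, not where data comes from."""
    def __init__(self, idx, gpuid):
        super(BasePredictWorker, self).__init__()
        self.idx = idx
        self.gpuid = gpuid

    def _init_runtime(self):
        # pin this process to one GPU (or to the CPU), then build the predictor
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpuid) if self.gpuid >= 0 else ''
        self.func = fake_predict


class QueueWorker(BasePredictWorker):
    """Queue-driven subclass: pulls (tid, datapoint), pushes (tid, result)."""
    def __init__(self, idx, gpuid, inqueue, outqueue):
        super(QueueWorker, self).__init__(idx, gpuid)
        self.inqueue = inqueue
        self.outqueue = outqueue

    def run(self):
        self._init_runtime()
        while True:
            tid, dp = self.inqueue.get()
            if tid == SENTINEL:
                self.outqueue.put((SENTINEL, None))
                return
            self.outqueue.put((tid, self.func(dp)))


if __name__ == '__main__':
    inq, outq = multiprocessing.Queue(), multiprocessing.Queue()
    workers = [QueueWorker(i, -1, inq, outq) for i in range(2)]
    for w in workers:
        w.start()
    for tid, dp in enumerate([1, 2, 3, 4]):
        inq.put((tid, dp))
    for _ in workers:
        inq.put((SENTINEL, None))   # one stop signal per worker
    results, done = [], 0
    while done < len(workers):
        tid, res = outq.get()
        if tid == SENTINEL:
            done += 1
        else:
            results.append((tid, res))
    for w in workers:
        w.join()
    print(sorted(results))   # [(0, 2), (1, 4), (2, 6), (3, 8)]

Under this split, DatasetPredictor only needs the queue-based subclass, while other transports can reuse _init_runtime() by subclassing the base worker, which appears to be the point of the "more generic" refactor.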