seminar-breakout

Commit f363d2e8, authored Oct 19, 2017 by Yuxin Wu
parent a6a2aba4

Call predictor with positional arguments

Showing 18 changed files with 38 additions and 27 deletions (+38 -27)
examples/ConvolutionalPoseMachines/load-cpm.py      +1 -1
examples/DeepQNetwork/common.py                     +1 -1
examples/DeepQNetwork/expreplay.py                  +1 -1
examples/DoReFa-Net/alexnet-dorefa.py               +1 -1
examples/DoReFa-Net/resnet-dorefa.py                +1 -1
examples/DynamicFilterNetwork/steering-filter.py    +1 -1
examples/FasterRCNN/eval.py                         +1 -1
examples/HED/hed.py                                 +4 -2
examples/ResNet/load-resnet.py                      +1 -1
examples/Saliency/saliency-maps.py                  +1 -1
examples/SimilarityLearning/mnist-embeddings.py     +1 -1
examples/SpatialTransformer/mnist-addition.py       +1 -1
examples/load-alexnet.py                            +1 -1
examples/load-vgg16.py                              +1 -1
tensorpack/predict/base.py                          +9 -9
tensorpack/predict/concurrency.py                   +6 -2
tensorpack/predict/dataset.py                       +1 -1
tensorpack/tfutils/tower.py                         +5 -0
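The pattern across all 18 files is the same: a predictor used to be called with a single datapoint (a Python list holding one value per input tensor) and is now called with those values as positional arguments; the old list form still works but triggers a deprecation warning (see tensorpack/predict/base.py below). A minimal before/after sketch for a predictor with one image input — `predictor` is a placeholder here, not code from this commit:

import numpy as np

im = np.zeros((224, 224, 3), dtype='float32')  # dummy image, stand-in for real data
batched = im[None, :, :, :]                    # add a leading batch axis -> (1, 224, 224, 3)

# Old calling convention (now deprecated): one datapoint, i.e. a list of inputs.
# outputs = predictor([batched])

# New calling convention introduced by this commit: one positional argument per input.
# outputs = predictor(batched)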
examples/ConvolutionalPoseMachines/load-cpm.py  (+1 -1)

@@ -116,7 +116,7 @@ def run_test(model_path, img_file):
     im = cv2.imread(img_file, cv2.IMREAD_COLOR).astype('float32')
     im = cv2.resize(im, (368, 368))
-    out = predict_func([[im]])[0][0]
+    out = predict_func(im[None, :, :, :])[0][0]
     hm = out[:, :, :14].sum(axis=2)
     viz = colorize(im, hm)
     cv2.imwrite("output.jpg", viz)
examples/DeepQNetwork/common.py  (+1 -1)

@@ -20,7 +20,7 @@ def play_one_episode(env, func, render=False):
         """
         Map from observation to action, with 0.001 greedy.
         """
-        act = func([[s]])[0][0].argmax()
+        act = func(s[None, :, :, :])[0][0].argmax()
         if random.random() < 0.001:
             spc = env.action_space
             act = spc.sample()
examples/DeepQNetwork/expreplay.py  (+1 -1)

@@ -199,7 +199,7 @@ class ExpReplay(DataFlow, Callback):
             history = np.stack(history, axis=2)
             # assume batched network
-            q_values = self.predictor([[history]])[0][0]  # this is the bottleneck
+            q_values = self.predictor(history[None, :, :, :])[0][0]  # this is the bottleneck
             act = np.argmax(q_values)
         self._current_ob, reward, isOver, info = self.player.step(act)
         if isOver:
examples/DoReFa-Net/alexnet-dorefa.py  (+1 -1)

@@ -284,7 +284,7 @@ def run_image(model, sess_init, inputs):
         assert img is not None
         img = transformers.augment(img)[np.newaxis, :, :, :]
-        outputs = predictor([img])[0]
+        outputs = predictor(img)[0]
         prob = outputs[0]
         ret = prob.argsort()[-10:][::-1]
examples/DoReFa-Net/resnet-dorefa.py  (+1 -1)

@@ -139,7 +139,7 @@ def run_image(model, sess_init, inputs):
         assert img is not None
         img = transformers.augment(img)[np.newaxis, :, :, :]
-        o = predict_func([img])
+        o = predict_func(img)
         prob = o[0][0]
         ret = prob.argsort()[-10:][::-1]
examples/DynamicFilterNetwork/steering-filter.py  (+1 -1)

@@ -78,7 +78,7 @@ class OnlineTensorboardExport(Callback):
             x /= x.max()
             return x
-        o = self.pred([self.theta])
+        o = self.pred(self.theta)
         gt_filters = np.concatenate([self.filters[i, :, :] for i in range(8)], axis=0)
         pred_filters = np.concatenate([o[0][i, :, :, 0] for i in range(8)], axis=0)
examples/FasterRCNN/eval.py  (+1 -1)

@@ -94,7 +94,7 @@ def detect_one_image(img, model_func):
     resizer = CustomResize(config.SHORT_EDGE_SIZE, config.MAX_SIZE)
     resized_img = resizer.augment(img)
     scale = (resized_img.shape[0] * 1.0 / img.shape[0] + resized_img.shape[1] * 1.0 / img.shape[1]) / 2
-    fg_probs, fg_boxes = model_func([resized_img])
+    fg_probs, fg_boxes = model_func(resized_img)
     fg_boxes = fg_boxes / scale
     fg_boxes = clip_boxes(fg_boxes, img.shape[:2])
     return nms_fastrcnn_results(fg_boxes, fg_probs)
examples/HED/hed.py  (+4 -2)

@@ -198,8 +198,10 @@ def run(model_path, image_path, output):
     predictor = OfflinePredictor(pred_config)
     im = cv2.imread(image_path)
     assert im is not None
-    im = cv2.resize(im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16))
-    outputs = predictor([[im.astype('float32')]])
+    im = cv2.resize(
+        im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16)
+    )[None, :, :, :].astype('float32')
+    outputs = predictor(im)
     if output is None:
         for k in range(6):
             pred = outputs[k][0]
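The HED change above is the only one that also restructures the input preparation: the float cast and the batch axis are folded onto the resize result, so the predictor receives a single batched array positionally. A small sketch of that batch-axis idiom with a dummy array (shapes are made up, not taken from the example's data):

import numpy as np

im = np.random.rand(480, 640, 3).astype('float32')  # stand-in for a resized BGR image
batched = im[None, :, :, :]                          # same as np.expand_dims(im, 0)
assert batched.shape == (1, 480, 640, 3)
# The predictor is then called as predictor(batched) -- one positional argument --
# instead of the older nested-list datapoint predictor([[im]]).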
examples/ResNet/load-resnet.py  (+1 -1)

@@ -98,7 +98,7 @@ def run_test(params, input):
     im = cv2.imread(input).astype('float32')
     im = prepro.augment(im)
     im = np.reshape(im, (1, 224, 224, 3))
-    outputs = predict_func([im])
+    outputs = predict_func(im)
     prob = outputs[0]
     ret = prob[0].argsort()[-10:][::-1]
examples/Saliency/saliency-maps.py  (+1 -1)

@@ -42,7 +42,7 @@ def run(model_path, image_path):
     im = cv2.resize(im, (IMAGE_SIZE, IMAGE_SIZE))
     im = im.astype(np.float32)[:, :, ::-1]
-    saliency_images = predictor([im])[0]
+    saliency_images = predictor(im)[0]
     abs_saliency = np.abs(saliency_images).max(axis=-1)
     pos_saliency = np.maximum(0, saliency_images)
examples/SimilarityLearning/mnist-embeddings.py  (+1 -1)

@@ -387,7 +387,7 @@ def visualize(model_path, model, algo_name):
     for offset, dp in enumerate(ds.get_data()):
         digit, label = dp
-        prediction = pred([digit])[0]
+        prediction = pred(digit)[0]
         embed[offset * BATCH_SIZE:offset * BATCH_SIZE + BATCH_SIZE, ...] = prediction
         images[offset * BATCH_SIZE:offset * BATCH_SIZE + BATCH_SIZE, ...] = digit
         offset += 1
examples/SpatialTransformer/mnist-addition.py  (+1 -1)

@@ -140,7 +140,7 @@ def view_warp(modelpath):
     ds.reset_state()
     for k in ds.get_data():
         img, label = k
-        outputs, affine1, affine2 = pred([img])
+        outputs, affine1, affine2 = pred(img)
         for idx, viz in enumerate(outputs):
             viz = cv2.cvtColor(viz, cv2.COLOR_GRAY2BGR)
             # Here we assume the second branch focuses on the first digit
examples/load-alexnet.py  (+1 -1)

@@ -65,7 +65,7 @@ def run_test(path, input):
     assert im is not None, input
     im = cv2.resize(im, (227, 227))[:, :, ::-1].reshape(
         (1, 227, 227, 3)).astype('float32') - 110
-    outputs = predictor([im])[0]
+    outputs = predictor(im)[0]
     prob = outputs[0]
     ret = prob.argsort()[-10:][::-1]
     print("Top10 predictions:", ret)
examples/load-vgg16.py  (+1 -1)

@@ -76,7 +76,7 @@ def run_test(path, input):
     im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
     im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3)).astype('float32')
     im = im - 110
-    outputs = predict_func([im])[0]
+    outputs = predict_func(im)[0]
     prob = outputs[0]
     ret = prob.argsort()[-10:][::-1]
     print("Top10 predictions:", ret)
tensorpack/predict/base.py  (+9 -9)

@@ -10,6 +10,7 @@ import six
 from ..tfutils.common import get_tensors_by_names
 from ..tfutils.tower import TowerContext
 from ..input_source import PlaceholderInput
+from ..utils.develop import log_deprecated

 __all__ = ['PredictorBase', 'AsyncPredictorBase',
            'OnlinePredictor', 'OfflinePredictor',

@@ -30,22 +31,21 @@ class PredictorBase(object):
         """
         Call the predictor on some inputs.

-        If ``len(args) == 1``, assume ``args[0]`` is a datapoint (a list).
-        otherwise, assume ``args`` is a datapoinnt

         Examples:
-            When you have a predictor which takes a datapoint [e1, e2], you
-            can call it in two ways:
+            When you have a predictor defined with two inputs, call it with:

             .. code-block:: python

                 predictor(e1, e2)
-                predictor([e1, e2])
         """
-        if len(args) != 1:
-            dp = args
+        if len(args) == 1 and isinstance(args[0], (list, tuple)):
+            dp = args[0]  # backward-compatibility
+            log_deprecated("Calling a predictor with one datapoint",
+                           "Call it with positional arguments instead!", "2018-3-1")
         else:
-            dp = args[0]
+            dp = args
         output = self._do_call(dp)
         if self.return_input:
             return (dp, output)
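The dispatch added to PredictorBase.__call__ above keeps the old single-datapoint call working behind a deprecation notice. A condensed, standalone sketch of that argument handling — the class and the warning mechanism here are simplified stand-ins, not the real tensorpack code:

import warnings

class CallConventionSketch:
    """Mimics the argument handling this commit adds to PredictorBase.__call__."""

    def __call__(self, *args):
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            dp = args[0]  # old style: predictor([e1, e2]) -- deprecated
            warnings.warn("Calling a predictor with one datapoint is deprecated; "
                          "call it with positional arguments instead.")
        else:
            dp = args     # new style: predictor(e1, e2)
        return self._do_call(dp)

    def _do_call(self, dp):
        # Placeholder for the real session run; just echo the datapoint.
        return list(dp)

Either CallConventionSketch()(e1, e2) or the deprecated CallConventionSketch()([e1, e2]) hands the same two-element datapoint to _do_call; only the first form avoids the warning.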
tensorpack/predict/concurrency.py  (+6 -2)

@@ -3,6 +3,7 @@
 # File: concurrency.py
 # Author: Yuxin Wu <ppwwyyxxc@gmail.com>

+import numpy as np
 import multiprocessing
 import six
 from six.moves import queue, range

@@ -71,7 +72,7 @@ class MultiProcessQueuePredictWorker(MultiProcessPredictWorker):
                 self.outqueue.put((DIE, None))
                 return
             else:
-                self.outqueue.put((tid, self.predictor(dp)))
+                self.outqueue.put((tid, self.predictor(*dp)))


 class PredictorWorkerThread(StoppableThread, ShareSessionThread):

@@ -89,7 +90,7 @@ class PredictorWorkerThread(StoppableThread, ShareSessionThread):
         while not self.stopped():
             batched, futures = self.fetch_batch()
             try:
-                outputs = self.func(batched)
+                outputs = self.func(*batched)
             except tf.errors.CancelledError:
                 for f in futures:
                     f.cancel()

@@ -122,6 +123,9 @@ class PredictorWorkerThread(StoppableThread, ShareSessionThread):
                    futures.append(f)
                except queue.Empty:
                    break   # do not wait
+        for k in range(nr_input_var):
+            batched[k] = np.asarray(batched[k])
         return batched, futures
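With a datapoint now being a tuple of per-input values, the workers above unpack it with * before calling the predictor, and fetch_batch converts each accumulated input column to an ndarray with the newly imported numpy. A rough illustration of that per-input batching with made-up shapes (not the real queue machinery):

import numpy as np

# Three queued datapoints, each an (image, label) pair -- shapes are made up.
datapoints = [(np.zeros((32, 32, 3), dtype='float32'), np.int64(k)) for k in range(3)]

nr_input_var = 2
batched = [[] for _ in range(nr_input_var)]
for dp in datapoints:
    for k in range(nr_input_var):
        batched[k].append(dp[k])

# The step this commit adds: stack each input column into one array ...
for k in range(nr_input_var):
    batched[k] = np.asarray(batched[k])

# ... so the batched predictor can be called positionally, as in self.func(*batched).
assert batched[0].shape == (3, 32, 32, 3) and batched[1].shape == (3,)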
tensorpack/predict/dataset.py  (+1 -1)

@@ -73,7 +73,7 @@ class SimpleDatasetPredictor(DatasetPredictorBase):
             sz = 0
         with get_tqdm(total=sz, disable=(sz == 0)) as pbar:
             for dp in self.dataset.get_data():
-                res = self.predictor(dp)
+                res = self.predictor(*dp)
                 yield res
                 pbar.update()
tensorpack/tfutils/tower.py  (+5 -0)

@@ -277,3 +277,8 @@ class TowerTensorHandle(object):
         The output returned by the tower function.
         """
         return self._output
+
+    # def make_callable(self, input_names, output_names):
+    #     input_tensors = self.get_tensors(input_names)
+    #     output_tensors = self.get_tensors(output_names)
+    #     pass