Shashank Suhas / seminar-breakout / Commits

Commit 395a5add (parent bb2262de), authored Dec 06, 2018 by Yuxin Wu:

    cleanup deprecations
Showing 9 changed files with 67 additions and 220 deletions (+67 -220):

    docs/conf.py                            +5    -2
    tensorpack/dataflow/dataset/cifar.py    +13   -1
    tensorpack/dataflow/dftools.py          +1    -42
    tensorpack/dataflow/raw.py              +2    -5
    tensorpack/dataflow/remote.py           +44   -0
    tensorpack/models/__init__.py           +0    -1
    tensorpack/models/image_sample.py       +0    -166  (deleted)
    tensorpack/predict/dataset.py           +1    -1
    tensorpack/utils/__init__.py            +1    -2
docs/conf.py (+5 -2)

```diff
@@ -377,6 +377,7 @@ _DEPRECATED_NAMES = set([
     'dump_dataflow_to_lmdb',
     'dump_dataflow_to_tfrecord',
     'IntBox', 'FloatBox',
+    'dump_dataflow_to_process_queue',
     'PrefetchOnGPUs',

     # renamed stuff:
@@ -388,10 +389,12 @@ _DEPRECATED_NAMES = set([
     'ThreadedMapData',

     # deprecated or renamed symbolic code
+    'ImageSample',
     'BilinearUpSample',
     'Deconv2D', 'psnr',

+    # shouldn't appear in doc:
+    'l2_regularizer', 'l1_regularizer',
+
     # internal only
     'SessionUpdate',
     'average_grads',
```
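The names in `_DEPRECATED_NAMES` are hidden from the generated API documentation. As a rough sketch of the usual mechanism (tensorpack's actual handler in conf.py may differ in its details), such a skip list is consumed through Sphinx's `autodoc-skip-member` event:

```python
# Hedged sketch: how a Sphinx conf.py typically consumes a skip list such
# as _DEPRECATED_NAMES, via the autodoc-skip-member event. Illustrative,
# not copied from tensorpack's conf.py.

def autodoc_skip_member(app, what, name, obj, skip, options):
    if name in _DEPRECATED_NAMES:
        return True      # True tells autodoc to omit this member
    return None          # None falls back to autodoc's default decision


def setup(app):
    app.connect('autodoc-skip-member', autodoc_skip_member)
```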
tensorpack/dataflow/dataset/cifar.py (+13 -1)

```diff
@@ -14,7 +14,7 @@ from ...utils import logger
 from ...utils.fs import download, get_dataset_path
 from ..base import RNGDataFlow

-__all__ = ['Cifar10', 'Cifar100']
+__all__ = ['CifarBase', 'Cifar10', 'Cifar100']

 DATA_URL_CIFAR_10 = ('http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', 170498071)
@@ -85,7 +85,19 @@ def _parse_meta(filename, cifar_classnum):
 class CifarBase(RNGDataFlow):
+    """
+    Produces [image, label] in Cifar10/100 dataset,
+    image is 32x32x3 in the range [0,255].
+    label is an int.
+    """
     def __init__(self, train_or_test, shuffle=None, dir=None, cifar_classnum=10):
+        """
+        Args:
+            train_or_test (str): 'train' or 'test'
+            shuffle (bool): defaults to True for training set.
+            dir (str): path to the dataset directory
+            cifar_classnum (int): 10 or 100
+        """
         assert train_or_test in ['train', 'test']
         assert cifar_classnum == 10 or cifar_classnum == 100
         self.cifar_classnum = cifar_classnum
```
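The new docstrings spell out the dataflow's contract: each datapoint is a `[image, label]` list, with a 32x32x3 image in `[0, 255]` and an integer label. A minimal usage sketch, assuming tensorpack is installed and the dataset can be downloaded on first use:

```python
# Minimal sketch of using the Cifar10 dataflow documented above.
from tensorpack.dataflow.dataset import Cifar10

df = Cifar10('train')      # shuffle defaults to True for the training set
df.reset_state()           # DataFlows must be reset before iteration
for image, label in df:    # image: 32x32x3 array in [0, 255]; label: int
    print(image.shape, label)
    break
```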
tensorpack/dataflow/dftools.py (+1 -42)

```diff
@@ -2,55 +2,14 @@
 # File: dftools.py

-import multiprocessing as mp
-
-from six.moves import range
-from ..utils.concurrency import DIE
 from ..utils.develop import deprecated
 from .serialize import LMDBSerializer, TFRecordSerializer

 __all__ = ['dump_dataflow_to_process_queue',
            'dump_dataflow_to_lmdb', 'dump_dataflow_to_tfrecord']


-def dump_dataflow_to_process_queue(df, size, nr_consumer):
-    """
-    Convert a DataFlow to a :class:`multiprocessing.Queue`.
-    The DataFlow will only be reset in the spawned process.
-
-    Args:
-        df (DataFlow): the DataFlow to dump.
-        size (int): size of the queue
-        nr_consumer (int): number of consumer of the queue.
-            The producer will add this many of ``DIE`` sentinel to the end of the queue.
-
-    Returns:
-        tuple(queue, process):
-            The process will take data from ``df`` and fill
-            the queue, once you start it. Each element in the queue is (idx,
-            dp). idx can be the ``DIE`` sentinel when ``df`` is exhausted.
-    """
-    q = mp.Queue(size)
-
-    class EnqueProc(mp.Process):
-
-        def __init__(self, df, q, nr_consumer):
-            super(EnqueProc, self).__init__()
-            self.df = df
-            self.q = q
-
-        def run(self):
-            self.df.reset_state()
-            try:
-                for idx, dp in enumerate(self.df):
-                    self.q.put((idx, dp))
-            finally:
-                for _ in range(nr_consumer):
-                    self.q.put((DIE, None))
-
-    proc = EnqueProc(df, q, nr_consumer)
-    return q, proc
+from .remote import dump_dataflow_to_process_queue


 @deprecated("Use LMDBSerializer.save instead!", "2019-01-31")
```
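With the queue dumper moved out (and re-imported from `.remote` only for backward compatibility), `dftools.py` keeps just the deprecated LMDB/TFRecord wrappers. The decorator message names the replacement; a sketch of it, assuming the `lmdb` package is installed and using an illustrative path:

```python
# Sketch of the replacement named in the deprecation message above.
from tensorpack.dataflow import DataFromList
from tensorpack.dataflow.serialize import LMDBSerializer

df = DataFromList([[0], [1], [2]])            # datapoints are lists
LMDBSerializer.save(df, '/tmp/data.lmdb')     # replaces dump_dataflow_to_lmdb
df2 = LMDBSerializer.load('/tmp/data.lmdb', shuffle=False)
```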
tensorpack/dataflow/raw.py (+2 -5)

```diff
@@ -7,7 +7,6 @@ import copy
 import six
 from six.moves import range
 from .base import DataFlow, RNGDataFlow
-from ..utils.develop import log_deprecated

 __all__ = ['FakeData', 'DataFromQueue', 'DataFromList', 'DataFromGenerator', 'DataFromIterable']
@@ -98,19 +97,17 @@ class DataFromList(RNGDataFlow):
 class DataFromGenerator(DataFlow):
     """
     Wrap a generator to a DataFlow.
+    The dataflow will not have length.
     """
-    def __init__(self, gen, size=None):
+    def __init__(self, gen):
         """
         Args:
             gen: iterable, or a callable that returns an iterable
-            size: deprecated
         """
         if not callable(gen):
            self._gen = lambda: gen
        else:
            self._gen = gen
-        if size is not None:
-            log_deprecated("DataFromGenerator(size=)", "It doesn't make much sense.", "2018-03-31")

     def __iter__(self):
         # yield from
```
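Since the deprecated `size=` argument is gone, a generator-backed dataflow simply has no length, as the new docstring line says. A short sketch of the surviving interface:

```python
# Sketch: DataFromGenerator after this change (no size= argument).
from tensorpack.dataflow.raw import DataFromGenerator

def gen():
    for i in range(5):
        yield [i]              # each datapoint is a list of components

df = DataFromGenerator(gen)    # passing the callable lets iteration restart
df.reset_state()
for dp in df:
    print(dp)
# len(df) raises: a generator-backed DataFlow has no length.
```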
tensorpack/dataflow/remote.py (+44 -0)

```diff
@@ -4,12 +4,16 @@
 import time
 import tqdm
+import multiprocessing as mp
+from six.moves import range
 from collections import deque

 from .base import DataFlow, DataFlowReentrantGuard
 from ..utils import logger
 from ..utils.utils import get_tqdm_kwargs
+from ..utils.concurrency import DIE
 from ..utils.serialize import dumps, loads
 try:
     import zmq
 except ImportError:
@@ -156,6 +160,46 @@ class RemoteDataZMQ(DataFlow):
             ctx.destroy(linger=0)


+# for internal use only
+def dump_dataflow_to_process_queue(df, size, nr_consumer):
+    """
+    Convert a DataFlow to a :class:`multiprocessing.Queue`.
+    The DataFlow will only be reset in the spawned process.
+
+    Args:
+        df (DataFlow): the DataFlow to dump.
+        size (int): size of the queue
+        nr_consumer (int): number of consumer of the queue.
+            The producer will add this many of ``DIE`` sentinel to the end of the queue.
+
+    Returns:
+        tuple(queue, process):
+            The process will take data from ``df`` and fill
+            the queue, once you start it. Each element in the queue is (idx,
+            dp). idx can be the ``DIE`` sentinel when ``df`` is exhausted.
+    """
+    q = mp.Queue(size)
+
+    class EnqueProc(mp.Process):
+
+        def __init__(self, df, q, nr_consumer):
+            super(EnqueProc, self).__init__()
+            self.df = df
+            self.q = q
+
+        def run(self):
+            self.df.reset_state()
+            try:
+                for idx, dp in enumerate(self.df):
+                    self.q.put((idx, dp))
+            finally:
+                for _ in range(nr_consumer):
+                    self.q.put((DIE, None))
+
+    proc = EnqueProc(df, q, nr_consumer)
+    return q, proc
+
+
 if __name__ == '__main__':
     from argparse import ArgumentParser
     from .raw import FakeData
```
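The relocated function is now flagged "for internal use only". Going by its docstring, a consumer drains the queue until it has seen one `DIE` sentinel per consumer; an illustrative sketch with a dummy dataflow:

```python
# Sketch of the producer/consumer contract described in the docstring above.
from tensorpack.dataflow.raw import FakeData
from tensorpack.dataflow.remote import dump_dataflow_to_process_queue
from tensorpack.utils.concurrency import DIE

df = FakeData([[3]], size=10)    # dummy DataFlow: one component of shape (3,)
q, proc = dump_dataflow_to_process_queue(df, size=5, nr_consumer=1)
proc.start()
while True:
    idx, dp = q.get()
    if idx == DIE:               # one DIE sentinel is enqueued per consumer
        break
    print(idx, dp)
proc.join()
```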
tensorpack/models/__init__.py (+0 -1)

```diff
@@ -9,7 +9,6 @@ if STATICA_HACK:
     from .common import *
     from .conv2d import *
     from .fc import *
-    from .image_sample import *
     from .layer_norm import *
     from .linearwrap import *
     from .nonlin import *
```
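One visible consequence (an illustrative check, not part of the diff): with the module deleted and its star-import removed, `ImageSample` is no longer re-exported from `tensorpack.models`:

```python
# Illustrative: this import worked before the commit and now raises.
try:
    from tensorpack.models import ImageSample
except ImportError as e:
    print(e)
```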
tensorpack/models/image_sample.py (deleted, 100644 → 0; -166)

The entire file is removed. Its content, for reference:

```python
# -*- coding: utf-8 -*-
# File: image_sample.py

import tensorflow as tf
import numpy as np

from ..utils.develop import log_deprecated
from .common import layer_register
from ._test import TestModel

__all__ = ['ImageSample']


def sample(img, coords):
    """
    Args:
        img: bxhxwxc
        coords: bxh2xw2x2. each coordinate is (y, x) integer.
            Out of boundary coordinates will be clipped.
    Return:
        bxh2xw2xc image
    """
    shape = img.get_shape().as_list()[1:]   # h, w, c
    batch = tf.shape(img)[0]
    shape2 = coords.get_shape().as_list()[1:3]  # h2, w2
    assert None not in shape2, coords.get_shape()
    max_coor = tf.constant([shape[0] - 1, shape[1] - 1], dtype=tf.float32)

    coords = tf.clip_by_value(coords, 0., max_coor)  # borderMode==repeat
    coords = tf.to_int32(coords)

    batch_index = tf.range(batch, dtype=tf.int32)
    batch_index = tf.reshape(batch_index, [-1, 1, 1, 1])
    batch_index = tf.tile(batch_index, [1, shape2[0], shape2[1], 1])    # bxh2xw2x1
    indices = tf.concat([batch_index, coords], axis=3)  # bxh2xw2x3
    sampled = tf.gather_nd(img, indices)
    return sampled


@layer_register(log_shape=True)
def ImageSample(inputs, borderMode='repeat'):
    """
    Sample the images using the given coordinates, by bilinear interpolation.
    This was described in the paper:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.
    This is equivalent to `torch.nn.functional.grid_sample`,
    up to some non-trivial coordinate transformation.

    This implementation returns pixel value at pixel (1, 1) for a floating point coordinate (1.0, 1.0).
    Note that this may not be what you need.

    Args:
        inputs (list): [images, coords]. images has shape NHWC.
            coords has shape (N, H', W', 2), where each pair of the last dimension is a (y, x) real-value
            coordinate.
        borderMode: either "repeat" or "constant" (zero-filled)

    Returns:
        tf.Tensor: a tensor named ``output`` of shape (N, H', W', C).
    """
    log_deprecated("ImageSample", "Please implement it in your own code instead!", "2018-12-01")
    image, mapping = inputs
    assert image.get_shape().ndims == 4 and mapping.get_shape().ndims == 4
    input_shape = image.get_shape().as_list()[1:]
    assert None not in input_shape, \
        "Images in ImageSample layer must have fully-defined shape"
    assert borderMode in ['repeat', 'constant']

    orig_mapping = mapping
    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.floor(mapping)
    ucoor = lcoor + 1

    diff = mapping - lcoor
    neg_diff = 1.0 - diff   # bxh2xw2x2

    lcoory, lcoorx = tf.split(lcoor, 2, 3)
    ucoory, ucoorx = tf.split(ucoor, 2, 3)

    lyux = tf.concat([lcoory, ucoorx], 3)
    uylx = tf.concat([ucoory, lcoorx], 3)

    diffy, diffx = tf.split(diff, 2, 3)
    neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)

    ret = tf.add_n([sample(image, lcoor) * neg_diffx * neg_diffy,
                    sample(image, ucoor) * diffx * diffy,
                    sample(image, lyux) * neg_diffy * diffx,
                    sample(image, uylx) * diffy * neg_diffx], name='sampled')
    if borderMode == 'constant':
        max_coor = tf.constant([input_shape[0] - 1, input_shape[1] - 1], dtype=tf.float32)
        mask = tf.greater_equal(orig_mapping, 0.0)
        mask2 = tf.less_equal(orig_mapping, max_coor)
        mask = tf.logical_and(mask, mask2)  # bxh2xw2x2
        mask = tf.reduce_all(mask, [3])  # bxh2xw2 boolean
        mask = tf.expand_dims(mask, 3)
        ret = ret * tf.cast(mask, tf.float32)
    return tf.identity(ret, name='output')


class TestSample(TestModel):
    def test_ImageSample(self):
        h, w = 3, 4

        def np_sample(img, coords):
            # a reference implementation
            coords = np.maximum(coords, 0)
            coords = np.minimum(coords, np.array([img.shape[1] - 1, img.shape[2] - 1]))
            xs = coords[:, :, :, 1].reshape((img.shape[0], -1))
            ys = coords[:, :, :, 0].reshape((img.shape[0], -1))

            ret = np.zeros((img.shape[0], coords.shape[1], coords.shape[2],
                            img.shape[3]), dtype='float32')
            for k in range(img.shape[0]):
                xss, yss = xs[k], ys[k]
                ret[k, :, :, :] = img[k, yss, xss, :].reshape((coords.shape[1],
                                                               coords.shape[2], 3))
            return ret

        bimg = np.random.rand(2, h, w, 3).astype('float32')

        # mat = np.array([
        #     [[[1, 1], [1.2, 1.2]], [[-1, -1], [2.5, 2.5]]],
        #     [[[1, 1], [1.2, 1.2]], [[-1, -1], [2.5, 2.5]]]
        # ], dtype='float32')  # 2x2x2x2
        mat = (np.random.rand(2, 5, 5, 2) - 0.2) * np.array([h + 3, w + 3])
        true_res = np_sample(bimg, np.floor(mat + 0.5).astype('int32'))

        inp, mapping = self.make_variable(bimg, mat)
        output = sample(inp, tf.cast(tf.floor(mapping + 0.5), tf.int32))
        res = self.run_variable(output)
        self.assertTrue((res == true_res).all())


if __name__ == '__main__':
    import cv2
    im = cv2.imread('cat.jpg')
    im = im.reshape((1,) + im.shape).astype('float32')
    imv = tf.Variable(im)
    h, w = 300, 400
    mapping = np.zeros((1, h, w, 2), dtype='float32')
    diff = 200
    for x in range(w):
        for y in range(h):
            mapping[0, y, x, :] = np.array([y - diff + 0.4, x - diff + 0.5])
    mapv = tf.Variable(mapping)
    output = ImageSample('sample', [imv, mapv], borderMode='constant')
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # out = sess.run(tf.gradients(tf.reduce_sum(output), mapv))
    # out = sess.run(output)
    # print(out[0].min())
    # print(out[0].max())
    # print(out[0].sum())
    out = sess.run([output])[0]
    im = out[0]
    cv2.imwrite('sampled.jpg', im)
```
tensorpack/predict/dataset.py (+1 -1)

```diff
@@ -9,7 +9,7 @@ import os
 import six

 from ..dataflow import DataFlow
-from ..dataflow.dftools import dump_dataflow_to_process_queue
+from ..dataflow.remote import dump_dataflow_to_process_queue
 from ..utils.concurrency import ensure_proc_terminate, OrderedResultGatherProc, DIE
 from ..utils import logger
 from ..utils.utils import get_tqdm
```
tensorpack/utils/__init__.py (+1 -2)

```diff
@@ -17,8 +17,7 @@ __all__ = []
 def get_nr_gpu():
     from .gpu import get_nr_gpu as gg
     logger.warn(  # noqa
-        "get_nr_gpu will not be automatically imported any more! "
-        "Please do `from tensorpack.utils.gpu import get_nr_gpu`")
+        "Please use `from tensorpack.utils.gpu import get_num_gpu`")
     return gg()
```
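The shim keeps `tensorpack.utils.get_nr_gpu` callable but warns; the supported spelling is the explicit import it names (assuming the post-rename API, where `get_num_gpu` is the current name and `get_nr_gpu` remains a deprecated alias in `tensorpack.utils.gpu`):

```python
# The import the warning asks for (sketch).
from tensorpack.utils.gpu import get_num_gpu

print(get_num_gpu())   # number of available GPUs
```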