Shashank Suhas / seminar-breakout · Commits · edb78974

Commit edb78974, authored Jul 11, 2020 by Yuxin Wu
upgrade detection / some v2 compat fix
Parent: 55b2e0f1
Showing 11 changed files with 22 additions and 41 deletions (+22 / −41):
  .github/workflows/workflow.yml                     +1  −2
  examples/FasterRCNN/README.md                      +1  −1
  examples/FasterRCNN/modeling/generalized_rcnn.py   +1  −2
  examples/FasterRCNN/modeling/model_box.py          +1  −1
  examples/FasterRCNN/modeling/model_fpn.py          +2  −2
  examples/FasterRCNN/modeling/model_frcnn.py        +5  −10
  examples/FasterRCNN/modeling/model_mrcnn.py        +4  −10
  examples/FasterRCNN/train.py                       +0  −6
  tensorpack/graph_builder/training.py               +2  −2
  tensorpack/graph_builder/utils.py                  +3  −3
  tensorpack/train/tower.py                          +2  −2
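The changes below follow two recurring TF1-to-TF2 compatibility patterns: symbols removed from the top-level namespace in TF 2.x are called through their stable locations (tf.math.log, tf.random.shuffle, tf.math.invert_permutation), and graph-mode-only calls go through a v1 alias (tfv1). A minimal sketch of both patterns, assuming TensorFlow ≥ 1.14, where tf.compat.v1 and the tf.math / tf.random namespaces are all available; the variable names are illustrative and not from the diff:

    import tensorflow as tf

    tfv1 = tf.compat.v1                      # frozen v1 API surface, also available in TF 2.x

    x = tf.constant([4.0, 1.0])
    y = tf.math.log(x)                       # instead of the removed alias tf.log
    idx = tf.random.shuffle(tf.range(5))     # instead of tf.random_shuffle
    train_vars = tfv1.trainable_variables()  # v1-only graph collections go through tfv1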
.github/workflows/workflow.yml

@@ -29,8 +29,7 @@ jobs:
       max-parallel: 6
       matrix:
         python-version: [3.6]
-        # TF-version: [1.3.0, 1.14.0, nightly] # TODO make nightly work
-        TF-version: [1.3.0, 1.14.0]
+        TF-version: [1.5.0, 1.15.0]
     steps:
     - uses: actions/checkout@v1
     - name: Set up Python ${{ matrix.python-version }}
examples/FasterRCNN/README.md

@@ -15,7 +15,7 @@ with the support of:
 + Training from scratch (from [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883))
 
 ## Dependencies
-+ OpenCV, TensorFlow ≥ 1.6
++ OpenCV, TensorFlow ≥ 1.14
 + pycocotools/scipy: `for i in cython 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' scipy; do pip install $i; done`
 + Pre-trained [ImageNet ResNet model](http://models.tensorpack.com/#FasterRCNN) from tensorpack model zoo
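Not part of the README, but a quick way to confirm the new TF ≥ 1.14 floor is met before launching training; this is purely an illustrative check and assumes a standard pip TensorFlow install:

    import tensorflow as tf

    major, minor = (int(x) for x in tf.__version__.split('.')[:2])
    assert (major, minor) >= (1, 14), (
        "The FasterRCNN example now expects TensorFlow >= 1.14, found " + tf.__version__)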
examples/FasterRCNN/modeling/generalized_rcnn.py

 # -*- coding: utf-8 -*-
 # File:
 
-import tensorflow as tf
+from tensorpack.compat import tfv1 as tf
 
 from tensorpack import ModelDesc
 from tensorpack.models import GlobalAvgPooling, l2_regularizer, regularize_cost
 from tensorpack.tfutils import optimizer
 ...
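The import swap above routes this file's `tf.*` calls through tensorpack's v1 alias. As a rough mental model only (this is an assumption about the shim's behavior, not tensorpack's actual tensorpack/compat implementation), such an alias usually resolves like this:

    import tensorflow as tf

    # Hypothetical shim, for illustration only -- not the real tensorpack.compat module.
    if hasattr(tf, "compat") and hasattr(tf.compat, "v1"):
        tfv1 = tf.compat.v1   # TF 1.13+ / TF 2.x: use the preserved v1 API
    else:
        tfv1 = tf             # very old TF 1.x: the top-level module already is the v1 API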
examples/FasterRCNN/modeling/model_box.py

@@ -74,7 +74,7 @@ def encode_bbox_target(boxes, anchors):
     # Note that here not all boxes are valid. Some may be zero
     txty = (xbyb - xaya) / waha
-    twth = tf.log(wbhb / waha)  # may contain -inf for invalid boxes
+    twth = tf.math.log(wbhb / waha)  # may contain -inf for invalid boxes
     encoded = tf.concat([txty, twth], axis=1)  # (-1x2x2)
     return tf.reshape(encoded, tf.shape(boxes))
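For context on the `-inf` comment kept on the changed line: `tf.math.log` behaves exactly like the old `tf.log` (which was dropped from the top-level namespace in TF 2.x), and a degenerate all-zero box gives a width/height ratio of 0, whose log is -inf. A tiny check, assuming TF ≥ 1.14 with eager execution and made-up values:

    import tensorflow as tf

    ratio = tf.constant([2.0, 0.0])    # second entry mimics wbhb / waha for an all-zero box
    print(tf.math.log(ratio).numpy())  # -> [0.6931472   -inf]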
examples/FasterRCNN/modeling/model_fpn.py

@@ -84,7 +84,7 @@ def fpn_map_rois_to_levels(boxes):
     """
     sqrtarea = tf.sqrt(tf_area(boxes))
     level = tf.cast(tf.floor(
-        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)
+        4 + tf.math.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))), tf.int32)
 
     # RoI levels range from 2~5 (not 6)
     level_ids = [

@@ -127,7 +127,7 @@ def multilevel_roi_align(features, rcnn_boxes, resolution):
     all_rois = tf.concat(all_rois, axis=0)  # NCHW
 
     # Unshuffle to the original order, to match the original samples
     level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
-    level_id_invert_perm = tf.invert_permutation(level_id_perm)
+    level_id_invert_perm = tf.math.invert_permutation(level_id_perm)
     all_rois = tf.gather(all_rois, level_id_invert_perm, name="output")
     return all_rois
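The expression touched in the first hunk is the FPN level-assignment rule, level = floor(4 + log2(sqrt(area) / 224)); only the log symbol changed, not the math. A small plain-Python check of the rule (the sides and the 1e-6 epsilon mirror the code above; this snippet is not part of the diff):

    import math

    for side in (448, 224, 112, 56):    # sqrt(area) of a square RoI
        level = math.floor(4 + math.log(side * (1. / 224) + 1e-6, 2))
        print(side, level)              # 448->5, 224->4, 112->3, 56->2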
examples/FasterRCNN/modeling/model_frcnn.py

@@ -5,7 +5,6 @@ import tensorflow as tf
 from tensorpack.models import Conv2D, FullyConnected, layer_register
 from tensorpack.tfutils.argscope import argscope
-from tensorpack.tfutils.common import get_tf_version_tuple
 from tensorpack.tfutils.scope_utils import under_name_scope
 from tensorpack.tfutils.summary import add_moving_summary
 from tensorpack.utils.argtools import memoized_method

@@ -74,13 +73,13 @@ def sample_fast_rcnn_targets(boxes, gt_boxes, gt_labels):
     num_fg = tf.minimum(int(
         cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO),
         tf.size(fg_inds), name='num_fg')
-    fg_inds = tf.random_shuffle(fg_inds)[:num_fg]
+    fg_inds = tf.random.shuffle(fg_inds)[:num_fg]
 
     bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1])
     num_bg = tf.minimum(
         cfg.FRCNN.BATCH_PER_IM - num_fg,
         tf.size(bg_inds), name='num_bg')
-    bg_inds = tf.random_shuffle(bg_inds)[:num_bg]
+    bg_inds = tf.random.shuffle(bg_inds)[:num_bg]
 
     add_moving_summary(num_fg, num_bg)
     return fg_inds, bg_inds

@@ -151,12 +150,8 @@ def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits):
     num_fg = tf.size(fg_inds, out_type=tf.int64)
     empty_fg = tf.equal(num_fg, 0)
     if int(fg_box_logits.shape[1]) > 1:
-        if get_tf_version_tuple() >= (1, 14):
-            fg_labels = tf.expand_dims(fg_labels, axis=1)  # nfg x 1
-            fg_box_logits = tf.gather(fg_box_logits, fg_labels, batch_dims=1)
-        else:
-            indices = tf.stack([tf.range(num_fg), fg_labels], axis=1)  # nfgx2
-            fg_box_logits = tf.gather_nd(fg_box_logits, indices)
+        fg_labels = tf.expand_dims(fg_labels, axis=1)  # nfg x 1
+        fg_box_logits = tf.gather(fg_box_logits, fg_labels, batch_dims=1)
         fg_box_logits = tf.reshape(fg_box_logits, [-1, 4])  # nfg x 4
 
     with tf.name_scope('label_metrics'), tf.device('/cpu:0'):

@@ -253,7 +248,7 @@ def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
     with argscope(Conv2D, data_format='channels_first',
                   kernel_initializer=tf.variance_scaling_initializer(
                       scale=2.0, mode='fan_out',
-                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
+                      distribution='untruncated_normal')):
         for k in range(num_convs):
             l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
             if norm is not None:
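Dropping the `get_tf_version_tuple()` branches is safe once the example requires TF ≥ 1.14, which is exactly the release the deleted check guarded: `tf.gather(..., batch_dims=1)` picks one 4-vector of box logits per foreground RoI, the same result the old `tf.gather_nd` fallback produced. A self-contained sketch of that equivalence (shapes and values are invented; eager mode assumed):

    import tensorflow as tf

    fg_box_logits = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 3, 4])  # nfg x #class x 4
    fg_labels = tf.constant([2, 0])                                                # class chosen per RoI

    new = tf.gather(fg_box_logits, tf.expand_dims(fg_labels, axis=1), batch_dims=1)
    new = tf.reshape(new, [-1, 4])                                                 # nfg x 4

    old = tf.gather_nd(fg_box_logits, tf.stack([tf.range(2), fg_labels], axis=1))  # pre-1.14 path
    print(bool(tf.reduce_all(tf.equal(new, old))))                                 # True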
examples/FasterRCNN/modeling/model_mrcnn.py

@@ -4,7 +4,6 @@ import tensorflow as tf
 from tensorpack.models import Conv2D, Conv2DTranspose, layer_register
 from tensorpack.tfutils.argscope import argscope
-from tensorpack.tfutils.common import get_tf_version_tuple
 from tensorpack.tfutils.scope_utils import under_name_scope
 from tensorpack.tfutils.summary import add_moving_summary

@@ -20,14 +19,9 @@ def maskrcnn_loss(mask_logits, fg_labels, fg_target_masks):
         fg_labels: #fg, in 1~#class, int64
         fg_target_masks: #fgxhxw, float32
     """
-    if get_tf_version_tuple() >= (1, 14):
-        mask_logits = tf.gather(
-            mask_logits, tf.reshape(fg_labels - 1, [-1, 1]), batch_dims=1)
-        mask_logits = tf.squeeze(mask_logits, axis=1)
-    else:
-        indices = tf.stack([tf.range(tf.size(fg_labels, out_type=tf.int64)), fg_labels - 1], axis=1)  # #fgx2
-        mask_logits = tf.gather_nd(mask_logits, indices)  # #fg x h x w
+    mask_logits = tf.gather(
+        mask_logits, tf.reshape(fg_labels - 1, [-1, 1]), batch_dims=1)
+    mask_logits = tf.squeeze(mask_logits, axis=1)
 
     mask_probs = tf.sigmoid(mask_logits)

@@ -74,7 +68,7 @@ def maskrcnn_upXconv_head(feature, num_category, num_convs, norm=None):
     with argscope([Conv2D, Conv2DTranspose], data_format='channels_first',
                   kernel_initializer=tf.variance_scaling_initializer(
                       scale=2.0, mode='fan_out',
-                      distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
+                      distribution='untruncated_normal')):
         # c2's MSRAFill is fan_out
         for k in range(num_convs):
             l = Conv2D('fcn{}'.format(k), l, cfg.MRCNN.HEAD_DIM, 3, activation=tf.nn.relu)
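Same idea as in model_frcnn.py: with TF ≥ 1.14 guaranteed, only the `batch_dims` path survives. For the mask head it selects one HxW logit map per foreground RoI according to its 1-based class label. A shape-only sketch with invented sizes, eager mode assumed:

    import tensorflow as tf

    num_fg, num_category, H, W = 3, 80, 28, 28
    mask_logits = tf.zeros([num_fg, num_category, H, W])
    fg_labels = tf.constant([1, 7, 80], dtype=tf.int64)    # labels are in 1..#class

    picked = tf.gather(mask_logits, tf.reshape(fg_labels - 1, [-1, 1]), batch_dims=1)
    picked = tf.squeeze(picked, axis=1)
    print(picked.shape)                                    # (3, 28, 28): one map per RoI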
examples/FasterRCNN/train.py

 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # File: train.py
 
 import argparse
 
 from tensorpack import *
 from tensorpack.tfutils import collect_env_info
-from tensorpack.tfutils.common import get_tf_version_tuple
 
 from dataset import register_coco, register_balloon
 from config import config as cfg
 ...

@@ -34,10 +32,6 @@ if __name__ == '__main__':
                         default='train_log/maskrcnn')
     parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py", nargs='+')
-    if get_tf_version_tuple() < (1, 6):
-        # https://github.com/tensorflow/tensorflow/issues/14657
-        logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
     args = parser.parse_args()
     if args.config:
         cfg.update_args(args.config)
tensorpack/graph_builder/training.py

@@ -316,9 +316,9 @@ class SyncMultiGPUReplicatedBuilder(DataParallelBuilder):
         Copy values of variables on GPU 0 to other GPUs.
         """
         # literally all variables, because it's better to sync optimizer-internal variables as well
-        all_vars = tf.global_variables() + tf.local_variables()
+        all_vars = tfv1.global_variables() + tfv1.local_variables()
         var_by_name = {v.name: v for v in all_vars}
-        trainable_names = {x.name for x in tf.trainable_variables()}
+        trainable_names = {x.name for x in tfv1.trainable_variables()}
         post_init_ops = []
 
         def log_failure(name, reason):
tensorpack/graph_builder/utils.py

@@ -25,11 +25,11 @@ def _replace_global_by_local(kwargs):
     if 'collections' in kwargs:
         collections = kwargs['collections']
         if not collections:
-            collections = {tf.GraphKeys.GLOBAL_VARIABLES}
+            collections = {tfv1.GraphKeys.GLOBAL_VARIABLES}
         else:
             collections = set(collections.copy())
-        collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
-        collections.add(tf.GraphKeys.LOCAL_VARIABLES)
+        collections.remove(tfv1.GraphKeys.GLOBAL_VARIABLES)
+        collections.add(tfv1.GraphKeys.LOCAL_VARIABLES)
         kwargs['collections'] = list(collections)
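For reference, the point of `_replace_global_by_local` is that a variable created with the rewritten kwargs ends up only in the LOCAL_VARIABLES collection rather than GLOBAL_VARIABLES. A minimal demonstration of that effect (illustrative only; the variable name is made up and this is not how tensorpack calls the helper):

    import tensorflow as tf

    tfv1 = tf.compat.v1
    with tfv1.Graph().as_default():
        tfv1.get_variable("scratch", shape=[2],
                          collections=[tfv1.GraphKeys.LOCAL_VARIABLES])
        print(len(tfv1.global_variables()), len(tfv1.local_variables()))  # 0 1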
tensorpack/train/tower.py

@@ -289,9 +289,9 @@ class SingleCostTrainer(TowerTrainer):
                 grads_no_vars = xla.compile(xla_func)
                 if ctx.has_own_variables:
-                    varlist = ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)
+                    varlist = ctx.get_collection_in_tower(tfv1.GraphKeys.TRAINABLE_VARIABLES)
                 else:
-                    varlist = tf.trainable_variables()
+                    varlist = tfv1.trainable_variables()
                 return list(zip(grads_no_vars, varlist))
 
             return get_grad_fn