Shashank Suhas / seminar-breakout · Commits

Commit cc63dee7, authored Jan 05, 2019 by Yuxin Wu
[MaskRCNN] move dataset loader and evaluation together
Parent: 88e900a9

Showing 7 changed files with 191 additions and 153 deletions (+191 -153)
examples/FasterRCNN/coco.py       +166  -56
examples/FasterRCNN/common.py       +2   -3
examples/FasterRCNN/config.py       +3   -3
examples/FasterRCNN/data.py         +3  -27
examples/FasterRCNN/eval.py         +6  -47
examples/FasterRCNN/train.py       +10  -16
tensorpack/tfutils/sessinit.py      +1   -1
examples/FasterRCNN/coco.py

@@ -4,6 +4,7 @@
 import numpy as np
 import os
 import tqdm
+import json
 from tabulate import tabulate
 from termcolor import colored

@@ -13,45 +14,25 @@ from tensorpack.utils.timer import timed_operation
 from config import config as cfg

-__all__ = ['COCODetection', 'COCOMeta']
+__all__ = ['COCODetection', 'DetectionDataset']


-class _COCOMeta(object):
+class COCODetection(object):
     # handle the weird (but standard) split of train and val
-    INSTANCE_TO_BASEDIR = {
+    _INSTANCE_TO_BASEDIR = {
         'valminusminival2014': 'val2014',
         'minival2014': 'val2014',
     }

-    def valid(self):
-        return hasattr(self, 'cat_names')
-
-    def create(self, cat_ids, cat_names):
-        """
-        cat_ids: list of ids
-        cat_names: list of names
-        """
-        assert not self.valid()
-        assert len(cat_ids) == cfg.DATA.NUM_CATEGORY and len(cat_names) == cfg.DATA.NUM_CATEGORY
-        self.cat_names = cat_names
-        self.class_names = ['BG'] + self.cat_names
-        # background has class id of 0
-        self.category_id_to_class_id = {v: i + 1 for i, v in enumerate(cat_ids)}
-        self.class_id_to_category_id = {v: k for k, v in self.category_id_to_class_id.items()}
-        cfg.DATA.CLASS_NAMES = self.class_names
-
-
-COCOMeta = _COCOMeta()
+    COCO_id_to_category_id = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24, 27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, 36: 32, 37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40, 46: 41, 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48, 54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56, 62: 57, 63: 58, 64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64, 74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72, 82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80}  # noqa
+    """
+    Mapping from the incontinuous COCO category id to an id in [1, #category]
+    """

-
-class COCODetection(object):
     def __init__(self, basedir, name):
         self.name = name
         self._imgdir = os.path.realpath(os.path.join(
-            basedir, COCOMeta.INSTANCE_TO_BASEDIR.get(name, name)))
+            basedir, self._INSTANCE_TO_BASEDIR.get(name, name)))
         assert os.path.isdir(self._imgdir), self._imgdir
         annotation_file = os.path.join(
             basedir, 'annotations/instances_{}.json'.format(name))

@@ -59,17 +40,37 @@ class COCODetection(object):
         from pycocotools.coco import COCO
         self.coco = COCO(annotation_file)
-
-        # initialize the meta
-        cat_ids = self.coco.getCatIds()
-        cat_names = [c['name'] for c in self.coco.loadCats(cat_ids)]
-        if not COCOMeta.valid():
-            COCOMeta.create(cat_ids, cat_names)
-        else:
-            assert COCOMeta.cat_names == cat_names
         logger.info("Instances loaded from {}.".format(annotation_file))

+    # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+    def print_coco_metrics(self, json_file):
+        """
+        Args:
+            json_file (str): path to the results json file in coco format
+        Returns:
+            dict: the evaluation metrics
+        """
+        from pycocotools.cocoeval import COCOeval
+        ret = {}
+        cocoDt = self.coco.loadRes(json_file)
+        cocoEval = COCOeval(self.coco, cocoDt, 'bbox')
+        cocoEval.evaluate()
+        cocoEval.accumulate()
+        cocoEval.summarize()
+        fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']
+        for k in range(6):
+            ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]
+
+        json_obj = json.load(open(json_file))
+        if len(json_obj) > 0 and 'segmentation' in json_obj[0]:
+            cocoEval = COCOeval(self.coco, cocoDt, 'segm')
+            cocoEval.evaluate()
+            cocoEval.accumulate()
+            cocoEval.summarize()
+            for k in range(6):
+                ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
+        return ret
+
     def load(self, add_gt=True, add_mask=False):
         """
         Args:

@@ -149,7 +150,7 @@ class COCODetection(object):
         # all geometrically-valid boxes are returned
         boxes = np.asarray([obj['bbox'] for obj in valid_objs], dtype='float32')  # (n, 4)
-        cls = np.asarray([COCOMeta.category_id_to_class_id[obj['category_id']]
+        cls = np.asarray([self.COCO_id_to_category_id[obj['category_id']]
                           for obj in valid_objs], dtype='int32')  # (n,)
         is_crowd = np.asarray([obj['iscrowd'] for obj in valid_objs], dtype='int8')

@@ -162,23 +163,6 @@ class COCODetection(object):
                 img['segmentation'] = [
                     obj['segmentation'] for obj in valid_objs]

-    def print_class_histogram(self, imgs):
-        nr_class = len(COCOMeta.class_names)
-        hist_bins = np.arange(nr_class + 1)
-
-        # Histogram of ground-truth objects
-        gt_hist = np.zeros((nr_class,), dtype=np.int)
-        for entry in imgs:
-            # filter crowd?
-            gt_inds = np.where((entry['class'] > 0) & (entry['is_crowd'] == 0))[0]
-            gt_classes = entry['class'][gt_inds]
-            gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
-        data = [[COCOMeta.class_names[i], v] for i, v in enumerate(gt_hist)]
-        data.append(['total', sum([x[1] for x in data])])
-        table = tabulate(data, headers=['class', '#box'], tablefmt='pipe')
-        logger.info("Ground-Truth Boxes:\n" + colored(table, 'cyan'))
-
     @staticmethod
     def load_many(basedir, names, add_gt=True, add_mask=False):
         """

@@ -195,8 +179,134 @@ class COCODetection(object):
         return ret


+class DetectionDataset(object):
+    """
+    A singleton to load datasets, evaluate results, and provide metadata.
+    """
+    _instance = None
+
+    def __new__(cls):
+        if not isinstance(cls._instance, cls):
+            cls._instance = object.__new__(cls)
+        return cls._instance
+
+    def __init__(self):
+        """
+        This function is responsible for setting the dataset-specific
+        attributes in both cfg and self.
+        """
+        self.num_category = cfg.DATA.NUM_CATEGORY = 80
+        self.num_classes = self.num_category + 1
+        self.class_names = cfg.DATA.CLASS_NAMES = [
+            "BG", "person", "bicycle", "car", "motorcycle", "airplane", "bus",
+            "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign",
+            "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
+            "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
+            "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
+            "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket",
+            "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana",
+            "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza",
+            "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table",
+            "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
+            "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock",
+            "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]  # noqa
+        assert len(self.class_names) == self.num_classes
+
+    def load_training_roidbs(self, names):
+        """
+        Args:
+            names (list[str]): name of the training datasets, e.g. ['train2014', 'valminusminival2014']
+
+        Returns:
+            roidbs (list[dict]):
+
+        Produce "roidbs" as a list of dict, each dict corresponds to one image with k>=0 instances,
+        and the following keys are expected for training:
+
+        height, width: integer
+        file_name: str, full path to the image
+        boxes: numpy array of kx4 floats, each row is [x1, y1, x2, y2]
+        category: numpy array of k integers, in the range of [1, #categories]
+        is_crowd: k booleans. Use k False if you don't know what it means.
+        segmentation: k lists of numpy arrays (one for each instance).
+            Each list of numpy arrays corresponds to the mask for one instance.
+            Each numpy array in the list is a polygon of shape Nx2,
+            because one mask can be represented by N polygons.
+
+            If your segmentation annotations are originally masks rather than polygons,
+            either convert it, or the augmentation will need to be changed or skipped accordingly.
+
+            Include this field only if training Mask R-CNN.
+        """
+        return COCODetection.load_many(
+            cfg.DATA.BASEDIR, cfg.DATA.TRAIN, add_gt=True, add_mask=cfg.MODE_MASK)
+
+    def load_inference_roidbs(self, name):
+        """
+        Args:
+            name (str): name of one inference dataset, e.g. 'minival2014'
+
+        Returns:
+            roidbs (list[dict]):
+                Each dict corresponds to one image to run inference on. The
+                following keys in the dict are expected:
+
+                file_name (str): full path to the image
+                id (str): an id for the image. The inference results will be stored with this id.
+        """
+        return COCODetection.load_many(cfg.DATA.BASEDIR, name, add_gt=False)
+
+    def eval_or_save_inference_results(self, results, dataset, output=None):
+        """
+        Args:
+            results (list[dict]): the inference results as dicts.
+                Each dict corresponds to one __instance__. It contains the following keys:
+
+                image_id (str): the id that matches `load_inference_roidbs`.
+                category_id (int): the category prediction, in range [1, #category]
+                bbox (list[float]): x1, y1, x2, y2
+                score (float):
+                segmentation: the segmentation mask in COCO's rle format.
+            dataset (str): the name of the dataset to evaluate.
+            output (str): the output file to optionally save the results to.
+
+        Returns:
+            dict: the evaluation results.
+        """
+        continuous_id_to_COCO_id = {v: k for k, v in COCODetection.COCO_id_to_category_id.items()}
+        for res in results:
+            # convert to COCO's incontinuous category id
+            res['category_id'] = continuous_id_to_COCO_id[res['category_id']]
+            # COCO expects results in xywh format
+            box = res['bbox']
+            box[2] -= box[0]
+            box[3] -= box[1]
+            res['bbox'] = [round(float(x), 3) for x in box]
+
+        assert output is not None, "COCO evaluation requires an output file!"
+        with open(output, 'w') as f:
+            json.dump(results, f)
+        if len(output):
+            # sometimes may crash if the results are empty?
+            return COCODetection(cfg.DATA.BASEDIR, dataset).print_coco_metrics(output)
+        else:
+            return {}
+
+    def print_class_histogram(self, roidbs):
+        """
+        Args:
+            roidbs (list[dict]): the same format as the output of `load_training_roidbs`.
+        """
+        hist_bins = np.arange(self.num_classes + 1)
+
+        # Histogram of ground-truth objects
+        gt_hist = np.zeros((self.num_classes,), dtype=np.int)
+        for entry in roidbs:
+            # filter crowd?
+            gt_inds = np.where((entry['class'] > 0) & (entry['is_crowd'] == 0))[0]
+            gt_classes = entry['class'][gt_inds]
+            gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
+        data = [[self.class_names[i], v] for i, v in enumerate(gt_hist)]
+        data.append(['total', sum([x[1] for x in data])])
+        table = tabulate(data, headers=['class', '#box'], tablefmt='pipe')
+        logger.info("Ground-Truth Boxes:\n" + colored(table, 'cyan'))
+
+
 if __name__ == '__main__':
     c = COCODetection(cfg.DATA.BASEDIR, 'train2014')
     gt_boxes = c.load(add_gt=True, add_mask=True)
     print("#Images:", len(gt_boxes))
-    c.print_class_histogram(gt_boxes)
+    DetectionDataset().print_class_histogram(gt_boxes)
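The core structural addition above is the `__new__`-based singleton. A minimal standalone sketch of that pattern (illustrative only, not part of the commit):

class SingletonDemo(object):
    _instance = None

    def __new__(cls):
        # create the shared instance on first call, reuse it afterwards
        if not isinstance(cls._instance, cls):
            cls._instance = object.__new__(cls)
        return cls._instance

a, b = SingletonDemo(), SingletonDemo()
assert a is b  # every call returns the same object

Note that __init__ still runs on every construction, so DetectionDataset.__init__ re-assigns cfg.DATA.NUM_CATEGORY and cfg.DATA.CLASS_NAMES each time; that is harmless here because the values are constants.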
examples/FasterRCNN/common.py

@@ -95,12 +95,11 @@ def segmentation_to_mask(polys, height, width):
     Args:
         polys: a list of nx2 float array. Each array contains many (x, y) coordinates.

     Returns:
         a binary matrix of (height, width)
     """
-    polys = np.asarray([p.flatten() for p in polys], dtype='float32')
-    assert polys.size > 0, "Polygons are empty!"
+    polys = [p.flatten().tolist() for p in polys]
+    assert len(polys) > 0, "Polygons are empty!"

     import pycocotools.mask as cocomask
     rles = cocomask.frPyObjects(polys, height, width)
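The rewritten lines hand pycocotools plain flat coordinate lists instead of a float32 array. A hedged sketch of the conversion path this feeds (the polygon values are made up; frPyObjects, merge and decode are existing pycocotools.mask functions):

import numpy as np
import pycocotools.mask as cocomask

# one triangular instance, as an Nx2 array of (x, y) points
poly = np.array([[10., 10.], [40., 10.], [25., 35.]], dtype='float32')

# frPyObjects() expects flat [x1, y1, x2, y2, ...] lists, which is why the
# patched segmentation_to_mask() does p.flatten().tolist()
rles = cocomask.frPyObjects([poly.flatten().tolist()], 50, 50)
mask = cocomask.decode(cocomask.merge(rles))  # (50, 50) uint8 binary mask
print(mask.sum())  # number of pixels covered by the polygon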
examples/FasterRCNN/config.py

@@ -85,7 +85,7 @@ _C.DATA.BASEDIR = '/path/to/your/COCO/DIR'
 _C.DATA.TRAIN = ['train2014', 'valminusminival2014']   # i.e. trainval35k, AKA train2017
 # Each VAL dataset will be evaluated separately (instead of concatenated)
 _C.DATA.VAL = ('minival2014', )  # AKA val2017
-_C.DATA.NUM_CATEGORY = 80    # 80 categories in COCO
+_C.DATA.NUM_CATEGORY = 0  # without the background class (e.g., 80 for COCO)
 _C.DATA.CLASS_NAMES = []  # NUM_CLASS (NUM_CATEGORY+1) strings, the first is "BG".
 # For COCO, this list will be populated later by the COCO data loader.

@@ -126,7 +126,7 @@ _C.TRAIN.LR_SCHEDULE = [240000, 320000, 360000]    # "2x" schedule in detectron
 # Longer schedules for from-scratch training (https://arxiv.org/abs/1811.08883):
 # _C.TRAIN.LR_SCHEDULE = [960000, 1040000, 1080000]    # "6x" schedule in detectron
 # _C.TRAIN.LR_SCHEDULE = [1500000, 1580000, 1620000]   # "9x" schedule in detectron
 _C.TRAIN.EVAL_PERIOD = 25  # period (epochs) to run evaluation

 # preprocessing --------------------
 # Alternative old (worse & faster) setting: 600

@@ -241,7 +241,7 @@ def finalize_configs(is_training):
     if is_training:
         train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE
         if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:
-            # don't warmup if augmentation is on
+            # don't autotune if augmentation is on
             os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
         os.environ['TF_AUTOTUNE_THRESHOLD'] = '1'
         assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER
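Since NUM_CATEGORY now defaults to 0, the config is incomplete until a dataset loader fills it in. A sketch of the intended initialization order, assuming the COCO defaults set by DetectionDataset.__init__ and a fresh process in which the singleton has not yet been constructed:

from config import config as cfg
from coco import DetectionDataset

assert cfg.DATA.NUM_CATEGORY == 0      # placeholder until a loader fills it
DetectionDataset()                     # sets NUM_CATEGORY = 80 and CLASS_NAMES
assert cfg.DATA.NUM_CATEGORY == 80
assert cfg.DATA.CLASS_NAMES[0] == "BG"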
examples/FasterRCNN/data.py

@@ -10,10 +10,10 @@ from tensorpack.dataflow import (
 from tensorpack.utils import logger
 from tensorpack.utils.argtools import log_once, memoized

-from coco import COCODetection
 from common import (
     CustomResize, DataFromListOfDict, box_to_point8,
     filter_boxes_inside_shape, point8_to_box, segmentation_to_mask)
 from config import config as cfg
+from coco import DetectionDataset
 from utils.generate_anchors import generate_anchors
 from utils.np_box_ops import area as np_area
 from utils.np_box_ops import ioa as np_ioa

@@ -280,25 +280,7 @@ def get_train_dataflow():
         If MODE_MASK, gt_masks: (N, h, w)
     """
-    roidbs = COCODetection.load_many(
-        cfg.DATA.BASEDIR, cfg.DATA.TRAIN, add_gt=True, add_mask=cfg.MODE_MASK)
-    """
-    To train on your own data, change this to your loader.
-    Produce "roidbs" as a list of dict, in the dict the following keys are needed for training:
-    height, width: integer
-    file_name: str, full path to the image
-    boxes: numpy array of kx4 floats
-    class: numpy array of k integers
-    is_crowd: k booleans. Use k False if you don't know what it means.
-    segmentation: k lists of numpy arrays (one for each box).
-        Each list of numpy arrays corresponds to the mask for one instance.
-        Each numpy array in the list is a polygon of shape Nx2,
-        because one mask can be represented by N polygons.
-        If your segmentation annotations are originally masks rather than polygons,
-        either convert it, or the augmentation code below will need to be
-        changed or skipped accordingly.
-    """
+    roidbs = DetectionDataset().load_training_roidbs(cfg.DATA.TRAIN)

     # Valid training images should have at least one fg box.
     # But this filter shall not be applied for testing.

@@ -387,13 +369,7 @@ def get_eval_dataflow(name, shard=0, num_shards=1):
         name (str): name of the dataset to evaluate
         shard, num_shards: to get subset of evaluation data
     """
-    roidbs = COCODetection.load_many(cfg.DATA.BASEDIR, name, add_gt=False)
-    """
-    To inference on your own data, change this to your loader.
-    Produce "roidbs" as a list of dict, in the dict the following keys are needed for training:
-    file_name: str, full path to the image
-    id: an id of this image
-    """
+    roidbs = DetectionDataset().load_inference_roidbs(name)

     num_imgs = len(roidbs)
     img_per_shard = num_imgs // num_shards
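The "train on your own data" instructions deleted here now live in the DetectionDataset.load_training_roidbs docstring. For reference, a hypothetical roidb entry matching that contract (all values made up; note that COCODetection.load and print_class_histogram use the key 'class' for the category array):

import numpy as np

roidb = {
    'height': 480, 'width': 640,                  # image size in pixels
    'file_name': '/path/to/images/000001.jpg',    # full path to the image
    'boxes': np.array([[48., 240., 195., 371.]],  # k x 4 float32, [x1, y1, x2, y2]
                      dtype='float32'),
    'class': np.array([1], dtype='int32'),        # k ints in [1, #categories]
    'is_crowd': np.array([False]),                # k booleans
    # 'segmentation': [...],                      # only when training Mask R-CNN
}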
examples/FasterRCNN/eval.py

@@ -3,19 +3,15 @@
 import itertools
 import numpy as np
-import os
 from collections import namedtuple
 from concurrent.futures import ThreadPoolExecutor
 from contextlib import ExitStack

 import cv2
 import pycocotools.mask as cocomask
 import tqdm
-from pycocotools.coco import COCO
-from pycocotools.cocoeval import COCOeval

 from tensorpack.utils.utils import get_tqdm_kwargs

-from coco import COCOMeta
 from common import CustomResize, clip_boxes
 from config import config as cfg

@@ -103,7 +99,8 @@ def eval_coco(df, detect_func, tqdm_bar=None):
         will create a new one.

     Returns:
-        list of dict, to be dumped to COCO json format
+        list of dict, in the format used by
+        `DetectionDataset.eval_or_save_inference_results`
     """
     df.reset_state()
     all_results = []

@@ -115,15 +112,10 @@ def eval_coco(df, detect_func, tqdm_bar=None):
         for img, img_id in df:
             results = detect_func(img)
             for r in results:
-                box = r.box
-                cat_id = COCOMeta.class_id_to_category_id[r.class_id]
-                box[2] -= box[0]
-                box[3] -= box[1]
-
                 res = {
                     'image_id': img_id,
-                    'category_id': cat_id,
-                    'bbox': list(map(lambda x: round(float(x), 3), box)),
+                    'category_id': r.class_id,
+                    'bbox': list(r.box),
                     'score': round(float(r.score), 4),
                 }

@@ -147,7 +139,8 @@ def multithread_eval_coco(dataflows, detect_funcs):
     detect_funcs: a list of callable to be used in :func:`eval_coco`

     Returns:
-        list of dict, to be dumped to COCO json format
+        list of dict, in the format used by
+        `DetectionDataset.eval_or_save_inference_results`
     """
     num_worker = len(dataflows)
     assert len(dataflows) == len(detect_funcs)

@@ -158,37 +151,3 @@ def multithread_eval_coco(dataflows, detect_funcs):
             futures.append(executor.submit(eval_coco, dataflow, pred, pbar))
     all_results = list(itertools.chain(*[fut.result() for fut in futures]))
     return all_results
-
-
-# https://github.com/pdollar/coco/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-def print_coco_metrics(dataset, json_file):
-    """
-    Args:
-        dataset (str): name of the dataset
-        json_file (str): path to the results json file in coco format
-
-    If your data is not in COCO format, write your own evaluation function.
-    """
-    ret = {}
-    assert cfg.DATA.BASEDIR and os.path.isdir(cfg.DATA.BASEDIR)
-    annofile = os.path.join(
-        cfg.DATA.BASEDIR, 'annotations', 'instances_{}.json'.format(dataset))
-    coco = COCO(annofile)
-    cocoDt = coco.loadRes(json_file)
-    cocoEval = COCOeval(coco, cocoDt, 'bbox')
-    cocoEval.evaluate()
-    cocoEval.accumulate()
-    cocoEval.summarize()
-    fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']
-    for k in range(6):
-        ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]
-    if cfg.MODE_MASK:
-        cocoEval = COCOeval(coco, cocoDt, 'segm')
-        cocoEval.evaluate()
-        cocoEval.accumulate()
-        cocoEval.summarize()
-        for k in range(6):
-            ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
-    return ret
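eval_coco no longer converts boxes to COCO conventions; that logic moved into eval_or_save_inference_results in coco.py, which runs once over the collected results. The relocated arithmetic as a standalone sketch (numbers made up):

box = [48.0, 240.0, 195.0, 371.0]         # [x1, y1, x2, y2] from eval_coco
box[2] -= box[0]                          # w = x2 - x1 -> 147.0
box[3] -= box[1]                          # h = y2 - y1 -> 131.0
print([round(float(x), 3) for x in box])  # [48.0, 240.0, 147.0, 131.0], COCO xywh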
examples/FasterRCNN/train.py

@@ -22,11 +22,11 @@ from tensorpack.tfutils.summary import add_moving_summary
 import model_frcnn
 import model_mrcnn
 from basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone
-from coco import COCODetection
+from coco import DetectionDataset
 from config import config as cfg
 from config import finalize_configs
 from data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow
-from eval import DetectionResult, detect_one_image, eval_coco, multithread_eval_coco, print_coco_metrics
+from eval import DetectionResult, detect_one_image, eval_coco, multithread_eval_coco
 from model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align
 from model_cascade import CascadeRCNNHead
 from model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses

@@ -388,15 +388,13 @@ def offline_evaluate(pred_config, output_file):
         logger.info("Evaluating {} ...".format(dataset))
         dataflows = [
             get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
             for k in range(num_gpu)]
         if num_gpu > 1:
             all_results = multithread_eval_coco(dataflows, predictors)
         else:
             all_results = eval_coco(dataflows[0], predictors[0])
         output = output_file + '-' + dataset
-        with open(output, 'w') as f:
-            json.dump(all_results, f)
-        print_coco_metrics(dataset, output)
+        DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)


 def predict(pred_func, input_file):

@@ -484,14 +482,11 @@ class EvalCallback(Callback):
         output_file = os.path.join(
             logdir, '{}-outputs{}.json'.format(self._eval_dataset, self.global_step))
-        with open(output_file, 'w') as f:
-            json.dump(all_results, f)
-        try:
-            scores = print_coco_metrics(self._eval_dataset, output_file)
-            for k, v in scores.items():
-                self.trainer.monitors.put_scalar(k, v)
-        except Exception:
-            logger.exception("Exception in COCO evaluation.")
+
+        scores = DetectionDataset().eval_or_save_inference_results(
+            all_results, self._eval_dataset, output_file)
+        for k, v in scores.items():
+            self.trainer.monitors.put_scalar(k, v)

     def _trigger_epoch(self):
         if self.epoch_num in self.epochs_to_eval:

@@ -520,6 +515,7 @@ if __name__ == '__main__':
     cfg.update_args(args.config)

     MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
+    DetectionDataset()  # initialize the config with information from our dataset

     if args.visualize or args.evaluate or args.predict:
         assert tf.test.is_gpu_available()

@@ -538,7 +534,6 @@ if __name__ == '__main__':
             input_names=MODEL.get_inference_tensor_names()[0],
             output_names=MODEL.get_inference_tensor_names()[1])
         if args.predict:
-            COCODetection(cfg.DATA.BASEDIR, 'val2014')   # Only to load the class names into caches
             predict(OfflinePredictor(predcfg), args.predict)
         elif args.evaluate:
             assert args.evaluate.endswith('.json'), args.evaluate

@@ -573,7 +568,6 @@ if __name__ == '__main__':
         total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
         logger.info("Total passes of the training set is: {:.5g}".format(total_passes))

         callbacks = [
             PeriodicCallback(
                 ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
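EvalCallback now takes the metrics dict straight from eval_or_save_inference_results and forwards every entry to the training monitors. The keys follow the fields list in coco.py; a sketch with illustrative (made-up) values:

scores = {
    'mAP(bbox)/IoU=0.5:0.95': 0.375,
    'mAP(bbox)/IoU=0.5': 0.581,
    'mAP(bbox)/IoU=0.75': 0.403,
    'mAP(bbox)/small': 0.212,
    'mAP(bbox)/medium': 0.409,
    'mAP(bbox)/large': 0.497,
}
for name, value in scores.items():
    # in the callback this is self.trainer.monitors.put_scalar(name, value)
    print('{} = {:.3f}'.format(name, value))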
tensorpack/tfutils/sessinit.py

@@ -214,7 +214,7 @@ class DictRestore(SessionInit):
         mismatch.log()

         upd = SessionUpdate(sess, [v for v in variables if v.name in intersect])
-        logger.info("Restoring from dict ...")
+        logger.info("Restoring {} variables from dict ...".format(len(intersect)))
         upd.update({name: value for name, value in six.iteritems(self._prms) if name in intersect})