Commit 3c27064c
authored Oct 12, 2016 by Yuxin Wu

update hed example

parent 1b3b50d0

Showing 3 changed files with 31 additions and 20 deletions
examples/HED/hed.py                  +25 -18
examples/Inception/inception-bn.py    +3  -1
examples/Inception/inceptionv3.py     +3  -1
examples/HED/hed.py
@@ -54,7 +54,12 @@ class Model(ModelDesc):
         def branch(name, l, up):
             with tf.variable_scope(name) as scope:
-                l = Conv2D('convfc', l, 1, kernel_shape=1, nl=tf.identity, use_bias=True)
+                l = Conv2D('convfc', l, 1, kernel_shape=1,
+                           nl=tf.identity, use_bias=True,
+                           W_init=tf.zeros_initializer,
+                           b_init=tf.zeros_initializer)
+                #if up != 1:
+                    #l = BilinearUpSample('upsample', l, up)
                 while up != 1:
                     l = BilinearUpSample('upsample{}'.format(up), l, 2)
                     up = up / 2
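Note: the loop above brings a branch back to full resolution through repeated 2x bilinear upsampling rather than one large step. A toy check in plain Python (not part of the commit) of how many steps a branch created with up=16, such as 'branch5' below, goes through:

# pure-Python sketch of the loop's control flow
up, steps = 16, 0
while up != 1:
    steps += 1
    up = up / 2      # Python 2 division in the original; also terminates on Python 3
print(steps)         # 4 -> total upsampling factor 2**4 = 16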
@@ -88,12 +93,13 @@ class Model(ModelDesc):
         l = Conv2D('conv5_3', l, 512)
         b5 = branch('branch5', l, 16)

-        #final_map = Conv2D('convfcweight',
-                    #tf.concat(3, [b1, b2, b3, b4, b5]), 1, 1,
-                    #W_init=tf.constant_initializer(0.2), use_bias=False)
-        #final_map = tf.squeeze(final_map, [3], name='predmap')
-        final_map = tf.squeeze(tf.mul(0.2, b1 + b2 + b3 + b4 + b5),
-                               [3], name='predmap')
+        final_map = Conv2D('convfcweight',
+                           tf.concat(3, [b1, b2, b3, b4, b5]), 1, 1,
+                           W_init=tf.constant_initializer(0.2),
+                           use_bias=False, nl=tf.identity)
+        final_map = tf.squeeze(final_map, [3], name='predmap')
+        #final_map = tf.squeeze(tf.mul(0.2, b1 + b2 + b3 + b4 + b5),
+                               #[3], name='predmap')
         costs = []
         for idx, b in enumerate([b1, b2, b3, b4, b5, final_map]):
             output = tf.nn.sigmoid(b, name='output{}'.format(idx + 1))
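Note: the fused edge map now comes from a trainable 1x1 convolution ('convfcweight') over the five concatenated side outputs instead of the hard-coded 0.2 average. A small NumPy sketch (not part of the commit; toy arrays stand in for b1..b5) of why W_init=tf.constant_initializer(0.2) with use_bias=False makes the two identical at initialization:

import numpy as np

b = [np.random.rand(4, 4) for _ in range(5)]       # five toy side outputs

old_fusion = 0.2 * sum(b)                          # removed: tf.mul(0.2, b1 + ... + b5)
w = np.full(5, 0.2)                                # 1x1 conv weights at init, one per input channel
new_fusion = sum(wi * bi for wi, bi in zip(w, b))  # what 'convfcweight' computes (no bias)

assert np.allclose(old_fusion, new_fusion)         # same result at init, but w is now learned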
@@ -116,8 +122,8 @@ class Model(ModelDesc):
         self.cost = tf.add_n(costs, name='cost')

     def get_gradient_processor(self):
-        return [ScaleGradient([('convfc.*', 0.1), ('conv5_.*', 100)]),
-                SummaryGradient()]
+        return [ScaleGradient([
+            ('convfcweight.*', 0.1), ('conv5_.*', 5)])]

 def get_data(name):
     isTrain = name == 'train'
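Note: the gradient multipliers now target 'convfcweight.*' (the new fusion layer) and use a milder 5x boost for 'conv5_.*'. A hedged sketch (illustrative only, not tensorpack's ScaleGradient implementation) of the regex-to-multiplier mapping this expresses:

import re

def scale_grads(name_grad_pairs, rules):
    # rules: list of (regex, multiplier); assume the first matching rule is applied
    out = []
    for name, g in name_grad_pairs:
        for pattern, mult in rules:
            if re.match(pattern, name):
                g = g * mult
                break
        out.append((name, g))
    return out

print(scale_grads([('convfcweight/W', 1.0), ('conv5_3/W', 1.0), ('conv1_1/W', 1.0)],
                  [('convfcweight.*', 0.1), ('conv5_.*', 5)]))
# [('convfcweight/W', 0.1), ('conv5_3/W', 5.0), ('conv1_1/W', 1.0)]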
@@ -138,10 +144,6 @@ def get_data(name):
         h0, w0, newh, neww = param
         return img[h0:h0 + newh, w0:w0 + neww]

-    def f(m):
-        m[m >= 0.50] = 1
-        m[m < 0.50] = 0
-        return m
     if isTrain:
         shape_aug = [
             imgaug.RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),
@@ -149,13 +151,18 @@ def get_data(name):
             imgaug.RotationAndCropValid(90),
             CropMultiple16(),
             imgaug.Flip(horiz=True),
-            imgaug.Flip(vert=True),
-            ]
+            imgaug.Flip(vert=True)]
     else:
         # the original image shape (321x481) in BSDS is not a multiple of 16
         IMAGE_SHAPE = (320, 480)
         shape_aug = [imgaug.CenterCrop(IMAGE_SHAPE)]
     ds = AugmentImageComponents(ds, shape_aug, (0, 1))

+    def f(m):
+        m[m >= 0.50] = 1
+        m[m < 0.50] = 0
+        return m
     ds = MapDataComponent(ds, f, 1)

     if isTrain:
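Note: the binarization helper f is now defined right before it is used, after the shape augmentations. A toy NumPy check (assuming the ground-truth edge-map component holds values in [0, 1]) of what MapDataComponent(ds, f, 1) does to that component:

import numpy as np

m = np.array([0.10, 0.49, 0.50, 0.90])
m[m >= 0.50] = 1
m[m < 0.50] = 0
print(m)   # [0. 0. 1. 1.]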
@@ -188,17 +195,17 @@ def get_config():
     dataset_val = get_data('val')
     #dataset_test = get_data('test')

-    lr = tf.Variable(5e-6, trainable=False, name='learning_rate')
+    lr = tf.Variable(3e-5, trainable=False, name='learning_rate')
     tf.scalar_summary('learning_rate', lr)

     return TrainConfig(
         dataset=dataset_train,
-        # optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),
-        optimizer=tf.train.MomentumOptimizer(lr, 0.9),
+        optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),
+        # optimizer=tf.train.MomentumOptimizer(lr, 0.9),
         callbacks=Callbacks([
             StatPrinter(),
             ModelSaver(),
-            ScheduledHyperParamSetter('learning_rate', [(100, 3e-6), (200, 8e-7)]),
+            ScheduledHyperParamSetter('learning_rate', [(35, 6e-6), (50, 1e-6), (60, 8e-7)]),
             HumanHyperParamSetter('learning_rate'),
             InferenceRunner(dataset_val,
                             BinaryClassificationStats('prediction', 'edgemap'))
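Note: training now starts from a 3e-5 learning rate with Adam and decays earlier and more often. A minimal sketch (plain Python, not tensorpack code) of the piecewise-constant schedule the new ScheduledHyperParamSetter line describes:

def lr_at_epoch(epoch):
    # initial value from tf.Variable(3e-5, ...); lowered at the listed epochs
    schedule = [(0, 3e-5), (35, 6e-6), (50, 1e-6), (60, 8e-7)]
    lr = schedule[0][1]
    for e, v in schedule:
        if epoch >= e:
            lr = v
    return lr

print([lr_at_epoch(e) for e in (1, 35, 50, 60)])   # [3e-05, 6e-06, 1e-06, 8e-07]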
examples/Inception/inception-bn.py
@@ -14,7 +14,9 @@ from tensorpack.tfutils.symbolic_functions import *
 from tensorpack.tfutils.summary import *

-BATCH_SIZE = 64
+TOTAL_BATCH_SIZE = 64 * 6
+NR_GPU = 6
+BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU
 INPUT_SHAPE = 224

 """
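Note: both Inception examples switch from a hard-coded per-GPU batch size to deriving it from a global total, which keeps the effective (all-GPU) batch size explicit. The arithmetic for inception-bn.py:

TOTAL_BATCH_SIZE = 64 * 6              # effective batch size across all GPUs
NR_GPU = 6
BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU
print(BATCH_SIZE)                      # 64 per GPU, same as before; only the bookkeeping changed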
examples/Inception/inceptionv3.py
@@ -27,7 +27,9 @@ The hyperparameters here are for 8 GPUs, so the effective batch size is 8*64 = 512
 With 8 TitanX it runs about 0.45 it/s.
 """

-BATCH_SIZE = 64
+TOTAL_BATCH_SIZE = 512
+NR_GPU = 8
+BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU
 INPUT_SHAPE = 299

 class Model(ModelDesc):