Commit d31ba459 ("misc fix")
Authored Nov 22, 2016 by Yuxin Wu
Parent: f482a5aa
Showing 5 changed files with 20 additions and 12 deletions:
examples/DoReFa-Net/alexnet-dorefa.py   +6 / -3
examples/GAN/GAN.py                     +4 / -4
examples/ResNet/imagenet-resnet.py      +2 / -2
tensorpack/models/regularize.py         +6 / -2
tensorpack/utils/fs.py                  +2 / -1
examples/DoReFa-Net/alexnet-dorefa.py

@@ -38,7 +38,7 @@ Accuracy:
     With (W,A,G)=(1,2,4), 63% error.
 Speed:
-    About 3.5 iteration/s on 4 Tesla M40. (Each epoch is set to 10000 iterations)
+    About 2.8 iteration/s on 1 TitanX. (Each epoch is set to 10000 iterations)
 To Train:
     ./alexnet-dorefa.py --dorefa 1,2,6 --data PATH --gpu 0,1,2,3

@@ -66,8 +66,7 @@ BITW = 1
 BITA = 2
 BITG = 6
 TOTAL_BATCH_SIZE = 128
-NUM_GPU = 4
-BATCH_SIZE = TOTAL_BATCH_SIZE // NUM_GPU
+BATCH_SIZE = 64

 class Model(ModelDesc):
     def _get_input_vars(self):

@@ -301,6 +300,10 @@ if __name__ == '__main__':
         run_image(Model(), ParamRestore(np.load(args.load, encoding='latin1').item()), args.run)
         sys.exit()

+    assert args.gpu is not None, "Need to specify a list of gpu for training!"
+    NR_GPU = len(args.gpu.split(','))
+    BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU
+
     config = get_config()
     if args.load:
         config.session_init = SaverRestore(args.load)
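The batch-size handling is now driven by the --gpu flag: the hard-coded NUM_GPU is gone and the per-GPU batch size is computed at startup from the number of GPUs given on the command line. A minimal standalone sketch of the same pattern, with a hypothetical train() stub standing in for the rest of the script:

# Sketch: derive the per-GPU batch size from a comma-separated --gpu list,
# mirroring the pattern added to alexnet-dorefa.py. `train` is a placeholder.
import argparse

TOTAL_BATCH_SIZE = 128   # global batch size, split evenly across the GPUs
BATCH_SIZE = 64          # default, recomputed below once --gpu is known

def train(batch_size, gpus):
    # hypothetical stand-in for building the config and launching training
    print("batch %d per GPU, using GPUs %s" % (batch_size, gpus))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help="comma-separated list of GPUs, e.g. 0,1,2,3")
    args = parser.parse_args()

    assert args.gpu is not None, "Need to specify a list of gpu for training!"
    NR_GPU = len(args.gpu.split(','))
    BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU   # integer batch size per GPU
    train(BATCH_SIZE, args.gpu)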
examples/GAN/GAN.py

@@ -54,16 +54,16 @@ def build_GAN_losses(vecpos, vecneg):
     tf.histogram_summary('sigmoid-neg', sigmneg)
     d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
-        vecpos, tf.ones_like(vecpos)), name='d_loss_pos')
+        vecpos, tf.ones_like(vecpos)), name='d_CE_loss_pos')
     d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
-        vecneg, tf.zeros_like(vecneg)), name='d_loss_neg')
+        vecneg, tf.zeros_like(vecneg)), name='d_CE_loss_neg')
     d_pos_acc = tf.reduce_mean(tf.cast(sigmpos > 0.5, tf.float32), name='pos_acc')
     d_neg_acc = tf.reduce_mean(tf.cast(sigmneg < 0.5, tf.float32), name='neg_acc')
     g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
-        vecneg, tf.ones_like(vecneg)), name='g_loss')
+        vecneg, tf.ones_like(vecneg)), name='g_CE_loss')
-    d_loss = tf.add(d_loss_pos, d_loss_neg, name='d_loss')
+    d_loss = tf.add(d_loss_pos, d_loss_neg, name='d_CE_loss')
     add_moving_summary(d_loss_pos, d_loss_neg,
                        g_loss, d_loss,
                        d_pos_acc, d_neg_acc)
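The GAN change only renames the loss tensors and their summaries (d_loss becomes d_CE_loss, and so on); the quantities are still mean sigmoid cross-entropy on the discriminator logits. For reference, a NumPy sketch of what those tensors compute, assuming vecpos and vecneg are the discriminator logits for real and generated samples (the helper names below are illustrative, not part of GAN.py):

# NumPy sketch of the losses built in build_GAN_losses, for reference only.
import numpy as np

def sigmoid_ce_with_logits(logits, labels):
    # numerically stable sigmoid cross-entropy: max(x, 0) - x*z + log(1 + exp(-|x|))
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

def gan_ce_losses(vecpos, vecneg):
    d_CE_loss_pos = sigmoid_ce_with_logits(vecpos, np.ones_like(vecpos)).mean()
    d_CE_loss_neg = sigmoid_ce_with_logits(vecneg, np.zeros_like(vecneg)).mean()
    d_CE_loss = d_CE_loss_pos + d_CE_loss_neg   # discriminator objective
    g_CE_loss = sigmoid_ce_with_logits(vecneg, np.ones_like(vecneg)).mean()   # generator objective
    return d_CE_loss, g_CE_loss

d, g = gan_ce_losses(np.array([2.0, 1.5]), np.array([-1.0, -0.5]))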
examples/ResNet/imagenet-resnet.py

 #!/usr/bin/env python
 # -*- coding: UTF-8 -*-
-# File: imagenet-resnet-short.py
+# File: imagenet-resnet.py
 # Author: Yuxin Wu <ppwwyyxx@gmail.com>
 import cv2

@@ -233,7 +233,7 @@ if __name__ == '__main__':
     assert args.gpu is not None, "Need to specify a list of gpu for training!"
     NR_GPU = len(args.gpu.split(','))
-    BATCH_SIZE = TOTAL_BATCH_SIZE / NR_GPU
+    BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU
     logger.auto_set_dir()
     config = get_config()
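The one-character ResNet fix matters because / is true division under Python 3 (or with from __future__ import division), which would make BATCH_SIZE a float; // keeps it an integer. A quick illustration with made-up numbers:

# Why the `//` matters: under Python 3, `/` always returns a float.
TOTAL_BATCH_SIZE = 256
NR_GPU = 4
print(TOTAL_BATCH_SIZE / NR_GPU)    # 64.0 (float, can break code expecting an int batch size)
print(TOTAL_BATCH_SIZE // NR_GPU)   # 64   (int, as intended)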
tensorpack/models/regularize.py

@@ -40,8 +40,12 @@ def regularize_cost(regex, func, name=None):
 @layer_register(log_shape=False)
-def Dropout(x, keep_prob=0.5):
-    is_training = get_current_tower_context().is_training
+def Dropout(x, keep_prob=0.5, is_training=None):
+    """
+    :param is_training: if None, will use the current context by default.
+    """
+    if is_training is None:
+        is_training = get_current_tower_context().is_training
     keep_prob = tf.constant(keep_prob if is_training else 1.0)
     return tf.nn.dropout(x, keep_prob)
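The Dropout change keeps the old behaviour as the default (is_training=None means "ask the current tower context") while letting a caller force the flag explicitly. A small sketch of that default-from-context pattern outside tensorpack; the TrainingContext class below is a stand-in invented for illustration:

# Sketch of the "None means use the ambient context" default that the new
# Dropout signature follows. TrainingContext is not a real tensorpack class.
class TrainingContext:
    is_training = True   # toggled by the framework when building train vs. test graphs

_CURRENT_CONTEXT = TrainingContext()

def get_current_context():
    return _CURRENT_CONTEXT

def dropout_keep_prob(keep_prob=0.5, is_training=None):
    if is_training is None:                               # no explicit override:
        is_training = get_current_context().is_training   # fall back to the context
    return keep_prob if is_training else 1.0              # dropout disabled at inference

print(dropout_keep_prob())                    # 0.5 -> context says we are training
print(dropout_keep_prob(is_training=False))   # 1.0 -> explicit override wins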
tensorpack/utils/fs.py

@@ -5,6 +5,7 @@
 import os, sys
 from six.moves import urllib
+import errno
 from . import logger

 __all__ = ['mkdir_p', 'download']

@@ -17,7 +18,7 @@ def mkdir_p(dirname):
     try:
         os.makedirs(dirname)
     except OSError as e:
-        if e.errno != 17:
+        if e.errno != errno.EEXIST:
             raise e
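Comparing against errno.EEXIST instead of the magic number 17 is the portable way to write an idempotent "mkdir -p" helper: the constant reads better and does not rely on a platform-specific error code. A self-contained version of the patched function, essentially what fs.py now contains:

# Idempotent "mkdir -p": only swallow the "already exists" error.
import os
import errno

def mkdir_p(dirname):
    try:
        os.makedirs(dirname)
    except OSError as e:
        if e.errno != errno.EEXIST:   # re-raise anything other than "already exists"
            raise e

mkdir_p('/tmp/some/nested/dir')   # calling it twice is now a no-op
mkdir_p('/tmp/some/nested/dir')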