Commit 1908fbe7, authored Nov 17, 2016 by Yuxin Wu
    useless comments in example & GAN

parent 756a4a5a
Showing 23 changed files with 249 additions and 20 deletions:
README.md                                        +2    -1
examples/Atari2600/DQN.py                        +1    -1
examples/DisturbLabel/mnist-disturb.py           +1    -1
examples/DoReFa-Net/svhn-digit-dorefa.py         +1    -1
examples/GAN/GAN.py                              +70   -0
examples/GAN/README.md                           +7    -0
examples/GAN/celebA.py                           +148  -0
examples/HED/hed.py                              +1    -1
examples/Inception/inception-bn.py               +1    -1
examples/Inception/inceptionv3.py                +1    -1
examples/OpenAIGym/run-atari.py                  +1    -1
examples/OpenAIGym/train-atari.py                +1    -1
examples/ResNet/cifar10-resnet.py                +1    -1
examples/ResNet/load-resnet.py                   +1    -1
examples/ResNet/svhn-resnet.py                   +1    -1
examples/SpatialTransformer/mnist-addition.py    +1    -1
examples/char-rnn/char-rnn.py                    +1    -1
examples/cifar-convnet.py                        +1    -1
examples/load-alexnet.py                         +1    -1
examples/mnist-convnet.py                        +1    -1
examples/svhn-digit-convnet.py                   +1    -1
tensorpack/dataflow/image.py                     +1    -1
tensorpack/utils/viz.py                          +4    -1
README.md

@@ -7,10 +7,11 @@ See some [examples](examples) to learn about the framework.
 You can actually train them and reproduce the performance... not just to see how to write code.

 + [DoReFa-Net: training binary / low bitwidth CNN](examples/DoReFa-Net)
-+ [InceptionV3 on ImageNet](examples/Inception/inceptionv3.py)
 + [ResNet for ImageNet/Cifar10/SVHN classification](examples/ResNet)
++ [InceptionV3 on ImageNet](examples/Inception/inceptionv3.py)
 + [Fully-convolutional Network for Holistically-Nested Edge Detection](examples/HED)
 + [Spatial Transformer Networks on MNIST addition](examples/SpatialTransformer)
++ [Generative Adversarial Networks](examples/GAN)
 + [Double DQN plays Atari games](examples/Atari2600)
 + [Asynchronous Advantage Actor-Critic(A3C) with demos on OpenAI Gym Atari games](examples/OpenAIGym)
 + [char-rnn language model](examples/char-rnn)
examples/Atari2600/DQN.py

@@ -181,7 +181,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     parser.add_argument('--task', help='task to perform',
                         choices=['play', 'eval', 'train'], default='train')
examples/DisturbLabel/mnist-disturb.py

@@ -52,7 +52,7 @@ class Model(mnist_example.Model):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     parser.add_argument('--prob', help='disturb prob', type=float, required=True)
     args = parser.parse_args()
examples/DoReFa-Net/svhn-digit-dorefa.py

@@ -170,7 +170,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='the GPU to use')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='the GPU to use')
     parser.add_argument('--load', help='load a checkpoint')
     parser.add_argument('--dorefa',
                         help='number of bits for W,A,G, separated by comma. Defaults to \'1,2,4\'',
examples/GAN/GAN.py  (new file, mode 100644)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: GAN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>

import tensorflow as tf
import numpy as np
from tensorpack import (QueueInputTrainerBase, TowerContext, get_global_step_var)
from tensorpack.tfutils.summary import summary_moving_average, add_moving_summary
from tensorpack.dataflow import DataFlow


class GANTrainer(QueueInputTrainerBase):
    def __init__(self, config, g_vs_d=3):
        super(GANTrainer, self).__init__(config)
        self._build_enque_thread()
        if g_vs_d > 1:
            self._opt_g = g_vs_d
            self._opt_d = 1
        else:
            self._opt_g = 1
            self._opt_d = int(1.0 / g_vs_d)

    def _setup(self):
        with TowerContext(''):
            actual_inputs = self._get_input_tensors_noreuse()
            self.model.build_graph(actual_inputs)
        self.gs_incr = tf.assign_add(get_global_step_var(), 1, name='global_step_incr')
        self.g_min = self.config.optimizer.minimize(
            self.model.g_loss, var_list=self.model.g_vars, name='g_op')
        self.d_min = self.config.optimizer.minimize(
            self.model.d_loss, var_list=self.model.d_vars)
        self.d_min = tf.group(self.d_min, summary_moving_average(), name='d_op')

    def run_step(self):
        for _ in range(self._opt_g):
            self.sess.run(self.g_min)
        for _ in range(self._opt_d):
            self.sess.run(self.d_min)
        self.sess.run(self.gs_incr)


class RandomZData(DataFlow):
    def __init__(self, shape):
        super(RandomZData, self).__init__()
        self.shape = shape

    def get_data(self):
        while True:
            yield [np.random.uniform(-1, 1, size=self.shape)]


def build_GAN_losses(vecpos, vecneg):
    sigmpos = tf.sigmoid(vecpos)
    sigmneg = tf.sigmoid(vecneg)
    tf.histogram_summary('sigmoid-pos', sigmpos)
    tf.histogram_summary('sigmoid-neg', sigmneg)

    d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        vecpos, tf.ones_like(vecpos)), name='d_loss_pos')
    d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        vecneg, tf.zeros_like(vecneg)), name='d_loss_neg')

    d_pos_acc = tf.reduce_mean(tf.cast(sigmpos > 0.5, tf.float32), name='pos_acc')
    d_neg_acc = tf.reduce_mean(tf.cast(sigmneg < 0.5, tf.float32), name='neg_acc')

    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        vecneg, tf.ones_like(vecneg)), name='g_loss')
    d_loss = tf.add(d_loss_pos, d_loss_neg, name='d_loss')

    add_moving_summary(d_loss_pos, d_loss_neg, g_loss, d_loss, d_pos_acc, d_neg_acc)
    return g_loss, d_loss
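
A side note, not part of the commit: the g_vs_d ratio in GANTrainer.__init__ decides how many generator versus discriminator updates each run_step() call issues. A minimal standalone sketch of that arithmetic (the helper name is hypothetical, used only for illustration):

    def updates_per_step(g_vs_d):
        # Mirrors the branch in GANTrainer.__init__: returns the number of
        # generator and discriminator optimizer runs issued per run_step() call.
        if g_vs_d > 1:
            return g_vs_d, 1            # e.g. the default g_vs_d=3: 3 G updates, 1 D update
        return 1, int(1.0 / g_vs_d)     # e.g. g_vs_d=0.5: 1 G update, 2 D updates

    assert updates_per_step(3) == (3, 1)
    assert updates_per_step(0.5) == (1, 2)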
examples/GAN/README.md  (new file, mode 100644)

# Deep Convolutional Generative Adversarial Networks

Reproduce DCGAN following the setup in [dcgan.torch](https://github.com/soumith/dcgan.torch).

More results to come.

See the docstring in each executable script for usage.
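
In short, the usage described by the docstring added in examples/GAN/celebA.py (below) is the following two commands; the data path is a placeholder:

    # train DCGAN on the 'aligned&cropped' CelebA images
    ./celebA.py --data /path/to/image_align_celeba/
    # visualize samples from a trained model
    ./celebA.py --load model.tfmodel --sample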
examples/GAN/celebA.py  (new file, mode 100755)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: celebA.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>

import numpy as np
import tensorflow as tf
import glob
import os, sys
import argparse
import cv2

from tensorpack import *
from tensorpack.utils.viz import build_patch_list
from tensorpack.utils.viz import dump_dataflow_images
from tensorpack.tfutils.summary import add_moving_summary, summary_moving_average
import tensorpack.tfutils.symbolic_functions as symbf
from GAN import GANTrainer, RandomZData, build_GAN_losses

"""
DCGAN on CelebA dataset.
1. Download the 'aligned&cropped' version of CelebA dataset.
2. Start training:
    ./celebA.py --data /path/to/image_align_celeba/
3. Visualize samples of a trained model:
    ./celebA.py --load model.tfmodel --sample
"""

SHAPE = 64
BATCH = 128


class Model(ModelDesc):
    def _get_input_vars(self):
        return [InputVar(tf.float32, (None, SHAPE, SHAPE, 3), 'input')]

    def generator(self, z):
        """ return a image generated from z"""
        l = FullyConnected('fc0', z, 64 * 8 * 4 * 4, nl=tf.identity)
        l = tf.reshape(l, [-1, 4, 4, 64 * 8])
        l = BNReLU(l)
        with argscope(Deconv2D, nl=BNReLU, kernel_shape=5, stride=2):
            l = Deconv2D('deconv1', l, [8, 8, 64 * 4])
            l = Deconv2D('deconv2', l, [16, 16, 64 * 2])
            l = Deconv2D('deconv3', l, [32, 32, 64])
            l = Deconv2D('deconv4', l, [64, 64, 3], nl=tf.identity)
            l = tf.tanh(l, name='gen')
        return l

    def discriminator(self, imgs):
        """ return a (b, 1) logits"""
        with argscope(Conv2D, nl=tf.identity, kernel_shape=5, stride=2), \
                argscope(LeakyReLU, alpha=0.2):
            l = (LinearWrap(imgs)
                 .Conv2D('conv0', 64)
                 .LeakyReLU('lr0')
                 .Conv2D('conv1', 64 * 2)
                 .BatchNorm('bn1').LeakyReLU('lr1')
                 .Conv2D('conv2', 64 * 4)
                 .BatchNorm('bn2').LeakyReLU('lr2')
                 .Conv2D('conv3', 64 * 8)
                 .BatchNorm('bn3').LeakyReLU('lr3')
                 .FullyConnected('fct', 1, nl=tf.identity)())
        return l

    def _build_graph(self, input_vars):
        image_pos = input_vars[0]
        image_pos = image_pos / 128.0 - 1

        z = tf.random_uniform(tf.pack([tf.shape(image_pos)[0], 100]), -1, 1, name='z')
        z.set_shape([None, 100])    # issue#5680

        with argscope([Conv2D, Deconv2D, FullyConnected],
                      W_init=tf.truncated_normal_initializer(stddev=0.02)):
            with tf.variable_scope('gen'):
                image_gen = self.generator(z)
            tf.image_summary('gen', image_gen, max_images=30)
            with tf.variable_scope('discrim'):
                vecpos = self.discriminator(image_pos)
            with tf.variable_scope('discrim', reuse=True):
                vecneg = self.discriminator(image_gen)

        self.g_loss, self.d_loss = build_GAN_losses(vecpos, vecneg)
        all_vars = tf.trainable_variables()
        self.g_vars = [v for v in all_vars if v.name.startswith('gen/')]
        self.d_vars = [v for v in all_vars if v.name.startswith('discrim/')]


def get_data():
    datadir = args.data
    imgs = glob.glob(datadir + '/*.jpg')
    ds = ImageFromFile(imgs, channel=3, shuffle=True)
    augs = [imgaug.CenterCrop(110), imgaug.Resize(64)]
    ds = AugmentImageComponent(ds, augs)
    ds = BatchData(ds, BATCH)
    ds = PrefetchDataZMQ(ds, 1)
    return ds


def get_config():
    logger.auto_set_dir()
    dataset = get_data()
    lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
    return TrainConfig(
        dataset=dataset,
        optimizer=tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3),
        callbacks=Callbacks([
            StatPrinter(), ModelSaver(),
            ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)])
        ]),
        session_config=get_default_sess_config(0.5),
        model=Model(),
        step_per_epoch=300,
        max_epoch=500,
    )


def sample(model_path):
    pred = PredictConfig(
        session_init=get_model_loader(model_path),
        model=Model(),
        input_names=['z'],
        output_names=['gen/gen'])
    pred = SimpleDatasetPredictor(pred, RandomZData((128, 100)))
    for o in pred.get_result():
        o = o[0] + 1
        o = o * 128.0
        o = o[:, :, :, ::-1]
        viz = next(build_patch_list(o, nr_row=10, nr_col=10))
        cv2.imshow("", viz)
        cv2.waitKey()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--sample', action='store_true', help='run sampling')
    parser.add_argument('--data', help='`image_align_celeba` directory of the celebA dataset')
    global args
    args = parser.parse_args()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.sample:
        sample(args.load)
    else:
        config = get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        GANTrainer(config).train()
examples/HED/hed.py

@@ -200,7 +200,7 @@ def run(model_path, image_path, output):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     parser.add_argument('--view', help='view dataset', action='store_true')
     parser.add_argument('--run', help='run model on images')
examples/Inception/inception-bn.py

@@ -175,7 +175,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     parser.add_argument('--data', help='ImageNet data root directory', required=True)
     args = parser.parse_args()
examples/Inception/inceptionv3.py

@@ -281,7 +281,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--data', help='ILSVRC dataset dir')
     parser.add_argument('--load', help='load model')
     args = parser.parse_args()
examples/OpenAIGym/run-atari.py

@@ -76,7 +76,7 @@ def do_submit(output):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model', required=True)
     parser.add_argument('--env', help='environment name', required=True)
     parser.add_argument('--episode', help='number of episodes to run',
examples/OpenAIGym/train-atari.py

@@ -210,7 +210,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     parser.add_argument('--env', help='env', required=True)
     parser.add_argument('--task', help='task to perform',
examples/ResNet/cifar10-resnet.py

@@ -160,7 +160,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     args = parser.parse_args()
examples/ResNet/load-resnet.py

@@ -189,7 +189,7 @@ def name_conversion(caffe_layer_name):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', required=True,
                         help='.npy model file generated by tensorpack.utils.loadcaffe')
     parser.add_argument('-d', '--depth', help='resnet depth', required=True, type=int, choices=[50, 101, 152])
examples/ResNet/svhn-resnet.py

@@ -83,7 +83,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     args = parser.parse_args()
examples/SpatialTransformer/mnist-addition.py

@@ -163,7 +163,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     parser.add_argument('--view', action='store_true')
     args = parser.parse_args()
examples/char-rnn/char-rnn.py

@@ -170,7 +170,7 @@ def sample(path, start, length):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     subparsers = parser.add_subparsers(title='command', dest='command')
     parser_sample = subparsers.add_parser('sample', help='sample a trained model')
examples/cifar-convnet.py

@@ -133,7 +133,7 @@ def get_config(cifar_classnum):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     parser.add_argument('--classnum', help='10 for cifar10 or 100 for cifar100',
                         type=int, default=10)
examples/load-alexnet.py

@@ -77,7 +77,7 @@ def run_test(path, input):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='.npy model file generated by tensorpack.utils.loadcaffe',
                         required=True)
examples/mnist-convnet.py

@@ -125,7 +125,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     args = parser.parse_args()
     if args.gpu:
examples/svhn-digit-convnet.py

@@ -109,7 +109,7 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')  # nargs='*' in multi mode
+    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
     parser.add_argument('--load', help='load model')
     args = parser.parse_args()
tensorpack/dataflow/image.py

@@ -14,7 +14,7 @@ __all__ = ['ImageFromFile', 'AugmentImageComponent', 'AugmentImageComponents']
 class ImageFromFile(RNGDataFlow):
     def __init__(self, files, channel=3, resize=None, shuffle=False):
         """
-        Generate rgb images from list of files
+        Generate RGB images from list of files
         :param files: list of file paths
         :param channel: 1 or 3 channel
         :param resize: a (h, w) tuple. If given, will force a resize
tensorpack/utils/viz.py

@@ -46,12 +46,13 @@ def minnone(x, y):
     return min(x, y)

 def build_patch_list(patch_list,
-                     nr_row=None, nr_col=None, border=5,
+                     nr_row=None, nr_col=None, border=None,
                      max_width=1000, max_height=1000,
                      shuffle=False, bgcolor=255):
     """
     This is a generator.
     patch_list: bhw or bhwc
+    :param border: defaults to 0.1 * max(image_width, image_height)
     """
     patch_list = np.asarray(patch_list)
     if patch_list.ndim == 3:

@@ -60,6 +61,8 @@ def build_patch_list(patch_list,
     if shuffle:
         np.random.shuffle(patch_list)
     ph, pw = patch_list.shape[1:3]
+    if border is None:
+        border = int(0.1 * max(ph, pw))
     mh, mw = max(max_height, ph + border), max(max_width, pw + border)
     if nr_row is None:
         nr_row = minnone(nr_row, max_height / (ph + border))
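
With border now defaulting to None, callers that omit it get a margin of 10% of the patch size automatically. A small usage sketch, assuming a (N, h, w, c) array of patches; celebA.py's sample() above calls it the same way with nr_row=10, nr_col=10:

    import numpy as np
    from tensorpack.utils.viz import build_patch_list

    patches = np.zeros((100, 64, 64, 3), dtype='uint8')   # 100 dummy 64x64 RGB patches
    # border is omitted, so it becomes int(0.1 * max(64, 64)) == 6 pixels
    canvas = next(build_patch_list(patches, nr_row=10, nr_col=10))  # generator yields canvases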