Commit a7321f06, authored Apr 20, 2017 by Yuxin Wu
BEGAN (fix #234)
Parent: c77433d5
Showing 4 changed files with 182 additions and 4 deletions.
examples/GAN/BEGAN.py          +163 −0
examples/GAN/README.md         +4 −2
tensorpack/libinfo.py          +3 −0
tensorpack/tfutils/common.py   +12 −2
examples/GAN/BEGAN.py (new file, mode 100755)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: BEGAN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>

import os
import argparse
from tensorpack import *
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils.globvars import globalns as G
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
import tensorflow as tf
from GAN import GANModelDesc, GANTrainer

"""
Boundary Equilibrium GAN.
See the docstring in DCGAN.py for usage.
"""

import DCGAN

G.BATCH = 32
G.Z_DIM = 64
NH = 64
NF = 64
GAMMA = 0.5


class Model(GANModelDesc):
    def _get_inputs(self):
        return [InputDesc(tf.float32, (None, G.SHAPE, G.SHAPE, 3), 'input')]

    @auto_reuse_variable_scope
    def decoder(self, z):
        l = FullyConnected('fc', z, NF * 8 * 8, nl=tf.identity)
        l = tf.reshape(l, [-1, 8, 8, NF])
        with argscope(Conv2D, nl=tf.nn.elu, kernel_shape=3, stride=1):
            l = (LinearWrap(l)
                 .Conv2D('conv1.1', NF)
                 .Conv2D('conv1.2', NF)
                 .tf.image.resize_nearest_neighbor([16, 16], align_corners=True)
                 .Conv2D('conv2.1', NF)
                 .Conv2D('conv2.2', NF)
                 .tf.image.resize_nearest_neighbor([32, 32], align_corners=True)
                 .Conv2D('conv3.1', NF)
                 .Conv2D('conv3.2', NF)
                 .tf.image.resize_nearest_neighbor([64, 64], align_corners=True)
                 .Conv2D('conv4.1', NF)
                 .Conv2D('conv4.2', NF)
                 .Conv2D('conv4.3', 3, nl=tf.identity)())
        return l

    @auto_reuse_variable_scope
    def encoder(self, imgs):
        with argscope(Conv2D, nl=tf.nn.elu, kernel_shape=3, stride=1):
            l = (LinearWrap(imgs)
                 .Conv2D('conv1.1', NF)
                 .Conv2D('conv1.2', NF)
                 .Conv2D('conv1.3', NF * 2)
                 .AvgPooling('pool1', 2)
                 # 32
                 .Conv2D('conv2.1', NF * 2)
                 .Conv2D('conv2.2', NF * 3)
                 .AvgPooling('pool2', 2)
                 # 16
                 .Conv2D('conv3.1', NF * 3)
                 .Conv2D('conv3.2', NF * 4)
                 .AvgPooling('pool3', 2)
                 # 8
                 .Conv2D('conv4.1', NF * 4)
                 .Conv2D('conv4.2', NF * 4)
                 .FullyConnected('fc', NH, nl=tf.identity)())
        return l

    def _build_graph(self, inputs):
        image_pos = inputs[0]
        image_pos = image_pos / 128.0 - 1

        z = tf.random_uniform([G.BATCH, G.Z_DIM], minval=-1, maxval=1, name='z_train')
        z = tf.placeholder_with_default(z, [None, G.Z_DIM], name='z')

        def summary_image(name, x):
            x = (x + 1.0) * 128.0
            x = tf.clip_by_value(x, 0, 255)
            x = tf.cast(x, tf.uint8)
            tf.summary.image(name, x, max_outputs=30)

        with argscope([Conv2D, FullyConnected],
                      W_init=tf.truncated_normal_initializer(stddev=0.02)):
            with tf.variable_scope('gen'):
                image_gen = self.decoder(z)
            with tf.variable_scope('discrim'):
                with tf.variable_scope('enc'):
                    hidden_pos = self.encoder(image_pos)
                    hidden_neg = self.encoder(image_gen)
                with tf.variable_scope('dec'):
                    recon_pos = self.decoder(hidden_pos)
                    recon_neg = self.decoder(hidden_neg)

        summary_image('generated-samples', image_gen)
        summary_image('reconstruct-real', recon_pos)
        summary_image('reconstruct-fake', recon_neg)

        L_pos = tf.reduce_mean(tf.abs(recon_pos - image_pos), name='loss_pos')
        L_neg = tf.reduce_mean(tf.abs(recon_neg - image_gen), name='loss_neg')
        eq = tf.subtract(GAMMA * L_pos, L_neg, name='equilibrium')
        measure = tf.add(L_pos, tf.abs(eq), name='measure')

        kt = tf.get_variable('kt', dtype=tf.float32, initializer=0.0)
        update_kt = kt.assign_add(1e-3 * eq)
        with tf.control_dependencies([update_kt]):
            self.d_loss = tf.subtract(L_pos, kt * L_neg, name='loss_D')
            self.g_loss = L_neg

        add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
        tf.summary.scalar('kt-summary', kt)

        self.collect_variables()

    def _get_optimizer(self):
        lr = symbolic_functions.get_scalar_var('learning_rate', 1e-4, summary=True)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


DCGAN.Model = Model


def get_config():
    return TrainConfig(
        model=Model(),
        dataflow=DCGAN.get_data(G.data),
        callbacks=[
            ModelSaver(),
            StatMonitorParamSetter('learning_rate', 'measure', lambda x: x * 0.5, 0, 10)
        ],
        steps_per_epoch=500,
        max_epoch=400,
    )


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        config = get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        GANTrainer(config).train()
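For reference, the quantities built in _build_graph above are the BEGAN objective with gamma = GAMMA = 0.5 and a gain of 1e-3 in the kt update. Writing L(v) for the mean absolute reconstruction error of the discriminator autoencoder, the code computes:

\begin{aligned}
L(v) &= \operatorname{mean}\bigl|\mathrm{dec}(\mathrm{enc}(v)) - v\bigr| \\
\mathcal{L}_D &= L(x_{\mathrm{real}}) - k_t \, L(G(z)) \\
\mathcal{L}_G &= L(G(z)) \\
k_{t+1} &= k_t + 10^{-3}\,\bigl(\gamma\, L(x_{\mathrm{real}}) - L(G(z))\bigr) \\
\text{measure} &= L(x_{\mathrm{real}}) + \bigl|\gamma\, L(x_{\mathrm{real}}) - L(G(z))\bigr|
\end{aligned}

The StatMonitorParamSetter callback in get_config is set up to halve the learning rate when the monitored 'measure' statistic stops decreasing over a window of epochs, mirroring the learning-rate decay described in the BEGAN paper.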
examples/GAN/README.md
@@ -16,6 +16,8 @@ Reproduce the following GAN-related methods:
+ DiscoGAN ([Learning to Discover Cross-Domain Relations with Generative Adversarial Networks](https://arxiv.org/abs/1703.05192))
+ BEGAN ([BEGAN: Boundary Equilibrium Generative Adversarial Networks](https://arxiv.org/abs/1703.10717))

Please see the __docstring__ in each script for detailed usage and pretrained models.

## DCGAN.py
...
@@ -56,9 +58,9 @@ It then maximizes mutual information between these latent variables and the imag
Train a simple GAN on mnist, conditioned on the class labels.

-## WGAN.py, Improved-WGAN.py
+## WGAN.py, Improved-WGAN.py, BEGAN.py
-Just some small modifications on top of DCGAN.py.
+These variants are implemented by some small modifications on top of DCGAN.py.

## DiscoGAN-CelebA.py
...
tensorpack/libinfo.py
@@ -6,4 +6,7 @@ import cv2  # noqa
 import os
 os.environ['OPENCV_OPENCL_RUNTIME'] = ''
+os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'  # issue#9339
+os.environ['TF_AUTOTUNE_THRESHOLD'] = '3'  # use more warm-up

 __version__ = '0.1.8'
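These TF_* variables are read by the TensorFlow runtime, so they only take effect if set before TensorFlow starts executing ops; putting them in libinfo.py works because that module is executed as soon as tensorpack is imported. A minimal sketch of the import ordering (an illustration, not part of the commit):

# Sketch: import tensorpack first so that libinfo.py populates os.environ
# before the TensorFlow runtime reads the variables.
import tensorpack  # noqa: executes libinfo.py, setting the TF_* variables above
import tensorflow as tf

sess = tf.Session()  # the runtime now sees TF_ENABLE_WINOGRAD_NONFUSED / TF_AUTOTUNE_THRESHOLD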
tensorpack/tfutils/common.py
@@ -34,11 +34,21 @@ def get_default_sess_config(mem_fraction=0.99):
         tf.ConfigProto: the config to use.
     """
     conf = tf.ConfigProto()
+    conf.allow_soft_placement = True
+    # conf.log_device_placement = True
+
+    # https://github.com/tensorflow/tensorflow/issues/9322#issuecomment-295758107
+    # can speed up a bit
+    conf.intra_op_parallelism_threads = 1
+    conf.inter_op_parallelism_threads = 0
+
     conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction
     conf.gpu_options.allocator_type = 'BFC'
     conf.gpu_options.allow_growth = True
-    conf.allow_soft_placement = True
-    # conf.log_device_placement = True
+    # force gpu compatible?
+    conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
     return conf
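A minimal usage sketch for the updated helper (an illustration assuming TF 1.x, not part of the commit): the returned ConfigProto now requests one intra-op thread, soft device placement, growth-based GPU memory allocation capped at mem_fraction, and XLA JIT level ON_1.

# Sketch: build a session from tensorpack's default session config.
import tensorflow as tf
from tensorpack.tfutils.common import get_default_sess_config

config = get_default_sess_config(mem_fraction=0.5)  # cap GPU memory at 50%
sess = tf.Session(config=config)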