Commit 2df3dcf4 authored by Yuxin Wu's avatar Yuxin Wu

change ConcatWith.

parent 243e957f
......@@ -8,6 +8,10 @@ so you won't need to look at here very often.
Here is a list of things that were changed, starting from an early version.
TensorFlow itself also changes API and those are not listed here.
* 2017/01/25. Argument order of `models.ConcatWith` is changed to follow the API change in
TensorFlow upstream.
* 2017/01/25. `TrainConfig(callbacks=)` now takes a list of `Callback` instances. See [commit](https://github.com/ppwwyyxx/tensorpack/commit/243e957fe6d62a0cfb5728bd77fb3e005d6603e4)
on how to change your code.
* 2017/01/06. `summary.add_moving_summary` now takes any number of positional arguments instead of a list.
See [commit](https://github.com/ppwwyyxx/tensorpack/commit/bbf41d9e58053f843d0471e6d2d87ff714a79a90) to change your code.
* 2017/01/05. The argument `TrainConfig(dataset=)` is renamed to `TrainConfig(dataflow=)`.
......
......@@ -68,21 +68,21 @@ class Model(GANModelDesc):
return (LinearWrap(e8)
.Deconv2D('deconv1', NF * 8)
.Dropout()
.ConcatWith(3, e7)
.ConcatWith(e7, 3)
.Deconv2D('deconv2', NF * 8)
.Dropout()
.ConcatWith(3, e6)
.ConcatWith(e6, 3)
.Deconv2D('deconv3', NF * 8)
.Dropout()
.ConcatWith(3, e5)
.ConcatWith(e5, 3)
.Deconv2D('deconv4', NF * 8)
.ConcatWith(3, e4)
.ConcatWith(e4, 3)
.Deconv2D('deconv5', NF * 4)
.ConcatWith(3, e3)
.ConcatWith(e3, 3)
.Deconv2D('deconv6', NF * 2)
.ConcatWith(3, e2)
.ConcatWith(e2, 3)
.Deconv2D('deconv7', NF * 1)
.ConcatWith(3, e1)
.ConcatWith(e1, 3)
.Deconv2D('deconv8', OUT_CH, nl=tf.tanh)())
def discriminator(self, inputs, outputs):
......
......@@ -10,15 +10,15 @@ __all__ = ['ConcatWith']
@layer_register(use_scope=False, log_shape=False)
def ConcatWith(x, dim, tensor):
def ConcatWith(x, tensor, dim):
"""
A wrapper around ``tf.concat`` to cooperate with :class:`LinearWrap`.
A wrapper around ``tf.concat_v2`` to cooperate with :class:`LinearWrap`.
Args:
x (tf.Tensor): input
dim (int): the dimension along which to concatenate
tensor (list[tf.Tensor]): a tensor or list of tensors to concatenate with x.
x will be at the beginning
dim (int): the dimension along which to concatenate
Returns:
tf.Tensor: ``tf.concat_v2([x] + tensor, dim)``
......
......@@ -116,8 +116,13 @@ class Distribution(object):
@class_scope
def encoder_activation(self, dist_param):
""" An activation function to produce
feasible distribution parameters from unconstrained raw network output.
""" An activation function which transforms unconstrained raw network output
to a vector of feasible distribution parameters.
Note that for each distribution,
there are many feasible ways to design this function and it's hard to say which is better.
The default implementations in the distribution classes here are
just one reasonable way to do this.
Args:
dist_param: output from a network, of shape (batch, param_dim).
......@@ -218,8 +223,9 @@ class GaussianDistribution(Distribution):
return dist_param
else:
mean, stddev = tf.split(dist_param, 2, axis=1)
# this is from https://github.com/openai/InfoGAN. don't know why
stddev = tf.sqrt(tf.exp(stddev))
stddev = tf.exp(stddev) # just make it positive and assume it's stddev
# OpenAI code assumes exp(input) is variance. https://github.com/openai/InfoGAN.
# not sure if there is any theory about this.
return tf.concat_v2([mean, stddev], axis=1)
def _sample(self, batch_size, theta):
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment