Commit 9b85e7a1 authored by Yuxin Wu

1.0.0rc0 change concat_v2 to concat (#121)

parent 3657bbd7
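For context: TensorFlow 1.0 finalized the argument order of `tf.concat`. The transitional `tf.concat_v2(values, axis)` from 0.12 became plain `tf.concat(values, axis)` in 1.0.0rc0, while the old `tf.concat(concat_dim, values)` order was dropped, hence the mechanical rename throughout this commit. A minimal sketch of the three spellings (tensor names are illustrative):

```
import tensorflow as tf

a = tf.zeros([4, 3])
b = tf.ones([4, 5])

# TF <= 0.12: c = tf.concat(1, [a, b])     (axis-first order, removed in 1.0)
# TF 0.12:    c = tf.concat_v2([a, b], 1)  (transitional name)
c = tf.concat([a, b], 1)  # TF >= 1.0.0rc0, shape (4, 8)
```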
@@ -58,7 +58,7 @@ The components are designed to be independent. You can use Model or DataFlow in
## Dependencies:
+ Python 2 or 3
- + TensorFlow >= 0.12
+ + TensorFlow >= 1.0.0rc0
+ Python bindings for OpenCV
+ other requirements:
```
...
@@ -84,7 +84,7 @@ class Model(ModelDesc):
self.last_state = tf.identity(last_state, 'last_state')
# seqlen x (Bxrnnsize)
- output = tf.reshape(tf.concat_v2(outputs, 1), [-1, param.rnn_size]) # (Bxseqlen) x rnnsize
+ output = tf.reshape(tf.concat(outputs, 1), [-1, param.rnn_size]) # (Bxseqlen) x rnnsize
logits = FullyConnected('fc', output, param.vocab_size, nl=tf.identity)
self.prob = tf.nn.softmax(logits / param.softmax_temprature)
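Shape bookkeeping for the comments in this hunk, as a runnable sketch with made-up sizes: `outputs` from the RNN is a length-`seqlen` list of `(B, rnn_size)` tensors, so concatenating on axis 1 and reshaping yields one `(B*seqlen, rnn_size)` matrix for the softmax layer.

```
import tensorflow as tf

B, seqlen, rnn_size = 2, 5, 8
outputs = [tf.zeros([B, rnn_size]) for _ in range(seqlen)]  # seqlen x (B, rnn_size)

merged = tf.concat(outputs, 1)             # (B, seqlen * rnn_size)
flat = tf.reshape(merged, [-1, rnn_size])  # (B * seqlen, rnn_size)
```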
...
@@ -82,7 +82,7 @@ class Model(ModelDesc):
.Conv2D('conv4_7_CPM', 128)())
def add_stage(stage, l):
- l = tf.concat_v2([l, shared, pool_center], 3,
+ l = tf.concat([l, shared, pool_center], 3,
name='concat_stage{}'.format(stage))
for i in range(1, 6):
l = Conv2D('Mconv{}_stage{}'.format(i, stage), l, 128)
...
@@ -20,7 +20,7 @@ Alternative link to this page: [http://dorefa.net](http://dorefa.net)
To use the script, you'll need:
- + TensorFlow >= 0.12.1
+ + TensorFlow >= 1.0.0rc0
+ OpenCV bindings for Python
...
@@ -87,7 +87,7 @@ class Model(GANModelDesc):
def discriminator(self, inputs, outputs):
""" return a (b, 1) logits"""
- l = tf.concat_v2([inputs, outputs], 3)
+ l = tf.concat([inputs, outputs], 3)
with argscope(Conv2D, nl=tf.identity, kernel_shape=4, stride=2):
l = (LinearWrap(l)
.Conv2D('conv0', NF, nl=LeakyReLU)
@@ -125,7 +125,7 @@ class Model(GANModelDesc):
if OUT_CH == 1:
output = tf.image.grayscale_to_rgb(output)
fake_output = tf.image.grayscale_to_rgb(fake_output)
- viz = (tf.concat_v2([input, output, fake_output], 2) + 1.0) * 128.0
+ viz = (tf.concat([input, output, fake_output], 2) + 1.0) * 128.0
viz = tf.cast(tf.clip_by_value(viz, 0, 255), tf.uint8, name='viz')
tf.summary.image('input,output,fake', viz, max_outputs=max(30, BATCH))
...
@@ -79,7 +79,7 @@ class Model(GANModelDesc):
self.factors.sample(BATCH, prior), 0, name='z_code')
z_noise = symbf.shapeless_placeholder(
tf.random_uniform([BATCH, NOISE_DIM], -1, 1), 0, name='z_noise')
- z = tf.concat_v2([zc, z_noise], 1, name='z')
+ z = tf.concat([zc, z_noise], 1, name='z')
with argscope([Conv2D, Deconv2D, FullyConnected],
W_init=tf.truncated_normal_initializer(stddev=0.02)):
...
@@ -67,7 +67,7 @@ class Model(ModelDesc):
b5 = branch('branch5', l, 16)
final_map = Conv2D('convfcweight',
- tf.concat_v2([b1, b2, b3, b4, b5], 3), 1, 1,
+ tf.concat([b1, b2, b3, b4, b5], 3), 1, 1,
W_init=tf.constant_initializer(0.2),
use_bias=False, nl=tf.identity)
costs = []
...
@@ -59,7 +59,7 @@ class Model(ModelDesc):
if nrpool != 0: # pool + passthrough if nrpool == 0
x4 = Conv2D('poolproj', x4, nrpool, 1)
outs.append(x4)
- return tf.concat_v2(outs, 3, name='concat')
+ return tf.concat(outs, 3, name='concat')
with argscope(Conv2D, nl=BNReLU, use_bias=False):
l = Conv2D('conv0', image, 64, 7, stride=2)
...
@@ -88,21 +88,21 @@ class Model(ModelDesc):
.MaxPooling('pool4', 3, 2)()) # 35
with tf.variable_scope('incep-35-256a'):
- l = tf.concat_v2([
+ l = tf.concat([
Conv2D('conv11', l, 64, 1),
proj_kk(l, 5, 48, 64),
proj_233(l, 64, 96),
pool_proj(l, 32, 'avg')
], 3, name='concat')
with tf.variable_scope('incep-35-288a'):
- l = tf.concat_v2([
+ l = tf.concat([
Conv2D('conv11', l, 64, 1),
proj_kk(l, 5, 48, 64),
proj_233(l, 64, 96),
pool_proj(l, 64, 'avg')
], 3, name='concat')
with tf.variable_scope('incep-35-288b'):
- l = tf.concat_v2([
+ l = tf.concat([
Conv2D('conv11', l, 64, 1),
proj_kk(l, 5, 48, 64),
proj_233(l, 64, 96),
@@ -110,13 +110,13 @@ class Model(ModelDesc):
], 3, name='concat')
# 35x35x288
with tf.variable_scope('incep-17-768a'):
- l = tf.concat_v2([
+ l = tf.concat([
Conv2D('conv3x3', l, 384, 3, stride=2, padding='VALID'),
proj_233(l, 64, 96, stride=2),
MaxPooling('maxpool', l, 3, 2)
], 3, name='concat')
with tf.variable_scope('incep-17-768b'):
- l = tf.concat_v2([
+ l = tf.concat([
Conv2D('conv11', l, 192, 1),
proj_77(l, 128, 192),
proj_277(l, 128, 192),
@@ -124,14 +124,14 @@ class Model(ModelDesc):
], 3, name='concat')
for x in ['c', 'd']:
with tf.variable_scope('incep-17-768{}'.format(x)):
- l = tf.concat_v2([
+ l = tf.concat([
Conv2D('conv11', l, 192, 1),
proj_77(l, 160, 192),
proj_277(l, 160, 192),
pool_proj(l, 192, 'avg')
], 3, name='concat')
with tf.variable_scope('incep-17-768e'):
- l = tf.concat_v2([
+ l = tf.concat([
Conv2D('conv11', l, 192, 1),
proj_77(l, 192, 192),
proj_277(l, 192, 192),
@@ -147,7 +147,7 @@ class Model(ModelDesc):
br1 = FullyConnected('fc', br1, 1000, nl=tf.identity)
with tf.variable_scope('incep-17-1280a'):
- l = tf.concat_v2([
+ l = tf.concat([
proj_kk(l, 3, 192, 320, stride=2),
Conv2D('conv73', proj_77(l, 192, 192), 192, 3, stride=2, padding='VALID'),
MaxPooling('maxpool', l, 3, 2)
@@ -156,18 +156,18 @@ class Model(ModelDesc):
with tf.variable_scope('incep-8-2048{}'.format(x)) as scope:
br11 = Conv2D('conv11', l, 320, 1)
br33 = Conv2D('conv133r', l, 384, 1)
- br33 = tf.concat_v2([
+ br33 = tf.concat([
Conv2D('conv133a', br33, 384, [1, 3]),
Conv2D('conv133b', br33, 384, [3, 1])
], 3, name='conv133')
br233 = proj_kk(l, 3, 448, 384)
- br233 = tf.concat_v2([
+ br233 = tf.concat([
Conv2D('conv233a', br233, 384, [1, 3]),
Conv2D('conv233b', br233, 384, [3, 1]),
], 3, name='conv233')
- l = tf.concat_v2([
+ l = tf.concat([
br11, br33, br233,
pool_proj(l, 192, 'avg')
], 3, name='concat')
...
@@ -75,7 +75,7 @@ class Model(ModelDesc):
outputs, last_state = rnn.static_rnn(cell, input_list, state_var, scope='rnn')
# seqlen x (Bxrnnsize)
- output = tf.reshape(tf.concat_v2(outputs, 1), [-1, HIDDEN_SIZE]) # (Bxseqlen) x hidden
+ output = tf.reshape(tf.concat(outputs, 1), [-1, HIDDEN_SIZE]) # (Bxseqlen) x hidden
logits = FullyConnected('fc', output, VOCAB_SIZE, nl=tf.identity, W_init=initializer, b_init=initializer)
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=symbolic_functions.flatten(nextinput))
...
@@ -32,7 +32,7 @@ class EmbeddingModel(ModelDesc):
list_split = 0
if isinstance(x, list):
list_split = len(x)
- x = tf.concat_v2(x, 0)
+ x = tf.concat(x, 0)
# pre-process MNIST dataflow data
x = tf.expand_dims(x, 3)
...
@@ -60,14 +60,14 @@ class Model(ModelDesc):
# For visualization in tensorboard
padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
- img_orig = tf.concat_v2([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w
- transform1 = tf.concat_v2([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
- transform2 = tf.concat_v2([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
- stacked = tf.concat_v2([img_orig, transform1, transform2], 2, 'viz')
+ img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w
+ transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
+ transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
+ stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
tf.summary.image('visualize',
tf.expand_dims(stacked, -1), max_outputs=30)
- sampled = tf.concat_v2([sampled1, sampled2], 3, 'sampled_concat')
+ sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')
logits = (LinearWrap(sampled)
.apply(symbf.batch_flatten)
.FullyConnected('fc1', out_dim=256, nl=tf.nn.relu)
...
@@ -66,7 +66,7 @@ def Conv2D(x, out_channel, kernel_shape,
kernels = tf.split(W, split, 3)
outputs = [tf.nn.conv2d(i, k, stride, padding)
for i, k in zip(inputs, kernels)]
- conv = tf.concat_v2(outputs, 3)
+ conv = tf.concat(outputs, 3)
return nl(tf.nn.bias_add(conv, b) if use_bias else conv, name='output')
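This hunk sits in the grouped-convolution branch of tensorpack's `Conv2D`: input and kernel are split into `split` groups along the channel axis, convolved independently, and concatenated back. A standalone sketch of the same pattern, with hypothetical shapes:

```
import tensorflow as tf

split = 2
x = tf.zeros([1, 28, 28, 8])          # NHWC input
W = tf.zeros([3, 3, 8 // split, 16])  # kernel holds in_channels / split per group

inputs = tf.split(x, split, 3)        # two (1, 28, 28, 4) groups
kernels = tf.split(W, split, 3)       # two (3, 3, 4, 8) kernels
outputs = [tf.nn.conv2d(i, k, [1, 1, 1, 1], 'SAME')
           for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, 3)          # back to (1, 28, 28, 16)
```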
...
@@ -80,8 +80,8 @@ def ImageSample(inputs, borderMode='repeat'):
lcoory, lcoorx = tf.split(lcoor, 2, 3)
ucoory, ucoorx = tf.split(ucoor, 2, 3)
- lyux = tf.concat_v2([lcoory, ucoorx], 3)
- uylx = tf.concat_v2([ucoory, lcoorx], 3)
+ lyux = tf.concat([lcoory, ucoorx], 3)
+ uylx = tf.concat([ucoory, lcoorx], 3)
diffy, diffx = tf.split(diff, 2, 3)
neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)
...
@@ -83,8 +83,8 @@ def GlobalAvgPooling(x):
def UnPooling2x2ZeroFilled(x):
# https://github.com/tensorflow/tensorflow/issues/2169
- out = tf.concat_v2([x, tf.zeros_like(x)], 3)
- out = tf.concat_v2([out, tf.zeros_like(out)], 2)
+ out = tf.concat([x, tf.zeros_like(x)], 3)
+ out = tf.concat([out, tf.zeros_like(out)], 2)
sh = x.get_shape().as_list()
if None not in sh[1:]:
...
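The two concats above implement the zero-filled 2x2 unpooling trick from the linked issue: pair every value with zeros along channels and width, then reshape so each input pixel lands in the top-left corner of a 2x2 block. A self-contained sketch, assuming a statically known NHWC shape:

```
import tensorflow as tf

def unpool_2x2_zero_filled(x):
    out = tf.concat([x, tf.zeros_like(x)], 3)      # (B, H, W, C)  -> (B, H, W, 2C)
    out = tf.concat([out, tf.zeros_like(out)], 2)  # (B, H, W, 2C) -> (B, H, 2W, 2C)
    sh = x.get_shape().as_list()
    # reinterpret the zero padding as a 2x-upsampled spatial grid
    return tf.reshape(out, [-1, sh[1] * 2, sh[2] * 2, sh[3]])

y = unpool_2x2_zero_filled(tf.ones([1, 2, 2, 3]))  # shape (1, 4, 4, 3)
```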
@@ -12,7 +12,7 @@ __all__ = ['ConcatWith']
@layer_register(use_scope=False, log_shape=False)
def ConcatWith(x, tensor, dim):
"""
- A wrapper around ``tf.concat_v2`` to cooperate with :class:`LinearWrap`.
+ A wrapper around ``tf.concat`` to cooperate with :class:`LinearWrap`.
Args:
x (tf.Tensor): input
@@ -21,8 +21,8 @@ def ConcatWith(x, tensor, dim):
dim (int): the dimension along which to concatenate
Returns:
- tf.Tensor: ``tf.concat_v2([x] + tensor, dim)``
+ tf.Tensor: ``tf.concat([x] + tensor, dim)``
"""
if type(tensor) != list:
tensor = [tensor]
- return tf.concat_v2([x] + tensor, dim)
+ return tf.concat([x] + tensor, dim)
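Since `ConcatWith` is registered with `use_scope=False`, its purpose is to let a concat appear inline in a `LinearWrap` chain. A hypothetical usage sketch (layer names, shapes, and the skip tensor are illustrative):

```
from tensorpack import LinearWrap

def fuse(image, skip):
    return (LinearWrap(image)
            .Conv2D('conv1', 32, 3)
            .ConcatWith(skip, 3)   # tf.concat([conv1_output, skip], 3)
            .Conv2D('conv2', 32, 3)())
```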
@@ -225,7 +225,7 @@ class GaussianDistribution(Distribution):
stddev = tf.exp(stddev) # just make it positive and assume it's stddev
# OpenAI code assumes exp(input) is variance. https://github.com/openai/InfoGAN.
# not sure if there is any theory about this.
- return tf.concat_v2([mean, stddev], axis=1)
+ return tf.concat([mean, stddev], axis=1)
def _sample(self, batch_size, theta):
if self.fixed_std:
@@ -308,10 +308,10 @@ class ProductDistribution(Distribution):
for dist, dist_param in zip(self.dists, self._splitter(dist_params, True)):
if dist.param_dim > 0:
rsl.append(dist._encoder_activation(dist_param))
- return tf.concat_v2(rsl, 1)
+ return tf.concat(rsl, 1)
def _sample(self, batch_size, theta):
ret = []
for dist, ti in zip(self.dists, self._splitter(theta, True)):
ret.append(dist._sample(batch_size, ti))
- return tf.concat_v2(ret, 1, name='sample')
+ return tf.concat(ret, 1, name='sample')
@@ -130,7 +130,7 @@ def huber_loss(x, delta=1, name='huber_loss'):
def get_scalar_var(name, init_value, summary=False, trainable=False):
"""
- Get a scalar variable with certain initial value
+ Get a scalar float variable with certain initial value
Args:
name (str): name of the variable.
@@ -140,7 +140,7 @@ def get_scalar_var(name, init_value, summary=False, trainable=False):
Returns:
tf.Variable: the variable
"""
- ret = tf.get_variable(name, initializer=init_value,
+ ret = tf.get_variable(name, initializer=float(init_value),
trainable=trainable)
if summary:
# this is recognized in callbacks.StatHolder
...
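The `float(init_value)` cast matters because `tf.get_variable` infers the variable's dtype from its initializer: a plain Python `int` would create an int32 variable that cannot hold fractional updates such as a decayed learning rate. A minimal sketch of the difference:

```
import tensorflow as tf

lr_int = tf.get_variable('lr_int', initializer=1, trainable=False)
print(lr_int.dtype)  # int32 -- assigning 0.5 to this would fail

lr = tf.get_variable('lr', initializer=float(1), trainable=False)
print(lr.dtype)      # float32
update = lr.assign(0.5)  # valid now
```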