Commit e02c7995 authored by Yuxin Wu

_build_graph(inputs) -> build_graph(*inputs) (#318)

parent 95bd4af5
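This commit replaces the private `_build_graph(self, inputs)` hook, which received all input tensors as a single list that each model had to unpack, with a public `build_graph(self, *inputs)` whose arguments are the tensors themselves, in the order declared by `inputs()`. Below is a minimal before/after sketch of the migration, assuming the usual `ModelDesc` conventions seen in the hunks that follow; the model name, layers, and shapes are illustrative only and are not part of this commit:

# Hypothetical example (not from this commit): migrating a ModelDesc subclass.
import tensorflow as tf
from tensorpack import ModelDesc

class ExampleModel(ModelDesc):
    def inputs(self):
        return [tf.placeholder(tf.float32, (None, 28, 28), 'input'),
                tf.placeholder(tf.int32, (None,), 'label')]

    # Before: the single-argument hook had to unpack the tensor list itself.
    #
    #     def _build_graph(self, inputs):
    #         image, label = inputs
    #         ...
    #
    # After: each tensor from inputs() arrives as a named positional argument.
    def build_graph(self, image, label):
        image = tf.expand_dims(image, 3)  # NHWC, single channel
        logits = tf.layers.dense(tf.layers.flatten(image), 10)
        cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits, labels=label),
            name='cross_entropy_loss')
        return cost  # the total cost of this tower

Models that take a variable number of inputs (e.g. the FasterRCNN and GANModelDesc classes below) keep the catch-all form build_graph(self, *inputs) and unpack manually.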
@@ -94,8 +94,7 @@ class Model(ModelDesc):
         value = FullyConnected('fc-v', l, 1)
         return logits, value

-    def _build_graph(self, inputs):
-        state, action, futurereward, action_prob = inputs
+    def build_graph(self, state, action, futurereward, action_prob):
         logits, value = self._get_NN_prediction(state)
         value = tf.squeeze(value, [1], name='pred_value')  # (B,)
         policy = tf.nn.softmax(logits, name='policy')
...
@@ -33,8 +33,7 @@ class Model(ModelDesc):
                 tf.placeholder(tf.int32, [None], 'seqlen'),  # b
                 ]

-    def _build_graph(self, inputs):
-        feat, labelidx, labelvalue, labelshape, seqlen = inputs
+    def build_graph(self, feat, labelidx, labelvalue, labelshape, seqlen):
         label = tf.SparseTensor(labelidx, labelvalue, labelshape)
         cell = rnn.MultiRNNCell([rnn.LSTMBlockCell(num_units=HIDDEN) for _ in range(NLAYER)])
...
@@ -74,9 +74,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.int32, (None, param.seq_len), 'input'),
                 tf.placeholder(tf.int32, (None, param.seq_len), 'nextinput')]

-    def _build_graph(self, inputs):
-        input, nextinput = inputs
+    def build_graph(self, input, nextinput):
         cell = rnn.MultiRNNCell([rnn.LSTMBlockCell(num_units=param.rnn_size)
                                  for _ in range(param.num_rnn_layer)])
...
@@ -43,8 +43,7 @@ class Model(ModelDesc):
     def get_DQN_prediction(self, image):
         return self._get_DQN_prediction(image)

-    def _build_graph(self, inputs):
-        comb_state, action, reward, isOver = inputs
+    def build_graph(self, comb_state, action, reward, isOver):
         comb_state = tf.cast(comb_state, tf.float32)
         state = tf.slice(comb_state, [0, 0, 0, 0], [-1, -1, -1, self.channel], name='state')
         self.predict_value = self.get_DQN_prediction(state)
...
@@ -29,8 +29,7 @@ IMAGE_SIZE = 28
 class Model(mnist_example.Model):
-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = tf.expand_dims(image, 3)
         logits = (LinearWrap(image)  # the starting brace is only for line-breaking
...
@@ -81,8 +81,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = image / 255.0
         fw, fa, fg = get_dorefa(BITW, BITA, BITG)
...
@@ -36,8 +36,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = image / 256.0
         fw, fa, fg = get_dorefa(BITW, BITA, BITG)
...
@@ -47,8 +47,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, 40, 40, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         is_training = get_current_tower_context().is_training
         fw, fa, fg = get_dorefa(BITW, BITA, BITG)
...
@@ -120,9 +120,8 @@ class Model(ModelDesc):
         logger.info('Parameter net output: {}'.format(pred_filter.get_shape().as_list()))
         return pred_filter

-    def _build_graph(self, inputs):
+    def build_graph(self, theta, image, gt_image, gt_filter):
         kernel_size = 9
-        theta, image, gt_image, gt_filter = inputs
         image = image
         gt_image = gt_image
...
@@ -90,7 +90,7 @@ class Model(ModelDesc):
             -1, -1]), name='fm_anchors')
         return fm_anchors

-    def _build_graph(self, inputs):
+    def build_graph(self, *inputs):
         is_training = get_current_tower_context().is_training
         if config.MODE_MASK:
             image, anchor_labels, anchor_boxes, gt_boxes, gt_labels, gt_masks = inputs
...
@@ -73,8 +73,7 @@ class Model(GANModelDesc):
                  .FullyConnected('fc', NH)())
         return l

-    def _build_graph(self, inputs):
-        image_pos = inputs[0]
+    def build_graph(self, image_pos):
         image_pos = image_pos / 128.0 - 1
         z = tf.random_uniform([args.batch, args.z_dim], minval=-1, maxval=1, name='z_train')
...
@@ -84,8 +84,7 @@ class Model(GANModelDesc):
                  .FullyConnected('fct', 1, activation=tf.identity)())
         return l

-    def _build_graph(self, inputs):
-        image_pos, y = inputs
+    def build_graph(self, image_pos, y):
         image_pos = tf.expand_dims(image_pos * 2.0 - 1, -1)
         y = tf.one_hot(y, 10, name='label_onehot')
...
@@ -85,8 +85,7 @@ class Model(GANModelDesc):
                  .Conv2D('conv4', 1, strides=1, activation=tf.identity, use_bias=True)())
         return l

-    def _build_graph(self, inputs):
-        A, B = inputs
+    def build_graph(self, A, B):
         with tf.name_scope('preprocess'):
             A = tf.transpose(A / 128.0 - 1.0, [0, 3, 1, 2])
             B = tf.transpose(B / 128.0 - 1.0, [0, 3, 1, 2])
...
@@ -76,8 +76,7 @@ class Model(GANModelDesc):
                  .FullyConnected('fct', 1)())
         return l

-    def _build_graph(self, inputs):
-        image_pos = inputs[0]
+    def build_graph(self, image_pos):
         image_pos = image_pos / 128.0 - 1
         z = tf.random_uniform([self.batch, self.zdim], -1, 1, name='z_train')
...
@@ -78,8 +78,7 @@ class Model(GANModelDesc):
         add_moving_summary(ret)
         return ret

-    def _build_graph(self, inputs):
-        A, B = inputs
+    def build_graph(self, A, B):
         A = tf.transpose(A / 255.0, [0, 3, 1, 2])
         B = tf.transpose(B / 255.0, [0, 3, 1, 2])
...
@@ -61,7 +61,7 @@ class GANModelDesc(ModelDescBase):
         add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)

-    def _build_graph(self, inputs):
+    def build_graph(self, *inputs):
         """
         Have to build one tower and set the following attributes:
         g_loss, d_loss, g_vars, d_vars.
...
@@ -117,8 +117,7 @@ class Model(GANModelDesc):
                  .Conv2D('convlast', 1, strides=1, padding='VALID', activation=tf.identity)())
         return l

-    def _build_graph(self, inputs):
-        input, output = inputs
+    def build_graph(self, input, output):
         input, output = input / 128.0 - 1, output / 128.0 - 1
         with argscope([Conv2D, Conv2DTranspose], kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
...
@@ -41,8 +41,7 @@ class Model(DCGAN.Model):
                  .FullyConnected('fct', 1, activation=tf.identity)())
         return tf.reshape(l, [-1])

-    def _build_graph(self, inputs):
-        image_pos = inputs[0]
+    def build_graph(self, image_pos):
         image_pos = image_pos / 128.0 - 1
         z = tf.random_normal([self.batch, self.zdim], name='z_train')
...
@@ -138,8 +138,7 @@ class Model(GANModelDesc):
                  .FullyConnected('fce-out', DIST_PARAM_DIM)())
         return logits, encoder

-    def _build_graph(self, inputs):
-        real_sample = inputs[0]
+    def build_graph(self, real_sample):
         real_sample = tf.expand_dims(real_sample, -1)
         # sample the latent code:
...
@@ -48,8 +48,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, None, None, 3], 'image'),
                 tf.placeholder(tf.int32, [None, None, None], 'edgemap')]

-    def _build_graph(self, inputs):
-        image, edgemap = inputs
+    def build_graph(self, image, edgemap):
         image = image - tf.constant([104, 116, 122], dtype='float32')
         edgemap = tf.expand_dims(edgemap, 3, name='edgemap4d')
...
@@ -152,8 +152,7 @@ class ImageNetModel(ModelDesc):
         return [tf.placeholder(self.image_dtype, [None, self.image_shape, self.image_shape, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = ImageNetModel.image_preprocess(image, bgr=True)
         if self.data_format == 'NCHW':
             image = tf.transpose(image, [0, 3, 1, 2])
...
@@ -28,8 +28,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = image / 128.0
         def inception(name, x, nr1x1, nr3x3r, nr3x3, nr233r, nr233, nrpool, pooltype):
...
@@ -50,9 +50,8 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.int32, (None, SEQ_LEN), 'input'),
                 tf.placeholder(tf.int32, (None, SEQ_LEN), 'nextinput')]

-    def _build_graph(self, inputs):
+    def build_graph(self, input, nextinput):
         is_training = get_current_tower_context().is_training
-        input, nextinput = inputs
         initializer = tf.random_uniform_initializer(-0.05, 0.05)
         def get_basic_cell():
...
@@ -43,9 +43,8 @@ class ResNet_Cifar(ModelDesc):
         return [tf.placeholder(tf.float32, [None, 32, 32, 3], 'input'),
                 tf.placeholder(tf.float32, [None, CLASS_NUM], 'label')]

-    def _build_graph(self, inputs):
+    def build_graph(self, image, label):
         assert tf.test.is_gpu_available()
-        image, label = inputs
         MEAN_IMAGE = tf.constant([0.4914, 0.4822, 0.4465], dtype=tf.float32)
         STD_IMAGE = tf.constant([0.2023, 0.1994, 0.2010], dtype=tf.float32)
...
@@ -44,8 +44,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, 32, 32, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = image / 128.0
         assert tf.test.is_gpu_available()
         image = tf.transpose(image, [0, 3, 1, 2])
...
@@ -32,8 +32,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         blocks = CFG[DEPTH]
         bottleneck = functools.partial(resnet_bottleneck, stride_first=True)
...
@@ -35,8 +35,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.uint8, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = image_preprocess(image, bgr=True)
         image = tf.transpose(image, [0, 3, 1, 2])
...
@@ -57,8 +57,7 @@ class Model(tp.ModelDescBase):
     def inputs(self):
         return [tf.placeholder(tf.float32, (IMAGE_SIZE, IMAGE_SIZE, 3), 'image')]

-    def _build_graph(self, inputs):
-        orig_image = inputs[0]
+    def build_graph(self, orig_image):
         mean = tf.get_variable('resnet_v1_50/mean_rgb', shape=[3])
         with guided_relu():
             with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training=False)):
...
@@ -241,9 +241,7 @@ class SiameseModel(EmbeddingModel):
                 tf.placeholder(tf.float32, (None, 28, 28), 'input_y'),
                 tf.placeholder(tf.int32, (None,), 'label')]

-    def _build_graph(self, inputs):
-        # get inputs
-        x, y, label = inputs
+    def build_graph(self, x, y, label):
         # embed them
         x, y = self.embed([x, y])
@@ -261,8 +259,7 @@ class SiameseModel(EmbeddingModel):
 class CosineModel(SiameseModel):
-    def _build_graph(self, inputs):
-        x, y, label = inputs
+    def build_graph(self, x, y, label):
         x, y = self.embed([x, y])
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
@@ -289,8 +286,7 @@ class TripletModel(EmbeddingModel):
     def loss(self, a, p, n):
         return triplet_loss(a, p, n, 5., extra=True, scope="loss")

-    def _build_graph(self, inputs):
-        a, p, n = inputs
+    def build_graph(self, a, p, n):
         a, p, n = self.embed([a, p, n])
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
@@ -319,9 +315,7 @@ class CenterModel(EmbeddingModel):
         return [tf.placeholder(tf.float32, (None, 28, 28), 'input'),
                 tf.placeholder(tf.int32, (None,), 'label')]

-    def _build_graph(self, inputs):
-        # get inputs
-        x, label = inputs
+    def build_graph(self, x, label):
         # embed them
         x = self.embed(x)
...
@@ -24,13 +24,11 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE, 2), 'input'),
                 tf.placeholder(tf.int32, (None,), 'label')]

-    def _build_graph(self, inputs):
+    def build_graph(self, image, label):
         xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
                         for x in range(WARP_TARGET_SIZE)], dtype='float32')
         xys = tf.constant(xys, dtype=tf.float32, name='xys')  # p x 3
-        image, label = inputs
         image = image / 255.0 - 0.5  # bhw2
         def get_stn(image):
...
@@ -52,9 +52,9 @@ class Model(GANModelDesc):
         return [tf.placeholder(tf.float32, (None, self.height * 1, self.width * 1, CHANNELS), 'Ilr'),
                 tf.placeholder(tf.float32, (None, self.height * 4, self.width * 4, CHANNELS), 'Ihr')]

-    def _build_graph(self, inputs):
+    def build_graph(self, Ilr, Ihr):
+        Ilr, Ihr = Ilr / 255.0, Ihr / 255.0
         ctx = get_current_tower_context()
-        Ilr, Ihr = inputs[0] / 255.0, inputs[1] / 255.0
         Ibicubic = tf.image.resize_bicubic(
             Ilr, [4 * self.height, 4 * self.width], align_corners=True,
             name='bicubic_baseline')  # (0,1)
...
@@ -30,8 +30,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, (None, 30, 30, 3), 'input'),
                 tf.placeholder(tf.int32, (None,), 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         is_training = get_current_tower_context().is_training
         keep_prob = tf.constant(0.5 if is_training else 1.0)
...
@@ -31,10 +31,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE), 'input'),
                 tf.placeholder(tf.int32, (None,), 'label')]

-    def _build_graph(self, inputs):
-        # inputs contains a list of input variables defined above
-        image, label = inputs
+    def build_graph(self, image, label):
         # In tensorflow, inputs to convolution function are assumed to be
         # NHWC. Add a single channel here.
         image = tf.expand_dims(image, 3)
...
@@ -72,9 +72,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE), 'input'),
                 tf.placeholder(tf.int32, (None,), 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = tf.expand_dims(image * 2 - 1, 3)
         with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
@@ -103,7 +101,7 @@ class Model(ModelDesc):
         cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
         cost = tf.reduce_mean(cost, name='cross_entropy_loss')

-        accuracy = tf.reduce_mean(tf.to_float(tf.nn.in_top_k(logits, label, 1)), name='accuracy')
+        tf.reduce_mean(tf.to_float(tf.nn.in_top_k(logits, label, 1)), name='accuracy')

         wd_cost = tf.multiply(1e-5,
                               regularize_cost('fc.*/W', tf.nn.l2_loss),
...
@@ -26,9 +26,7 @@ class Model(ModelDesc):
         return [tf.placeholder(tf.float32, [None, 40, 40, 3], 'input'),
                 tf.placeholder(tf.int32, [None], 'label')]

-    def _build_graph(self, inputs):
-        image, label = inputs
+    def build_graph(self, image, label):
         image = image / 128.0 - 1
         with argscope(Conv2D, activation=BNReLU, use_bias=False):
...
@@ -20,15 +20,14 @@ CHANNELS = 3
 class Model(ModelDesc):
     def inputs(self):
-        return [tf.placeholder(tf.float32, (None, SHAPE, SHAPE, CHANNELS), 'input'),
-                tf.placeholder(tf.int32, (None,), 'label')]
+        return [tf.placeholder(tf.float32, (None, SHAPE, SHAPE, CHANNELS), 'input1'),
+                tf.placeholder(tf.int32, (None,), 'input2')]

-    def _build_graph(self, inputs):
-        image, label = inputs
-        image = image * 2 - 1
-        self.cost = tf.identity(0., name='total_costs')
-        summary.add_moving_summary(self.cost)
+    def build_graph(self, input1, input2):
+        cost = tf.identity(input1 - input2, name='total_costs')
+        summary.add_moving_summary(cost)
+        return cost

     def _get_optimizer(self):
         lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
...