Commit 14b3578a authored by Yuxin Wu

a rename in examples (not a breaking change)

parent 88af1f1d
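The rename is mechanical: in every example the override point _get_input_vars() becomes _get_inputs(), the input_vars argument of _build_graph() becomes inputs, and the one caller that built placeholders directly (char-rnn's sample()) now calls get_reuse_placehdrs() instead of get_input_vars(). A minimal sketch of the new spelling, using a hypothetical MyModel rather than any actual example file:

# sketch only -- MyModel is hypothetical; the examples themselves use `from tensorpack import *`
import tensorflow as tf
from tensorpack import ModelDesc, InputVar

class MyModel(ModelDesc):
    def _get_inputs(self):                 # was: _get_input_vars(self)
        return [InputVar(tf.float32, [None, 28, 28], 'input'),
                InputVar(tf.int32, [None], 'label')]

    def _build_graph(self, inputs):        # was: _build_graph(self, input_vars)
        image, label = inputs
        # ... build the network and set self.cost, exactly as before ...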
@@ -38,8 +38,7 @@ def get_player(dumpdir=None):
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         assert NUM_ACTIONS is not None
         return [InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'state'),
                 InputVar(tf.int32, (None,), 'action'),
......
@@ -75,8 +75,7 @@ class MySimulatorWorker(SimulatorProcess):
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         assert NUM_ACTIONS is not None
         return [InputVar(tf.float32, (None,) + IMAGE_SHAPE3, 'state'),
                 InputVar(tf.int64, (None,), 'action'),
......
@@ -27,8 +27,7 @@ FEATUREDIM = 39
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, None, FEATUREDIM], 'feat'), # bxmaxseqx39
                 InputVar(tf.int64, None, 'labelidx'), # label is b x maxlen, sparse
                 InputVar(tf.int32, None, 'labelvalue'),
@@ -36,8 +35,8 @@ class Model(ModelDesc):
                 InputVar(tf.int32, [None], 'seqlen'), # b
                 ]
-    def _build_graph(self, input_vars):
-        feat, labelidx, labelvalue, labelshape, seqlen = input_vars
+    def _build_graph(self, inputs):
+        feat, labelidx, labelvalue, labelshape, seqlen = inputs
         label = tf.SparseTensor(labelidx, labelvalue, labelshape)
         cell = tf.contrib.rnn.BasicLSTMCell(num_units=HIDDEN)
......
@@ -60,12 +60,12 @@ class CharRNNData(RNGDataFlow):
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.int32, (None, param.seq_len), 'input'),
                 InputVar(tf.int32, (None, param.seq_len), 'nextinput')]
-    def _build_graph(self, input_vars):
-        input, nextinput = input_vars
+    def _build_graph(self, inputs):
+        input, nextinput = inputs
         cell = tf.contrib.rnn.BasicLSTMCell(num_units=param.rnn_size)
         cell = tf.contrib.rnn.MultiRNNCell([cell] * param.num_rnn_layer)
@@ -131,18 +131,18 @@ def sample(path, start, length):
     ds = CharRNNData(param.corpus, 100000)
     model = Model()
-    input_vars = model.get_input_vars()
-    model.build_graph(input_vars, False)
+    inputs = model.get_reuse_placehdrs()
+    model.build_graph(inputs, False)
     sess = tf.Session()
     tfutils.SaverRestore(path).init(sess)
     dummy_input = np.zeros((1, 1), dtype='int32')
     with sess.as_default():
         # feed the starting sentence
-        state = model.initial.eval({input_vars[0]: dummy_input})
+        state = model.initial.eval({inputs[0]: dummy_input})
         for c in start[:-1]:
             x = np.array([[ds.lut.get_idx(c)]], dtype='int32')
-            state = model.last_state.eval({input_vars[0]: x, model.initial: state})
+            state = model.last_state.eval({inputs[0]: x, model.initial: state})
         def pick(prob):
             t = np.cumsum(prob)
@@ -155,7 +155,7 @@ def sample(path, start, length):
         for k in range(length):
             x = np.array([[ds.lut.get_idx(c)]], dtype='int32')
             [prob, state] = sess.run([model.prob, model.last_state],
-                                     {input_vars[0]: x, model.initial: state})
+                                     {inputs[0]: x, model.initial: state})
             c = ds.lut.get_obj(pick(prob[0]))
             ret += c
     print(ret)
......
@@ -44,7 +44,7 @@ def get_gaussian_map():
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, 368, 368, 3), 'input'),
                 InputVar(tf.float32, (None, 368, 368, 15), 'label'),
                 ]
......
@@ -68,8 +68,7 @@ common.get_player = get_player # so that eval functions in common can use the p
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         if NUM_ACTIONS is None:
             p = get_player()
             del p
......
@@ -29,9 +29,8 @@ IMAGE_SIZE = 28
 class Model(mnist_example.Model):
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         image = tf.expand_dims(image, 3)
         with argscope(Conv2D, kernel_shape=5, nl=tf.nn.relu):
......
@@ -74,13 +74,12 @@ BATCH_SIZE = None
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, 224, 224, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         image = image / 255.0
         fw, fa, fg = get_dorefa(BITW, BITA, BITG)
......
@@ -33,12 +33,12 @@ BITG = 32
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, 224, 224, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         image = image / 256.0
         fw, fa, fg = get_dorefa(BITW, BITA, BITG)
......
@@ -43,13 +43,12 @@ BITG = 4
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, 40, 40, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         is_training = get_current_tower_context().is_training
         fw, fa, fg = get_dorefa(BITW, BITA, BITG)
......
@@ -36,8 +36,7 @@ CFG.Z_DIM = 100
 class Model(GANModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, CFG.SHAPE, CFG.SHAPE, 3), 'input')]
     def generator(self, z):
@@ -70,8 +69,8 @@ class Model(GANModelDesc):
                 .FullyConnected('fct', 1, nl=tf.identity)())
         return l
-    def _build_graph(self, input_vars):
-        image_pos = input_vars[0]
+    def _build_graph(self, inputs):
+        image_pos = inputs[0]
         image_pos = image_pos / 128.0 - 1
         z = tf.random_uniform([CFG.BATCH, CFG.Z_DIM], -1, 1, name='z_train')
......
@@ -43,8 +43,7 @@ NF = 64 # number of filter
 class Model(GANModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, SHAPE, SHAPE, IN_CH), 'input'),
                 InputVar(tf.float32, (None, SHAPE, SHAPE, OUT_CH), 'output')]
@@ -100,8 +99,8 @@ class Model(GANModelDesc):
                 .Conv2D('convlast', 1, stride=1, padding='VALID')())
         return l
-    def _build_graph(self, input_vars):
-        input, output = input_vars
+    def _build_graph(self, inputs):
+        input, output = inputs
         input, output = input / 128.0 - 1, output / 128.0 - 1
         with argscope([Conv2D, Deconv2D],
......
@@ -32,7 +32,7 @@ class GaussianWithUniformSample(GaussianDistribution):
 class Model(GANModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, 28, 28), 'input')]
     def generator(self, z):
@@ -62,8 +62,8 @@ class Model(GANModelDesc):
                 .FullyConnected('fce-out', self.factors.param_dim, nl=tf.identity)())
         return logits, encoder
-    def _build_graph(self, input_vars):
-        real_sample = input_vars[0]
+    def _build_graph(self, inputs):
+        real_sample = inputs[0]
         real_sample = tf.expand_dims(real_sample * 2.0 - 1, -1)
         # latent space is cat(10) x uni(1) x uni(1) x noise(NOISE_DIM)
......
@@ -17,13 +17,12 @@ from tensorpack.tfutils.summary import *
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, None, None, 3], 'image'),
                 InputVar(tf.int32, [None, None, None], 'edgemap')]
-    def _build_graph(self, input_vars):
-        image, edgemap = input_vars
+    def _build_graph(self, inputs):
+        image, edgemap = inputs
         image = image - tf.constant([104, 116, 122], dtype='float32')
         edgemap = tf.expand_dims(edgemap, 3, name='edgemap4d')
......
@@ -29,12 +29,12 @@ Learning rate may need a different schedule for different number of GPUs (becaus
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         image = image / 128.0
         def inception(name, x, nr1x1, nr3x3r, nr3x3, nr233r, nr233, nrpool, pooltype):
......
@@ -35,12 +35,12 @@ INPUT_SHAPE = 299
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         image = image / 255.0 # ?
         def proj_kk(l, k, ch_r, ch, stride=1):
......
@@ -44,13 +44,13 @@ def get_PennTreeBank(data_dir=None):
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.int32, (None, SEQ_LEN), 'input'),
                 InputVar(tf.int32, (None, SEQ_LEN), 'nextinput')]
-    def _build_graph(self, input_vars):
+    def _build_graph(self, inputs):
         is_training = get_current_tower_context().is_training
-        input, nextinput = input_vars
+        input, nextinput = inputs
         initializer = tf.random_uniform_initializer(-0.05, 0.05)
         cell = rnn.BasicLSTMCell(num_units=HIDDEN_SIZE, forget_bias=0.0)
......
@@ -37,12 +37,12 @@ class Model(ModelDesc):
         super(Model, self).__init__()
         self.n = n
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, 32, 32, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         image = image / 128.0 - 1
         def residual(name, l, increase_dim=False, first=False):
......
@@ -28,12 +28,12 @@ DEPTH = None
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         def shortcut(l, n_in, n_out, stride):
             if n_in != n_out:
......
@@ -16,11 +16,11 @@ IMAGE_SIZE = 224
 class Model(tp.ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [tp.InputVar(tf.float32, (IMAGE_SIZE, IMAGE_SIZE, 3), 'image')]
-    def _build_graph(self, input_vars):
-        orig_image = input_vars[0]
+    def _build_graph(self, inputs):
+        orig_image = inputs[0]
         mean = tf.get_variable('resnet_v1_50/mean_rgb', shape=[3])
         with tp.symbolic_functions.guided_relu():
             with slim.arg_scope(resnet_v1.resnet_arg_scope(is_training=False)):
......
@@ -61,20 +61,20 @@ class SiameseModel(EmbeddingModel):
         ds = BatchData(ds, 128 // 2)
         return ds
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, 28, 28), 'input'),
                 InputVar(tf.float32, (None, 28, 28), 'input_y'),
                 InputVar(tf.int32, (None,), 'label')]
-    def _build_graph(self, input_vars):
+    def _build_graph(self, inputs):
         # get inputs
-        x, y, label = input_vars
+        x, y, label = inputs
         # embed them
         x, y = self.embed([x, y])
         # tag the embedding of 'input' with name 'emb', just for inference later on
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
-            tf.identity(self.embed(input_vars[0]), name="emb")
+            tf.identity(self.embed(inputs[0]), name="emb")
         # compute the actual loss
         cost, pos_dist, neg_dist = symbf.contrastive_loss(x, y, label, 5., extra=True)
@@ -85,12 +85,12 @@ class SiameseModel(EmbeddingModel):
 class CosineModel(SiameseModel):
-    def _build_graph(self, input_vars):
-        x, y, label = input_vars
+    def _build_graph(self, inputs):
+        x, y, label = inputs
         x, y = self.embed([x, y])
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
-            tf.identity(self.embed(input_vars[0]), name="emb")
+            tf.identity(self.embed(inputs[0]), name="emb")
         cost = symbf.cosine_loss(x, y, label)
         self.cost = tf.identity(cost, name="cost")
@@ -104,7 +104,7 @@ class TripletModel(EmbeddingModel):
         ds = BatchData(ds, 128 // 3)
         return ds
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, 28, 28), 'input'),
                 InputVar(tf.float32, (None, 28, 28), 'input_p'),
                 InputVar(tf.float32, (None, 28, 28), 'input_n')]
@@ -112,12 +112,12 @@ class TripletModel(EmbeddingModel):
     def loss(self, a, p, n):
         return symbf.triplet_loss(a, p, n, 5., extra=True)
-    def _build_graph(self, input_vars):
-        a, p, n = input_vars
+    def _build_graph(self, inputs):
+        a, p, n = inputs
         a, p, n = self.embed([a, p, n])
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
-            tf.identity(self.embed(input_vars[0]), name="emb")
+            tf.identity(self.embed(inputs[0]), name="emb")
         cost, pos_dist, neg_dist = self.loss(a, p, n)
         self.cost = tf.identity(cost, name="cost")
......
@@ -18,17 +18,16 @@ HALF_DIFF = (IMAGE_SIZE - WARP_TARGET_SIZE) // 2
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE, 2), 'input'),
                 InputVar(tf.int32, (None,), 'label')]
-    def _build_graph(self, input_vars):
+    def _build_graph(self, inputs):
         xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
                         for x in range(WARP_TARGET_SIZE)], dtype='float32')
         xys = tf.constant(xys, dtype=tf.float32, name='xys') # p x 3
-        image, label = input_vars
+        image, label = inputs
         image = image / 255.0 - 0.5 # bhw2
......
@@ -24,18 +24,17 @@ Not a good model for Cifar100, just for demonstration.
 class Model(ModelDesc):
     def __init__(self, cifar_classnum):
         super(Model, self).__init__()
         self.cifar_classnum = cifar_classnum
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, 30, 30, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')
                 ]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         is_training = get_current_tower_context().is_training
         keep_prob = tf.constant(0.5 if is_training else 1.0)
......
@@ -24,7 +24,7 @@ Usage:
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, 227, 227, 3), 'input')]
     def _build_graph(self, inputs):
......
@@ -24,8 +24,7 @@ Usage:
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, (None, 224, 224, 3), 'input')]
     def _build_graph(self, inputs):
......
@@ -23,18 +23,18 @@ USE_SLIM = False
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         """Define all the input variables (with type, shape, name) that'll be
         fed into the graph to produce a cost. """
         return [InputVar(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE), 'input'),
                 InputVar(tf.int32, (None,), 'label')]
-    def _build_graph(self, input_vars):
+    def _build_graph(self, inputs):
         """This function should build the model which takes the input variables
         and define self.cost at the end"""
-        # input_vars contains a list of input variables defined above
-        image, label = input_vars
+        # inputs contains a list of input variables defined above
+        image, label = inputs
         # In tensorflow, inputs to convolution function are assumed to be
         # NHWC. Add a single channel here.
         image = tf.expand_dims(image, 3)
......
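The docstrings in the mnist example above spell out the contract: each InputVar declares the dtype, shape and name of one input that is later fed into the graph, and _build_graph() must end by setting self.cost. When the graph is built, each declaration becomes a placeholder with those properties (tensorpack creates them itself, e.g. through get_reuse_placehdrs() as in the char-rnn hunk above); roughly:

# rough equivalent of the two InputVars declared in the mnist example above
# (sketch only; in practice tensorpack builds these placeholders for you)
import tensorflow as tf

IMAGE_SIZE = 28  # as in the example
input_ph = tf.placeholder(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE], name='input')
label_ph = tf.placeholder(tf.int32, [None], name='label')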
@@ -23,12 +23,12 @@ Speed is about 43 it/s on TitanX.
 class Model(ModelDesc):
-    def _get_input_vars(self):
+    def _get_inputs(self):
         return [InputVar(tf.float32, [None, 40, 40, 3], 'input'),
                 InputVar(tf.int32, [None], 'label')]
-    def _build_graph(self, input_vars):
-        image, label = input_vars
+    def _build_graph(self, inputs):
+        image, label = inputs
         image = image / 128.0 - 1
......
@@ -87,11 +87,14 @@ class ModelDesc(object):
         """
         return self._get_input_vars()
-    @abstractmethod
-    def _get_input_vars(self):
+    def _get_input_vars(self): # keep backward compatibility
         """
         :returns: a list of InputVar
         """
+        return self._get_inputs()
+    def _get_inputs(self): # this is a better name than _get_input_vars
+        raise NotImplementedError()
     def build_graph(self, model_inputs):
         """
@@ -171,7 +174,7 @@ class ModelFromMetaGraph(ModelDesc):
             assert k in all_coll, \
                 "Collection {} not found in metagraph!".format(k)
-    def _get_input_vars(self):
+    def _get_inputs(self):
         col = tf.get_collection(INPUT_VARS_KEY)
         col = [InputVar.loads(v) for v in col]
         return col
......
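The last hunk is what makes this "not a breaking change": the base class keeps _get_input_vars() as a thin shim that forwards to the new _get_inputs(), so old subclasses that still override _get_input_vars() keep working while new ones override _get_inputs(). A simplified, self-contained sketch of that dispatch (OldStyleModel and NewStyleModel are hypothetical; the real methods return InputVar lists):

class ModelDesc(object):
    def get_input_vars(self):
        # public entry point, unchanged by this commit
        return self._get_input_vars()

    def _get_input_vars(self):      # keep backward compatibility
        return self._get_inputs()

    def _get_inputs(self):          # the new override point
        raise NotImplementedError()


class OldStyleModel(ModelDesc):
    def _get_input_vars(self):      # old name: overrides the shim directly
        return ['old-style spec']


class NewStyleModel(ModelDesc):
    def _get_inputs(self):          # new name: reached through the shim
        return ['new-style spec']


print(OldStyleModel().get_input_vars())   # ['old-style spec']
print(NewStyleModel().get_input_vars())   # ['new-style spec']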