Commit a9dd0b8e authored by Yuxin Wu

replace step_per_epoch by steps_per_epoch in examples

parent 543d9299
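The change is mechanical: every example's `TrainConfig` call, plus the local `step_per_epoch` variables feeding it, switches to the pluralized `steps_per_epoch` keyword. A minimal before/after sketch; `Model`, `dataset`, and `lr` are stand-ins for whatever each individual example defines:

```python
# Hedged sketch of the rename; Model, dataset and lr are placeholders
# for the per-example definitions. Only the keyword argument changes.
config = TrainConfig(
    dataflow=dataset,
    optimizer=tf.train.AdamOptimizer(lr),
    callbacks=[ModelSaver()],
    model=Model(),
    steps_per_epoch=dataset.size(),  # previously: step_per_epoch=dataset.size()
    max_epoch=100,
)
```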
@@ -298,9 +298,9 @@ def get_config():
     return TrainConfig(
         dataflow=dataset,
         optimizer=tf.train.AdamOptimizer(lr),
-        callbacks=Callbacks([StatPrinter(), PeriodicCallback(ModelSaver(), 3)]),
+        callbacks=[PeriodicCallback(ModelSaver(), 3)],
         model=Model(),
-        step_per_epoch=dataset.size(),
+        steps_per_epoch=dataset.size(),
         max_epoch=100,
     )
 ```
......
@@ -219,7 +219,7 @@ def get_config():
         ],
         session_config=get_default_sess_config(0.5),
         model=M,
-        step_per_epoch=STEP_PER_EPOCH,
+        steps_per_epoch=STEP_PER_EPOCH,
         max_epoch=1000,
     )
......
@@ -89,7 +89,7 @@ def get_data(path, isTrain, stat_file):
 def get_config(ds_train, ds_test):
-    step_per_epoch = ds_train.size()
+    steps_per_epoch = ds_train.size()
     lr = symbolic_functions.get_scalar_var('learning_rate', 5e-3, summary=True)
@@ -105,7 +105,7 @@ def get_config(ds_train, ds_test):
                 InferenceRunner(ds_test, [ScalarStats('error')]), 2),
         ],
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=70,
     )
......
@@ -103,7 +103,7 @@ def get_config():
     ds = CharRNNData(param.corpus, 100000)
     ds = BatchData(ds, param.batch_size)
-    step_per_epoch = ds.size()
+    steps_per_epoch = ds.size()
     lr = symbolic_functions.get_scalar_var('learning_rate', 2e-3, summary=True)
@@ -115,7 +115,7 @@ def get_config():
             ScheduledHyperParamSetter('learning_rate', [(25, 2e-4)])
         ],
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=50,
     )
......
@@ -190,7 +190,7 @@ def get_config():
         # save memory for multiprocess evaluator
         session_config=get_default_sess_config(0.6),
         model=M,
-        step_per_epoch=STEP_PER_EPOCH,
+        steps_per_epoch=STEP_PER_EPOCH,
     )
......
@@ -247,7 +247,7 @@ def get_config():
                 ClassificationError('wrong-top5', 'val-error-top5')])
         ],
         model=Model(),
-        step_per_epoch=10000,
+        steps_per_epoch=10000,
         max_epoch=100,
     )
......
@@ -147,7 +147,7 @@ def get_config():
     data_train = AugmentImageComponent(data_train, augmentors)
     data_train = BatchData(data_train, 128)
     data_train = PrefetchDataZMQ(data_train, 5)
-    step_per_epoch = data_train.size()
+    steps_per_epoch = data_train.size()
     augmentors = [imgaug.Resize((40, 40))]
     data_test = AugmentImageComponent(data_test, augmentors)
@@ -169,7 +169,7 @@ def get_config():
                 [ScalarStats('cost'), ClassificationError()])
         ],
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=200,
     )
......
@@ -112,7 +112,7 @@ def get_config():
         callbacks=[ModelSaver()],
         session_config=get_default_sess_config(0.5),
         model=Model(),
-        step_per_epoch=300,
+        steps_per_epoch=300,
         max_epoch=200,
     )
......
@@ -173,7 +173,7 @@ def get_config():
             ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)])
         ],
         model=Model(),
-        step_per_epoch=dataset.size(),
+        steps_per_epoch=dataset.size(),
         max_epoch=300,
     )
......
@@ -161,7 +161,7 @@ def get_config():
         callbacks=[ModelSaver()],
         session_config=get_default_sess_config(0.5),
         model=Model(),
-        step_per_epoch=500,
+        steps_per_epoch=500,
         max_epoch=100,
     )
......
@@ -166,7 +166,7 @@ def view_data():
 def get_config():
     logger.auto_set_dir()
     dataset_train = get_data('train')
-    step_per_epoch = dataset_train.size() * 40
+    steps_per_epoch = dataset_train.size() * 40
     dataset_val = get_data('val')
     lr = get_scalar_var('learning_rate', 3e-5, summary=True)
@@ -181,7 +181,7 @@ def get_config():
                 BinaryClassificationStats('prediction', 'edgemap4d'))
         ],
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=100,
     )
......
@@ -153,7 +153,7 @@ def get_config():
     logger.auto_set_dir()
     # prepare dataset
     dataset_train = get_data('train')
-    step_per_epoch = 5000
+    steps_per_epoch = 5000
     dataset_val = get_data('val')
     lr = get_scalar_var('learning_rate', 0.045, summary=True)
@@ -172,7 +172,7 @@ def get_config():
         ],
         session_config=get_default_sess_config(0.99),
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=80,
     )
......
@@ -281,7 +281,7 @@ def get_config():
         ],
         session_config=get_default_sess_config(0.9),
         model=Model(),
-        step_per_epoch=5000,
+        steps_per_epoch=5000,
         max_epoch=100,
     )
......
@@ -110,11 +110,11 @@ def get_config():
     data3, wd2id = get_PennTreeBank()
     global VOCAB_SIZE
     VOCAB_SIZE = len(wd2id)
-    step_per_epoch = (data3[0].shape[0] // BATCH - 1) // SEQ_LEN
+    steps_per_epoch = (data3[0].shape[0] // BATCH - 1) // SEQ_LEN
     train_data = TensorInput(
         lambda: ptb_producer(data3[0], BATCH, SEQ_LEN),
-        step_per_epoch)
+        steps_per_epoch)
     val_data = TensorInput(
         lambda: ptb_producer(data3[1], BATCH, SEQ_LEN),
         (data3[1].shape[0] // BATCH - 1) // SEQ_LEN)
......
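The PTB hunk above also shows how the epoch is sized for `TensorInput`: a corpus of N tokens is laid out as BATCH parallel streams, each step consumes SEQ_LEN positions, and one position is held back because the targets are the inputs shifted by one. A worked sketch with illustrative numbers only (the script defines the real `BATCH` and `SEQ_LEN` itself):

```python
# Illustrative values; ~929k tokens is the commonly cited PTB train size.
N, BATCH, SEQ_LEN = 929589, 20, 35
batch_len = N // BATCH                         # 46479 positions per stream
steps_per_epoch = (batch_len - 1) // SEQ_LEN   # -1: targets are shifted inputs
print(steps_per_epoch)                         # -> 1327
```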
@@ -134,7 +134,7 @@ def get_config():
     # prepare dataset
     dataset_train = get_data('train')
-    step_per_epoch = dataset_train.size()
+    steps_per_epoch = dataset_train.size()
     dataset_test = get_data('test')
     lr = get_scalar_var('learning_rate', 0.01, summary=True)
@@ -149,7 +149,7 @@ def get_config():
                 [(1, 0.1), (82, 0.01), (123, 0.001), (300, 0.0002)])
         ],
         model=Model(n=NUM_UNITS),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=400,
     )
......
@@ -199,7 +199,7 @@ def get_config():
             HumanHyperParamSetter('learning_rate'),
         ],
         model=Model(),
-        step_per_epoch=5000,
+        steps_per_epoch=5000,
         max_epoch=110,
     )
......
@@ -63,7 +63,7 @@ def get_config():
     # prepare dataset
     dataset_train = get_data('train')
-    step_per_epoch = dataset_train.size()
+    steps_per_epoch = dataset_train.size()
     dataset_test = get_data('test')
     lr = get_scalar_var('learning_rate', 0.01, summary=True)
@@ -78,7 +78,7 @@ def get_config():
                 [(1, 0.1), (20, 0.01), (28, 0.001), (50, 0.0001)])
         ],
         model=Model(n=18),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=500,
     )
......
@@ -133,7 +133,7 @@ def get_config(model):
     logger.auto_set_dir()
     dataset = model.get_data()
-    step_per_epoch = dataset.size()
+    steps_per_epoch = dataset.size()
     lr = symbf.get_scalar_var('learning_rate', 1e-4, summary=True)
@@ -145,7 +145,7 @@ def get_config(model):
             ModelSaver(),
             ScheduledHyperParamSetter('learning_rate', [(10, 1e-5), (20, 1e-6)])
         ],
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=20,
     )
......
@@ -148,7 +148,7 @@ def get_config():
     logger.auto_set_dir()
     dataset_train, dataset_test = get_data(True), get_data(False)
-    step_per_epoch = dataset_train.size() * 5
+    steps_per_epoch = dataset_train.size() * 5
     lr = symbf.get_scalar_var('learning_rate', 5e-4, summary=True)
@@ -163,7 +163,7 @@ def get_config():
         ],
         session_config=get_default_sess_config(0.5),
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=500,
     )
......
@@ -107,7 +107,7 @@ def get_config(cifar_classnum):
     # prepare dataset
     dataset_train = get_data('train', cifar_classnum)
-    step_per_epoch = dataset_train.size()
+    steps_per_epoch = dataset_train.size()
     dataset_test = get_data('test', cifar_classnum)
     sess_config = get_default_sess_config(0.5)
@@ -130,7 +130,7 @@ def get_config(cifar_classnum):
         ],
         session_config=sess_config,
         model=Model(cifar_classnum),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=150,
     )
......
@@ -124,7 +124,7 @@ def get_config():
     dataset_train, dataset_test = get_data()
     # how many iterations you want in each epoch
-    step_per_epoch = dataset_train.size()
+    steps_per_epoch = dataset_train.size()
     lr = tf.train.exponential_decay(
         learning_rate=1e-3,
@@ -148,7 +148,7 @@ def get_config():
                 [ScalarStats('cross_entropy_loss'), ClassificationError('incorrect')]),
         ],
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=100,
     )
......
@@ -89,7 +89,7 @@ def get_config():
     logger.auto_set_dir()
     data_train, data_test = get_data()
-    step_per_epoch = data_train.size()
+    steps_per_epoch = data_train.size()
     lr = tf.train.exponential_decay(
         learning_rate=1e-3,
@@ -107,7 +107,7 @@ def get_config():
                 [ScalarStats('cost'), ClassificationError()])
         ],
         model=Model(),
-        step_per_epoch=step_per_epoch,
+        steps_per_epoch=steps_per_epoch,
         max_epoch=350,
     )
......
@@ -5,7 +5,6 @@
 from abc import ABCMeta, abstractmethod
 import tensorflow as tf
-import inspect
 import pickle
 import six
@@ -102,12 +101,7 @@ class ModelDesc(object):
             model_inputs (list[tf.Tensor]): a list of inputs, corresponding to
                 InputVars of this model.
         """
-        if len(inspect.getargspec(self._build_graph).args) == 3:
-            logger.warn("[DEPRECATED] _build_graph(self, input_vars, is_training) is deprecated! \
-                Use _build_graph(self, input_vars) and get_current_tower_context().is_training instead.")
-            self._build_graph(model_inputs, get_current_tower_context().is_training)
-        else:
-            self._build_graph(model_inputs)
+        self._build_graph(model_inputs)
 
     @abstractmethod
     def _build_graph(self, inputs):
......
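The last hunk also drops the compatibility shim for the deprecated two-argument `_build_graph(self, input_vars, is_training)`. As the removed warning itself instructs, subclasses implement the one-argument form and read the training flag from the tower context; a minimal sketch (import paths assumed for this era of tensorpack, `MyModel` is a hypothetical subclass):

```python
from tensorpack.models import ModelDesc                     # assumed import path
from tensorpack.tfutils.tower import get_current_tower_context

class MyModel(ModelDesc):
    def _build_graph(self, inputs):
        # replaces the removed is_training argument
        is_training = get_current_tower_context().is_training
        ...  # build the graph, branching on is_training where needed
```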