Commit 0f90d4c2 authored by Yuxin Wu

Clean up '-summary' in stats names. Clean up some uses of get_scalar_var.

parent a55d81ca
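One half of this change is a mechanical renaming: scalar summaries that used to be written under a '-summary' suffixed tag are now written under the plain stat name. A minimal sketch of the before/after (the `kt` tensor and its name are placeholders, not taken from any particular file in the diff below):

```python
import tensorflow as tf

kt = tf.placeholder(tf.float32, name='kt')   # stand-in for any tracked scalar

# before this commit: the stat appeared in TensorBoard under a suffixed tag
# tf.summary.scalar('kt-summary', kt)

# after this commit: the summary tag is simply the stat name
tf.summary.scalar('kt', kt)
```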
@@ -135,7 +135,7 @@ class Model(ModelDesc):
             self.cost, tf.reduce_mean(importance, name='importance'))

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 0.001, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.001, trainable=False)
         opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
         gradprocs = [MapGradient(lambda grad: tf.clip_by_average_norm(grad, 0.1)),
@@ -126,7 +126,8 @@ class Model(ModelDesc):
             decoded_boxes = tf.identity(decoded_boxes, name='fastrcnn_fg_boxes')

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 0.003, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
+        tf.summary.scalar('learning_rate', lr)
         factor = get_batch_factor()
         if factor != 1:
@@ -126,7 +126,7 @@ class Model(GANModelDesc):
         self.g_loss = L_neg
         add_moving_summary(L_pos, L_neg, eq, measure, self.d_loss)
-        tf.summary.scalar('kt-summary', kt)
+        tf.summary.scalar('kt', kt)

         self.collect_variables()
@@ -157,7 +157,7 @@ class Model(GANModelDesc):
         add_moving_summary(recon_loss_A, recon_loss_B, self.g_loss, self.d_loss)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
         return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
@@ -95,7 +95,7 @@ class Model(GANModelDesc):
         self.collect_variables()

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 2e-4, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
         return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
@@ -96,7 +96,7 @@ class Model(ModelDesc):
         add_moving_summary(costs + [wrong, self.cost])

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 3e-5, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=3e-5, trainable=False)
         opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
         return optimizer.apply_grad_processors(
             opt, [gradproc.ScaleGradient(
@@ -207,5 +207,5 @@ class ImageNetModel(ModelDesc):

     def _get_optimizer(self):
         lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
-        tf.summary.scalar('learning_rate-summary', lr)
+        tf.summary.scalar('learning_rate', lr)
         return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
@@ -30,7 +30,7 @@ class Model(ModelDesc):
         summary.add_moving_summary(self.cost)

     def _get_optimizer(self):
-        lr = symbolic_functions.get_scalar_var('learning_rate', 5e-3, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
         return tf.train.AdamOptimizer(lr)
@@ -78,7 +78,8 @@ class Model(ModelDesc):
         self.cost = tf.add_n([cost, wd_cost], name='cost')

     def _get_optimizer(self):
-        lr = symbf.get_scalar_var('learning_rate', 1e-2, summary=True)
+        lr = tf.get_variable('learning_rate', initializer=1e-2, trainable=False)
+        tf.summary.scalar('lr', lr)
         return tf.train.AdamOptimizer(lr, epsilon=1e-3)
@@ -129,9 +129,12 @@ class Monitors(Callback):
         for val in summary.value:
             if val.WhichOneof('value') == 'simple_value':
                 val.tag = re.sub('tower[0-9]+/', '', val.tag)  # TODO move to subclasses
+                # TODO This hack not needed any more, can remove this in the future
+                suffix = '-summary'  # tensorflow#6150, tensorboard#59
+                if val.tag.endswith(suffix):
+                    val.tag = val.tag[:-len(suffix)]
                 self._dispatch(lambda m: m.process_scalar(val.tag, val.simple_value))

         self._dispatch(lambda m: m.process_summary(summary))
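The Monitors hunk above keeps a compatibility shim: before a scalar is dispatched to the monitor backends, its tag is stripped of tower prefixes and of a trailing '-summary' (the suffix the referenced TensorFlow/TensorBoard issues can introduce), so old and new tags land on the same stat. A standalone sketch of that normalization, using a hypothetical tag value:

```python
import re

def normalize_tag(tag, suffix='-summary'):
    """Mirror of the tag clean-up done in Monitors before process_scalar()."""
    tag = re.sub('tower[0-9]+/', '', tag)   # drop tower prefixes
    if tag.endswith(suffix):                # drop the legacy '-summary' suffix
        tag = tag[:-len(suffix)]
    return tag

print(normalize_tag('tower0/learning_rate-summary'))   # -> learning_rate
```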
@@ -246,7 +246,7 @@ def add_moving_summary(*args, **kwargs):
         ema_ops.append(ema_op)
         with tf.name_scope(None):
             # cannot add it into colocate group -- will force everything to cpus
-            tf.summary.scalar(name + '-summary', ema_op)  # write the EMA value as a summary
+            tf.summary.scalar(name, ema_op)  # write the EMA value as a summary
     if coll is not None:
         for op in ema_ops:
             # TODO a new collection to summary every step?
@@ -154,9 +154,11 @@ def huber_loss(x, delta=1, name='huber_loss'):
     return tf.where(cond, l2, l1, name=name)


+# TODO remove this in the future
 def get_scalar_var(name, init_value, summary=False, trainable=False):
     """
-    Get a scalar float variable with certain initial value
+    Get a scalar float variable with certain initial value.
+    You can just call `tf.get_variable(name, initializer=init_value, trainable=False)` instead.

     Args:
         name (str): name of the variable.
@@ -170,7 +172,7 @@ def get_scalar_var(name, init_value, summary=False, trainable=False):
                           trainable=trainable)
     if summary:
         # this is recognized in callbacks.StatHolder
-        tf.summary.scalar(name + '-summary', ret)
+        tf.summary.scalar(name, ret)
     return ret
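As the updated docstring suggests, the deprecated helper can be replaced by a direct call at the caller's side. A rough sketch of the equivalent calls (variable name and initial value are illustrative); the explicit tf.summary.scalar line reproduces what summary=True used to do, now without the '-summary' suffix:

```python
import tensorflow as tf

# roughly what get_scalar_var('learning_rate', 1e-3, summary=True) provided
lr = tf.get_variable('learning_rate', initializer=1e-3, trainable=False)
tf.summary.scalar('learning_rate', lr)   # tag is the plain name after this commit
```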