Commit faa6f17c authored by Yuxin Wu

change name for .print

parent fc9e45b0
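For orientation, a minimal usage sketch of what this commit changes for callers (illustrative, assembled from the hunks below, not itself part of the diff): the chainable LinearWrap debug method .print() becomes .print_tensor(), and the hand-written keep_prob / tf.nn.dropout pattern in the examples is replaced by the new Dropout layer, which picks up the training flag from the tower context:

    # inside a Model._build_graph(self, input_vars), as in the examples below
    logits = (LinearWrap(image)
              .FullyConnected('fc6', 4096)
              .Dropout('drop0', 0.5)      # new Dropout layer; 0.5 is the keep probability at training time
              .print_tensor()             # renamed from .print(); prints the current tensor, returns the wrapper
              .FullyConnected('fc7', 4096)
              .Dropout('drop1', 0.5)
              .FullyConnected('fc8', out_dim=1000, nl=tf.identity)())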
@@ -25,9 +25,7 @@ mnist_example.get_data = get_data
IMAGE_SIZE = 28
class Model(mnist_example.Model):
def _build_graph(self, input_vars, is_training):
is_training = bool(is_training)
keep_prob = tf.constant(0.5 if is_training else 1.0)
def _build_graph(self, input_vars):
image, label = input_vars
image = tf.expand_dims(image, 3) # add a single channel
......
@@ -72,7 +72,7 @@ class Model(ModelDesc):
return [InputVar(tf.float32, [None, 224, 224, 3], 'input'),
InputVar(tf.int32, [None], 'label') ]
def _build_graph(self, input_vars, is_training):
def _build_graph(self, input_vars):
image, label = input_vars
image = image / 255.0
@@ -97,7 +97,7 @@ class Model(ModelDesc):
def activate(x):
return fa(nonlin(x))
with argscope(BatchNorm, decay=0.9, epsilon=1e-4, use_local_stat=is_training), \
with argscope(BatchNorm, decay=0.9, epsilon=1e-4), \
argscope([Conv2D, FullyConnected], use_bias=False, nl=tf.identity):
logits = (LinearWrap(image)
.Conv2D('conv0', 96, 12, stride=4, padding='VALID')
......
@@ -69,7 +69,7 @@ class Model(ModelDesc):
image = image / 256.0
with argscope(BatchNorm, decay=0.9, epsilon=1e-4, use_local_stat=is_training), \
with argscope(BatchNorm, decay=0.9, epsilon=1e-4), \
argscope(Conv2D, use_bias=False, nl=tf.identity):
logits = (LinearWrap(image)
.Conv2D('conv0', 48, 5, padding='VALID', use_bias=True)
......
@@ -74,7 +74,7 @@ class Model(ModelDesc):
return [InputVar(tf.int32, (None, param.seq_len), 'input'),
InputVar(tf.int32, (None, param.seq_len), 'nextinput') ]
def _build_graph(self, input_vars, is_training):
def _build_graph(self, input_vars):
input, nextinput = input_vars
cell = rnn_cell.BasicLSTMCell(num_units=param.rnn_size)
......
@@ -28,10 +28,8 @@ class Model(ModelDesc):
return [InputVar(tf.float32, (None, 227, 227, 3), 'input'),
InputVar(tf.int32, (None,), 'label') ]
def _build_graph(self, inputs, is_training):
def _build_graph(self, inputs):
# img: 227x227x3
is_training = bool(is_training)
keep_prob = tf.constant(0.5 if is_training else 1.0)
image, label = inputs
......
@@ -8,7 +8,7 @@ import tensorflow as tf
import numpy as np
import os
import argparse
import cPickle as pkl
import pickle as pkl
from tensorpack.train import TrainConfig
from tensorpack.predict import PredictConfig, get_predict_func
@@ -33,8 +33,6 @@ class Model(ModelDesc):
InputVar(tf.int32, (None,), 'label') ]
def _build_graph(self, inputs, is_training):
is_training = bool(is_training)
keep_prob = tf.constant(0.5 if is_training else 1.0)
image, label = inputs
@@ -65,9 +63,10 @@ class Model(ModelDesc):
.MaxPooling('pool5', 2)
# 7
.FullyConnected('fc6', 4096)
.tf.nn.dropout(keep_prob)
.Dropout('drop0', 0.5)
.print_tensor()
.FullyConnected('fc7', 4096)
.tf.nn.dropout(keep_prob)
.Dropout('drop1', 0.5)
.FullyConnected('fc8', out_dim=1000, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='output')
@@ -93,10 +92,10 @@ def run_test(path, input):
outputs = predict_func([im])[0]
prob = outputs[0]
ret = prob.argsort()[-10:][::-1]
print ret
print(ret)
meta = ILSVRCMeta().get_synset_words_1000()
print [meta[k] for k in ret]
print([meta[k] for k in ret])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
......
@@ -49,9 +49,14 @@ class LinearWrap(object):
layer = eval(layer_name)
if hasattr(layer, 'f'):
# this is a registered tensorpack layer
if layer.use_scope:
def f(name, *args, **kwargs):
ret = layer(name, self._t, *args, **kwargs)
return LinearWrap(ret)
else:
def f(*args, **kwargs):
ret = layer(self._t, *args, **kwargs)
return LinearWrap(ret)
return f
else:
if layer_name != 'tf':
@@ -70,7 +75,7 @@ class LinearWrap(object):
def tensor(self):
return self._t
def print(self):
def print_tensor(self):
print(self._t)
return self
......
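Illustrative sketch of the two call conventions the new dispatch in LinearWrap supports (Conv2D keeps the default use_scope=True; SomeLayer stands for a hypothetical layer registered with use_scope=False):

    # use_scope=True (default): the first argument is the variable-scope name,
    # and the wrapped tensor is inserted as the second argument
    l = LinearWrap(image).Conv2D('conv0', 96, 12, stride=4)   # ~ Conv2D('conv0', image, 96, 12, stride=4)

    # use_scope=False: no name is consumed; the tensor is passed first
    l = LinearWrap(image).SomeLayer(0.5)                      # ~ SomeLayer(image, 0.5)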
@@ -22,7 +22,10 @@ def disable_layer_logging():
# can use nonlocal in python3, but how
globals()['_layer_logged'] = ContainEverything()
def layer_register(summary_activation=False, log_shape=True):
def layer_register(
summary_activation=False,
log_shape=True,
use_scope=True):
"""
Register a layer.
:param summary_activation: Define the default behavior of whether to
@@ -33,17 +36,24 @@ def layer_register(summary_activation=False, log_shape=True):
def wrapper(func):
@wraps(func)
def wrapped_func(name, inputs, *args, **kwargs):
def wrapped_func(*args, **kwargs):
if use_scope:
name, inputs = args[0], args[1]
args = args[1:] # actual positional args used to call func
assert isinstance(name, six.string_types), name
else:
assert not log_shape and not summary_activation
inputs = args[0]
name = None
do_summary = kwargs.pop(
'summary_activation', summary_activation)
args = (inputs,) + args
# TODO use inspect.getcallargs to enhance?
# update from current argument scope
actual_args = copy.copy(get_arg_scope()[func.__name__])
actual_args.update(kwargs)
if name is not None:
with tf.variable_scope(name) as scope:
do_log_shape = log_shape and scope.name not in _layer_logged
do_summary = do_summary and scope.name not in _layer_logged
@@ -65,8 +75,13 @@ def layer_register(summary_activation=False, log_shape=True):
add_activation_summary(x, scope.name)
else:
add_activation_summary(outputs, scope.name)
else:
# run the actual function
outputs = func(*args, **actual_args)
return outputs
wrapped_func.f = func # attribute to access the underlying function object
wrapped_func.use_scope = use_scope
return wrapped_func
# need some special handling for sphinx to work with the arguments
......
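A sketch of how the new use_scope flag might be used at registration time (ScaleBy is a hypothetical layer, not part of this commit; the import path is an assumption based on this repo's layout). With use_scope=False, the decorator requires log_shape and summary_activation to be off and calls the function directly, without consuming a name or opening a variable scope:

    # assumed import, per this repo layout: from tensorpack.models._common import layer_register
    @layer_register(log_shape=False, summary_activation=False, use_scope=False)
    def ScaleBy(x, factor):
        return x * factor

    y = ScaleBy(x, 0.1)             # scope-less call form: no name argument
    z = Conv2D('conv0', x, 32, 3)   # default use_scope=True: the name still comes first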
@@ -11,7 +11,7 @@ from .batch_norm import BatchNorm
__all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU']
@layer_register(log_shape=False)
@layer_register()
def Maxout(x, num_unit):
"""
Maxout as in `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.
......
@@ -7,8 +7,10 @@ import re
from ..utils import logger
from ..utils.utils import *
from .model_desc import get_current_tower_context
from ._common import layer_register
__all__ = ['regularize_cost', 'l2_regularizer', 'l1_regularizer']
__all__ = ['regularize_cost', 'l2_regularizer', 'l1_regularizer', 'Dropout']
@memoized
def _log_regularizer(name):
@@ -36,3 +38,10 @@ def regularize_cost(regex, func, name=None):
return 0
return tf.add_n(costs, name=name)
@layer_register(log_shape=False)
def Dropout(x, prob=0.5):
is_training = get_current_tower_context().is_training
keep_prob = tf.constant(prob if is_training else 1.0)
return tf.nn.dropout(x, keep_prob)
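A usage sketch of the new Dropout layer (illustrative): the training flag is now read from get_current_tower_context() instead of being threaded through _build_graph, so call sites only pass a name and the keep probability:

    # inside a Model._build_graph(self, input_vars)
    fc6 = FullyConnected('fc6', image, 4096)
    fc6 = Dropout('drop0', fc6, 0.5)   # keeps 50% of units at training time, all units at inference
    # or chained, as in the examples above:
    #   .FullyConnected('fc6', 4096).Dropout('drop0', 0.5)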