Commit 73f94df2 authored by Patrick Wieschollek, committed by Yuxin Wu

Enhance Tensors-results from Layers (fix #228) (#253)

After writing `x = Conv2D(...)`, one can access the layer's variables
via `x.variables.W` and `x.variables.b`, if they exist.

This is much better than writing:
`x.name.replace('/output:0', "/W:0").replace('tower0/', '')`

which would also break the InferenceRunner.
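
A minimal usage sketch of the new attribute (the scope name, layer arguments, and input placeholder below are illustrative, not taken from this commit):

```python
import tensorflow as tf
from tensorpack import Conv2D

# A stand-in input; any NHWC float tensor works here.
image = tf.placeholder(tf.float32, [None, 28, 28, 3], name='input')
x = Conv2D('conv0', image, out_channel=32, kernel_shape=3)

# Direct handles to the layer's parameters, no name surgery needed:
W = x.variables.W  # the convolution kernel, shape [3, 3, 3, 32]
b = x.variables.b  # only present because use_bias defaults to True
```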
parent 0be066fe
--- a/.travis.yml
+++ b/.travis.yml
@@ -43,6 +43,7 @@ install:
 before_script:
   - flake8 --version
   - protoc --version
   - python -c "import cv2; print('OpenCV '+ cv2.__version__)"
+  - python -c "import tensorflow as tf; print('TensorFlow '+ tf.__version__)"
--- a/tensorpack/models/common.py
+++ b/tensorpack/models/common.py
@@ -16,7 +16,11 @@ from ..utils.develop import building_rtfd
 _LAYER_LOGGED = set()
 _LAYER_REGISTERED = {}

-__all__ = ['layer_register', 'disable_layer_logging', 'get_registered_layer']
+__all__ = ['layer_register', 'disable_layer_logging', 'get_registered_layer', 'EmptyObject']
+
+
+class EmptyObject(object):
+    pass


 def _register(name, func):
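
The empty class body is deliberate: a bare `object()` instance cannot accept new attributes, while an instance of a trivial subclass can. A quick illustration (mine, not part of the diff):

```python
ns = EmptyObject()
ns.W = 'anything'     # fine: subclass instances have a __dict__

plain = object()
plain.W = 'anything'  # AttributeError: 'object' object has no attribute 'W'
```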
--- a/tensorpack/models/conv2d.py
+++ b/tensorpack/models/conv2d.py
@@ -4,7 +4,7 @@
 # Author: Yuxin Wu <ppwwyyxx@gmail.com>

 import tensorflow as tf
-from .common import layer_register
+from .common import layer_register, EmptyObject
 from ..utils.argtools import shape2d, shape4d

 __all__ = ['Conv2D', 'Deconv2D']
@@ -33,7 +33,7 @@ def Conv2D(x, out_channel, kernel_shape,
         use_bias (bool): whether to use bias.

     Returns:
-        tf.Tensor named ``output``.
+        tf.Tensor named ``output`` with attribute `variables`.

     Variable Names:
@@ -58,6 +58,7 @@ def Conv2D(x, out_channel, kernel_shape,
         b_init = tf.constant_initializer()

     W = tf.get_variable('W', filter_shape, initializer=W_init)
+
     if use_bias:
         b = tf.get_variable('b', [out_channel], initializer=b_init)
@@ -69,7 +70,13 @@ def Conv2D(x, out_channel, kernel_shape,
         outputs = [tf.nn.conv2d(i, k, stride, padding, data_format=data_format)
                    for i, k in zip(inputs, kernels)]
         conv = tf.concat(outputs, channel_axis)
-    return nl(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
+    ret = nl(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
+    ret.variables = EmptyObject()
+    ret.variables.W = W
+    if use_bias:
+        ret.variables.b = b
+    return ret


 class StaticDynamicShape(object):
@@ -108,7 +115,7 @@ def Deconv2D(x, out_shape, kernel_shape,
         use_bias (bool): whether to use bias.

     Returns:
-        tf.Tensor: a NHWC tensor named ``output``.
+        tf.Tensor: a NHWC tensor named ``output`` with attribute `variables`.

     Variable Names:
@@ -157,4 +164,10 @@ def Deconv2D(x, out_shape, kernel_shape,
     conv = tf.nn.conv2d_transpose(
         x, W, out_shape_dyn, stride4d, padding=padding, data_format=data_format)
     conv.set_shape(tf.TensorShape([None] + shp3_static))
-    return nl(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
+    ret = nl(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
+    ret.variables = EmptyObject()
+    ret.variables.W = W
+    if use_bias:
+        ret.variables.b = b
+    return ret
--- a/tensorpack/models/fc.py
+++ b/tensorpack/models/fc.py
@@ -5,7 +5,7 @@
 import tensorflow as tf

-from .common import layer_register
+from .common import layer_register, EmptyObject
 from ..tfutils import symbolic_functions as symbf

 __all__ = ['FullyConnected']
@@ -27,7 +27,7 @@ def FullyConnected(x, out_dim,
         use_bias (bool): whether to use bias.

     Returns:
-        tf.Tensor: a NC tensor named ``output``.
+        tf.Tensor: a NC tensor named ``output`` with attribute `variables`.

     Variable Names:
@@ -46,4 +46,11 @@ def FullyConnected(x, out_dim,
     if use_bias:
         b = tf.get_variable('b', [out_dim], initializer=b_init)
     prod = tf.nn.xw_plus_b(x, W, b) if use_bias else tf.matmul(x, W)
-    return nl(prod, name='output')
+    ret = nl(prod, name='output')
+    ret.variables = EmptyObject()
+    ret.variables.W = W
+    if use_bias:
+        ret.variables.b = b
+    return ret
--- a/tensorpack/models/nonlin.py
+++ b/tensorpack/models/nonlin.py
@@ -5,7 +5,7 @@
 import tensorflow as tf

-from .common import layer_register
+from .common import layer_register, EmptyObject
 from .batch_norm import BatchNorm

 __all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU']
@@ -54,7 +54,11 @@ def PReLU(x, init=0.001, name='output'):
     init = tf.constant_initializer(init)
     alpha = tf.get_variable('alpha', [], initializer=init)
     x = ((1 + alpha) * x + (1 - alpha) * tf.abs(x))
-    return tf.multiply(x, 0.5, name=name)
+    ret = tf.multiply(x, 0.5, name=name)
+    ret.variables = EmptyObject()
+    ret.variables.alpha = alpha
+    return ret


 @layer_register(use_scope=False, log_shape=False)
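
The same pattern gives direct access to PReLU's learned slope. A short sketch, assuming PReLU is called with a scope name like the other registered layers (the scope name and input shape are illustrative):

```python
import tensorflow as tf
from tensorpack import PReLU

feat = tf.placeholder(tf.float32, [None, 64], name='feat')
out = PReLU('prelu', feat)

# The shared slope variable rides along on the output tensor:
alpha = out.variables.alpha
```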