Commit 0ffdcc44 authored by Yuxin Wu, committed by GitHub

Accept tf.layers compatible argument names (#635)

* Use tflayers

* some docs

* Use decorator for argument renaming

* rename parse_args
parent adca1cd7
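
To illustrate the user-facing effect of this commit, the sketch below shows the same layers called with the legacy tensorpack argument names and with the new tf.layers-compatible names. The legacy names keep working because they are remapped by the convert_to_tflayer_args decorator introduced further down; the placeholder shape and layer names here are made up for illustration.

    # Minimal sketch (assumed shapes and imports) of the two calling styles accepted after this change.
    import tensorflow as tf
    from tensorpack import Conv2D, FullyConnected

    image = tf.placeholder(tf.float32, [None, 28, 28, 1])

    # Legacy tensorpack names, remapped internally (out_channel->filters, kernel_shape->kernel_size,
    # stride->strides, nl->activation, W_init->kernel_initializer):
    l = Conv2D('conv_old', image, out_channel=32, kernel_shape=3, stride=1, nl=tf.nn.relu)

    # New tf.layers-compatible names:
    l = Conv2D('conv_new', l, filters=32, kernel_size=3, strides=1, activation=tf.nn.relu)
    l = FullyConnected('fc', l, units=10, activation=tf.identity)
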
......@@ -376,7 +376,7 @@ def autodoc_skip_member(app, what, name, obj, skip, options):
'PrefetchOnGPUs',
'guided_relu', 'saliency_map', 'get_scalar_var', 'psnr',
'prediction_incorrect', 'huber_loss',
'prediction_incorrect', 'huber_loss', 'SoftMax'
]:
return True
if name in ['get_data', 'size', 'reset_state']:
......
......@@ -51,9 +51,9 @@ class Model(ModelDesc):
.Conv2D('conv2')
.MaxPooling('pool1', 2)
.Conv2D('conv3')
.FullyConnected('fc0', 512, nl=tf.nn.relu)
.FullyConnected('fc0', 512, activation=tf.nn.relu)
.Dropout('dropout', 0.5)
.FullyConnected('fc1', out_dim=10, nl=tf.identity)())
.FullyConnected('fc1', 10, activation=tf.identity)())
tf.nn.softmax(logits, name='prob')   # a Bx10 tensor with probabilities
......
......@@ -22,7 +22,7 @@ def _global_import(name):
_CURR_DIR = os.path.dirname(__file__)
_SKIP = ['utils', 'registry']
_SKIP = ['utils', 'registry', 'tflayer']
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
......
......@@ -3,6 +3,6 @@
# File: common.py
from .registry import layer_register # noqa
from .utils import VariableHolder, rename_get_variable # noqa
from .utils import VariableHolder # noqa
__all__ = ['layer_register', 'VariableHolder']
......@@ -4,133 +4,169 @@
import tensorflow as tf
from .common import layer_register, VariableHolder, rename_get_variable
from .common import layer_register, VariableHolder
from ..tfutils.common import get_tf_version_number
from ..utils.argtools import shape2d, shape4d, get_data_format
from .tflayer import rename_get_variable, convert_to_tflayer_args
__all__ = ['Conv2D', 'Deconv2D']
@layer_register(log_shape=True)
def Conv2D(x, out_channel, kernel_shape,
padding='SAME', stride=1,
W_init=None, b_init=None,
activation=tf.identity, split=1, use_bias=True,
data_format='channels_last', dilation_rate=1):
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv2D(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
split=1):
"""
2D convolution on 4D inputs.
Args:
x (tf.Tensor): a 4D tensor.
Must have known number of channels, but can have other unknown dimensions.
out_channel (int): number of output channel.
kernel_shape: (h, w) tuple or a int.
stride: (h, w) tuple or a int.
padding (str): 'valid' or 'same'. Case insensitive.
split (int): Split channels as used in Alexnet. Defaults to 1 (no split).
W_init: initializer for W. Defaults to `variance_scaling_initializer(2.0)`, i.e. kaiming-normal.
b_init: initializer for b. Defaults to zero.
use_bias (bool): whether to use bias.
dilation_rate: (h, w) tuple or a int.
Returns:
tf.Tensor named ``output`` with attribute `variables`.
A wrapper around `tf.layers.Conv2D`.
Some differences to maintain backward-compatibility:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'.
3. Support 'split' argument to do group conv.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
data_format = get_data_format(data_format, tfmode=False)
in_shape = x.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
assert in_channel % split == 0
assert out_channel % split == 0
assert dilation_rate == 1 or get_tf_version_number() >= 1.5, 'TF ver. 1.5 or greater required for dilations'
kernel_shape = shape2d(kernel_shape)
padding = padding.upper()
filter_shape = kernel_shape + [in_channel / split, out_channel]
stride = shape4d(stride, data_format=data_format)
kw_args = dict(data_format=data_format)
if get_tf_version_number() >= 1.5:
kw_args['dilations'] = shape4d(dilation_rate, data_format=data_format)
if W_init is None:
# W_init = tf.variance_scaling_initializer(scale=2.0)
W_init = tf.contrib.layers.variance_scaling_initializer(2.0)
if b_init is None:
b_init = tf.constant_initializer()
W = tf.get_variable('W', filter_shape, initializer=W_init)
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=b_init)
if split == 1:
conv = tf.nn.conv2d(x, W, stride, padding, **kw_args)
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
inputs = tf.split(x, split, channel_axis)
# group conv implementation
data_format = get_data_format(data_format, tfmode=False)
in_shape = inputs.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
assert in_channel % split == 0
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Not supported by group conv now!"
out_channel = filters
assert out_channel % split == 0
assert dilation_rate == (1, 1) or get_tf_version_number() >= 1.5, 'TF>=1.5 required for group dilated conv'
kernel_shape = shape2d(kernel_size)
filter_shape = kernel_shape + [in_channel / split, out_channel]
stride = shape4d(strides, data_format=data_format)
kwargs = dict(data_format=data_format)
if get_tf_version_number() >= 1.5:
kwargs['dilations'] = shape4d(dilation_rate, data_format=data_format)
W = tf.get_variable(
'W', filter_shape, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [out_channel], initializer=bias_initializer)
inputs = tf.split(inputs, split, channel_axis)
kernels = tf.split(W, split, 3)
outputs = [tf.nn.conv2d(i, k, stride, padding, **kw_args)
outputs = [tf.nn.conv2d(i, k, stride, padding.upper(), **kwargs)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
if activation is None:
activation = tf.identity
ret = activation(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
ret = activation(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
@layer_register(log_shape=True)
def Deconv2D(x, out_channel, kernel_shape,
stride, padding='SAME',
W_init=None, b_init=None,
activation=tf.identity, use_bias=True,
data_format='channels_last'):
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size', 'strides'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Deconv2D(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None):
"""
2D deconvolution on 4D inputs.
Args:
x (tf.Tensor): a tensor of shape NHWC.
Must have known number of channels, but can have other unknown dimensions.
out_channel: the output number of channel.
kernel_shape: (h, w) tuple or a int.
stride: (h, w) tuple or a int.
padding (str): 'valid' or 'same'. Case insensitive.
W_init: initializer for W. Defaults to `tf.variance_scaling_initializer(2.0)`, i.e. kaiming-normal.
b_init: initializer for b. Defaults to zero.
use_bias (bool): whether to use bias.
Returns:
tf.Tensor: a NHWC tensor named ``output`` with attribute `variables`.
A wrapper around `tf.layers.Conv2DTranspose`.
Some differences to maintain backward-compatibility:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if W_init is None:
# W_init = tf.variance_scaling_initializer(scale=2.0)
W_init = tf.contrib.layers.variance_scaling_initializer(2.0)
if b_init is None:
b_init = tf.constant_initializer()
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2DTranspose(
out_channel, kernel_shape,
strides=stride, padding=padding,
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=W_init,
bias_initializer=b_init,
trainable=True)
ret = layer.apply(x, scope=tf.get_variable_scope())
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
......
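
Conv2D and Deconv2D now wrap `tf.layers.Conv2D` and `tf.layers.Conv2DTranspose`, keeping the kaiming-normal default initializer, the 'same' default padding, and (for Conv2D) the `split` argument for group convolution. A small usage sketch with the new argument names follows; the shapes and layer names are invented for illustration.

    # Illustrative only: Conv2D / Deconv2D called with tf.layers-style names (shapes are made up).
    import tensorflow as tf
    from tensorpack import Conv2D, Deconv2D

    x = tf.placeholder(tf.float32, [None, 56, 56, 64])
    y = Conv2D('conv', x, filters=128, kernel_size=3)              # padding defaults to 'same'
    up = Deconv2D('up', y, filters=64, kernel_size=4, strides=2)   # transposed conv, 2x upsampling
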
......@@ -5,50 +5,50 @@
import tensorflow as tf
from .common import layer_register, rename_get_variable, VariableHolder
from .common import layer_register, VariableHolder
from .tflayer import convert_to_tflayer_args, rename_get_variable
from ..tfutils import symbolic_functions as symbf
__all__ = ['FullyConnected']
@layer_register(log_shape=True)
def FullyConnected(x, out_dim,
W_init=None, b_init=None,
activation=tf.identity, use_bias=True):
@convert_to_tflayer_args(
args_names=['units'],
name_mapping={'out_dim': 'units'})
def FullyConnected(
inputs,
units,
activation=None,
use_bias=True,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None):
"""
Fully-Connected layer, takes a N>1D tensor and returns a 2D tensor.
It is an equivalent of `tf.layers.dense` except for naming conventions.
Args:
x (tf.Tensor): a tensor to be flattened except for the first dimension.
out_dim (int): output dimension
W_init: initializer for W. Defaults to `variance_scaling_initializer(2.0)`, i.e. kaiming-normal.
b_init: initializer for b. Defaults to zero.
nl: a nonlinearity function
use_bias (bool): whether to use bias.
Returns:
tf.Tensor: a NC tensor named ``output`` with attribute `variables`.
A wrapper around `tf.layers.Dense`.
One difference to maintain backward-compatibility:
Default weight initializer is variance_scaling_initializer(2.0).
Variable Names:
* ``W``: weights of shape [in_dim, out_dim]
* ``b``: bias
"""
x = symbf.batch_flatten(x)
if W_init is None:
# W_init = tf.variance_scaling_initializer(2.0)
W_init = tf.contrib.layers.variance_scaling_initializer(2.0)
if b_init is None:
b_init = tf.constant_initializer()
inputs = symbf.batch_flatten(inputs)
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Dense(
out_dim, activation=activation, use_bias=use_bias,
kernel_initializer=W_init, bias_initializer=b_init,
trainable=True)
ret = layer.apply(x, scope=tf.get_variable_scope())
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
......
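
The main behavioural difference from calling `tf.layers.Dense` directly is that FullyConnected still flattens N-D inputs with `batch_flatten` first, so a conv feature map can be passed in as-is. A minimal sketch with made-up shapes:

    # Sketch: FullyConnected flattens any N-D input to 2D before the dense layer.
    import tensorflow as tf
    from tensorpack import FullyConnected

    feat = tf.placeholder(tf.float32, [None, 7, 7, 128])
    logits = FullyConnected('fc_out', feat, units=10)   # internally reshaped to [None, 7*7*128]
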
......@@ -9,6 +9,7 @@ from .shape_utils import StaticDynamicShape
from .common import layer_register
from ..utils.argtools import shape2d, get_data_format
from ._test import TestModel
from .tflayer import convert_to_tflayer_args
__all__ = ['MaxPooling', 'FixedUnPooling', 'AvgPooling', 'GlobalAvgPooling',
......@@ -16,42 +17,42 @@ __all__ = ['MaxPooling', 'FixedUnPooling', 'AvgPooling', 'GlobalAvgPooling',
@layer_register(log_shape=True)
def MaxPooling(x, shape, stride=None, padding='VALID', data_format='channels_last'):
@convert_to_tflayer_args(
args_names=['pool_size', 'strides'],
name_mapping={'shape': 'pool_size', 'stride': 'strides'})
def MaxPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
Max Pooling on 4D tensors.
Args:
x (tf.Tensor): a 4D tensor.
shape: int or (h, w) tuple
stride: int or (h, w) tuple. Defaults to be the same as shape.
padding (str): 'valid' or 'same'.
Returns:
tf.Tensor named ``output``.
Same as `tf.layers.MaxPooling2D`. Default strides are equal to pool_size.
"""
if stride is None:
stride = shape
ret = tf.layers.max_pooling2d(x, shape, stride, padding, data_format=data_format)
if strides is None:
strides = pool_size
layer = tf.layers.MaxPooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output')
@layer_register(log_shape=True)
def AvgPooling(x, shape, stride=None, padding='VALID', data_format='channels_last'):
@convert_to_tflayer_args(
args_names=['pool_size', 'strides'],
name_mapping={'shape': 'pool_size', 'stride': 'strides'})
def AvgPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
Average Pooling on 4D tensors.
Args:
x (tf.Tensor): a 4D tensor.
shape: int or (h, w) tuple
stride: int or (h, w) tuple. Defaults to be the same as shape.
padding (str): 'valid' or 'same'.
Returns:
tf.Tensor named ``output``.
Same as `tf.layers.AveragePooling2D`. Default strides are equal to pool_size.
"""
if stride is None:
stride = shape
ret = tf.layers.average_pooling2d(x, shape, stride, padding, data_format=data_format)
if strides is None:
strides = pool_size
layer = tf.layers.AveragePooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output')
......@@ -62,7 +63,8 @@ def GlobalAvgPooling(x, data_format='channels_last'):
<http://arxiv.org/abs/1312.4400>`_.
Args:
x (tf.Tensor): a NHWC tensor.
x (tf.Tensor): a 4D tensor.
Returns:
tf.Tensor: a NC tensor named ``output``.
"""
......
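
The pooling layers now instantiate the `tf.layers` pooling classes, with `strides` defaulting to `pool_size`; the legacy `shape`/`stride` names are still accepted through the renaming decorator. A usage sketch (made-up shapes):

    # Both the legacy and the tf.layers-style names are accepted.
    import tensorflow as tf
    from tensorpack import MaxPooling, AvgPooling

    x = tf.placeholder(tf.float32, [None, 32, 32, 16])
    p1 = MaxPooling('pool1', x, pool_size=2)                          # strides defaults to pool_size
    p2 = AvgPooling('pool2', p1, shape=3, stride=2, padding='same')   # legacy names, remapped
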
......@@ -3,7 +3,6 @@
import tensorflow as tf
import inspect
from functools import wraps
import six
import re
......@@ -11,7 +10,6 @@ import copy
from ..tfutils.argscope import get_arg_scope
from ..tfutils.model_utils import get_shape_str
from ..utils.argtools import get_data_format
from ..utils import logger
# make sure each layer is only logged once
......@@ -21,18 +19,6 @@ _LAYER_REGISTRY = {}
__all__ = ['layer_register']
def map_tfargs(kwargs):
df = kwargs.pop('data_format', None)
if df is not None:
df = get_data_format(df, tfmode=True)
kwargs['data_format'] = df
old_nl = kwargs.pop('nl', None)
if old_nl is not None:
kwargs['activation'] = lambda x, name=None: old_nl(x, name=name)
return kwargs
def _register(name, func):
if name in _LAYER_REGISTRY:
raise ValueError("Layer named {} is already registered!".format(name))
......@@ -119,14 +105,13 @@ def layer_register(
actual_args = copy.copy(get_arg_scope()[func.__name__])
# explicit kwargs overwrite argscope
actual_args.update(kwargs)
if six.PY3:
# explicit positional args also override argscope. only work in PY3
posargmap = inspect.signature(func).bind_partial(*args).arguments
for k in six.iterkeys(posargmap):
if k in actual_args:
del actual_args[k]
actual_args = map_tfargs(actual_args)
# if six.PY3:
# # explicit positional args also override argscope. only work in PY3
# posargmap = inspect.signature(func).bind_partial(*args).arguments
# for k in six.iterkeys(posargmap):
# if k in actual_args:
# del actual_args[k]
if name is not None: # use scope
with tf.variable_scope(name) as scope:
# this name is only used to suppress logging, doesn't hurt to do some heuristics
......
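
The registry change above moves the old argument remapping into tflayer.py and keeps the rule that explicit kwargs at a call site override argscope defaults. A rough sketch of that interaction, with arbitrary parameter values and the usual tensorpack argscope API assumed:

    # Explicit kwargs at the call site still override argscope defaults.
    import tensorflow as tf
    from tensorpack import argscope, Conv2D

    image = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu):
        l = Conv2D('c1', image, 32)              # kernel_size=3 and relu come from argscope
        l = Conv2D('c2', l, 64, kernel_size=5)   # explicit kwarg overrides argscope
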
......@@ -114,19 +114,30 @@ def regularize_cost_from_collection(name='regularize_cost'):
@layer_register(use_scope=None)
def Dropout(x, keep_prob=0.5, is_training=None, noise_shape=None):
def Dropout(x, *args, **kwargs):
"""
Dropout layer as in the paper `Dropout: a Simple Way to Prevent
Neural Networks from Overfitting <http://dl.acm.org/citation.cfm?id=2670313>`_.
Args:
keep_prob (float): the probability that each element is kept. It is only used
when is_training=True.
is_training (bool): If None, will use the current :class:`tensorpack.tfutils.TowerContext`
to figure out.
noise_shape: same as `tf.nn.dropout`.
Same as `tf.layers.dropout`.
However, for historical reasons, the first positional argument is
interpreted as keep_prob rather than the drop rate.
Explicitly use the `rate=` keyword argument to avoid the ambiguity.
"""
if is_training is None:
is_training = get_current_tower_context().is_training
return tf.layers.dropout(
x, rate=1 - keep_prob, noise_shape=noise_shape, training=is_training)
if 'is_training' in kwargs:
kwargs['training'] = kwargs.pop('is_training')
if len(args) > 0:
logger.warn(
"The first positional argument to tensorpack.Dropout is the probability to keep rather than to drop. "
"This is different from the rate argument in tf.layers.Dropout due to historical reasons. "
"To mimic tf.layers.Dropout, explicitly use keyword argument 'rate' instead")
rate = 1 - args[0]
elif 'keep_prob' in kwargs:
assert 'rate' not in kwargs, "Cannot set both keep_prob and rate!"
rate = 1 - kwargs.pop('keep_prob')
elif 'rate' in kwargs:
rate = kwargs.pop('rate')
else:
rate = 0.5
if kwargs.get('training', None) is None:
kwargs['training'] = get_current_tower_context().is_training
return tf.layers.dropout(x, rate=rate, **kwargs)
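
The rewritten Dropout therefore accepts three spellings. A small sketch of each, with arbitrary shapes and an explicit `training` flag so the example does not depend on an active TowerContext:

    import tensorflow as tf
    from tensorpack import Dropout

    x = tf.placeholder(tf.float32, [None, 512])
    y1 = Dropout('drop_legacy', x, 0.5, training=True)          # positional arg = keep_prob (legacy), warns
    y2 = Dropout('drop_keep', x, keep_prob=0.9, training=True)  # keep 90% of activations
    y3 = Dropout('drop_rate', x, rate=0.1, training=True)       # tf.layers meaning: drop 10% of activations
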
......@@ -4,6 +4,7 @@
import tensorflow as tf
from .common import layer_register
from ..utils.develop import log_deprecated
__all__ = ['SoftMax']
......@@ -28,6 +29,7 @@ def SoftMax(x, use_temperature=False, temperature_init=1.0):
* ``invtemp``: 1.0/temperature.
"""
log_deprecated("models.SoftMax", "Please implement it by yourself!", "2018-05-01")
if use_temperature:
t = tf.get_variable('invtemp', [],
initializer=tf.constant_initializer(1.0 / float(temperature_init)))
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: tflayer.py
import tensorflow as tf
import six
import functools
from ..utils.argtools import get_data_format
from ..tfutils.common import get_tf_version_number
from ..tfutils.varreplace import custom_getter_scope
def map_common_tfargs(kwargs):
df = kwargs.pop('data_format', None)
if df is not None:
df = get_data_format(df, tfmode=True)
kwargs['data_format'] = df
old_nl = kwargs.pop('nl', None)
if old_nl is not None:
kwargs['activation'] = lambda x, name=None: old_nl(x, name=name)
if 'W_init' in kwargs:
kwargs['kernel_initializer'] = kwargs.pop('W_init')
if 'b_init' in kwargs:
kwargs['bias_initializer'] = kwargs.pop('b_init')
return kwargs
def convert_to_tflayer_args(args_names, name_mapping):
"""
After applying this decorator:
1. data_format becomes tf.layers style
2. nl becomes activation
3. initializers are renamed
4. positional args are transformed to corresponding kwargs, according to args_names
5. kwargs are mapped to tf.layers names if needed, by name_mapping
"""
def decorator(func):
@functools.wraps(func)
def decorated_func(inputs, *args, **kwargs):
kwargs = map_common_tfargs(kwargs)
posarg_dic = {}
assert len(args) <= len(args_names), \
"Please use kwargs instead of positional args to call this model, " \
"except for the following arguments: {}".format(', '.join(args_names))
for pos_arg, name in zip(args, args_names):
posarg_dic[name] = pos_arg
ret = {}
for name, arg in six.iteritems(kwargs):
newname = name_mapping.get(name, None)
if newname is not None:
assert newname not in kwargs, \
"Argument {} and {} conflicts!".format(name, newname)
else:
newname = name
ret[newname] = arg
ret.update(posarg_dic) # Let pos arg overwrite kw arg, for argscope to work
return func(inputs, **ret)
return decorated_func
return decorator
def rename_get_variable(mapping):
"""
Args:
mapping(dict): an old -> new mapping for variable basename. e.g. {'kernel': 'W'}
"""
def custom_getter(getter, name, *args, **kwargs):
splits = name.split('/')
basename = splits[-1]
if basename in mapping:
basename = mapping[basename]
splits[-1] = basename
name = '/'.join(splits)
return getter(name, *args, **kwargs)
return custom_getter_scope(custom_getter)
def monkeypatch_tf_layers():
if get_tf_version_number() < 1.4:
if not hasattr(tf.layers, 'Dense'):
from tensorflow.python.layers.core import Dense
tf.layers.Dense = Dense
from tensorflow.python.layers.normalization import BatchNormalization
tf.layers.BatchNormalization = BatchNormalization
from tensorflow.python.layers.convolutional import Conv2DTranspose, Conv2D
tf.layers.Conv2DTranspose = Conv2DTranspose
tf.layers.Conv2D = Conv2D
from tensorflow.python.layers.pooling import MaxPooling2D, AveragePooling2D
tf.layers.MaxPooling2D = MaxPooling2D
tf.layers.AveragePooling2D = AveragePooling2D
monkeypatch_tf_layers()
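
tflayer.py is the new home of these compatibility helpers. The sketch below shows how convert_to_tflayer_args would be applied to a hypothetical custom layer; MyScale and its arguments are invented to demonstrate the pattern used by Conv2D/FullyConnected above, and the import paths assume the tensorpack/models/ layout of this commit.

    import tensorflow as tf
    from tensorpack.models.common import layer_register
    from tensorpack.models.tflayer import convert_to_tflayer_args

    @layer_register(log_shape=True)
    @convert_to_tflayer_args(
        args_names=['units'],                # positional args are bound to these names, in order
        name_mapping={'out_dim': 'units'})   # legacy kwarg names remapped to tf.layers names
    def MyScale(inputs, units, activation=None):
        # multiply the last input dimension (assumed to equal `units`) by a learned vector
        w = tf.get_variable('W', [units], initializer=tf.ones_initializer())
        out = inputs * w
        if activation is not None:
            out = activation(out)
        return tf.identity(out, name='output')

    # MyScale('scale', x, 8), MyScale('scale', x, out_dim=8) and MyScale('scale', x, nl=tf.nn.relu, units=8)
    # all reach the function body with tf.layers-style names, thanks to the decorator.
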
......@@ -2,9 +2,6 @@
# -*- coding: utf-8 -*-
# File: utils.py
import tensorflow as tf
from ..tfutils.varreplace import custom_getter_scope
from ..tfutils.common import get_tf_version_number
import six
......@@ -39,35 +36,3 @@ class VariableHolder(object):
list of all variables
"""
return list(six.itervalues(self._vars))
def rename_get_variable(mapping):
"""
Args:
mapping(dict): an old -> new mapping for variable basename. e.g. {'kernel': 'W'}
"""
def custom_getter(getter, name, *args, **kwargs):
splits = name.split('/')
basename = splits[-1]
if basename in mapping:
basename = mapping[basename]
splits[-1] = basename
name = '/'.join(splits)
return getter(name, *args, **kwargs)
return custom_getter_scope(custom_getter)
def monkeypatch_tf_layers():
if get_tf_version_number() < 1.4:
if not hasattr(tf.layers, 'Dense'):
from tensorflow.python.layers.core import Dense
tf.layers.Dense = Dense
from tensorflow.python.layers.normalization import BatchNormalization
tf.layers.BatchNormalization = BatchNormalization
from tensorflow.python.layers.convolutional import Conv2DTranspose
tf.layers.Conv2DTranspose = Conv2DTranspose
monkeypatch_tf_layers()