Commit 1554550d authored by Patrick Wieschollek, committed by Yuxin Wu

Change VGG19 example to tf.layers. (#832)

* Change VGG19 example to tf.layers.

As part of the transition towards tf.layers (#291, #627), this is a rewrite. While
LinearWrap might need fewer lines, tf.layers is supported by TensorFlow itself,
and the motivation of tensorpack is to be a training framework.

* cosmetic changes
parent 8f4a27f0
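
The rewrite hinges on tensorpack's argscope working with plain tf.layers once enable_argscope_for_module has been called, as the diff below does. A minimal sketch of that pattern, trimmed down from the diff (the small_tower function here is illustrative, not the full VGG19):

import tensorflow as tf
from tensorpack import *

# Patch tf.layers so its functions pick up defaults from tensorpack's argscope.
enable_argscope_for_module(tf.layers)


def small_tower(image):
    # Every conv2d in this scope inherits kernel_size=3, ReLU and 'same' padding,
    # the same defaults the VGG19 tower_func below sets once for all 16 conv layers.
    with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
        x = tf.layers.conv2d(image, 64, name='conv1_1')
        x = tf.layers.max_pooling2d(x, 2, 2, name='pool1')
    return x
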
...
@@ -7,53 +7,60 @@ import cv2
 import tensorflow as tf
 import numpy as np
 import os
+import six
 import argparse
 
 from tensorpack import *
-from tensorpack.tfutils.symbolic_functions import *
-from tensorpack.tfutils.summary import *
 from tensorpack.dataflow.dataset import ILSVRCMeta
 
+enable_argscope_for_module(tf.layers)
+
 
 def tower_func(image):
-    with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu):
-        logits = (LinearWrap(image)
-                  .Conv2D('conv1_1', 64)
-                  .Conv2D('conv1_2', 64)
-                  .MaxPooling('pool1', 2)
-                  # 112
-                  .Conv2D('conv2_1', 128)
-                  .Conv2D('conv2_2', 128)
-                  .MaxPooling('pool2', 2)
-                  # 56
-                  .Conv2D('conv3_1', 256)
-                  .Conv2D('conv3_2', 256)
-                  .Conv2D('conv3_3', 256)
-                  .Conv2D('conv3_4', 256)
-                  .MaxPooling('pool3', 2)
-                  # 28
-                  .Conv2D('conv4_1', 512)
-                  .Conv2D('conv4_2', 512)
-                  .Conv2D('conv4_3', 512)
-                  .Conv2D('conv4_4', 512)
-                  .MaxPooling('pool4', 2)
-                  # 14
-                  .Conv2D('conv5_1', 512)
-                  .Conv2D('conv5_2', 512)
-                  .Conv2D('conv5_3', 512)
-                  .Conv2D('conv5_4', 512)
-                  .MaxPooling('pool5', 2)
-                  # 7
-                  .FullyConnected('fc6', 4096, activation=tf.nn.relu)
-                  .Dropout('drop0', 0.5)
-                  .FullyConnected('fc7', 4096, activation=tf.nn.relu)
-                  .Dropout('drop1', 0.5)
-                  .FullyConnected('fc8', 1000)())
+    is_training = get_current_tower_context().is_training
+
+    with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
+        x = image
+        x = tf.layers.conv2d(x, 64, name='conv1_1')
+        x = tf.layers.conv2d(x, 64, name='conv1_2')
+        x = tf.layers.max_pooling2d(x, 2, 2, name='pool1')
+
+        x = tf.layers.conv2d(x, 128, name='conv2_1')
+        x = tf.layers.conv2d(x, 128, name='conv2_2')
+        x = tf.layers.max_pooling2d(x, 2, 2, name='pool2')
+
+        x = tf.layers.conv2d(x, 256, name='conv3_1')
+        x = tf.layers.conv2d(x, 256, name='conv3_2')
+        x = tf.layers.conv2d(x, 256, name='conv3_3')
+        x = tf.layers.conv2d(x, 256, name='conv3_4')
+        x = tf.layers.max_pooling2d(x, 2, 2, name='pool3')
+
+        x = tf.layers.conv2d(x, 512, name='conv4_1')
+        x = tf.layers.conv2d(x, 512, name='conv4_2')
+        x = tf.layers.conv2d(x, 512, name='conv4_3')
+        x = tf.layers.conv2d(x, 512, name='conv4_4')
+        x = tf.layers.max_pooling2d(x, 2, 2, name='pool4')
+
+        x = tf.layers.conv2d(x, 512, name='conv5_1')
+        x = tf.layers.conv2d(x, 512, name='conv5_2')
+        x = tf.layers.conv2d(x, 512, name='conv5_3')
+        x = tf.layers.conv2d(x, 512, name='conv5_4')
+        x = tf.layers.max_pooling2d(x, 2, 2, name='pool5')
+        x = tf.layers.flatten(x, name='flatten')
+
+        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc6')
+        x = tf.layers.dropout(x, rate=0.5, name='drop0', training=is_training)
+        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc7')
+        x = tf.layers.dropout(x, rate=0.5, name='drop1', training=is_training)
+        logits = tf.layers.dense(x, 1000, activation=tf.identity, name='fc8')
+
     tf.nn.softmax(logits, name='prob')
 
 
 def run_test(path, input):
     param_dict = dict(np.load(path))
+    param_dict = {k.replace('/W', '/kernel').replace('/b', '/bias'): v for k, v in six.iteritems(param_dict)}
+
     predict_func = OfflinePredictor(PredictConfig(
         inputs_desc=[InputDesc(tf.float32, (None, 224, 224, 3), 'input')],
         tower_func=tower_func,
...
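
The key-rename line added to run_test is needed because the pre-trained npz checkpoint was exported from tensorpack's own layers, which store weights as .../W and .../b, while tf.layers names its variables .../kernel and .../bias. A small standalone illustration of that remapping (the dict contents below are made up for the example):

import numpy as np
import six

# Parameters saved under the old tensorpack naming scheme ('.../W', '.../b').
param_dict = {
    'conv1_1/W': np.zeros((3, 3, 3, 64), np.float32),
    'conv1_1/b': np.zeros((64,), np.float32),
}

# Rename keys to the tf.layers convention ('.../kernel', '.../bias') before loading.
param_dict = {k.replace('/W', '/kernel').replace('/b', '/bias'): v
              for k, v in six.iteritems(param_dict)}

print(sorted(param_dict))  # ['conv1_1/bias', 'conv1_1/kernel']
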