Commit 95037482 authored by ppwwyyxx

rollback cifar model to old one

parent 95f4b9b9
@@ -10,10 +10,10 @@ import os
 from tensorpack.train import TrainConfig, start_train
 from tensorpack.models import *
+from tensorpack.callbacks import *
 from tensorpack.utils import *
 from tensorpack.utils.symbolic_functions import *
 from tensorpack.utils.summary import *
-from tensorpack.callbacks import *
 from tensorpack.dataflow import *
 from tensorpack.dataflow import imgaug
@@ -27,7 +27,7 @@ def get_model(inputs, is_training):
     image, label = inputs
-    if is_training:
+    if is_training: # slow?
         image, label = tf.train.shuffle_batch(
             [image, label], BATCH_SIZE, CAPACITY, MIN_AFTER_DEQUEUE,
             num_threads=6, enqueue_many=False)
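
Note on the queue-based pipeline above: tf.train.shuffle_batch maintains a RandomShuffleQueue that num_threads background threads fill with single (image, label) examples (enqueue_many=False), and each dequeue returns a shuffled batch. A minimal standalone sketch, with illustrative constant values that are assumptions rather than the ones in this file:

    import tensorflow as tf

    BATCH_SIZE = 128           # assumed for illustration
    CAPACITY = 10000           # most examples the shuffling queue may hold
    MIN_AFTER_DEQUEUE = 5000   # floor kept in the queue so batches stay well shuffled

    # `image` and `label` are single-example tensors produced upstream,
    # e.g. by a file reader; six threads enqueue them concurrently.
    image_batch, label_batch = tf.train.shuffle_batch(
        [image, label], BATCH_SIZE, CAPACITY, MIN_AFTER_DEQUEUE,
        num_threads=6, enqueue_many=False)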
@@ -43,19 +43,14 @@ def get_model(inputs, is_training):
     l = MaxPooling('pool0', l, 3, stride=2, padding='SAME')
     l = tf.nn.lrn(l, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm0')
-    l = Conv2D('conv1', l, out_channel=64, kernel_shape=5, padding='SAME',
-               b_init=tf.constant_initializer(0.1))
+    l = Conv2D('conv1', l, out_channel=64, kernel_shape=5, padding='SAME')
     l = tf.nn.lrn(l, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')
     l = MaxPooling('pool1', l, 3, stride=2, padding='SAME')
-    l = FullyConnected('fc0', l, 384,
-                       b_init=tf.constant_initializer(0.1))
-    l = FullyConnected('fc1', l, out_dim=192,
-                       b_init=tf.constant_initializer(0.1))
+    l = FullyConnected('fc0', l, 384)
+    l = FullyConnected('fc1', l, out_dim=192)
     # fc will have activation summary by default. disable this for the output layer
-    logits = FullyConnected('linear', l, out_dim=10, summary_activation=False,
-                            nl=tf.identity,
-                            W_init=tf.truncated_normal_initializer(1/192.0))
+    logits = FullyConnected('fc2', l, out_dim=10, summary_activation=False, nl=tf.identity)
     prob = tf.nn.softmax(logits, name='output')
     y = one_hot(label, 10)
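
The rolled-back stack (conv-pool-lrn, conv-lrn-pool, fc 384, fc 192, linear 10) is essentially the classic TensorFlow CIFAR-10 tutorial network. The one_hot helper is imported from tensorpack.utils.symbolic_functions; TensorFlow of this era had no tf.one_hot, and a common sparse_to_dense-based implementation looked like this (a hypothetical reconstruction, not this repository's code):

    import tensorflow as tf

    def one_hot(label, num_classes):
        # label: int32 vector of class ids, shape [batch]
        batch_size = tf.size(label)
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.expand_dims(label, 1)
        # pair each row index with its class id, then scatter 1.0 there
        concated = tf.concat(1, [indices, labels])
        return tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)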
@@ -73,7 +68,7 @@ def get_model(inputs, is_training):
         SUMMARY_VARS_KEY, tf.reduce_mean(wrong, name='train_error'))
     # weight decay on all W of fc layers
-    wd_cost = tf.mul(4e-3,
+    wd_cost = tf.mul(1e-4,
                      regularize_cost('fc.*/W', tf.nn.l2_loss),
                      name='regularize_loss')
     tf.add_to_collection(COST_VARS_KEY, wd_cost)
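
regularize_cost sums a cost function over every trainable variable whose name matches a regex; 'fc.*/W' picks the weights (but not the biases) of each FullyConnected layer, and the coefficient drops from 4e-3 back to 1e-4. A rough sketch of the mechanism, an assumed shape for the helper rather than tensorpack's actual implementation:

    import re
    import tensorflow as tf

    def regularize_cost(regex, func):
        # e.g. regex='fc.*/W', func=tf.nn.l2_loss
        costs = [func(v) for v in tf.trainable_variables()
                 if re.search(regex, v.name)]
        return tf.add_n(costs)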
@@ -124,7 +119,7 @@ def get_config():
     lr = tf.train.exponential_decay(
         learning_rate=1e-1,
         global_step=get_global_step_var(),
-        decay_steps=dataset_train.size() * 350,
+        decay_steps=dataset_train.size() * 200,
         decay_rate=0.1, staircase=True, name='learning_rate')
     tf.scalar_summary('learning_rate', lr)
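
With staircase=True the learning rate is multiplied by decay_rate=0.1 once every decay_steps global steps, i.e. once every 200 epochs after this rollback (was 350). A plain-Python restatement of the schedule; steps_per_epoch assumes 50000 CIFAR-10 training images at an illustrative batch size of 128:

    def staircase_lr(step, base_lr=1e-1, steps_per_epoch=390,
                     epochs_per_decay=200, rate=0.1):
        # staircase=True floors the exponent, so the rate is
        # piecewise-constant: base_lr * rate ** (step // decay_steps)
        decay_steps = steps_per_epoch * epochs_per_decay
        return base_lr * rate ** (step // decay_steps)

    # staircase_lr(0) -> 0.1, staircase_lr(78000) -> 0.01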
@@ -31,7 +31,7 @@ def Conv2D(x, out_channel, kernel_shape,
     stride = shape4d(stride)
     if W_init is None:
-        W_init = tf.truncated_normal_initializer(stddev=4e-3)
+        W_init = tf.truncated_normal_initializer(stddev=4e-2)
     if b_init is None:
         b_init = tf.constant_initializer()
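
The Conv2D default stddev goes back up from 4e-3 to 4e-2. With a truncated normal, samples farther than two standard deviations from the mean are redrawn, so initial filter weights stay within plus or minus 0.08. Illustrative use of the two defaults (the filter shape is an assumption, not from this commit):

    import tensorflow as tf

    W_init = tf.truncated_normal_initializer(stddev=4e-2)
    b_init = tf.constant_initializer()   # defaults to 0.0

    # assumed 5x5 filters, 3 input channels, 64 output channels
    W = tf.get_variable('W', [5, 5, 3, 64], initializer=W_init)
    b = tf.get_variable('b', [64], initializer=b_init)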
@@ -17,7 +17,7 @@ def FullyConnected(x, out_dim, W_init=None, b_init=None, nl=tf.nn.relu):
     in_dim = x.get_shape().as_list()[1]
     if W_init is None:
-        W_init = tf.truncated_normal_initializer(stddev=0.04)
+        W_init = tf.truncated_normal_initializer(stddev=1.0 / math.sqrt(float(in_dim)))
     if b_init is None:
         b_init = tf.constant_initializer()
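
The FullyConnected default returns to the classic 1/sqrt(fan_in) rule: scaling the weight stddev by the inverse square root of the input dimension keeps pre-activation variance roughly independent of layer width (the changed line also relies on the module importing math). A small sketch with an assumed fan-in:

    import math
    import tensorflow as tf

    in_dim = 1024    # assumed fan-in, for illustration
    W_init = tf.truncated_normal_initializer(
        stddev=1.0 / math.sqrt(float(in_dim)))    # stddev == 0.03125 here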