Commit 17a4228a authored by Yuxin Wu's avatar Yuxin Wu

wrong dataset in dorefa

parent 1f3eaf97
...@@ -54,7 +54,7 @@ def eval_with_funcs(predict_funcs, nr_eval): ...@@ -54,7 +54,7 @@ def eval_with_funcs(predict_funcs, nr_eval):
k.start() k.start()
stat = StatCounter() stat = StatCounter()
try: try:
for _ in tqdm(range(nr_eval)): for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get() r = q.get()
stat.feed(r) stat.feed(r)
except: except:
......
...@@ -155,9 +155,7 @@ class Model(ModelDesc): ...@@ -155,9 +155,7 @@ class Model(ModelDesc):
def get_data(dataset_name): def get_data(dataset_name):
isTrain = dataset_name == 'train' isTrain = dataset_name == 'train'
#ds = dataset.ILSVRC12(args.data, dataset_name, ds = dataset.ILSVRC12(args.data, dataset_name,
#shuffle=True if isTrain else False)
ds = dataset.ILSVRC12('/home/wyx/data/fake_ilsvrc', dataset_name,
shuffle=True if isTrain else False) shuffle=True if isTrain else False)
meta = dataset.ILSVRCMeta() meta = dataset.ILSVRCMeta()
......
...@@ -9,7 +9,6 @@ import os, sys ...@@ -9,7 +9,6 @@ import os, sys
import argparse import argparse
from tensorpack import * from tensorpack import *
from tensorpack.tfutils.summary import add_moving_summary
""" """
MNIST ConvNet example. MNIST ConvNet example.
...@@ -52,13 +51,13 @@ class Model(ModelDesc): ...@@ -52,13 +51,13 @@ class Model(ModelDesc):
wrong = symbolic_functions.prediction_incorrect(logits, label) wrong = symbolic_functions.prediction_incorrect(logits, label)
nr_wrong = tf.reduce_sum(wrong, name='wrong') nr_wrong = tf.reduce_sum(wrong, name='wrong')
# monitor training error # monitor training error
add_moving_summary(tf.reduce_mean(wrong, name='train_error')) summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
# weight decay on all W of fc layers # weight decay on all W of fc layers
wd_cost = tf.mul(1e-5, wd_cost = tf.mul(1e-5,
regularize_cost('fc.*/W', tf.nn.l2_loss), regularize_cost('fc.*/W', tf.nn.l2_loss),
name='regularize_loss') name='regularize_loss')
add_moving_summary(cost, wd_cost) summary.add_moving_summary(cost, wd_cost)
summary.add_param_summary([('.*/W', ['histogram'])]) # monitor histogram of all W summary.add_param_summary([('.*/W', ['histogram'])]) # monitor histogram of all W
self.cost = tf.add_n([wd_cost, cost], name='cost') self.cost = tf.add_n([wd_cost, cost], name='cost')
......
...@@ -14,7 +14,7 @@ __all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU', 'NonLinearity'] ...@@ -14,7 +14,7 @@ __all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU', 'NonLinearity']
@layer_register(log_shape=False) @layer_register(log_shape=False)
def Maxout(x, num_unit): def Maxout(x, num_unit):
""" """
Maxout networks as in `Maxout Networks <http://arxiv.org/abs/1302.4389>`_. Maxout as in `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.
:param input: a NHWC or NC tensor. :param input: a NHWC or NC tensor.
:param num_unit: a int. must be divisible by C. :param num_unit: a int. must be divisible by C.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment