Commit 9b69e860 authored by Yuxin Wu

fix pep8 style in examples/

parent 37e98945
@@ -62,6 +62,8 @@ def get_player(viz=False, train=False):
pl = PreventStuckPlayer(pl, 30, 1)
pl = LimitLengthPlayer(pl, 30000)
return pl
common.get_player = get_player  # so that eval functions in common can use the player
@@ -92,9 +94,9 @@ class Model(ModelDesc):
.Conv2D('conv3', out_channel=64, kernel_shape=3)
# the original arch
-#.Conv2D('conv0', image, out_channel=32, kernel_shape=8, stride=4)
+# .Conv2D('conv0', image, out_channel=32, kernel_shape=8, stride=4)
-#.Conv2D('conv1', out_channel=64, kernel_shape=4, stride=2)
+# .Conv2D('conv1', out_channel=64, kernel_shape=4, stride=2)
-#.Conv2D('conv2', out_channel=64, kernel_shape=3)
+# .Conv2D('conv2', out_channel=64, kernel_shape=3)
.FullyConnected('fc0', 512, nl=LeakyReLU)())
if METHOD != 'Dueling':
@@ -180,8 +182,8 @@ def get_config():
RunOp(lambda: M.update_target_param()),
dataset_train,
PeriodicCallback(Evaluator(EVAL_EPISODE, ['state'], ['Qvalue']), 3),
-#HumanHyperParamSetter('learning_rate', 'hyper.txt'),
+# HumanHyperParamSetter('learning_rate', 'hyper.txt'),
-#HumanHyperParamSetter(ObjAttrParam(dataset_train, 'exploration'), 'hyper.txt'),
+# HumanHyperParamSetter(ObjAttrParam(dataset_train, 'exploration'), 'hyper.txt'),
]),
# save memory for multiprocess evaluator
session_config=get_default_sess_config(0.6),
@@ -189,6 +191,7 @@ def get_config():
step_per_epoch=STEP_PER_EPOCH,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
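The `common.get_player = get_player` line in the first hunk above overrides a module-level attribute so that evaluation helpers defined in `common.py` call back into this game-specific player factory. A minimal, runnable sketch of that override pattern, using an inline stand-in module rather than the real tensorpack code:

import types

# Stand-in for the shared common.py module (illustrative only).
common = types.ModuleType('common')

def play_one_episode(**kwargs):
    # The factory is looked up through the module attribute at call time,
    # so an override installed later by the training script is picked up.
    return common.get_player(**kwargs)

def get_player(viz=False, train=False):
    # Game-specific factory, as defined by the training script.
    return 'player(viz={}, train={})'.format(viz, train)

common.get_player = get_player  # the override, as in the diff
print(play_one_episode(train=False))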
@@ -166,9 +166,9 @@ class AtariPlayer(RLEnvironment):
self.restart_episode()
return (r, isOver)
if __name__ == '__main__':
import sys
import time
def benchmark():
a = AtariPlayer(sys.argv[1], viz=False, height_range=(28, -8))
@@ -189,7 +189,7 @@ if __name__ == '__main__':
import threading
import multiprocessing
for k in range(3):
-#th = multiprocessing.Process(target=benchmark)
+# th = multiprocessing.Process(target=benchmark)
th = threading.Thread(target=benchmark)
th.start()
time.sleep(0.02)
@@ -201,8 +201,8 @@ if __name__ == '__main__':
rng = get_rng(num)
import time
while True:
-#im = a.grab_image()
+# im = a.grab_image()
-#cv2.imshow(a.romname, im)
+# cv2.imshow(a.romname, im)
act = rng.choice(range(num))
print(act)
r, o = a.action(act)
...
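The hunk at line 189 above keeps a commented-out `multiprocessing.Process` variant next to the `threading.Thread` that actually launches several benchmark workers. A standalone sketch of that driver pattern, with a placeholder `benchmark` body (assumption: the real one steps the Atari emulator in a loop):

import threading
import time

def benchmark():
    time.sleep(0.1)  # placeholder for stepping the emulator

threads = []
for k in range(3):
    # th = multiprocessing.Process(target=benchmark)  # process-based variant
    th = threading.Thread(target=benchmark)
    th.start()
    threads.append(th)
    time.sleep(0.02)  # stagger the starts slightly, as in the example
for th in threads:
    th.join()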
@@ -113,6 +113,7 @@ def run_test(model_path, img_file):
viz = colorize(im, hm)
cv2.imwrite("output.jpg", viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', required=True, help='.npy model file')
...
@@ -22,8 +22,9 @@ def get_data():
dataset_train = BatchData(DisturbLabel(dataset.Mnist('train'), args.prob), 128)
dataset_test = BatchData(dataset.Mnist('test'), 256, remainder=True)
return dataset_train, dataset_test
mnist_example.get_data = get_data
IMAGE_SIZE = 28
@@ -54,6 +55,7 @@ class Model(mnist_example.Model):
self.cost = tf.add_n([wd_cost, cost], name='cost')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
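`DisturbLabel`, used in the `get_data` hunk above, implements the DisturbLabel regularizer: with probability `prob`, a sample's label is replaced by one drawn uniformly at random over all classes. A rough, hypothetical sketch of the idea (not the repository's DataFlow implementation):

import numpy as np

# Sketch only: the real code wraps a tensorpack DataFlow instead of a generator.
def disturb_labels(samples, prob, num_classes=10, rng=np.random):
    """Yield (image, label) pairs, randomizing each label with probability prob."""
    for img, label in samples:
        if rng.rand() < prob:
            label = rng.randint(num_classes)  # uniform over all classes
        yield img, label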
@@ -15,18 +15,19 @@ from disturb import DisturbLabel
import imp
svhn_example = imp.load_source('svhn_example',
os.path.join(os.path.dirname(__file__), '..', 'svhn-digit-convnet.py'))
-Model=svhn_example.Model
+Model = svhn_example.Model
-get_config=svhn_example.get_config
+get_config = svhn_example.get_config
def get_data():
-d1=dataset.SVHNDigit('train')
+d1 = dataset.SVHNDigit('train')
-d2=dataset.SVHNDigit('extra')
+d2 = dataset.SVHNDigit('extra')
-data_train=RandomMixData([d1, d2])
+data_train = RandomMixData([d1, d2])
-data_train=DisturbLabel(data_train, args.prob)
+data_train = DisturbLabel(data_train, args.prob)
-data_test=dataset.SVHNDigit('test')
+data_test = dataset.SVHNDigit('test')
-augmentors=[
+augmentors = [
imgaug.Resize((40, 40)),
imgaug.Brightness(30),
imgaug.Contrast((0.5, 1.5)),
@@ -35,18 +36,20 @@ def get_data():
data_train = BatchData(data_train, 128)
data_train = PrefetchData(data_train, 5, 5)
-augmentors = [ imgaug.Resize((40, 40)) ]
+augmentors = [imgaug.Resize((40, 40))]
data_test = AugmentImageComponent(data_test, augmentors)
data_test = BatchData(data_test, 128, remainder=True)
return data_train, data_test
svhn_example.get_data = get_data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='a gpu to use')
parser.add_argument('--load', help='load model')
-parser.add_argument('--prob', help='disturb prob', type=float, required=True)
+parser.add_argument('--prob', help='disturb prob',
+                    type=float, required=True)
args = parser.parse_args()
if args.gpu:
...
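The svhn-disturb hunks above use `imp.load_source` because `svhn-digit-convnet.py` has hyphens in its filename and cannot be imported with a plain `import` statement; the loaded module's `Model` and `get_config` are reused and its `get_data` is overridden. A small sketch of the same trick (the path is copied from the diff; on Python 3, `importlib.util.spec_from_file_location` is the usual replacement for the deprecated `imp` module):

import imp
import os

# Sketch of the pattern used in svhn-disturb.py: load a sibling script
# whose filename is not a valid module name.
path = os.path.join(os.path.dirname(__file__), '..', 'svhn-digit-convnet.py')
svhn_example = imp.load_source('svhn_example', path)

Model = svhn_example.Model            # reuse the model definition
get_config = svhn_example.get_config  # reuse the training config

def get_data():
    raise NotImplementedError  # replaced by a local implementation, as in the diff

svhn_example.get_data = get_data      # override the attribute the config uses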
@@ -290,6 +290,7 @@ def run_image(model, sess_init, inputs):
print(f + ":")
print(list(zip(names, prob[ret])))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='the physical ids of GPUs to use')
...
@@ -140,8 +140,8 @@ def get_config():
imgaug.Brightness(30),
imgaug.Contrast((0.5, 1.5)),
# imgaug.GaussianDeform(  # this is slow but helpful. only use it when you have lots of cpus
-#[(0.2, 0.2), (0.2, 0.8), (0.8,0.8), (0.8,0.2)],
+# [(0.2, 0.2), (0.2, 0.8), (0.8,0.8), (0.8,0.2)],
-#(40,40), 0.2, 3),
+# (40,40), 0.2, 3),
]
data_train = AugmentImageComponent(data_train, augmentors)
data_train = BatchData(data_train, 128)
@@ -173,6 +173,7 @@ def get_config():
max_epoch=200,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='the GPU to use')
...
@@ -134,6 +134,7 @@ def sample(model_path):
o = o[:, :, :, ::-1]
viz = next(build_patch_list(o, nr_row=10, nr_col=10, viz=True))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -22,7 +22,8 @@ from GAN import GANTrainer, build_GAN_losses
To train:
./Image2Image.py --data /path/to/datadir --mode {AtoB,BtoA}
# datadir should contain jpg images of shpae 2s x s, formed by A and B
-# you can download some data from the original authors: https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/
+# you can download some data from the original authors:
+# https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/
# training visualization will appear be in tensorboard
Speed:
@@ -193,7 +194,8 @@ def sample(datadir, model_path):
pred = SimpleDatasetPredictor(pred, ds)
for o in pred.get_result():
o = o[0][:, :, :, ::-1]
-viz = next(build_patch_list(o, nr_row=3, nr_col=2, viz=True))
+next(build_patch_list(o, nr_row=3, nr_col=2, viz=True))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
...
@@ -134,6 +134,7 @@ def sample(model_path):
viz = cv2.resize(viz, (800, 800))
interactive_imshow(viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -206,6 +206,7 @@ def run(model_path, image_path, output):
pred = outputs[5][0]
cv2.imwrite(output, pred * 255)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -165,7 +165,7 @@ def get_config():
InferenceRunner(dataset_val, [
ClassificationError('wrong-top1', 'val-top1-error'),
ClassificationError('wrong-top5', 'val-top5-error')]),
-#HumanHyperParamSetter('learning_rate', 'hyper-googlenet.txt')
+# HumanHyperParamSetter('learning_rate', 'hyper-googlenet.txt')
ScheduledHyperParamSetter('learning_rate',
[(8, 0.03), (14, 0.02), (17, 5e-3),
(19, 3e-3), (24, 1e-3), (26, 2e-4),
@@ -177,6 +177,7 @@ def get_config():
max_epoch=80,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -19,7 +19,8 @@ import multiprocessing
InceptionV3 on ILSVRC12.
See "Rethinking the Inception Architecture for Computer Vision", arxiv:1512.00567
-This config follows the official inceptionv3 setup (https://github.com/tensorflow/models/tree/master/inception/inception)
+This config follows the official inceptionv3 setup
+(https://github.com/tensorflow/models/tree/master/inception/inception)
with much much fewer lines of code.
It reaches 74% single-crop validation accuracy,
and has the same running speed as the official code.
@@ -284,6 +285,7 @@ def get_config():
max_epoch=100,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -15,6 +15,7 @@ import six
from tensorpack import *
from tensorpack.RL import *
+from common import play_one_episode
IMAGE_SIZE = (84, 84)
FRAME_HISTORY = 4
@@ -24,8 +25,6 @@ IMAGE_SHAPE3 = IMAGE_SIZE + (CHANNEL,)
NUM_ACTIONS = None
ENV_NAME = None
-from common import play_one_episode
def get_player(dumpdir=None):
pl = GymEnv(ENV_NAME, dumpdir=dumpdir, auto_restart=False)
@@ -82,6 +81,7 @@ def run_submission(cfg, output, nr):
def do_submit(output):
gym.upload(output, api_key='xxx')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -62,6 +62,8 @@ def get_player(viz=False, train=False, dumpdir=None):
pl = PreventStuckPlayer(pl, 30, 1)
pl = LimitLengthPlayer(pl, 40000)
return pl
common.get_player = get_player
@@ -220,6 +222,7 @@ def get_config():
max_epoch=1000,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -153,6 +153,7 @@ def get_config():
max_epoch=400,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -159,9 +159,9 @@ def get_data(train_or_test):
imgaug.Saturation(0.4),
imgaug.Lighting(0.1,
eigval=[0.2175, 0.0188, 0.0045],
eigvec=[[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]]
)]),
imgaug.Clip(),
imgaug.Flip(horiz=True),
@@ -221,6 +221,7 @@ def eval_on_ILSVRC12(model_file, data_dir):
print("Top1 Error: {}".format(acc1.ratio))
print("Top5 Error: {}".format(acc5.ratio))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
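The `imgaug.Lighting(0.1, eigval=..., eigvec=...)` call in the ResNet input pipeline above is the AlexNet-style PCA color augmentation: each image receives a per-image RGB offset along the dataset's color principal components, scaled by the eigenvalues and a small Gaussian factor. A hedged numpy sketch of the idea (not tensorpack's implementation):

import numpy as np

# Sketch only; tensorpack's imgaug.Lighting is the actual augmentor used above.
EIGVAL = np.array([0.2175, 0.0188, 0.0045])
EIGVEC = np.array([[-0.5675, 0.7192, 0.4009],
                   [-0.5808, -0.0045, -0.8140],
                   [-0.5836, -0.6948, 0.4203]])

def pca_lighting(img, std=0.1, rng=np.random):
    """Add a random offset along the RGB principal components to an HxWx3 image."""
    alpha = rng.normal(0.0, std, size=3)  # one draw per image
    offset = EIGVEC.dot(alpha * EIGVAL)   # one offset value per color channel
    return img + offset                   # broadcast over height and width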
@@ -95,10 +95,10 @@ class Model(ModelDesc):
def get_inference_augmentor():
# load ResNet mean from Kaiming:
-#from tensorpack.utils.loadcaffe import get_caffe_pb
+# from tensorpack.utils.loadcaffe import get_caffe_pb
-#obj = get_caffe_pb().BlobProto()
+# obj = get_caffe_pb().BlobProto()
# obj.ParseFromString(open('ResNet_mean.binaryproto').read())
-#pp_mean_224 = np.array(obj.data).reshape(3, 224, 224).transpose(1,2,0)
+# pp_mean_224 = np.array(obj.data).reshape(3, 224, 224).transpose(1,2,0)
meta = ILSVRCMeta()
pp_mean = meta.get_per_pixel_mean()
@@ -194,6 +194,7 @@ def name_conversion(caffe_layer_name):
int(layer_group) - 2, layer_block, layer_type) + tf_name
return tf_name
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -83,6 +83,7 @@ def get_config():
max_epoch=500,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -167,6 +167,7 @@ def get_config():
max_epoch=500,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -15,8 +15,12 @@ from tensorpack.utils.stats import OnlineMoments
import bob.ap
CHARSET = set(string.ascii_lowercase + ' ')
-PHONEME_LIST = "aa,ae,ah,ao,aw,ax,ax-h,axr,ay,b,bcl,ch,d,dcl,dh,dx,eh,el,em,en,eng,epi,er,ey,f,g,gcl,h#,hh,hv,ih,ix,iy,jh,k,kcl,l,m,n,ng,nx,ow,oy,p,pau,pcl,q,r,s,sh,t,tcl,th,uh,uw,ux,v,w,y,z,zh".split(
-    ',')
+PHONEME_LIST = [
+    'aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h', 'axr', 'ay', 'b', 'bcl', 'ch', 'd', 'dcl', 'dh',
+    'dx', 'eh', 'el', 'em', 'en', 'eng', 'epi', 'er', 'ey', 'f', 'g', 'gcl', 'h#', 'hh', 'hv', 'ih',
+    'ix', 'iy', 'jh', 'k', 'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow', 'oy', 'p', 'pau', 'pcl', 'q', 'r',
+    's', 'sh', 't', 'tcl', 'th', 'uh', 'uw', 'ux', 'v', 'w', 'y', 'z', 'zh']
PHONEME_DIC = {v: k for k, v in enumerate(PHONEME_LIST)}
WORD_DIC = {v: k for k, v in enumerate(string.ascii_lowercase + ' ')}
@@ -110,6 +114,7 @@ def compute_mean_std(db, fname):
with open(fname, 'wb') as f:
f.write(serialize.dumps([o.mean, o.std]))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='command', dest='command')
...
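The TIMIT hunk above only reformats `PHONEME_LIST` from one long comma-joined string into an explicit list so the lines fit the 120-character limit; the resulting list is unchanged, and `PHONEME_DIC` still maps each phoneme symbol to an integer label. A tiny illustration with a truncated list (the truncation is for brevity only):

PHONEME_LIST = ['aa', 'ae', 'ah']             # truncated for the example
assert PHONEME_LIST == "aa,ae,ah".split(',')  # both spellings produce the same list
PHONEME_DIC = {v: k for k, v in enumerate(PHONEME_LIST)}
assert PHONEME_DIC['ah'] == 2                 # phoneme -> class index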
@@ -109,6 +109,7 @@ def get_config(ds_train, ds_test):
max_epoch=70,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -161,6 +161,7 @@ def sample(path, start, length):
ret += c
print(ret)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -136,6 +136,7 @@ def get_config(cifar_classnum):
max_epoch=150,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -74,6 +74,7 @@ def run_test(path, input):
meta = ILSVRCMeta().get_synset_words_1000()
print("Top10 class names:", [meta[k] for k in ret])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -17,7 +17,8 @@ from tensorpack.dataflow.dataset import ILSVRCMeta
"""
Usage:
-python -m tensorpack.utils.loadcaffe PATH/TO/VGG/{VGG_ILSVRC_16_layers_deploy.prototxt,VGG_ILSVRC_16_layers.caffemodel} vgg16.npy
+python -m tensorpack.utils.loadcaffe \
+    PATH/TO/VGG/{VGG_ILSVRC_16_layers_deploy.prototxt,VGG_ILSVRC_16_layers.caffemodel} vgg16.npy
./load-vgg16.py --load vgg16.npy --input cat.png
"""
@@ -84,6 +85,7 @@ def run_test(path, input):
meta = ILSVRCMeta().get_synset_words_1000()
print("Top10 class names:", [meta[k] for k in ret])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -129,6 +129,7 @@ def get_config():
max_epoch=100,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
@@ -111,6 +111,7 @@ def get_config():
max_epoch=350,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
...
[flake8]
max-line-length = 120
ignore = F403,F401,F405,F841
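This new `[flake8]` section caps lines at 120 characters and ignores F403/F405 (star imports and names that may come from them), F401 (unused imports) and F841 (unused local variables); the remaining checks (line length, comment spacing, whitespace around operators) correspond to the kinds of fixes in the hunks above. A small, hypothetical helper showing what the length limit amounts to:

# Illustrative helper only, not part of flake8 or this commit.
def lines_over_limit(path, limit=120):
    """Return (line_number, length) for every line longer than limit characters."""
    offenders = []
    with open(path) as f:
        for lineno, line in enumerate(f, 1):
            length = len(line.rstrip('\n'))
            if length > limit:
                offenders.append((lineno, length))
    return offenders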