Commit 5e40fa3c authored by Yuxin Wu

update docs

parent 22978393
@@ -37,7 +37,6 @@ User Tutorials
   callback
   summary
   faq
-  performance-tuning

 Extend Tensorpack
 =================
@@ -50,3 +49,10 @@ Extend Tensorpack
   extend/model
   extend/callback
   extend/trainer
+
+Notes
+======
+
+.. toctree::
+  :maxdepth: 1
+
+  performance-tuning
@@ -39,7 +39,7 @@ are likely to have too much variance. To address this issue, you can:

 ### Other Data

 Besides TensorFlow summaries,
-a callback can also write other data to the monitor backend anytime after the training has started.
+a callback can also write other data to the monitor backend anytime once the training has started.
 As long as the type of data is supported, the data will be dispatched to and logged to the same place.
 As a result, tensorboard will show not only summaries in the graph, but also your custom data.
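For context, the custom logging this paragraph describes can be sketched roughly as below. This is a minimal sketch, not code from this commit: it assumes the `_trigger_epoch` callback hook and the `put_scalar` method of the monitor backend; the class name and the constant value are made up.

```python
from tensorpack.callbacks import Callback

class DumpMyMetric(Callback):
    """Hypothetical callback that writes a custom scalar to the monitor backend."""

    def _trigger_epoch(self):
        value = 0.5  # stand-in for a real measurement computed by the callback
        # Anything put here is dispatched to every registered monitor, so it
        # ends up in the training log and in tensorboard alongside the regular
        # TensorFlow summaries.
        self.trainer.monitors.put_scalar('custom/my-metric', value)
```

Such a callback would be enabled like any other one, by listing it among the callbacks of the training config.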
@@ -59,13 +59,10 @@ class Model(mnist_example.Model):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
-    parser.add_argument('--load', help='load model')
     parser.add_argument('--prob', help='disturb prob', type=float, required=True)
     args = parser.parse_args()
     if args.gpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
     config = get_config()
-    if args.load:
-        config.session_init = SaverRestore(args.load)
     QueueInputTrainer(config).train()
@@ -46,7 +46,6 @@ svhn_example.get_data = get_data
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--gpu', help='a gpu to use')
-    parser.add_argument('--load', help='load model')
     parser.add_argument('--prob', help='disturb prob',
                         type=float, required=True)
     args = parser.parse_args()
@@ -57,6 +56,4 @@ if __name__ == '__main__':
         os.environ['CUDA_VISIBLE_DEVICES'] = '0'
     config = get_config(args.prob)
-    if args.load:
-        config.session_init = SaverRestore(args.load)
     QueueInputTrainer(config).train()
@@ -176,19 +176,11 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', help='the GPU to use')
-    parser.add_argument('--load', help='load a checkpoint')
     parser.add_argument('--dorefa',
                         help='number of bits for W,A,G, separated by comma. Defaults to \'1,2,4\'',
                         default='1,2,4')
     args = parser.parse_args()
     BITW, BITA, BITG = map(int, args.dorefa.split(','))
-    if args.gpu:
-        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
     config = get_config()
-    if args.load:
-        config.session_init = SaverRestore(args.load)
     QueueInputTrainer(config).train()
@@ -123,14 +123,11 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
-    parser.add_argument('--load', help='load model')
     args = parser.parse_args()
     if args.gpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
     config = get_config()
-    if args.load:
-        config.session_init = SaverRestore(args.load)
     if args.gpu:
         config.nr_tower = len(args.gpu.split(','))
     if config.nr_tower > 1:
@@ -96,12 +96,9 @@ def get_config():
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
-    parser.add_argument('--load', help='load model')
     args = parser.parse_args()
     if args.gpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
     config = get_config()
-    if args.load:
-        config.session_init = SaverRestore(args.load)
     SimpleTrainer(config).train()
@@ -62,7 +62,6 @@ class TrainConfig(object):
         nr_tower (int): number of training towers, used by multigpu trainers.
         tower ([int]): list of training towers in relative GPU id.
-        predict_tower ([int]): list of prediction towers in their relative gpu id. Use -1 for cpu.
     """
     # TODO type checker decorator
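For reference, the multi-GPU fields documented here are used by the examples earlier in this commit roughly as sketched below. This is a hypothetical driver snippet, not code from the repository: `get_config` is assumed to return a `TrainConfig`, and only `nr_tower` and `tower` come from the docstring above.

```python
# Hypothetical driver code: one training tower per requested GPU.
gpus = '0,1'                             # e.g. taken from a --gpu command-line flag
config = get_config()                    # assumed to return a TrainConfig
config.nr_tower = len(gpus.split(','))   # number of towers, used by multigpu trainers
# Equivalently (assumption), the towers could be listed by relative GPU id:
# config.tower = [0, 1]
```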