Commit b936df1d authored by Yuxin Wu

name scope improvements for StagingInput

parent 1d99dc4e
...@@ -10,7 +10,7 @@ from tensorpack import logger, QueueInput ...@@ -10,7 +10,7 @@ from tensorpack import logger, QueueInput
from tensorpack.models import * from tensorpack.models import *
from tensorpack.callbacks import * from tensorpack.callbacks import *
from tensorpack.train import ( from tensorpack.train import (
TrainConfig, SyncMultiGPUTrainerParameterServer, launch_train_with_config) TrainConfig, SyncMultiGPUTrainerReplicated, launch_train_with_config)
from tensorpack.dataflow import FakeData from tensorpack.dataflow import FakeData
from tensorpack.tfutils import argscope, get_model_loader from tensorpack.tfutils import argscope, get_model_loader
from tensorpack.utils.gpu import get_nr_gpu from tensorpack.utils.gpu import get_nr_gpu
...@@ -128,10 +128,10 @@ if __name__ == '__main__': ...@@ -128,10 +128,10 @@ if __name__ == '__main__':
logger.set_logger_dir(os.path.join('train_log', 'tmp'), 'd') logger.set_logger_dir(os.path.join('train_log', 'tmp'), 'd')
else: else:
logger.set_logger_dir( logger.set_logger_dir(
os.path.join('train_log', 'imagenet-resnet-d' + str(args.depth))) os.path.join('train_log', 'imagenet-{}-d{}'.format(args.mode, args.depth)))
config = get_config(model, fake=args.fake) config = get_config(model, fake=args.fake)
if args.load: if args.load:
config.session_init = get_model_loader(args.load) config.session_init = get_model_loader(args.load)
trainer = SyncMultiGPUTrainerParameterServer(max(get_nr_gpu(), 1)) trainer = SyncMultiGPUTrainerReplicated(max(get_nr_gpu(), 1))
launch_train_with_config(config, trainer) launch_train_with_config(config, trainer)
...@@ -546,16 +546,18 @@ class StagingInput(FeedfreeInput): ...@@ -546,16 +546,18 @@ class StagingInput(FeedfreeInput):
# TODO tensorflow/benchmarks use static shapes here, # TODO tensorflow/benchmarks use static shapes here,
# though it doesn't seem to help. We can use it when it's known. # though it doesn't seem to help. We can use it when it's known.
stage = StagingArea(dtypes, shapes=None) stage = StagingArea(dtypes, shapes=None)
self._stage_ops.append(stage.put(inputs))
self._areas.append(stage) # put & get automatically inherit the name scope from the area
outputs = stage.get() self._stage_ops.append(stage.put(inputs))
if isinstance(outputs, tf.Tensor): # when size=1, TF doesn't return a list self._areas.append(stage)
outputs = [outputs] outputs = stage.get()
for vin, vout in zip(inputs, outputs): if isinstance(outputs, tf.Tensor): # when size=1, TF doesn't return a list
vout.set_shape(vin.get_shape()) outputs = [outputs]
self._unstage_ops.append(outputs) for vin, vout in zip(inputs, outputs):
# self._size_ops.append(stage.size()) vout.set_shape(vin.get_shape())
return outputs self._unstage_ops.append(outputs)
# self._size_ops.append(stage.size())
return outputs
def _get_stage_op(self): def _get_stage_op(self):
with self.cached_name_scope(): with self.cached_name_scope():
......
...@@ -165,7 +165,7 @@ class InputSource(object): ...@@ -165,7 +165,7 @@ class InputSource(object):
with tf.name_scope(self._name_scope): with tf.name_scope(self._name_scope):
yield self._name_scope yield self._name_scope
else: else:
name = type(self).__name__ name = type(self).__name__ + '/'
with tf.name_scope(name) as ns: with tf.name_scope(name) as ns:
self._name_scope = ns self._name_scope = ns
yield ns yield ns
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment