Commit eb2492c4 authored by Bohumír Zámečník, committed by Yuxin Wu

Fix some more typos. (#757)

parent 0201f2df
@@ -104,7 +104,7 @@ class FeedfreeInput(InputSource):
         pass


-# TODO enqueu_many? https://github.com/tensorflow/tensorflow/issues/7817#issuecomment-282053155
+# TODO enqueue_many? https://github.com/tensorflow/tensorflow/issues/7817#issuecomment-282053155
 class EnqueueThread(ShareSessionThread):
     def __init__(self, queue, ds, placehdrs):
         super(EnqueueThread, self).__init__()
...
@@ -110,7 +110,7 @@ class InputSource(object):
     @memoized
     def get_callbacks(self):
         """
-        An InputSource might need some extra maintainance during training,
+        An InputSource might need some extra maintenance during training,
         which is done also through the Callback interface.
        This method returns the callbacks and the return value will be memoized.
...
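Editor's note: `@memoized` here means repeated calls to `get_callbacks` return the cached first result, so the callbacks are constructed only once. A minimal stand-in for such a decorator (illustrative only; tensorpack ships its own `memoized` in its utils):

```python
import functools

def memoized(func):
    """Illustrative sketch: cache the return value per argument tuple,
    so a decorated method like get_callbacks() runs its body only once."""
    return functools.lru_cache(maxsize=None)(func)
```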
@@ -204,7 +204,7 @@ class TestPool(TestModel):
         res = self.run_variable(output)
         self.assertEqual(res.shape, (1, scale * h, scale * w, 3))
-        # mat is on cornser
+        # mat is on corner
         ele = res[0, ::scale, ::scale, 0]
         self.assertTrue((ele == mat[:, :, 0]).all())
         # the rest are zeros
...
@@ -34,7 +34,7 @@ def convert_to_tflayer_args(args_names, name_mapping):
     1. data_format becomes tf.layers style
     2. nl becomes activation
     3. initializers are renamed
-    4. positional args are transformed to correspoding kwargs, according to args_names
+    4. positional args are transformed to corresponding kwargs, according to args_names
     5. kwargs are mapped to tf.layers names if needed, by name_mapping
     """
...
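Editor's note: item 4 above describes zipping leading positional arguments with `args_names` so the wrapped layer only ever sees keyword arguments. A minimal sketch of that transformation (the decorator name and the `conv2d` toy function below are hypothetical, not tensorpack's actual implementation):

```python
import functools

def convert_positional_to_kwargs(args_names):
    """Sketch: pair leading positional args with args_names, then
    call the wrapped function with keyword arguments only."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            kwargs.update(dict(zip(args_names, args)))
            return func(**kwargs)
        return wrapper
    return decorator

@convert_positional_to_kwargs(['filters', 'kernel_size'])
def conv2d(**kwargs):
    return kwargs

print(conv2d(32, 3, activation='relu'))
# {'filters': 32, 'kernel_size': 3, 'activation': 'relu'}
```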
@@ -25,7 +25,7 @@ class PredictorBase(object):
     Attributes:
         return_input (bool): whether the call will also return (inputs, outputs)
-            or just outpus
+            or just outputs
     """
     def __call__(self, *args):
...
@@ -130,14 +130,14 @@ class PredictorWorkerThread(StoppableThread, ShareSessionThread):
 class MultiThreadAsyncPredictor(AsyncPredictorBase):
     """
-    An multithread online async predictor which runs a list of OnlinePredictor.
+    An multithreaded online async predictor which runs a list of OnlinePredictor.
     It would do an extra batching internally.
     """
     def __init__(self, predictors, batch_size=5):
         """
         Args:
-            predictors (list): a list of OnlinePredictor avaiable to use.
+            predictors (list): a list of OnlinePredictor available to use.
             batch_size (int): the maximum of an internal batch.
         """
         assert len(predictors)
...
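Editor's note: the "extra batching" the docstring mentions is the usual pattern of draining a task queue greedily up to `batch_size` so each predictor call runs on a batch rather than single items. A minimal sketch of that pattern (`batch_fetch` is a hypothetical helper, not tensorpack code):

```python
import queue

def batch_fetch(q, batch_size):
    """Block for one task, then greedily take up to batch_size - 1 more
    without waiting, so prediction runs on batches whenever tasks queue up."""
    batch = [q.get()]
    while len(batch) < batch_size:
        try:
            batch.append(q.get_nowait())
        except queue.Empty:
            break
    return batch
```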
@@ -79,7 +79,7 @@ class SimpleDatasetPredictor(DatasetPredictorBase):
 class MultiProcessDatasetPredictor(DatasetPredictorBase):
     """
-    Run prediction in multiprocesses, on either CPU or GPU.
+    Run prediction in multiple processes, on either CPU or GPU.
     Each process fetch datapoints as tasks and run predictions independently.
     """
     # TODO allow unordered
...
@@ -170,5 +170,5 @@ class CollectionGuard(object):
         """
         new = tf.get_collection(key)
         old = set(self.original.get(key, []))
-        # presist the order in new
+        # persist the order in new
         return [x for x in new if x not in old]
@@ -66,7 +66,7 @@ class ModelExport(object):
                  tags=[tf.saved_model.tag_constants.SERVING],
                  signature_name='prediction_pipeline'):
         """
-        Use SavedModelBuilder to export a trained model without tensorpack depency.
+        Use SavedModelBuilder to export a trained model without tensorpack dependency.
         Remarks:
             This produces
...
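Editor's note: for orientation, a bare SavedModelBuilder export with the same SERVING tag and signature name looks roughly like the following TF1-style sketch. It uses only public `tf.saved_model` APIs; the function name and the `inputs`/`outputs` dicts are illustrative, not part of `ModelExport`:

```python
import tensorflow as tf

def export_saved_model(sess, export_dir, inputs, outputs,
                       signature_name='prediction_pipeline'):
    """Sketch: write a SavedModel that serving tools can load by tag.
    inputs/outputs are dicts mapping names to graph tensors."""
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    signature = tf.saved_model.signature_def_utils.predict_signature_def(
        inputs=inputs, outputs=outputs)
    builder.add_meta_graph_and_variables(
        sess,
        tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map={signature_name: signature})
    builder.save()
```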
@@ -10,7 +10,7 @@ from ..utils.develop import deprecated
 # __all__ = ['get_scalar_var']


-# this function exists for backwards-compatibilty
+# this function exists for backwards-compatibility
 def prediction_incorrect(logits, label, topk=1, name='incorrect_vector'):
     return tf.cast(tf.logical_not(tf.nn.in_top_k(logits, label, topk)),
                    tf.float32, name=name)
...
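Editor's note: since the body is fully visible above, it returns a float32 vector with 1 for each misclassified example, so a scalar error rate is just its mean. A hedged usage sketch (the tensors and the `error_rate` name below are illustrative):

```python
import tensorflow as tf

logits = tf.random_normal([8, 10])             # hypothetical batch of predictions
label = tf.constant([0] * 8, dtype=tf.int32)   # hypothetical ground-truth labels
wrong = prediction_incorrect(logits, label, topk=1)
error_rate = tf.reduce_mean(wrong, name='error_rate')  # scalar in [0, 1]
```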
@@ -201,7 +201,7 @@ class AutoResumeTrainConfig(TrainConfig):
     without changing any line of code or command line arguments.
     So it's useful to let resume take priority over user-provided arguments sometimes:
-    If your training starts from a pretrained model,
+    If your training starts from a pre-trained model,
     you would want it to use user-provided model loader at the
     beginning, but a "resume" model loader when the job was
     interrupted and restarted.
...
@@ -16,7 +16,7 @@ __all__ = ['map_arg', 'memoized', 'graph_memoized', 'shape2d', 'shape4d',
 def map_arg(**maps):
     """
-    Apply a mapping on certains argument before calling the original function.
+    Apply a mapping on certain argument before calling the original function.
     Args:
         maps (dict): {argument_name: map_func}
...
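Editor's note: a minimal reimplementation of the behavior the docstring describes, for illustration only (tensorpack's own `map_arg` may differ in details; `map_arg_sketch` and `make_kernel` are hypothetical names):

```python
import functools
import inspect

def map_arg_sketch(**maps):
    """Sketch: run map_func on the named argument, whether it was
    passed positionally or by keyword, before calling the function."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            argmap = inspect.getcallargs(func, *args, **kwargs)
            for name, map_func in maps.items():
                if name in argmap:
                    argmap[name] = map_func(argmap[name])
            return func(**argmap)
        return wrapper
    return decorator

@map_arg_sketch(shape=lambda s: [s, s] if isinstance(s, int) else s)
def make_kernel(shape):
    return shape

print(make_kernel(3))       # [3, 3]
print(make_kernel([3, 5]))  # [3, 5]
```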
@@ -25,7 +25,7 @@ class _MyFormatter(logging.Formatter):
         else:
             fmt = date + ' ' + msg
         if hasattr(self, '_style'):
-            # Python3 compatibilty
+            # Python3 compatibility
             self._style._fmt = fmt
         self._fmt = fmt
         return super(_MyFormatter, self).format(record)
...
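Editor's note: the `_style._fmt` line is needed because Python 3's `logging.Formatter` stores its format string on a style object, while Python 2 reads `self._fmt` directly. The same per-record format switching can be sketched standalone (the `LevelFormatter` class and format strings below are illustrative, not tensorpack's):

```python
import logging

class LevelFormatter(logging.Formatter):
    """Sketch: choose a format string per record, then write it through
    both the Python 3 _style object and the legacy Python 2 _fmt field."""
    def format(self, record):
        if record.levelno >= logging.WARNING:
            fmt = '%(asctime)s WRN %(message)s'
        else:
            fmt = '%(asctime)s %(message)s'
        if hasattr(self, '_style'):   # Python 3
            self._style._fmt = fmt
        self._fmt = fmt               # Python 2 fallback
        return super(LevelFormatter, self).format(record)
```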