Commit a4df1634 authored by Yuxin Wu

update logging

parent 5b019098
@@ -52,7 +52,7 @@ def print_class_histogram(roidbs):
     data = list(itertools.chain(*[[class_names[i + 1], v] for i, v in enumerate(gt_hist[1:])]))
     COL = min(6, len(data))
     total_instances = sum(data[1::2])
-    data.extend([None] * (COL - len(data) % COL))
+    data.extend([None] * ((COL - len(data) % COL) % COL))
     data.extend(["total", total_instances])
     data = itertools.zip_longest(*[data[i::COL] for i in range(COL)])
     # the first line is BG
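The extra `% COL` guards the case where `len(data)` is already a multiple of `COL`: the old expression would then append a full row of `COL` `None` placeholders instead of none. A minimal standalone sketch of the arithmetic (the values here are illustrative, not from the codebase):

# Illustrative check of the padding arithmetic (hypothetical values):
data = ["person", 3, "car", 5, "dog", 2]    # len(data) == 6, already a multiple of COL
COL = 6
old_pad = COL - len(data) % COL             # 6 - 0 == 6: a whole spurious row of None
new_pad = (COL - len(data) % COL) % COL     # 6 % 6 == 0: no padding needed
assert old_pad == 6 and new_pad == 0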
@@ -11,7 +11,7 @@ from ..tfutils.collection import backup_collection, restore_collection
 from ..tfutils.common import get_tf_version_tuple
 from ..tfutils.tower import get_current_tower_context
 from ..utils import logger
-from ..utils.argtools import get_data_format
+from ..utils.argtools import get_data_format, log_once
 from ..utils.develop import log_deprecated
 from .common import VariableHolder, layer_register
 from .tflayer import convert_to_tflayer_args, rename_get_variable
@@ -216,7 +216,7 @@ def BatchNorm(inputs, axis=None, training=None, momentum=0.9, epsilon=1e-5,
         assert TF_version >= (1, 4), \
             "Fine tuning a BatchNorm model with fixed statistics needs TF>=1.4!"
         if ctx.is_main_training_tower:  # only warn in first tower
-            logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.")
+            log_once("Some BatchNorm layer uses moving_mean/moving_variance in training.", func='warn')
         # Using moving_mean/moving_variance in training, which means we
         # loaded a pre-trained BN and only fine-tuning the affine part.
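Switching to `log_once` deduplicates the warning: `BatchNorm` can be instantiated in many layers, and `logger.warn` would repeat the message for each one. A hedged sketch of what a `log_once`-style helper can look like (the real implementation lives in `tensorpack.utils.argtools` and may differ):

import functools
import logging

logger = logging.getLogger(__name__)

@functools.lru_cache(maxsize=None)
def log_once(message, func='info'):
    # Identical (message, func) pairs are cached by lru_cache, so the line
    # is emitted only on the first call; 'func' names a logger method
    # such as 'warn' or 'info'.
    getattr(logger, func)(message)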
@@ -261,7 +261,7 @@ def find_library_full_path(name):
                 if 'lib' + name + '.so' in basename:
                     if os.path.isfile(sofile):
                         return os.path.realpath(sofile)
-        except PermissionError:
+        except (OSError, IOError):
             # can fail in certain environment (e.g. chroot)
             # if the pids are incorrectly mapped
             pass
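Broadening the clause is reasonable because in Python 3 `PermissionError` is only one subclass of `OSError` (and `IOError` is an alias of `OSError`), while scanning `/proc` can also raise `FileNotFoundError` or `ProcessLookupError` when a process exits mid-scan or the pids are mapped incorrectly, e.g. under chroot. A small sketch of the pattern, assuming a hypothetical helper that walks per-process map files:

import os

def iter_proc_maps():
    # Hypothetical helper: yield the contents of /proc/<pid>/maps for every
    # process we can actually read, skipping entries that fail at the OS level.
    for pid in os.listdir('/proc'):
        if not pid.isdigit():
            continue
        try:
            with open(os.path.join('/proc', pid, 'maps')) as f:
                yield f.read()
        except (OSError, IOError):
            # PermissionError, FileNotFoundError, and ProcessLookupError are
            # all OSError subclasses in Python 3; IOError is the same class.
            continue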