Commit 4d7f0018 authored by Yuxin Wu's avatar Yuxin Wu

update docs

parent 712fd299
......@@ -375,6 +375,7 @@ def autodoc_skip_member(app, what, name, obj, skip, options):
'DumpParamAsImage',
'StagingInputWrapper',
'PeriodicRunHooks',
'get_nr_gpu',
# deprecated or renamed symbolic code
'Deconv2D', 'LeakyReLU',
......
......@@ -28,8 +28,8 @@ The tower function needs to follow some conventions:
1. __It might get called multiple times__ for data-parallel training or inference.
2. It has to respect variable collections:
* Only put variables __trainable by gradient descent__ into `TRAINABLE_VARIABLES`.
* Put variables that need to be saved into `MODEL_VARIABLES`.
* (Required) Only put variables __trainable by gradient descent__ into `TRAINABLE_VARIABLES`.
* (Recommended) Put non-trainable variables that need to be used in inference into `MODEL_VARIABLES`.
3. It has to respect variable scopes:
* The name of any trainable variables created in the function must be like "variable_scope_name/custom/name".
Don't depend on name_scope's name. Don't use variable_scope's name twice.
......
......@@ -7,7 +7,7 @@ It also contains an implementation of the following papers:
+ [Trained Ternary Quantization](https://arxiv.org/abs/1612.01064), with (W,A,G)=(t,32,32).
+ [Binarized Neural Networks](https://arxiv.org/abs/1602.02830), with (W,A,G)=(1,1,32).
These different quantization techniques achieves the following accuracy in this implementation:
These quantization techniques achieve the following ImageNet performance in this implementation:
| Model | W,A,G | Top 1 Error |
|:-------------------|-------------|------------:|
......
# -*- coding: utf-8 -*-
# File: __init__.py
# flake8: noqa
import os as _os
......@@ -21,11 +22,7 @@ if STATICA_HACK:
from tensorpack.callbacks import *
from tensorpack.tfutils import *
# Default to v2
if _os.environ.get('TENSORPACK_TRAIN_API', 'v2') == 'v2':
from tensorpack.train import *
else:
from tensorpack.trainv1 import *
from tensorpack.graph_builder import InputDesc, ModelDesc, ModelDescBase
from tensorpack.input_source import *
from tensorpack.predict import *
# -*- coding: utf-8 -*-
# File: __init__.py
# flake8: noqa
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
......
......@@ -178,10 +178,12 @@ def enable_death_signal():
in case the parent dies accidentally.
"""
try:
import prctl
import prctl # pip install prctl-python
except ImportError:
return
else:
assert hasattr(prctl, 'set_pdeathsig'), \
"prctl.set_pdeathsig does not exist! Note that you need to install 'prctl-python' instead of 'prctl'."
# is SIGHUP a good choice?
prctl.set_pdeathsig(signal.SIGHUP)
......
......@@ -8,7 +8,7 @@ from . import logger
from .nvml import NVMLContext
from .concurrency import subproc_call
__all__ = ['change_gpu', 'get_nr_gpu']
__all__ = ['change_gpu', 'get_nr_gpu', 'get_num_gpu']
def change_gpu(val):
......@@ -22,7 +22,7 @@ def change_gpu(val):
return change_env('CUDA_VISIBLE_DEVICES', val)
def get_nr_gpu():
def get_num_gpu():
"""
Returns:
int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
......@@ -47,3 +47,6 @@ def get_nr_gpu():
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return len([x.name for x in local_device_protos if x.device_type == 'GPU'])
get_nr_gpu = get_num_gpu
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment