Commit 0641618d authored by Yuxin Wu

Drop Python2 support

parent 61663400
@@ -7,7 +7,7 @@ jobs:
     strategy:
       max-parallel: 4
       matrix:
-        python-version: [2.7, 3.6, 3.7]
+        python-version: [3.6, 3.7]
     steps:
     - uses: actions/checkout@v1
     - name: Set up Python ${{ matrix.python-version }}
@@ -23,13 +23,6 @@ jobs:
       run: |
         flake8 .
-    - name: Lint Examples
-      # some examples are py3 only
-      if: matrix.python-version != 2.7
-      run: |
-        cd examples
-        flake8 .
   unittest:
     runs-on: ubuntu-latest
     strategy:
......
sudo: false
dist: trusty
language: python
cache:
  pip: true
  apt: true
  directories:
    - $HOME/tensorpack_data
addons:
  apt:
    packages:
      - libprotobuf-dev
      - protobuf-compiler

# Test with TF 1.3-latest
matrix:
  fast_finish: true
  include:
    - os: linux
      python: 2.7
      env: TF_VERSION=1.3.0
    - os: linux
      python: 2.7
      env: TF_VERSION=1.14.0
    - os: linux
      python: 2.7
      env: TF_VERSION=nightly
  allow_failures:
    - env: TF_VERSION=nightly

install:
  - pip install -U pip  # the pip version on travis is too old
  - pip install .
  - pip install scikit-image opencv-python lmdb h5py msgpack
  # check that dataflow can be imported alone without tensorflow
  - python -c "import tensorpack.dataflow"
  - ./tests/install-tensorflow.sh

before_script:
  - protoc --version
  - python -c "import cv2; print('OpenCV '+ cv2.__version__)"
  - python -c "import tensorflow as tf; print('TensorFlow '+ tf.__version__)"
  - mkdir -p $HOME/tensorpack_data
  - export TENSORPACK_DATASET=$HOME/tensorpack_data

script:
  - $TRAVIS_BUILD_DIR/tests/run-tests.sh
  - cd $TRAVIS_BUILD_DIR  # go back to root so that deploy may work

notifications:
  email:
    recipients:
      - ppwwyyxxc@gmail.com
    on_success: never
    on_failure: always
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/cede9dbbf6630b3704b3
    on_success: change  # options: [always|never|change] default: always
    on_failure: always  # options: [always|never|change] default: always
    on_start: never  # options: [always|never|change] default: always
# see https://docs.travis-ci.com/user/deployment/pypi/
#deploy:
#- provider: pypi
#user: ppwwyyxx
#distributions: "sdist bdist_wheel"
#skip_upload_docs: true
#password:
#secure: lnNfzPFSk4HF7seuu63CoUa0g4V0JPs42H5FTzWecEIro8IqjdaAvzNKuhu9E4rkrMXPzoYYA6gC4YhseT7N/jg7lyV/Nn1koGXK1gmzu5JnXZXHw5/Ri0I3hOF3OFpEvkR39tzV0r5HsO0JIU3pl11+bBS8iloEtzdTPaUaRgyYxyZGrHl8l3VnUyM50PRnPGDouK6DHxJmknnFCOZFCrEpwN4zpOs55c1rChjJ8aql69rSsXQOUP8++jRtGwgqEvHh0K86uGm1AJUYvSWmcoD+5Urcg8LGaQzySmXtJnFgxtzsORactIEJoAteSMXufWZITn4OQ5VQkc4/CfU0HgHioQw86dpdJrfOLkhEx65JxfUsQiOTgpcTOgYYEda6+dY4cnTTpj2fASVDhQMQ/vo+Ab/W14nYG8z0IPwGJ1qDXRO6AtPD7vbah2LdHQTUTiAbbtva7NWuKbFiVxD2tdrVQHKWqmKXw+JF5F8TBDEnUxFtulW2hbM+vwu6mPxbYQZEpONjLKSa7qiZ8jQZ8cy9KatQYkqPLeGEbgI+IsmA4bnJJennToyWZ2N4W071ddtLB7hDH4ZRVdaLVYtfeKW/b/+YGX3N8p5cMKGIDjpGyF0BocFalQ7gYfg2ouAn1RyEPkCaw6ntA2uzIgvTqxU5inWJCFn20Ogst0oIaPs=
#on:
#tags: true
#branch: master
#repo: tensorpack/tensorpack
#python: "3.6"
#condition: "$PYPI = true"
#- provider: pypi
#server: https://testpypi.python.org/pypi
#user: ppwwyyxx
#distributions: "sdist bdist_wheel"
#skip_upload_docs: true
#password:
#secure: lnNfzPFSk4HF7seuu63CoUa0g4V0JPs42H5FTzWecEIro8IqjdaAvzNKuhu9E4rkrMXPzoYYA6gC4YhseT7N/jg7lyV/Nn1koGXK1gmzu5JnXZXHw5/Ri0I3hOF3OFpEvkR39tzV0r5HsO0JIU3pl11+bBS8iloEtzdTPaUaRgyYxyZGrHl8l3VnUyM50PRnPGDouK6DHxJmknnFCOZFCrEpwN4zpOs55c1rChjJ8aql69rSsXQOUP8++jRtGwgqEvHh0K86uGm1AJUYvSWmcoD+5Urcg8LGaQzySmXtJnFgxtzsORactIEJoAteSMXufWZITn4OQ5VQkc4/CfU0HgHioQw86dpdJrfOLkhEx65JxfUsQiOTgpcTOgYYEda6+dY4cnTTpj2fASVDhQMQ/vo+Ab/W14nYG8z0IPwGJ1qDXRO6AtPD7vbah2LdHQTUTiAbbtva7NWuKbFiVxD2tdrVQHKWqmKXw+JF5F8TBDEnUxFtulW2hbM+vwu6mPxbYQZEpONjLKSa7qiZ8jQZ8cy9KatQYkqPLeGEbgI+IsmA4bnJJennToyWZ2N4W071ddtLB7hDH4ZRVdaLVYtfeKW/b/+YGX3N8p5cMKGIDjpGyF0BocFalQ7gYfg2ouAn1RyEPkCaw6ntA2uzIgvTqxU5inWJCFn20Ogst0oIaPs=
#on:
#branch: test-travis
#repo: tensorpack/tensorpack
#python: "3.6"
#condition: "$PYPI = true"
@@ -66,7 +66,7 @@ demonstrating its __flexibility__ for actual research.
 Dependencies:
-+ Python 2.7 or 3.3+. Python 2.7 is supported until [it retires in 2020](https://pythonclock.org/).
++ Python 3.3+.
 + Python bindings for OpenCV. (Optional, but required by a lot of features)
 + TensorFlow ≥ 1.3, < 2. (Not required if you only want to use `tensorpack.dataflow` alone as a data processing library)
 ```
......
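The new minimum requirements above can be sanity-checked at runtime. The snippet below is an illustrative sketch, not part of tensorpack itself; it only mirrors the dependency list from the README hunk (Python 3.3+, optional TensorFlow in the 1.x series, `tensorpack.dataflow` importable without TensorFlow).

```python
# Hypothetical environment check mirroring the README's dependency list.
import sys

assert sys.version_info >= (3, 3), "tensorpack has dropped Python 2; use Python 3.3+"

import tensorpack.dataflow  # noqa: F401  -- usable without TensorFlow installed

try:
    import tensorflow as tf
    major, minor = (int(x) for x in tf.__version__.split(".")[:2])
    assert (1, 3) <= (major, minor) < (2, 0), "TensorFlow >=1.3,<2 is expected"
except ImportError:
    pass  # fine if you only use tensorpack.dataflow as a data processing library
```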
@@ -9,11 +9,13 @@ import gym
 import multiprocessing as mp
 import numpy as np
 import os
-import six
 import sys
 import uuid
 import tensorflow as tf
 from six.moves import queue
+from concurrent import futures
+CancelledError = futures.CancelledError
 
 from tensorpack import *
 from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
@@ -25,12 +27,6 @@ from atari_wrapper import FireResetEnv, FrameStack, LimitLength, MapState
 from common import Evaluator, eval_model_multithread, play_n_episodes
 from simulator import SimulatorMaster, SimulatorProcess, TransitionExperience
 
-if six.PY3:
-    from concurrent import futures
-    CancelledError = futures.CancelledError
-else:
-    CancelledError = Exception
 
 IMAGE_SIZE = (84, 84)
 FRAME_HISTORY = 4
 GAMMA = 0.99
......
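For reference, a minimal Python-3-only sketch of the pattern this hunk switches to (the function and names here are illustrative, not from the repository): `concurrent.futures` provides both the `Future` machinery and `CancelledError` directly, so no `six.PY3` branch is needed.

```python
from concurrent import futures

def work():
    return 42

# CancelledError now comes straight from concurrent.futures (Python 3 only).
with futures.ThreadPoolExecutor(max_workers=1) as pool:
    fut = pool.submit(work)
    try:
        print(fut.result())  # blocks until the result is ready -> prints 42
    except futures.CancelledError:
        print("task was cancelled before it ran")
```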
@@ -3,7 +3,6 @@
 # Author: Yuxin Wu
 
 import numpy as np
-from six.moves import range
 
 from tensorpack import ProxyDataFlow
......
@@ -6,7 +6,6 @@
 import argparse
 import os
 import tensorflow as tf
-from six.moves import range
 
 from tensorpack import *
 from tensorpack.tfutils.gradproc import GlobalNormClip, SummaryGradient
......
@@ -9,9 +9,7 @@ import operator
 import os
 import sys
 from collections import Counter
-import six
 import tensorflow as tf
-from six.moves import range
 
 from tensorpack import *
 from tensorpack.tfutils import optimizer, summary
@@ -44,8 +42,6 @@ class CharRNNData(RNGDataFlow):
         # preprocess data
         with open(input_file, 'rb') as f:
             data = f.read()
-        if six.PY2:
-            data = bytearray(data)
         data = [chr(c) for c in data if c < 128]
         counter = Counter(data)
         char_cnt = sorted(counter.items(), key=operator.itemgetter(1), reverse=True)
......
@@ -11,7 +11,6 @@ import six
 from atari_py.ale_python_interface import ALEInterface
 from gym import spaces
 from gym.envs.atari.atari_env import ACTION_MEANING
-from six.moves import range
 
 from tensorpack.utils import logger, execute_only_once, get_rng
 from tensorpack.utils.fs import get_dataset_path
......
@@ -8,7 +8,6 @@ import numpy as np
 import cv2
 import tensorflow as tf
 from scipy.signal import convolve2d
-from six.moves import range, zip
 
 from tensorpack import *
 from tensorpack.dataflow import dataset
......
@@ -16,8 +16,7 @@ with the support of:
 This is likely the best-performing open source TensorFlow reimplementation of the above papers.
 
 ## Dependencies
-+ Python 3.3+; OpenCV
-+ TensorFlow ≥ 1.6
++ OpenCV, TensorFlow ≥ 1.6
 + pycocotools/scipy: `for i in cython 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' scipy; do pip install $i; done`
 + Pre-trained [ImageNet ResNet model](http://models.tensorpack.com/#FasterRCNN)
   from tensorpack model zoo
......
@@ -7,11 +7,8 @@ import os
 import shutil
 import tensorflow as tf
 import cv2
-import six
 import tqdm
 
-assert six.PY3, "This example requires Python 3!"
-
 import tensorpack.utils.viz as tpviz
 from tensorpack.predict import MultiTowerOfflinePredictor, OfflinePredictor, PredictConfig
 from tensorpack.tfutils import SmartInit, get_tf_version_tuple
......
@@ -3,8 +3,6 @@
 # File: train.py
 
 import argparse
-import six
-assert six.PY3, "This example requires Python 3!"
 
 from tensorpack import *
 from tensorpack.tfutils import collect_env_info
......
@@ -2,7 +2,6 @@
 # File: viz.py
 
 import numpy as np
-from six.moves import zip
 
 from tensorpack.utils import viz
 from tensorpack.utils.palette import PALETTE_RGB
......
@@ -7,7 +7,6 @@ import argparse
 import numpy as np
 import os
 import tensorflow as tf
-from six.moves import map, zip
 
 from tensorpack import *
 from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
......
@@ -8,7 +8,6 @@ import numpy as np
 import os
 import cv2
 import tensorflow as tf
-from six.moves import zip
 
 from tensorpack import *
 from tensorpack.dataflow import dataset
......
@@ -61,14 +61,11 @@ setup(
         "msgpack-numpy>=0.4.4.2",
         "pyzmq>=16",
         "psutil>=5",
-        "subprocess32; python_version < '3.0'",
-        "functools32; python_version < '3.0'",
     ],
     tests_require=['flake8', 'scikit-image'],
     extras_require={
         'all': ['scipy', 'h5py', 'lmdb>=0.92', 'matplotlib', 'scikit-learn'],
         'all: "linux" in sys_platform': ['python-prctl'],
-        'all: python_version < "3.0"': ['tornado'],
     },
     # https://packaging.python.org/guides/distributing-packages-using-setuptools/#universal-wheels
......
@@ -6,7 +6,6 @@
 import numpy as np
 import os
-from six.moves import zip
 
 from ..compat import tfv1 as tf
 from ..tfutils.common import get_op_tensor_name
......
@@ -4,8 +4,7 @@
 import traceback
 from contextlib import contextmanager
-from time import time as timer
-import six
+from time import perf_counter as timer  # noqa
 
 from ..compat import tfv1 as tf
 from ..utils import logger
@@ -13,9 +12,6 @@ from ..utils.utils import humanize_time_delta
 from .base import Callback
 from .hooks import CallbackToHook
 
-if six.PY3:
-    from time import perf_counter as timer  # noqa
 
 __all__ = ['Callbacks']
......
@@ -5,7 +5,6 @@
 import numpy as np
 from abc import ABCMeta
 import six
-from six.moves import zip
 
 from ..tfutils.common import get_op_tensor_name
 from ..utils import logger
......
@@ -6,7 +6,6 @@ import itertools
 import sys
 from contextlib import contextmanager
 import tqdm
-from six.moves import range
 
 from tensorflow.python.training.monitored_session import _HookedSession as HookedSession
 
 from ..compat import tfv1 as tf
......
 # -*- coding: utf-8 -*-
 
 import unittest
 import tensorflow as tf
-import six
 
 from ..utils import logger
 from ..train.trainers import NoOpTrainer
@@ -74,7 +73,6 @@ class ScheduledHyperParamSetterTest(unittest.TestCase):
         history = self._create_trainer_with_scheduler(scheduler, 1, 92, starting_epoch=90)
         self.assertEqual(len(history), 0)
 
-    @unittest.skipIf(six.PY2, "assertLogs not supported in Python 2")
     def testWarningStartInTheMiddle(self):
         scheduler = ScheduledHyperParamSetter(
             ObjAttrParam(self._param_obj, ParamObject.PARAM_NAME),
@@ -82,7 +80,6 @@ class ScheduledHyperParamSetterTest(unittest.TestCase):
         with self.assertLogs(logger=logger._logger, level='WARNING'):
             self._create_trainer_with_scheduler(scheduler, 1, 21, starting_epoch=20)
 
-    @unittest.skipIf(six.PY2, "unittest.mock not available in Python 2")
     def testNoWarningStartInTheMiddle(self):
         scheduler = ScheduledHyperParamSetter(
             ObjAttrParam(self._param_obj, ParamObject.PARAM_NAME),
......
@@ -9,7 +9,6 @@ from collections import defaultdict, deque
 from copy import copy
 import six
 import tqdm
-from six.moves import map, range
 from termcolor import colored
 
 from ..utils import logger
......
@@ -3,7 +3,6 @@
 import os
-from six.moves import range
 
 from ...utils import logger
 from ...utils.fs import download, get_dataset_path
......
@@ -7,8 +7,6 @@ import numpy as np
 import os
 import pickle
 import tarfile
-import six
-from six.moves import range
 
 from ...utils import logger
 from ...utils.fs import download, get_dataset_path
@@ -44,10 +42,7 @@ def read_cifar(filenames, cifar_classnum):
     ret = []
     for fname in filenames:
         fo = open(fname, 'rb')
-        if six.PY3:
-            dic = pickle.load(fo, encoding='bytes')
-        else:
-            dic = pickle.load(fo)
+        dic = pickle.load(fo, encoding='bytes')
         data = dic[b'data']
         if cifar_classnum == 10:
             label = dic[b'labels']
......
@@ -5,7 +5,6 @@
 import gzip
 import numpy
 import os
-from six.moves import range
 
 from ...utils import logger
 from ...utils.fs import download, get_dataset_path
......
@@ -5,7 +5,6 @@
 import numpy as np
 import os
 import six
-from six.moves import range
 
 from ..utils import logger
 from ..utils.argtools import log_once
......
@@ -6,7 +6,6 @@ import inspect
 import pprint
 from collections import namedtuple
 import weakref
-import six
 
 from ...utils.argtools import log_once
 from ...utils.utils import get_rng
@@ -34,17 +33,6 @@ def _default_repr(self):
     It assumes that the instance `self` contains attributes that match its constructor.
     """
     classname = type(self).__name__
-    if six.PY2:
-        argspec = inspect.getargspec(self.__init__)
-        assert argspec.varargs is None, "The default __repr__ in {} doesn't work for varargs!".format(classname)
-        assert argspec.keywords is None, "The default __repr__ in {} doesn't work for kwargs!".format(classname)
-        fields = argspec.args[1:]
-        defaults = {}
-        defaults_list = argspec.defaults
-        if defaults_list is not None:
-            for f, d in zip(fields[::-1], defaults_list[::-1]):
-                defaults[f] = d
-    else:
-        argspec = inspect.getfullargspec(self.__init__)
-        assert argspec.varargs is None, "The default __repr__ in {} doesn't work for varargs!".format(classname)
-        assert argspec.varkw is None, "The default __repr__ in {} doesn't work for kwargs!".format(classname)
+    argspec = inspect.getfullargspec(self.__init__)
+    assert argspec.varargs is None, "The default __repr__ in {} doesn't work for varargs!".format(classname)
+    assert argspec.varkw is None, "The default __repr__ in {} doesn't work for kwargs!".format(classname)
......
@@ -5,7 +5,6 @@
 import copy
 import numpy as np
 import six
-from six.moves import range
 
 from .base import DataFlow, RNGDataFlow
......
@@ -6,7 +6,6 @@ import multiprocessing as mp
 import time
 from collections import deque
 import tqdm
-from six.moves import range
 
 from ..utils import logger
 from ..utils.concurrency import DIE
......
@@ -3,7 +3,6 @@
 import re
 import tensorflow as tf
-from six.moves import range
 
 from ..tfutils.common import get_global_step_var, get_op_tensor_name
 from ..utils import logger
......
@@ -8,7 +8,6 @@ from abc import ABCMeta, abstractmethod
 from contextlib import contextmanager
 import six
 import tensorflow as tf
-from six.moves import range, zip
 
 from ..compat import tfv1
 from ..tfutils.common import get_tf_version_tuple
......
@@ -6,7 +6,6 @@ import threading
 from contextlib import contextmanager
 from itertools import chain
 import tensorflow as tf
-from six.moves import range, zip
 
 from ..compat import tfv1
 from ..callbacks.base import Callback, CallbackFactory
......
@@ -6,7 +6,6 @@ from abc import ABCMeta, abstractmethod
 from contextlib import contextmanager
 import six
 import tensorflow as tf
-from six.moves import zip
 
 from ..callbacks.base import CallbackFactory
 from ..tfutils.common import get_op_tensor_name
......
@@ -3,7 +3,6 @@
 import re
-import six
 
 from ..compat import tfv1 as tf  # this should be avoided first in model code
 from tensorflow.python.training import moving_averages
@@ -292,7 +291,7 @@ def BatchNorm(inputs, axis=None, training=None, momentum=0.9, epsilon=1e-5,
         if num_dev == 1:
             logger.warn("BatchNorm(sync_statistics='nccl') is used with only one tower!")
         else:
-            assert six.PY2 or TF_version >= (1, 10), \
+            assert TF_version >= (1, 10), \
                 "Cross-GPU BatchNorm is only supported in TF>=1.10 ." \
                 "Upgrade TF or apply this patch manually: https://github.com/tensorflow/tensorflow/pull/20360"
......
@@ -71,7 +71,6 @@ class AsyncPredictorBase(PredictorBase):
         """ Start workers """
 
     def _do_call(self, dp):
-        assert six.PY3, "With Python2, sync methods not available for async predictor"
         fut = self.put_task(dp)
         # in Tornado, Future.result() doesn't wait
         return fut.result()
......
@@ -4,7 +4,7 @@
 import multiprocessing
 import numpy as np
-import six
+from concurrent.futures import Future
 import tensorflow as tf
 from six.moves import queue, range
@@ -154,12 +154,6 @@ class MultiThreadAsyncPredictor(AsyncPredictorBase):
                 self.input_queue, f, id, batch_size=batch_size)
             for id, f in enumerate(predictors)]
 
-        if six.PY2:
-            # TODO XXX set logging here to avoid affecting TF logging
-            import tornado.options as options
-            options.parse_command_line(['--logging=debug'])
-            logger.warn("MultiThreadAsyncPredictor is inefficient in Python 2! Switch to Python 3 instead.")
 
     def start(self):
         if self._need_default_sess:
             assert tfv1.get_default_session() is not None, \
@@ -183,13 +177,3 @@ class MultiThreadAsyncPredictor(AsyncPredictorBase):
         f.add_done_callback(callback)
         self.input_queue.put((dp, f))
         return f
-
-try:
-    if six.PY2:
-        from tornado.concurrent import Future
-    else:
-        from concurrent.futures import Future
-except ImportError:
-    from ..utils.develop import create_dummy_class
-    MultiThreadAsyncPredictor = create_dummy_class('MultiThreadAsyncPredictor', 'tornado.concurrent')  # noqa
@@ -6,7 +6,6 @@ import multiprocessing
 import os
 from abc import ABCMeta, abstractmethod
 import six
-from six.moves import range, zip
 
 from ..dataflow import DataFlow
 from ..dataflow.remote import dump_dataflow_to_process_queue
......
@@ -119,9 +119,6 @@ class MapGradient(GradientProcessor):
                 If it return None, the gradient is discarded (hence no update to the variable will happen).
             regex (str): used to match variables. Defaults to match all variables.
         """
-        if six.PY2:
-            args = inspect.getargspec(func).args
-        else:
-            args = inspect.getfullargspec(func).args
+        args = inspect.getfullargspec(func).args
         arg_num = len(args) - inspect.ismethod(func)
         assert arg_num in [1, 2], \
......
@@ -5,8 +5,6 @@
 from abc import ABCMeta, abstractmethod, abstractproperty
 import six
-from six.moves import zip
-
 from ..compat import tfv1 as tf
 from ..utils import logger
 from ..utils.argtools import call_only_once
......
-#-*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
-import six
 import unittest
 import tensorflow as tf
@@ -21,7 +20,6 @@ class ScopeUtilsTest(unittest.TestCase):
         with self.assertRaises(AssertionError):
             self._f()  # name conflict
 
-    @unittest.skipIf(six.PY2, "assertLogs not supported in Python 2")
     def test_under_name_scope_warning(self):
         x = tf.placeholder(tf.float32, [3])
         tf.nn.relu(x, name='s')
......
@@ -4,9 +4,7 @@
 import copy
 import time
 import weakref
-import six
 import tensorflow as tf
-from six.moves import range
 
 from ..compat import tfv1
 from ..callbacks import Callback, Callbacks, Monitors, MonitorBase
@@ -360,7 +358,6 @@ def _get_property(name):
     """
     ret = property(
         lambda self: getattr(self.loop, name))
-    if six.PY3:  # __doc__ is readonly in Py2
-        try:
-            ret.__doc__ = getattr(TrainLoop, name).__doc__
-        except AttributeError:
+    try:
+        ret.__doc__ = getattr(TrainLoop, name).__doc__
+    except AttributeError:
......
@@ -3,15 +3,10 @@
 import inspect
-import six
+import functools
 
 from . import logger
 
-if six.PY2:
-    import functools32 as functools
-else:
-    import functools
 
 __all__ = ['map_arg', 'memoized', 'memoized_method', 'graph_memoized', 'shape2d', 'shape4d',
            'memoized_ignoreargs', 'log_once']
@@ -26,13 +21,10 @@ def map_arg(**maps):
     def deco(func):
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
-            if six.PY2:
-                argmap = inspect.getcallargs(func, *args, **kwargs)
-            else:
-                # getcallargs was deprecated since 3.5
-                sig = inspect.signature(func)
-                argmap = sig.bind_partial(*args, **kwargs).arguments
+            # getcallargs was deprecated since 3.5
+            sig = inspect.signature(func)
+            argmap = sig.bind_partial(*args, **kwargs).arguments
-            for k, map_func in six.iteritems(maps):
+            for k, map_func in maps.items():
                 if k in argmap:
                     argmap[k] = map_func(argmap[k])
             return func(**argmap)
......
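The Python-3-only replacement in the hunk above can be illustrated with a small standalone sketch (the function and argument names here are made up for the example): `inspect.signature(...).bind_partial(...).arguments` yields the same name-to-value mapping that the deprecated `inspect.getcallargs` used to produce.

```python
import inspect

def resize(img, shape, interp="bilinear"):
    return img, shape, interp

# Map the arguments of a call onto the function's parameter names,
# the same way map_arg's wrapper does after this commit.
sig = inspect.signature(resize)
argmap = sig.bind_partial("some_image", (224, 224)).arguments
print(dict(argmap))  # {'img': 'some_image', 'shape': (224, 224)}

# Individual arguments can then be rewritten before the real call:
argmap["shape"] = tuple(2 * s for s in argmap["shape"])
print(resize(**argmap))  # ('some_image', (448, 448), 'bilinear')
```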
@@ -14,15 +14,11 @@ import weakref
 from contextlib import contextmanager
 import six
 from six.moves import queue
+import subprocess
 
 from . import logger
 from .argtools import log_once
 
-if six.PY2:
-    import subprocess32 as subprocess
-else:
-    import subprocess
 
 __all__ = ['StoppableThread', 'LoopThread', 'ShareSessionThread',
            'ensure_proc_terminate',
......
@@ -127,7 +127,6 @@ def get_caffe_pb():
         download(CAFFE_PROTO_URL, dir)
         assert os.path.isfile(os.path.join(dir, 'caffe.proto'))
 
-        if sys.version_info.major == 3:
-            cmd = "protoc --version"
-            version, ret = subproc_call(cmd, timeout=3)
-            if ret != 0:
+        cmd = "protoc --version"
+        version, ret = subproc_call(cmd, timeout=3)
+        if ret != 0:
......
@@ -5,15 +5,11 @@
 import atexit
 from collections import defaultdict
 from contextlib import contextmanager
-from time import time as timer
-import six
+from time import perf_counter as timer  # noqa
 
 from . import logger
 from .stats import StatCounter
 
-if six.PY3:
-    from time import perf_counter as timer  # noqa
 
 __all__ = ['timed_operation', 'IterSpeedCounter', 'Timer']
@@ -68,7 +64,7 @@ def print_total_timer():
     """
     if len(_TOTAL_TIMER_DATA) == 0:
         return
-    for k, v in six.iteritems(_TOTAL_TIMER_DATA):
+    for k, v in _TOTAL_TIMER_DATA.items():
         logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format(
             k, v.sum, v.count, v.average))
......
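A brief aside on the timer swap in the hunk above, with an illustrative snippet (not from the repository): `time.perf_counter()` is a monotonic, high-resolution clock, so it is a safer default for measuring elapsed time than `time.time()`, which can jump when the system clock is adjusted.

```python
from time import perf_counter as timer, sleep

start = timer()
sleep(0.1)                   # stand-in for real work
elapsed = timer() - start    # monotonic: never goes backwards
print("took {:.3f} sec".format(elapsed))
```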