Commit 2fa49895 authored by Yuxin Wu

Use stricter linter in examples. Clean-up imports

parent 8bc909be
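Note on the pattern repeated throughout this diff: with F401 (unused import) and F841 (unused local variable) removed from the flake8 ignore list (see the config change at the bottom), unused imports are dropped and assignments whose name is never read lose the assignment while keeping the named op in the graph. A minimal illustrative sketch, not taken from any single file in the diff:

```python
import tensorflow as tf

def build_head(logits):
    # Before: F841 flags `prob`, because the Python name is never used;
    # the tensor only needs to exist in the graph under the name 'prob'.
    #   prob = tf.nn.softmax(logits, name='prob')

    # After: keep the named op, drop the unused assignment.
    tf.nn.softmax(logits, name='prob')
    return logits
```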
......@@ -3,7 +3,6 @@
# File: simulator.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import tensorflow as tf
import multiprocessing as mp
import time
import os
......@@ -15,9 +14,6 @@ import six
from six.moves import queue
import zmq
from tensorpack.callbacks import Callback
from tensorpack.tfutils.varmanip import SessionUpdate
from tensorpack.predict import OfflinePredictor
from tensorpack.utils import logger
from tensorpack.utils.serialize import loads, dumps
from tensorpack.utils.concurrency import LoopThread, ensure_proc_terminate
......
......@@ -5,13 +5,8 @@
import numpy as np
import os
import sys
import time
import random
import uuid
import argparse
import multiprocessing
import threading
import cv2
import tensorflow as tf
......@@ -20,18 +15,16 @@ from six.moves import queue
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.serialize import *
from tensorpack.utils.stats import *
from tensorpack.utils.concurrency import ensure_proc_terminate, start_proc_mask_signal
from tensorpack.utils.serialize import dumps
from tensorpack.tfutils import symbolic_functions as symbf
from tensorpack.tfutils.gradproc import MapGradient, SummaryGradient
from tensorpack.utils.gpu import get_nr_gpu
import gym
from simulator import *
from common import (Evaluator, eval_model_multithread,
play_one_episode, play_n_episodes)
from simulator import SimulatorProcess, SimulatorMaster, TransitionExperience
from common import Evaluator, eval_model_multithread, play_n_episodes
from atari_wrapper import MapState, FrameStack, FireResetEnv, LimitLength
if six.PY3:
......
......@@ -2,16 +2,16 @@
# -*- coding: utf-8 -*-
# File: create-lmdb.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import sys
import os
import scipy.io.wavfile as wavfile
import string
import numpy as np
import argparse
from tensorpack import *
from tensorpack.dataflow import dftools, DataFlow, LMDBDataPoint
from tensorpack.utils.argtools import memoized
from tensorpack.utils.stats import OnlineMoments
from tensorpack.utils import serialize, fs, logger
from tensorpack.utils.utils import get_tqdm
import bob.ap
......@@ -34,6 +34,7 @@ def read_timit_txt(f):
line = line.replace('.', '').lower()
line = filter(lambda c: c in CHARSET, line)
f.close()
ret = []
for c in line:
ret.append(WORD_DIC[c])
return np.asarray(ret)
......
......@@ -3,20 +3,13 @@
# File: train-timit.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
import os
import sys
import argparse
from collections import Counter
import operator
import six
from six.moves import map, range
from six.moves import range
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.gradproc import SummaryGradient, GlobalNormClip
from tensorpack.utils.globvars import globalns as param
import tensorpack.tfutils.symbolic_functions as symbf
import tensorflow as tf
from timitdata import TIMITBatch
......
......@@ -10,7 +10,7 @@ import argparse
from collections import Counter
import operator
import six
from six.moves import map, range
from six.moves import range
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
......@@ -96,7 +96,7 @@ class Model(ModelDesc):
# seqlen x (Bxrnnsize)
output = tf.reshape(tf.concat(outputs, 1), [-1, param.rnn_size]) # (Bxseqlen) x rnnsize
logits = FullyConnected('fc', output, param.vocab_size, nl=tf.identity)
prob = tf.nn.softmax(logits / param.softmax_temprature, name='prob')
tf.nn.softmax(logits / param.softmax_temprature, name='prob')
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=tf.reshape(nextinput, [-1]))
......
......@@ -33,7 +33,6 @@ def colorize(img, heatmap):
@memoized
def get_gaussian_map():
sigma = 21
gaussian_map = np.zeros((368, 368), dtype='float32')
for x_p in range(368):
for y_p in range(368):
......@@ -93,9 +92,8 @@ def CPM(image):
out3 = add_stage(3, out2)
out4 = add_stage(4, out3)
out5 = add_stage(5, out4)
out6 = add_stage(6, out4)
resized_map = tf.image.resize_bilinear(out6,
[368, 368], name='resized_map')
out6 = add_stage(6, out5)
tf.image.resize_bilinear(out6, [368, 368], name='resized_map')
def run_test(model_path, img_file):
......
......@@ -3,24 +3,13 @@
# File: DQN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
import os
import sys
import re
import time
import random
import argparse
import subprocess
import multiprocessing
import threading
from collections import deque
import cv2
import tensorflow as tf
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.utils.concurrency import *
import tensorflow as tf
from DQNModel import Model as DQNModel
from common import Evaluator, eval_model_multithread, play_n_episodes
......
......@@ -9,8 +9,7 @@ import tensorpack
from tensorpack import ModelDesc, InputDesc
from tensorpack.utils import logger
from tensorpack.tfutils import (
collection, summary, get_current_tower_context, optimizer, gradproc)
from tensorpack.tfutils import symbolic_functions as symbf
summary, get_current_tower_context, optimizer, gradproc)
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
assert tensorpack.tfutils.common.get_tf_version_number() >= 1.2
......
......@@ -2,7 +2,7 @@
[video demo](https://youtu.be/o21mddZtE5Y)
Reproduce the following reinforcement learning methods:
Reproduce (performance of) the following reinforcement learning methods:
+ Nature-DQN in:
[Human-level Control Through Deep Reinforcement Learning](http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html)
......
......@@ -13,7 +13,6 @@ from six.moves import range
from tensorpack.utils import logger
from tensorpack.utils.utils import get_rng, execute_only_once
from tensorpack.utils.fs import get_dataset_path
from tensorpack.utils.stats import StatCounter
import gym
from gym import spaces
......@@ -157,7 +156,7 @@ class AtariPlayer(gym.Env):
(self.live_lost_as_eoe and newlives < oldlives):
break
trueIsOver = isOver = self.ale.game_over()
isOver = self.ale.game_over()
if self.live_lost_as_eoe:
isOver = isOver or newlives < oldlives
......
......@@ -3,7 +3,6 @@
# File: atari_wrapper.py
import numpy as np
import cv2
from collections import deque
import gym
......
......@@ -7,7 +7,6 @@ import numpy as np
import copy
from collections import deque, namedtuple
import threading
import six
from six.moves import queue, range
from tensorpack.dataflow import DataFlow
......
......@@ -3,9 +3,7 @@
# File: mnist-disturb.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
import os
import sys
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
......@@ -43,7 +41,7 @@ class Model(mnist_example.Model):
.MaxPooling('pool1', 2)
.FullyConnected('fc0', 512, nl=tf.nn.relu)
.FullyConnected('fc1', out_dim=10, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='prob')
tf.nn.softmax(logits, name='prob')
wrong = symbolic_functions.prediction_incorrect(logits, label)
add_moving_summary(tf.reduce_mean(wrong, name='train_error'))
......
......@@ -4,7 +4,6 @@
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import argparse
import numpy as np
import os
import imp
......
......@@ -7,14 +7,13 @@ import cv2
import tensorflow as tf
import argparse
import numpy as np
import multiprocessing
import os
import sys
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.symbolic_functions import prediction_incorrect
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tensorpack.tfutils.varreplace import remap_variables
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
......@@ -86,8 +85,6 @@ class Model(ModelDesc):
fw, fa, fg = get_dorefa(BITW, BITA, BITG)
old_get_variable = tf.get_variable
# monkey-patch tf.get_variable to apply fw
def new_get_variable(v):
name = v.op.name
......@@ -146,7 +143,7 @@ class Model(ModelDesc):
.apply(nonlin)
.FullyConnected('fct', 1000, use_bias=True)())
prob = tf.nn.softmax(logits, name='output')
tf.nn.softmax(logits, name='output')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
......
......@@ -7,12 +7,9 @@ import tensorflow as tf
import argparse
import numpy as np
import os
import sys
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.utils.stats import RatioCounter
from tensorpack.tfutils.varreplace import remap_variables
from imagenet_utils import ImageNetModel, eval_on_ILSVRC12, fbresnet_augmentor
......@@ -44,7 +41,6 @@ class Model(ModelDesc):
image = image / 256.0
fw, fa, fg = get_dorefa(BITW, BITA, BITG)
old_get_variable = tf.get_variable
def new_get_variable(v):
name = v.op.name
......@@ -111,7 +107,7 @@ class Model(ModelDesc):
.GlobalAvgPooling('gap')
.tf.multiply(49) # this is due to a bug in our model design
.FullyConnected('fct', 1000)())
prob = tf.nn.softmax(logits, name='output')
tf.nn.softmax(logits, name='output')
ImageNetModel.compute_loss_and_error(logits, label)
......
......@@ -4,13 +4,12 @@
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import argparse
import numpy as np
import os
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.symbolic_functions import prediction_incorrect
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tensorpack.dataflow import dataset
from tensorpack.tfutils.varreplace import remap_variables
import tensorflow as tf
......@@ -56,8 +55,6 @@ class Model(ModelDesc):
fw, fa, fg = get_dorefa(BITW, BITA, BITG)
old_get_variable = tf.get_variable
# monkey-patch tf.get_variable to apply fw
def binarize_weight(v):
name = v.op.name
......@@ -112,7 +109,7 @@ class Model(ModelDesc):
.apply(fg).BatchNorm('bn6')
.apply(cabs)
.FullyConnected('fc1', 10, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='output')
tf.nn.softmax(logits, name='output')
# compute the number of failed samples
wrong = prediction_incorrect(logits, label)
......
# Faster-RCNN on COCO
This example aims to provide a minimal (1.2k lines) multi-GPU implementation of ResNet50-Faster-RCNN on COCO.
This example aims to provide a minimal (1.2k lines) multi-GPU implementation of ResNet-Faster-RCNN on COCO.
## Dependencies
+ TensorFlow >= 1.4.0
......
......@@ -3,12 +3,9 @@
# File: basemodel.py
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.summary import add_moving_summary, add_activation_summary
from tensorpack.tfutils.argscope import argscope, get_arg_scope
from tensorpack.models import (
Conv2D, MaxPooling, BatchNorm, BNReLU, GlobalAvgPooling, FullyConnected)
Conv2D, MaxPooling, BatchNorm, BNReLU)
def image_preprocess(image, bgr=True):
......
......@@ -4,11 +4,9 @@
import numpy as np
import os
import six
from termcolor import colored
from tabulate import tabulate
from tensorpack.dataflow import DataFromList
from tensorpack.utils import logger
from tensorpack.utils.rect import FloatBox
from tensorpack.utils.timer import timed_operation
......
......@@ -3,17 +3,13 @@
# File: data.py
import cv2
import os
import numpy as np
import logging
from tensorpack.utils import logger
from tensorpack.utils.argtools import memoized, log_once
from tensorpack.dataflow import (
MapData, imgaug, TestDataSpeed,
MapDataComponent, DataFromList, PrefetchDataZMQ)
import tensorpack.utils.viz as tpviz
from tensorpack.utils.viz import interactive_imshow
# import tensorpack.utils.viz as tpviz
from coco import COCODetection
from utils.generate_anchors import generate_anchors
......@@ -121,7 +117,6 @@ def get_anchor_labels(anchors, gt_boxes, crowd_boxes):
anchor_labels[overlap_with_crowd] = -1
# Filter fg labels: ignore some fg if fg is too many
old_num_fg = np.sum(anchor_labels == 1)
target_num_fg = int(config.RPN_BATCH_PER_IM * config.RPN_FG_RATIO)
fg_inds = filter_box_label(anchor_labels, 1, target_num_fg)
# Note that fg could be fewer than the target ratio
......@@ -133,7 +128,7 @@ def get_anchor_labels(anchors, gt_boxes, crowd_boxes):
# This can happen if, e.g. the image has large crowd.
raise MalformedData("No valid foreground/background for RPN!")
target_num_bg = config.RPN_BATCH_PER_IM - len(fg_inds)
bg_inds = filter_box_label(anchor_labels, 0, target_num_bg)
filter_box_label(anchor_labels, 0, target_num_bg) # ignore return values
# Set anchor boxes: the best gt_box for each fg anchor
anchor_boxes = np.zeros((NA, 4), dtype='float32')
......
......@@ -2,23 +2,17 @@
# -*- coding: utf-8 -*-
# File: eval.py
import numpy as np
import tqdm
import cv2
import six
import os
from collections import namedtuple, defaultdict
from collections import namedtuple
import tensorflow as tf
from tensorpack.dataflow import MapDataComponent, TestDataSpeed
from tensorpack.tfutils import get_default_sess_config
from tensorpack.utils.utils import get_tqdm_kwargs
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from coco import COCODetection, COCOMeta
from common import clip_boxes, DataFromListOfDict, CustomResize
from coco import COCOMeta
from common import clip_boxes, CustomResize
import config
DetectionResult = namedtuple(
......
......@@ -2,7 +2,6 @@
# -*- coding: utf-8 -*-
# File: model.py
import numpy as np
import tensorflow as tf
from tensorpack.tfutils import get_current_tower_context
from tensorpack.tfutils.summary import add_moving_summary
......@@ -206,7 +205,6 @@ def generate_rpn_proposals(boxes, scores, img_shape):
topk_valid_boxes,
nms_indices, name='boxes')
final_scores = tf.gather(topk_valid_scores, nms_indices, name='scores')
final_probs = tf.gather(topk_valid_scores, nms_indices, name='probs')
return final_boxes, final_scores
......@@ -436,7 +434,6 @@ def fastrcnn_predictions(boxes, probs):
"""
assert boxes.shape[1] == config.NUM_CLASS - 1
assert probs.shape[1] == config.NUM_CLASS
N = tf.shape(boxes)[0]
boxes = tf.transpose(boxes, [1, 0, 2]) # #catxnx4
probs = tf.transpose(probs[:, 1:], [1, 0]) # #catxn
......
......@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
# File: train.py
import sys, os
import os
import argparse
import cv2
import shutil
......@@ -16,7 +16,7 @@ import tensorflow as tf
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils import optimizer, gradproc
from tensorpack.tfutils import optimizer
import tensorpack.utils.viz as tpviz
from tensorpack.utils.gpu import get_nr_gpu
......@@ -35,7 +35,7 @@ from data import (
from viz import (
draw_annotation, draw_proposal_recall,
draw_predictions, draw_final_outputs)
from common import clip_boxes, CustomResize, print_config
from common import print_config
from eval import (
eval_on_dataflow, detect_one_image, print_evaluation_scores)
import config
......@@ -145,8 +145,8 @@ class Model(ModelDesc):
# indices: Nx2. Each index into (#proposal, #category)
pred_indices, final_probs = fastrcnn_predictions(decoded_boxes, label_probs)
final_probs = tf.identity(final_probs, 'final_probs')
final_boxes = tf.gather_nd(decoded_boxes, pred_indices, name='final_boxes')
final_labels = tf.add(pred_indices[:, 1], 1, name='final_labels')
tf.gather_nd(decoded_boxes, pred_indices, name='final_boxes')
tf.add(pred_indices[:, 1], 1, name='final_labels')
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
......
......@@ -4,7 +4,6 @@
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
......
......@@ -6,13 +6,12 @@
import numpy as np
import tensorflow as tf
import os
import sys
import cv2
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.utils.viz import *
from tensorpack.utils.viz import interactive_imshow, stack_patches
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.dataflow import dataset
......
......@@ -3,16 +3,13 @@
# File: CycleGAN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os, sys
import os
import argparse
import glob
from six.moves import map, zip, range
import numpy as np
from six.moves import range
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.utils.viz import *
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
import tensorflow as tf
......
......@@ -5,13 +5,12 @@
import glob
import numpy as np
import os, sys
import os
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.utils.viz import *
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.utils.globvars import globalns as opt
import tensorflow as tf
......@@ -127,11 +126,11 @@ def sample(model, model_path, output_name='gen/gen'):
output_names=[output_name, 'z'])
pred = SimpleDatasetPredictor(pred, RandomZData((100, opt.Z_DIM)))
for o in pred.get_result():
o, zs = o[0] + 1, o[1]
o = o[0] + 1
o = o * 128.0
o = np.clip(o, 0, 255)
o = o[:, :, :, ::-1]
viz = stack_patches(o, nr_row=10, nr_col=10, viz=True)
stack_patches(o, nr_row=10, nr_col=10, viz=True)
def get_args():
......
......@@ -3,15 +3,13 @@
# File: DiscoGAN-CelebA.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os, sys
import os
import argparse
from six.moves import map, zip
import numpy as np
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.utils.viz import *
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
import tensorflow as tf
......
......@@ -5,8 +5,7 @@
import tensorflow as tf
import numpy as np
import time
from tensorpack import (TowerTrainer, QueueInput,
from tensorpack import (TowerTrainer,
ModelDescBase, DataFlow, StagingInput)
from tensorpack.tfutils.tower import TowerContext, TowerFuncWrapper
from tensorpack.graph_builder import DataParallelBuilder, LeastLoadedDeviceSetter
......
......@@ -7,17 +7,14 @@ import cv2
import numpy as np
import tensorflow as tf
import glob
import pickle
import os
import sys
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.utils.viz import *
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
import tensorpack.tfutils.symbolic_functions as symbf
from GAN import GANTrainer, GANModelDesc
"""
......
......@@ -4,7 +4,6 @@
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
......
......@@ -7,7 +7,6 @@ import cv2
import numpy as np
import tensorflow as tf
import os
import sys
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
......@@ -16,7 +15,6 @@ from tensorpack.utils import viz
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope, under_name_scope
from tensorpack.tfutils import optimizer, summary
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.gradproc import ScaleGradient
from tensorpack.dataflow import dataset
from GAN import GANTrainer, GANModelDesc
......@@ -149,8 +147,6 @@ class Model(GANModelDesc):
of P, and whose parameters are predicted by the discriminator network.
"""
with tf.name_scope("mutual_information"):
batch_prior = tf.tile(tf.expand_dims(DIST_PRIOR_PARAM, 0), [BATCH, 1], name='batch_prior')
with tf.name_scope('prior_entropy'):
cat, uni = get_distributions(DIST_PRIOR_PARAM[:NUM_CLASS], DIST_PRIOR_PARAM[NUM_CLASS:])
ents = [cat.entropy(name='cat_entropy'), tf.reduce_sum(uni.entropy(), name='uni_entropy')]
......
......@@ -4,11 +4,9 @@
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils.globvars import globalns as G
import tensorflow as tf
......@@ -41,6 +39,7 @@ class Model(DCGAN.Model):
# An alternative way to implement the clipping:
"""
from tensorpack.tfutils import optimizer
def clip(v):
n = v.op.name
if not n.startswith('discrim/'):
......
......@@ -6,17 +6,15 @@
import cv2
import tensorflow as tf
import argparse
import numpy as np
from six.moves import zip
import os
import sys
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
def class_balanced_sigmoid_cross_entropy(logits, label, name='cross_entropy_loss'):
......@@ -56,7 +54,7 @@ class Model(ModelDesc):
edgemap = tf.expand_dims(edgemap, 3, name='edgemap4d')
def branch(name, l, up):
with tf.variable_scope(name) as scope:
with tf.variable_scope(name):
l = Conv2D('convfc', l, 1, kernel_shape=1, nl=tf.identity,
use_bias=True,
W_init=tf.constant_initializer(),
......
......@@ -3,16 +3,14 @@
# File: inception-bn.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import cv2
import argparse
import numpy as np
import os
import tensorflow as tf
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.symbolic_functions import prediction_incorrect
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
......@@ -42,7 +40,7 @@ class Model(ModelDesc):
def inception(name, x, nr1x1, nr3x3r, nr3x3, nr233r, nr233, nrpool, pooltype):
stride = 2 if nr1x1 == 0 else 1
with tf.variable_scope(name) as scope:
with tf.variable_scope(name):
outs = []
if nr1x1 != 0:
outs.append(Conv2D('conv1x1', x, nr1x1, 1))
......@@ -101,7 +99,7 @@ class Model(ModelDesc):
l = GlobalAvgPooling('gap', l)
logits = FullyConnected('linear', l, out_dim=1000, nl=tf.identity)
prob = tf.nn.softmax(logits, name='output')
tf.nn.softmax(logits, name='output')
loss3 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
loss3 = tf.reduce_mean(loss3, name='loss3')
......@@ -119,7 +117,6 @@ class Model(ModelDesc):
80000, 0.7, True)
wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
add_param_summary(('.*/W', ['histogram'])) # monitor W
self.cost = tf.add_n([cost, wd_cost], name='cost')
add_moving_summary(wd_cost, self.cost)
......
......@@ -5,15 +5,14 @@
import cv2
import argparse
import numpy as np
import os
import tensorflow as tf
import multiprocessing
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.symbolic_functions import prediction_incorrect
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.dataflow import dataset
"""
......@@ -150,7 +149,7 @@ class Model(ModelDesc):
MaxPooling('maxpool', l, 3, 2)
], 3, name='concat')
for x in ['a', 'b']:
with tf.variable_scope('incep-8-2048{}'.format(x)) as scope:
with tf.variable_scope('incep-8-2048{}'.format(x)):
br11 = Conv2D('conv11', l, 320, 1)
br33 = Conv2D('conv133r', l, 384, 1)
br33 = tf.concat([
......
......@@ -9,8 +9,7 @@ import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.gradproc import *
from tensorpack.tfutils import optimizer, summary
from tensorpack.tfutils import optimizer, summary, gradproc
from tensorpack.utils import logger
from tensorpack.utils.fs import download, get_dataset_path
from tensorpack.utils.argtools import memoized_ignoreargs
......
......@@ -3,14 +3,12 @@
# File: cifar10-resnet.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
import argparse
import os
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.dataflow import dataset
......@@ -64,7 +62,7 @@ class Model(ModelDesc):
out_channel = in_channel
stride1 = 1
with tf.variable_scope(name) as scope:
with tf.variable_scope(name):
b1 = l if first else BNReLU(l)
c1 = Conv2D('conv1', b1, out_channel, stride=stride1, nl=BNReLU)
c2 = Conv2D('conv2', c1, out_channel)
......@@ -97,7 +95,7 @@ class Model(ModelDesc):
l = GlobalAvgPooling('gap', l)
logits = FullyConnected('linear', l, out_dim=10, nl=tf.identity)
prob = tf.nn.softmax(logits, name='output')
tf.nn.softmax(logits, name='output')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
......
......@@ -2,20 +2,16 @@
# -*- coding: UTF-8 -*-
# File: imagenet-resnet.py
import sys
import argparse
import numpy as np
import os
import tensorflow as tf
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import logger, QueueInput
from tensorpack.models import *
from tensorpack.callbacks import *
from tensorpack.train import (
TrainConfig, SyncMultiGPUTrainerParameterServer, launch_train_with_config)
from tensorpack.dataflow import imgaug, FakeData
from tensorpack.dataflow import FakeData
from tensorpack.tfutils import argscope, get_model_loader
from tensorpack.utils.gpu import get_nr_gpu
......
......@@ -8,18 +8,14 @@ import cv2
import functools
import tensorflow as tf
import argparse
import os
import re
import numpy as np
import six
from six.moves import zip
from tensorflow.contrib.layers import variance_scaling_initializer
from tensorpack import *
from tensorpack.utils import logger
from tensorpack.utils.stats import RatioCounter
from tensorpack.tfutils.summary import *
from tensorpack.dataflow.dataset import ILSVRCMeta, ILSVRC12
from tensorpack.dataflow.dataset import ILSVRCMeta
from imagenet_utils import eval_on_ILSVRC12, get_imagenet_dataflow, ImageNetModel
from resnet_model import resnet_group, resnet_bottleneck
......@@ -60,7 +56,7 @@ class Model(ModelDesc):
.apply(resnet_group, 'group3', bottleneck, 512, blocks[3], 2)
.GlobalAvgPooling('gap')
.FullyConnected('linear', 1000, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='prob')
tf.nn.softmax(logits, name='prob')
ImageNetModel.compute_loss_and_error(logits, label)
......
......@@ -8,7 +8,7 @@ from tensorflow.contrib.layers import variance_scaling_initializer
from tensorpack.tfutils.argscope import argscope, get_arg_scope
from tensorpack.models import (
Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm, BNReLU, FullyConnected,
Conv2D, GlobalAvgPooling, BatchNorm, BNReLU, FullyConnected,
LinearWrap)
......
......@@ -3,7 +3,6 @@
import cv2
import sys
import os
from contextlib import contextmanager
import numpy as np
......
......@@ -2,7 +2,6 @@
# -*- coding: UTF-8 -*-
# File: shufflenet.py
import sys
import argparse
import numpy as np
import os
......@@ -21,7 +20,7 @@ from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.utils.gpu import get_nr_gpu
from imagenet_utils import (
fbresnet_augmentor, get_imagenet_dataflow,
get_imagenet_dataflow,
ImageNetModel, GoogleNetResize, eval_on_ILSVRC12)
TOTAL_BATCH_SIZE = 256
......
......@@ -11,7 +11,6 @@ import tensorflow.contrib.slim as slim
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.utils.gpu import change_gpu
......@@ -19,7 +18,6 @@ from embedding_data import get_test_data, MnistPairs, MnistTriplets
MATPLOTLIB_AVAIBLABLE = False
try:
import matplotlib
from matplotlib import offsetbox
import matplotlib.pyplot as plt
MATPLOTLIB_AVAIBLABLE = True
......
......@@ -7,7 +7,6 @@ import cv2
import numpy as np
import tensorflow as tf
import os
import sys
import argparse
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
......@@ -75,7 +74,7 @@ class Model(ModelDesc):
.FullyConnected('fc1', out_dim=256, nl=tf.nn.relu)
.FullyConnected('fc2', out_dim=128, nl=tf.nn.relu)
.FullyConnected('fct', out_dim=19, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='prob')
tf.nn.softmax(logits, name='prob')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
......
......@@ -4,12 +4,10 @@
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import tensorflow as tf
import argparse
import numpy as np
import os
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
import tensorpack.tfutils.symbolic_functions as symbf
from tensorpack.tfutils.summary import *
from tensorpack.dataflow import dataset
......
......@@ -42,7 +42,7 @@ def tower_func(image):
l = FullyConnected('fc6', l, 4096)
l = FullyConnected('fc7', l, out_dim=4096)
logits = FullyConnected('fc8', l, out_dim=1000, nl=tf.identity)
prob = tf.nn.softmax(logits, name='prob')
tf.nn.softmax(logits, name='prob')
def run_test(path, input):
......
......@@ -54,7 +54,7 @@ def tower_func(image):
.FullyConnected('fc7', 4096, nl=tf.nn.relu)
.Dropout('drop1', 0.5)
.FullyConnected('fc8', out_dim=1000, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='prob')
tf.nn.softmax(logits, name='prob')
def run_test(path, input):
......
......@@ -2,9 +2,7 @@
# -*- coding: utf-8 -*-
# File: mnist-convnet.py
import numpy as np
import os
import sys
import argparse
import tensorflow as tf
"""
......@@ -57,7 +55,7 @@ class Model(ModelDesc):
.Dropout('dropout', 0.5)
.FullyConnected('fc1', out_dim=10, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='prob') # a Bx10 with probabilities
tf.nn.softmax(logits, name='prob') # a Bx10 with probabilities
# a vector of length B with loss of each sample
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
......
......@@ -10,9 +10,7 @@ from tensorflow import keras
KL = keras.layers
from tensorpack.train import SimpleTrainer
from tensorpack.input_source import QueueInput
from tensorpack.callbacks import ModelSaver, InferenceRunner, ScalarStats
from tensorpack.dataflow import dataset, BatchData, MapData
from tensorpack.utils import logger
from tensorpack.contrib.keras import KerasModel
......
......@@ -3,12 +3,8 @@
# File: mnist-keras.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import os
import sys
import argparse
from tensorflow import keras
KL = keras.layers
......
......@@ -2,9 +2,7 @@
# -*- coding: utf-8 -*-
# File: mnist-tfslim.py
import numpy as np
import os
import sys
import argparse
"""
MNIST ConvNet example using TensorFlow-slim.
......@@ -48,7 +46,7 @@ class Model(ModelDesc):
l = slim.layers.dropout(l, is_training=is_training)
logits = slim.layers.fully_connected(l, 10, activation_fn=None, scope='fc1')
prob = tf.nn.softmax(logits, name='prob')
tf.nn.softmax(logits, name='prob')
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
cost = tf.reduce_mean(cost, name='cross_entropy_loss')
......
......@@ -2,9 +2,7 @@
# -*- coding: utf-8 -*-
# File: mnist-visualizations.py
import numpy as np
import os
import sys
import argparse
"""
......
......@@ -4,7 +4,6 @@
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import argparse
import numpy as np
import os
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
......@@ -44,9 +43,9 @@ class Model(ModelDesc):
.FullyConnected('fc0', 512,
b_init=tf.constant_initializer(0.1), nl=tf.nn.relu)
.FullyConnected('linear', out_dim=10, nl=tf.identity)())
prob = tf.nn.softmax(logits, name='output')
tf.nn.softmax(logits, name='output')
acc = tf.to_float(tf.nn.in_top_k(logits, label, 1))
accuracy = tf.to_float(tf.nn.in_top_k(logits, label, 1))
add_moving_summary(tf.reduce_mean(accuracy, name='accuracy'))
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
......
[flake8]
max-line-length = 120
ignore = F403,F401,F405,F841,E4,E741,E742,E743
# See https://pep8.readthedocs.io/en/latest/intro.html#error-codes
ignore = F403,F405,E402,E741,E742,E743
exclude = private,
FasterRCNN/utils
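The updated ignore list drops F401/F841, so unused imports and unused locals are now reported, but adds E402 because the examples deliberately set environment variables before importing tensorpack. A minimal sketch of why E402 stays suppressed, assuming the import pattern used throughout examples/ (the filename and comment are illustrative):

```python
# hypothetical_example.py -- follows the pattern used in these examples
import os

# This must run before tensorpack/TensorFlow are imported; the late import
# below is exactly what E402 ("module level import not at top of file")
# would flag, hence E402 is kept in the ignore list.
os.environ['TENSORPACK_TRAIN_API'] = 'v2'

from tensorpack import *   # star imports are why F403/F405 remain ignored
```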