Commit 900a7eb0
authored May 21, 2017 by Yuxin Wu
add graph_memoized and fix #276
parent 9b710110
Showing 4 changed files with 36 additions and 21 deletions
examples/DoReFa-Net/dorefa.py      +2  -2
tensorpack/models/regularize.py    +2  -2
tensorpack/tfutils/common.py       +12 -17
tensorpack/utils/argtools.py       +20 -0
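
For context, a minimal sketch of the failure mode that a per-graph cache avoids (this snippet is illustrative, not part of the commit, and assumes TF1-style graph semantics): a function memoized with a plain functools.lru_cache keeps returning whatever object was built in the graph that happened to be the default at the first call.

import functools
import tensorflow as tf

@functools.lru_cache(maxsize=None)   # plain memoization: one cache for the whole process
def get_flag():
    # builds an op in whatever graph is the default at the time of the first call
    return tf.constant(0, name='flag')

g1 = tf.Graph()
with g1.as_default():
    t1 = get_flag()

g2 = tf.Graph()
with g2.as_default():
    t2 = get_flag()           # cache hit: the tensor built in g1 is returned again
    assert t2.graph is g1     # using t2 inside g2 would fail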
examples/DoReFa-Net/dorefa.py
...
@@ -4,10 +4,10 @@
 # Author: Yuxin Wu <ppwwyyxxc@gmail.com>

 import tensorflow as tf
-from tensorpack.utils.argtools import memoized
+from tensorpack.utils.argtools import graph_memoized


-@memoized
+@graph_memoized
 def get_dorefa(bitW, bitA, bitG):
     """
     return the three quantization functions fw, fa, fg, for weights, activations and gradients respectively
...
tensorpack/models/regularize.py
...
@@ -6,14 +6,14 @@ import tensorflow as tf
 import re

 from ..utils import logger
-from ..utils.argtools import memoized
+from ..utils.argtools import graph_memoized
 from ..tfutils.tower import get_current_tower_context
 from .common import layer_register

 __all__ = ['regularize_cost', 'l2_regularizer', 'l1_regularizer', 'Dropout']


-@memoized
+@graph_memoized
 def _log_regularizer(name):
     logger.info("Apply regularizer for {}".format(name))
...
tensorpack/tfutils/common.py
...
@@ -5,17 +5,14 @@
 import tensorflow as tf
 from six.moves import map

-from ..utils.naming import (GLOBAL_STEP_VAR_NAME, GLOBAL_STEP_OP_NAME)
+from ..utils.argtools import graph_memoized
+from ..utils.naming import GLOBAL_STEP_OP_NAME

 __all__ = ['get_default_sess_config',
            'get_global_step_value',
            'get_global_step_var',
            #'get_local_step_var',
            'get_op_tensor_name',
            'get_tensors_by_names',
            'get_op_or_tensor_by_name',
...
@@ -51,15 +48,13 @@ def get_default_sess_config(mem_fraction=0.99):
     return conf


+@graph_memoized
 def get_global_step_var():
     """
     Returns:
         tf.Tensor: the global_step variable in the current graph. create if
         doesn't exist.
     """
-    try:
-        return tf.get_default_graph().get_tensor_by_name(GLOBAL_STEP_VAR_NAME)
-    except KeyError:
-        scope = tf.get_variable_scope()
-        assert scope.name == '', \
-            "The global_step variable should be created under the root variable scope!"
+    scope = tf.get_variable_scope()
+    assert scope.name == '', \
+        "The global_step variable should be created under the root variable scope!"
...
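
A sketch of the intended effect of the change above (illustrative only, not from the commit, assuming TF1-style graphs): repeated calls within one graph now return the same cached global_step variable, while a fresh default graph gets its own.

import tensorflow as tf
from tensorpack.tfutils.common import get_global_step_var

with tf.Graph().as_default():
    a = get_global_step_var()
    b = get_global_step_var()   # cached for this graph: same variable object
    assert a is b

with tf.Graph().as_default():
    c = get_global_step_var()   # new default graph: a new global_step is created
    assert c is not a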
tensorpack/utils/argtools.py
...
@@ -38,6 +38,26 @@ memoized = functools.lru_cache(maxsize=None)
 """ Alias to :func:`functools.lru_cache` """


+def graph_memoized(func):
+    """
+    Like memoized, but keep one cache per default graph.
+    """
+    import tensorflow as tf
+    GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__'
+
+    @memoized
+    def func_with_graph_arg(*args, **kwargs):
+        kwargs.pop(GRAPH_ARG_NAME)
+        return func(*args, **kwargs)
+
+    def wrapper(*args, **kwargs):
+        assert GRAPH_ARG_NAME not in kwargs, "No Way!!"
+        graph = tf.get_default_graph()
+        kwargs[GRAPH_ARG_NAME] = graph
+        return func_with_graph_arg(*args, **kwargs)
+    return wrapper
+
+
 _MEMOIZED_NOARGS = {}
...
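
A small usage sketch of graph_memoized (illustrative, not part of the commit): the decorated function's body runs once per default graph, because the current graph is folded into the lru_cache key via the GRAPH_ARG_NAME keyword argument.

import tensorflow as tf
from tensorpack.utils.argtools import graph_memoized

calls = []

@graph_memoized
def build_flag():
    calls.append(1)            # record how often the body actually executes
    return tf.constant(0, name='flag')

with tf.Graph().as_default():
    build_flag()
    build_flag()               # same graph -> cache hit, body not re-run

with tf.Graph().as_default():
    build_flag()               # different graph -> cache miss, body runs again

assert len(calls) == 2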