Shashank Suhas / seminar-breakout / Commits / ebf2332b

Commit ebf2332b, authored Dec 29, 2017 by Yuxin Wu
remove some deprecations
parent 709f89a9
Showing 9 changed files, with 7 additions and 373 deletions (+7 -373).
Changed files:

    tensorpack/callbacks/base.py              +1  -9
    tensorpack/callbacks/inference_runner.py  +1  -4
    tensorpack/dataflow/raw.py                +1  -1
    tensorpack/input_source/input_source.py   +1  -1
    tensorpack/models/conv2d.py               +1  -16
    tensorpack/tfutils/distributions.py       +0  -330
    tensorpack/tfutils/summary.py             +1  -1
    tensorpack/train/base.py                  +1  -1
    tensorpack/train/config.py                +0  -10
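Most of the one-line changes below switch `log_deprecated` calls to the three-argument form, which attaches an end-of-support date to the warning. As a minimal sketch (not tensorpack's actual implementation; the `name`/`text`/`eos` parameter names are taken from the call sites in these diffs, e.g. `eos="2018-02-28"` in summary.py, and the message wording here is an assumption):

    import logging

    def log_deprecated(name="", text="", eos=""):
        # Build a deprecation message; `eos` is the end-of-support date.
        msg = "{} is deprecated. {}".format(name, text) if name else text
        if eos:
            msg += " It will be removed after {}.".format(eos)
        logging.warning("[Deprecated] %s", msg)

    # One of the calls removed by this commit, for illustration:
    log_deprecated("CallbackFactory(trigger_epoch=)", "Use trigger instead.", "2017-11-15")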
tensorpack/callbacks/base.py:

@@ -5,7 +5,6 @@
 import tensorflow as tf
 from abc import ABCMeta
 import six
-from ..utils.develop import log_deprecated
 from ..tfutils.common import get_op_or_tensor_by_name

 __all__ = ['Callback', 'ProxyCallback', 'CallbackFactory']
@@ -284,12 +283,9 @@ class CallbackFactory(Callback):
     Create a callback with some lambdas.
     """
-    def __init__(self, setup_graph=None, before_train=None, trigger=None,
-                 after_train=None, trigger_epoch=None):
+    def __init__(self, setup_graph=None, before_train=None, trigger=None,
+                 after_train=None):
         """
         Each lambda takes ``self`` as the only argument.
-
-        Note:
-            trigger_epoch was deprecated.
         """
         self._cb_setup_graph = setup_graph
@@ -297,10 +293,6 @@ class CallbackFactory(Callback):
         self._cb_trigger = trigger
         self._cb_after_train = after_train
-        if trigger_epoch:
-            self._cb_trigger = trigger_epoch
-            log_deprecated("CallbackFactory(trigger_epoch=)",
-                           "Use trigger instead.", "2017-11-15")

     def _setup_graph(self):
         if self._cb_setup_graph:
             self._cb_setup_graph(self)
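The removed shim simply mapped `trigger_epoch=` onto `trigger=`, so the migration is a rename at the call site. A minimal usage sketch (the lambda body is illustrative only):

    from tensorpack.callbacks.base import CallbackFactory

    # Before this commit, CallbackFactory(trigger_epoch=fn) still worked with a
    # warning; now only `trigger` is accepted. Each lambda takes the callback
    # itself as its only argument.
    cb = CallbackFactory(trigger=lambda self: print("trigger point reached"))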
tensorpack/callbacks/inference_runner.py:

@@ -127,10 +127,7 @@ class InferenceRunner(InferenceRunnerBase):
             return InferencerToHook(inf, fetches)

     def _setup_graph(self):
-        if self.trainer._API_VERSION == 1 and self.trainer._config.predict_tower is not None:
-            device = self.trainer._config.predict_tower[0]
-        else:
-            device = self._device
+        device = self._device
         assert self.trainer.tower_func is not None, "You must set tower_func of the trainer to use InferenceRunner!"
         input_callbacks = self._input_source.setup(self.trainer.inputs_desc)
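With the `predict_tower` fallback gone, `_setup_graph` trusts `self._device` alone, which the runner receives at construction time. A hedged usage sketch (`df_val` and the `ScalarStats('cost')` inferencer are placeholder names; the `device=` argument is the one the removed TrainConfig shim at the bottom of this commit points to):

    from tensorpack.callbacks import InferenceRunner, ScalarStats

    # Run validation on GPU 0 directly, instead of steering it through
    # TrainConfig(predict_tower=...).
    cb = InferenceRunner(df_val, ScalarStats('cost'), device=0)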
tensorpack/dataflow/raw.py:

@@ -110,7 +110,7 @@ class DataFromGenerator(DataFlow):
         else:
             self._gen = gen
         if size is not None:
-            log_deprecated("DataFromGenerator(size=)", "It doesn't make much sense.")
+            log_deprecated("DataFromGenerator(size=)", "It doesn't make much sense.", "2018-03-31")

     def get_data(self):
         # yield from
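Since `size=` now carries an end-of-support date, construct the dataflow from the generator alone. A short sketch with a stand-in generator:

    from tensorpack.dataflow import DataFromGenerator

    def my_gen():                      # placeholder generator of datapoints
        for i in range(100):
            yield [i]

    df = DataFromGenerator(my_gen())   # rather than DataFromGenerator(my_gen(), size=100)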
tensorpack/input_source/input_source.py:

@@ -516,7 +516,7 @@ class StagingInput(FeedfreeInput):
         assert isinstance(input, FeedfreeInput), input
         self._input = input
         if towers is not None:
-            log_deprecated("StagingInput(towers=) has no effect! Devices are handled automatically.")
+            log_deprecated("StagingInput(towers=)", "Devices are handled automatically.", "2018-03-31")
         self._nr_stage = nr_stage
         self._areas = []
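`towers=` keeps emitting a (now dated) warning but has no effect, so the argument can simply be dropped. A sketch with a placeholder dataflow `df`:

    from tensorpack.input_source import QueueInput, StagingInput

    # Devices are handled automatically; no towers= needed.
    input = StagingInput(QueueInput(df))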
tensorpack/models/conv2d.py:

@@ -6,7 +6,6 @@
 import tensorflow as tf

 from .common import layer_register, VariableHolder, rename_get_variable
 from ..utils.argtools import shape2d, shape4d
-from ..utils.develop import log_deprecated

 __all__ = ['Conv2D', 'Deconv2D']
@@ -113,16 +112,7 @@ def Deconv2D(x, out_channel, kernel_shape,
     in_channel = in_shape[channel_axis]
     assert in_channel is not None, "[Deconv2D] Input cannot have unknown channel!"
-    out_shape = out_channel
-    if isinstance(out_shape, int):
-        out_channel = out_shape
-    else:
-        log_deprecated("Deconv2D(out_shape=[...])", "Use an integer 'out_channel' instead!", "2017-11-18")
-        for k in out_shape:
-            if not isinstance(k, int):
-                raise ValueError("[Deconv2D] out_shape {} is invalid!".format(k))
-        out_channel = out_shape[channel_axis - 1]  # out_shape doesn't have batch
+    assert isinstance(out_channel, int), out_channel

     if W_init is None:
         W_init = tf.contrib.layers.xavier_initializer_conv2d()
@@ -141,11 +131,6 @@ def Deconv2D(x, out_channel, kernel_shape,
         trainable=True)
     ret = layer.apply(x, scope=tf.get_variable_scope())

-    # Check that we only supports out_shape = in_shape * stride
-    out_shape3 = ret.get_shape().as_list()[1:]
-    if not isinstance(out_shape, int):
-        assert list(out_shape) == out_shape3, "{} != {}".format(out_shape, out_shape3)
-
     ret.variables = VariableHolder(W=layer.kernel)
     if use_bias:
         ret.variables.b = layer.bias
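After this change `Deconv2D` accepts only an integer channel count; the spatial output shape follows from the stride. A hedged call-site sketch (the leading name argument follows tensorpack's `layer_register` convention, and the `stride=` keyword is an assumption since the signature in the hunk header is truncated; `x` is a placeholder input tensor):

    # Old (removed): Deconv2D('deconv', x, out_shape, 3) with a full shape list.
    # New: just the number of output channels.
    l = Deconv2D('deconv', x, 64, 3, stride=2)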
tensorpack/tfutils/distributions.py (deleted, file mode 100644 → 0; last revision at parent 709f89a9):

The entire 330-line module is removed. Its final contents:

import tensorflow as tf
from functools import wraps
import numpy as np

from ..utils.develop import log_deprecated
from .common import get_tf_version_number

__all__ = ['Distribution',
           'CategoricalDistribution', 'GaussianDistribution',
           'ProductDistribution']


def class_scope(func):
    """
    A decorator which wraps a function with a name_scope: "{class_name}_{method_name}".
    The "{class_name}" is either ``cls.name`` or simply the class name.
    It helps enhance TensorBoard graph visualization by grouping operators.

    This is just syntactic sugar to prevent writing: with
    ``tf.name_scope(...)`` in each method.
    """

    def get_name_scope_name():
        if get_tf_version_number() > 1.2:
            return tf.get_default_graph().get_name_scope()
        else:
            g = tf.get_default_graph()
            s = "RANDOM_STR_ABCDEFG"
            unique = g.unique_name(s)
            scope = unique[:-len(s)].rstrip('/')
            return scope

    @wraps(func)
    def _impl(self, *args, **kwargs):
        # is there a specific name?
        distr_name = self.name
        if distr_name is None:
            distr_name = self.__class__.__name__
        # scope it only when it is not already scoped with current class
        if distr_name not in get_name_scope_name():
            with tf.name_scope(distr_name + "_" + func.__name__):
                return func(self, *args, **kwargs)
        else:
            return func(self, *args, **kwargs)
    return _impl


class Distribution(object):
    """
    Base class of symbolic distribution utilities
    (the distribution parameters can be symbolic tensors).
    """

    name = None

    def __init__(self, name):
        """
        Args:
            name(str): the name to be used for scope and tensors in this
                distribution.
        """
        self.name = name
        log_deprecated("tfutils.distributions", "Please use tf.distributions instead!", "2017-12-10")

    @class_scope
    def loglikelihood(self, x, theta):
        """
        Args:
            x: samples of shape (batch, sample_dim)
            theta: model parameters of shape (batch, param_dim)

        Returns:
            log likelihood of each sample, of shape (batch,)
        """
        assert x.get_shape().ndims == 2 and \
            x.get_shape()[1] == self.sample_dim, \
            x.get_shape()
        assert theta.get_shape().ndims == 2 and \
            theta.get_shape()[1] == self.param_dim, \
            theta.get_shape()

        ret = self._loglikelihood(x, theta)
        assert ret.get_shape().ndims == 1, ret.get_shape()
        return ret

    @class_scope
    def entropy(self, x, theta):
        r"""
        Entropy of this distribution parameterized by theta, estimated from a batch of samples.

        .. math::

            H(x) = - E[\log p(x_i)], \text{where } p \text{ is parameterized by } \theta.

        Args:
            x: samples of shape (batch, sample_dim)
            theta: model parameters of shape (batch, param_dim)

        Returns:
            a scalar tensor, the entropy.
        """
        return tf.reduce_mean(-self.loglikelihood(x, theta), name="entropy")

    @class_scope
    def sample(self, batch_size, theta):
        """
        Sample a batch of vectors from this distribution parameterized by theta.

        Args:
            batch_size(int): the batch size.
            theta: a tensor of shape (param_dim,) or (batch, param_dim).

        Returns:
            a batch of samples of shape (batch, sample_dim)
        """
        assert isinstance(batch_size, int), batch_size
        shp = theta.get_shape()
        assert shp.ndims in [1, 2] and shp[-1] == self.sample_dim, shp
        if shp.ndims == 1:
            theta = tf.tile(tf.expand_dims(theta, 0), [batch_size, 1], name='tiled_theta')
        else:
            assert shp[0] == batch_size, shp
        x = self._sample(batch_size, theta)
        assert x.get_shape().ndims == 2 and \
            x.get_shape()[1] == self.sample_dim, \
            x.get_shape()
        return x

    @class_scope
    def encoder_activation(self, dist_param):
        """ An activation function which transform unconstrained raw network output
        to a vector of feasible distribution parameters.

        Note that for each distribution,
        there are many feasible ways to design this function and it's hard to say which is better.
        The default implementations in the distribution classes here is
        just one reasonable way to do this.

        Args:
            dist_param: output from a network, of shape (batch, param_dim).

        Returns:
            a tensor of the same shape, the distribution parameters.
        """
        return self._encoder_activation(dist_param)

    @property
    def param_dim(self):
        """
        Returns:
            int: the dimension of parameters of this distribution.
        """
        raise NotImplementedError()

    @property
    def sample_dim(self):
        """
        Returns:
            int: the dimension of samples out of this distribution.
        """
        raise NotImplementedError()

    def _loglikelihood(self, x, theta):
        raise NotImplementedError()

    def _encoder_activation(self, dist_param):
        return dist_param

    def _sample(self, batch_size, theta):
        raise NotImplementedError()


class CategoricalDistribution(Distribution):
    """ Categorical distribution of a set of classes.
        Each sample is a one-hot vector.
    """
    def __init__(self, name, cardinality):
        """
        Args:
            cardinality (int): number of categories
        """
        super(CategoricalDistribution, self).__init__(name)
        self.cardinality = cardinality

    def _loglikelihood(self, x, theta):
        eps = 1e-8
        return tf.reduce_sum(tf.log(theta + eps) * x, 1)

    def _encoder_activation(self, dist_param):
        return tf.nn.softmax(dist_param)

    def _sample(self, batch_size, theta):
        ids = tf.squeeze(tf.multinomial(
            tf.log(theta + 1e-8), num_samples=1), 1)
        return tf.one_hot(ids, self.cardinality, name='sample')

    @property
    def param_dim(self):
        return self.cardinality

    @property
    def sample_dim(self):
        return self.cardinality


class GaussianDistribution(Distribution):
    def __init__(self, name, dim, fixed_std=True):
        """
        Args:
            dim(int): the dimension of samples.
            fixed_std (bool): if True, will use 1 as std for all dimensions.
        """
        super(GaussianDistribution, self).__init__(name)
        self.dim = dim
        self.fixed_std = fixed_std

    def _loglikelihood(self, x, theta):
        eps = 1e-8

        if self.fixed_std:
            mean = theta
            stddev = tf.ones_like(mean)
            exponent = (x - mean)
        else:
            mean, stddev = tf.split(theta, 2, axis=1)
            exponent = (x - mean) / (stddev + eps)

        return tf.reduce_sum(
            - 0.5 * np.log(2 * np.pi) - tf.log(stddev + eps) - 0.5 * tf.square(exponent), 1)

    def _encoder_activation(self, dist_param):
        if self.fixed_std:
            return dist_param
        else:
            mean, stddev = tf.split(dist_param, 2, axis=1)
            stddev = tf.exp(stddev)
            # just make it positive and assume it's stddev
            # OpenAI code assumes exp(input) is variance. https://github.com/openai/InfoGAN.
            # not sure if there is any theory about this.
            return tf.concat([mean, stddev], axis=1)

    def _sample(self, batch_size, theta):
        if self.fixed_std:
            mean = theta
            stddev = 1
        else:
            mean, stddev = tf.split(theta, 2, axis=1)
        e = tf.random_normal(tf.shape(mean))
        return tf.add(mean, e * stddev, name='sample')

    @property
    def param_dim(self):
        if self.fixed_std:
            return self.dim
        else:
            return 2 * self.dim

    @property
    def sample_dim(self):
        return self.dim


class ProductDistribution(Distribution):
    """A product of a list of independent distributions. """
    def __init__(self, name, dists):
        """
        Args:
            dists(list): list of :class:`Distribution`.
        """
        super(ProductDistribution, self).__init__(name)
        self.dists = dists

    @property
    def param_dim(self):
        return np.sum([d.param_dim for d in self.dists])

    @property
    def sample_dim(self):
        return np.sum([d.sample_dim for d in self.dists])

    def _splitter(self, s, param):
        """Input is split into a list of chunks according
            to dist.param_dim along axis=1

        Args:
            s (tf.Tensor): batch of vectors with shape (batch, param_dim or sample_dim)
            param (bool): split params, otherwise split samples

        Yields:
            tf.Tensor: chunk from input of length N_i with sum N_i = N
        """
        offset = 0
        for dist in self.dists:
            if param:
                off = dist.param_dim
            else:
                off = dist.sample_dim
            yield s[:, offset:offset + off]
            offset += off

    def entropy(self, x, theta):
        """
        Note:
            It returns a list, as one might use different weights for each
            distribution.

        Returns:
            list[tf.Tensor]: entropy of each distribution.
        """
        ret = []
        for dist, xi, ti in zip(self.dists,
                                self._splitter(x, False),
                                self._splitter(theta, True)):
            ret.append(dist.entropy(xi, ti))
        return ret

    def _encoder_activation(self, dist_params):
        rsl = []
        for dist, dist_param in zip(self.dists, self._splitter(dist_params, True)):
            if dist.param_dim > 0:
                rsl.append(dist._encoder_activation(dist_param))
        return tf.concat(rsl, 1)

    def _sample(self, batch_size, theta):
        ret = []
        for dist, ti in zip(self.dists, self._splitter(theta, True)):
            ret.append(dist._sample(batch_size, ti))
        return tf.concat(ret, 1, name='sample')
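The deletion follows the module's own notice ("Please use tf.distributions instead!", end of support 2017-12-10). A hedged sketch of what the fixed-std `GaussianDistribution` maps to in the TF 1.x API it points at (shapes and the `dim=16` placeholder are illustrative):

    import tensorflow as tf

    # tf.distributions.Normal covers what _loglikelihood computed by hand:
    # per-dimension log probs, summed over the sample dimension, with the
    # fixed stddev of 1 that fixed_std=True used.
    theta = tf.placeholder(tf.float32, [None, 16])    # mean, shape (batch, dim)
    x = tf.placeholder(tf.float32, [None, 16])        # samples
    dist = tf.distributions.Normal(loc=theta, scale=tf.ones_like(theta))
    loglik = tf.reduce_sum(dist.log_prob(x), axis=1)  # shape (batch,)
    sample = dist.sample()                            # shape (batch, dim)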
tensorpack/tfutils/summary.py:

@@ -220,7 +220,7 @@ def add_moving_summary(*args, **kwargs):
     if not isinstance(args[0], list):
         v = args
     else:
-        log_deprecated("Call add_moving_summary with positional args instead of a list!")
+        log_deprecated("Call add_moving_summary with positional args instead of a list!", eos="2018-02-28")
         v = args[0]
     for x in v:
         assert isinstance(x, tf.Tensor), x
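`add_moving_summary` still accepts a list for now, but the warning gains an end-of-support date; prefer positional tensors (`cost` and `wd_cost` below are placeholder tensors):

    from tensorpack.tfutils.summary import add_moving_summary

    add_moving_summary(cost, wd_cost)       # preferred
    # add_moving_summary([cost, wd_cost])   # list form, deprecated, gone after 2018-02-28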
tensorpack/train/base.py:

@@ -21,7 +21,7 @@ from ..callbacks.steps import MaintainStepCounter
 from .config import TrainConfig, DEFAULT_MONITORS, DEFAULT_CALLBACKS

-__all__ = ['StopTraining', 'TrainConfig', 'Trainer']
+__all__ = ['StopTraining', 'Trainer']

 class StopTraining(BaseException):
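`TrainConfig` is no longer re-exported from `tensorpack.train.base`; the import kept on line 21 shows where it actually lives:

    # Import from the defining module (or via the top-level tensorpack package):
    from tensorpack.train.config import TrainConfig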
tensorpack/train/config.py:

@@ -12,7 +12,6 @@ from ..utils import logger
 from ..tfutils import (JustCurrentSession, SessionInit)
 from ..tfutils.sesscreate import NewSessionCreator
 from ..input_source import InputSource
-from ..utils.develop import log_deprecated

 __all__ = ['TrainConfig', 'DEFAULT_CALLBACKS', 'DEFAULT_MONITORS']
@@ -151,15 +150,6 @@ class TrainConfig(object):
             assert self.nr_tower == 1, "Cannot set both nr_tower and tower in TrainConfig!"
             self.tower = tower

-        predict_tower = kwargs.pop('predict_tower', None)
-        if predict_tower is not None:
-            log_deprecated("TrainConfig(predict_tower=)",
-                           "InferenceRunner now accepts a 'device' argument.", "2017-12-31")
-        self.predict_tower = predict_tower
-        if isinstance(self.predict_tower, int):
-            self.predict_tower = [self.predict_tower]
-
-        # --------------------------------------------------------------
         assert len(kwargs) == 0, 'Unknown arguments: {}'.format(str(kwargs.keys()))

     @property
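`TrainConfig` now rejects `predict_tower` outright, via the unknown-kwargs assert that the shim used to run before; the replacement is the runner-level device shown under inference_runner.py above (`df` and `cbs` are placeholders):

    # Old, now an error: TrainConfig(dataflow=df, callbacks=cbs, predict_tower=[0])
    config = TrainConfig(dataflow=df, callbacks=cbs)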