Shashank Suhas / seminar-breakout · Commits

Commit 315eab9d
authored May 22, 2020 by Yuxin Wu

warning about fork

parent d95bf7a1

Showing 3 changed files with 7 additions and 6 deletions (+7 -6):

examples/FasterRCNN/config.py      +1 -1
tensorpack/dataflow/parallel.py    +4 -3
tensorpack/utils/concurrency.py    +2 -2
examples/FasterRCNN/config.py

@@ -142,7 +142,7 @@ _C.TRAIN.NUM_GPUS = None  # by default, will be set from code
 _C.TRAIN.WEIGHT_DECAY = 1e-4
 _C.TRAIN.BASE_LR = 1e-2  # defined for total batch size=8. Otherwise it will be adjusted automatically
 _C.TRAIN.WARMUP = 1000   # in terms of iterations. This is not affected by #GPUs
-_C.TRAIN.WARMUP_INIT_LR = 0.  # defined for total batch size=8. Otherwise it will be adjusted automatically
+_C.TRAIN.WARMUP_INIT_LR = 1e-5  # defined for total batch size=8. Otherwise it will be adjusted automatically
 _C.TRAIN.STEPS_PER_EPOCH = 500
 _C.TRAIN.STARTING_EPOCH = 1  # the first epoch to start with, useful to continue a training
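The comments above state that BASE_LR and WARMUP_INIT_LR are defined for a total batch size of 8 and are otherwise adjusted automatically. As a hedged illustration only, the usual convention is linear scaling with the total batch size; the helper name below is hypothetical and is not tensorpack's actual adjustment code.

# Hypothetical sketch of the linear-scaling convention the config comments describe.
# "scale_for_batch" is an illustrative name, not part of tensorpack.
REFERENCE_BATCH = 8

def scale_for_batch(value, total_batch_size, reference=REFERENCE_BATCH):
    """Rescale a learning rate defined for the reference batch size (8) to another total batch size."""
    return value * total_batch_size / reference

# Example: 2 GPUs with 1 image each (total batch = 2)
print(scale_for_batch(1e-2, 2))   # BASE_LR        -> 0.0025
print(scale_for_batch(1e-5, 2))   # WARMUP_INIT_LR -> 2.5e-06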
tensorpack/dataflow/parallel.py

@@ -223,7 +223,7 @@ lead of failure on some of the code.")
         self.procs = [MultiProcessRunner._Worker(self.ds, self.queue, idx)
                       for idx in range(self.num_proc)]
         ensure_proc_terminate(self.procs)
-        start_proc_mask_signal(self.procs)
+        self._reset_done = False

     def __iter__(self):
         for k in itertools.count():
@@ -233,8 +233,9 @@ lead of failure on some of the code.")
             yield dp

     def reset_state(self):
-        # do nothing. all ds are reset once and only once in spawned processes
-        pass
+        assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
+        self._reset_done = True
+        start_proc_mask_signal(self.procs)


 class MultiProcessRunnerZMQ(_MultiProcessZMQDataFlow):
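With this change, the worker processes are started in reset_state() rather than in the constructor, and the new _reset_done flag enforces the DataFlow contract that reset_state() is called exactly once before iteration. A minimal usage sketch follows; only MultiProcessRunner and reset_state come from this diff, while FakeData and the constructor arguments are assumptions used for illustration.

# Sketch of the calling convention the new assert enforces: reset_state() exactly once, before iterating.
# FakeData is just a stand-in source DataFlow; constructor arguments are assumed for illustration.
from tensorpack.dataflow import FakeData, MultiProcessRunner

if __name__ == '__main__':
    ds = FakeData([[2, 3]], size=100)          # any DataFlow producing datapoints
    ds = MultiProcessRunner(ds, num_prefetch=4, num_proc=2)

    ds.reset_state()                           # starts and registers the worker processes (once!)
    # ds.reset_state()                         # a second call would now trip the assert

    for _, dp in zip(range(10), ds):           # consume a few datapoints produced by the workers
        pass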
tensorpack/utils/concurrency.py

@@ -232,8 +232,8 @@ def start_proc_mask_signal(proc):
         if isinstance(p, mp.Process):
             if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':
                 log_once("""
-Starting a process with 'fork' method is not safe and may consume unnecessary extra CPU memory.
-Use 'forkserver' or 'spawn' method (available after Py3.4) instead if you run into any issues.
+Starting a process with 'fork' method is efficient but not safe and may cause deadlock or crash.
+Use 'forkserver' or 'spawn' method instead if you run into such issues.
 See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods on how to set them.
 """.replace("\n", ""), 'warn')  # noqa
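The reworded warning points at the Python documentation for switching the multiprocessing start method. A minimal, generic sketch of doing that with the standard library (plain stdlib usage, not tensorpack-specific code):

# Pick 'forkserver' or 'spawn' instead of 'fork', as the warning's link describes.
# set_start_method() must run once, before any worker processes are created.
import multiprocessing as mp

def square(x):
    return x * x

if __name__ == '__main__':
    mp.set_start_method('spawn')           # 'forkserver' is Unix-only; 'spawn' works everywhere
    with mp.Pool(2) as pool:
        print(pool.map(square, range(4)))  # [0, 1, 4, 9]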