Project: SYNERG / presto
Commit: 8636389d, authored Aug 24, 2020 by RAHUL BHARDWAJ
Commit message: Added experiment results
Parent: 8eabe343
22 changed files, with 315 additions and 273 deletions (+315 / -273):
code/CMakeLists.txt      +2   -0
code/exp1.cc             +13  -12
code/exp2.cc             +61  -71
code/experiments.cc      +5   -0
code/gen.py              +1   -1
code/gen_main.py         +2   -0
code/main.cc             +1   -1
code/merge.py            +5   -1
code/plot-exp2.py        +208 -187
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-diff.png                   +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-hr.png                     +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-hr10.png                   +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-hr15.png                   +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-hr5.png                    +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-misses.png                 +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-vdisk-corr-iops-share.png  +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-vdisk-corr-iops-wss.png    +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-vdisk-corr-wss-share.png   +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-vdisk-hr.png               +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-vdisk-hr5-wop.png          +0 -0
experiments/real1/plots/75%-100MBps-K_RECN_MEM-x-vdisk-hr5-wp.png           +0 -0
experiments/real1/setup.txt  +17  -0
code/CMakeLists.txt
@@ -3,6 +3,8 @@ project(presto)
 set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_BUILD_TYPE Debug)
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -Wno-unused -fno-omit-frame-pointer -O0")
 set(SOURCE_FILES main.cc metadata.h metadata.cc cache.h hashmap.cc hashmap.h cache.cc util.h policy.cc policy.h
 ...

code/exp1.cc
@@ -38,8 +38,8 @@ void exp1()
     struct ns_pair snapshot_rate[] = {
         //{5000, "5000"},
         {10000, "10000"},
-        {50000, "50000"},
-        {100000, "100000"},
+        // {50000, "50000"},
+        // {100000, "100000"},
         //{500000, "500K"},
         //{5, "5s"},
         //{10, "10s"},
@@ -49,25 +49,26 @@ void exp1()
         //{300, "5m"}
     };
     struct ns_pair prewarm_set_size_limit[] = {
-        {5, "5%"},
-        {10, "10%"},
-        {25, "25%"},
-        {50, "50%"},
+        // {5, "5%"},
+        // {10, "10%"},
+        // {25, "25%"},
+        // {50, "50%"},
         {75, "75%"},
-        {100, "100%"}
+        // {100, "100%"}
     };
     struct ns_pair prewarm_rate_limit[] = {
-        {0, "0MBps"},
-        //{1000, "100MBps"},
-        //{2000, "200MBps"}
+        // {0, "0MBps"},
+        {50, "50MBps"},
+        // {100, "100MBps"},
+        // {500, "200MBps"}
     };
     struct ns_pair heuristic[] = {
         //{K_FREQ, "K_FREQ"},
         {K_FREQ_MEM, "K_FREQ_MEM"},
         //{K_RECN, "K_RECN"},
-        {K_RECN_MEM, "K_RECN_MEM"},
+        // {K_RECN_MEM, "K_RECN_MEM"},
         //{K_FRERECN, "K_FRERECN"},
-        {K_FRERECN_MEM, "K_FRERECN_MEM"}
+        // {K_FRERECN_MEM, "K_FRERECN_MEM"}
     };
     struct ns_pair score_recn[] = {
         {1, "1"},
 ...

code/exp2.cc
@@ -29,10 +29,10 @@ void exp2()
     pthread_t cache_thread, io_thread;
-    Workload *wload = new Workload("rreal.4hr"); // <-------------------
+    Workload *wload = new Workload("real1"); // <-------------------
     Hashmap *hm1 = new Hashmap(&comp_hm1, &print_hm1, nullptr);
     Hashmap *hm3 = new Hashmap(&comp_hm3, &print_hm3, nullptr);
-    Cache *cache = new Cache(CACHE_A, hm1, hm3, wload, 32); // <-------------------
+    Cache *cache = new Cache(CACHE_A, hm1, hm3, wload, 6400); // <-------------------
     Hashmap *phm1 = new Hashmap(&comp_hm1, &print_hm1, nullptr);
     Hashmap *phm3 = new Hashmap(&comp_hm3, &print_hm3, nullptr);
     Cache *pcache = new Cache();
@@ -40,67 +40,62 @@ void exp2()
     pcache->hm[HASHMAP3] = phm3;
     struct ns_pair snapshot_rate[] = {
         //{5000, "5000"},
         {10000, "10000"},
-        //{50000, "50000"},
-        //{100000, "100000"},
+        {50000, "50000"},
+        {100000, "100000"},
         //{500000, "500K"},
         //{5, "5s"},
         //{10, "10s"},
         //{15, "15s"},
         //{30, "30s"},
         //{60, "1m"},
         //{300, "5m"}
     };
     struct ns_pair prewarm_set_size_limit[] = {
-        {5, "5%"},
-        {10, "10%"},
-        {25, "25%"},
+        // {5, "5%"},
+        // {10, "10%"},
+        // {25, "25%"},
         {50, "50%"},
         {75, "75%"},
-        {100, "100%"}
-    };
+        {100, "100%"}};
     struct ns_pair prewarm_rate_limit[] = {
-        {0, "0MBps"},
+        // {0, "0MBps"},
         {50, "50MBps"},
         {100, "100MBps"},
-        {500, "500MBps"}
-    };
+        {500, "500MBps"}};
     struct ns_pair heuristic[] = {
         //{K_FREQ, "K_FREQ"},
         {K_FREQ_MEM, "K_FREQ_MEM"},
         //{K_RECN, "K_RECN"},
         {K_RECN_MEM, "K_RECN_MEM"},
         //{K_FRERECN, "K_FRERECN"},
-        {K_FRERECN_MEM, "K_FRERECN_MEM"}
-    };
+        {K_FRERECN_MEM, "K_FRERECN_MEM"}};
     struct ns_pair score_recn[] = {
         {1, "1"},
         {2, "2"},
         {3, "3"},
         {4, "4"},
         {5, "5"},
     };
     struct ns_pair score_freq[] = {
         {100, "100%"},
         {50, "50%"},
         {33, "33%"},
         {25, "25%"},
         {20, "20%"},
     };
     struct ns_pair score_frerecn[] = {
         {1, "1%"},
         {5, "5%"},
         {10, "10%"},
         {15, "15%"},
-        {20, "20%"}
-    };
+        {20, "20%"}};
     struct ns_pair *score;
     struct ns_pair func_frerecn[] = {
         {LINEAR, "LNR"},
-        {QUADRATIC, "QDR"}
-    };
+        {QUADRATIC, "QDR"}};
     struct run_params *rp = new run_params;
     rp->cache = cache;
@@ -111,7 +106,7 @@ void exp2()
     rp->trace_file = wload->tracefile;
     rp->wss_window = 600;
     rp->wss_stride = 300;
-    rp->moment = 5400;
+    rp->moment = 2400;
     asprintf(&basedirname, PRESTO_BASEPATH "results/%s/%s/", "exp2", wload->name);
     if (!file_exists(basedirname))
@@ -121,10 +116,10 @@ void exp2()
         rq = new struct sync_rq;
         rq->tfname = rp->trace_file;
         rp->rq = rq;
         pthread_create(&io_thread, nullptr, get_req_from_vdisk_sync, (void *)rq);
         pthread_join(io_thread, nullptr);
     }
     for (ssr = 0; ssr < LEN(snapshot_rate, ns_pair); ssr++) // snapshot rate
     {
         rp->snapshot_rate = snapshot_rate[ssr].val;
         fprintf(stderr, "Snapshot rate: %s\n", snapshot_rate[ssr].str);
@@ -132,24 +127,24 @@ void exp2()
         if (!file_exists(basedirname))
             mkdir(basedirname, 0755);
         for (pss = 0; pss < LEN(prewarm_set_size_limit, ns_pair); pss++) // prewarm set size limit
         {
             rp->prewarm_set_size_limit = ((prewarm_set_size_limit[pss].val / 100.0) *
                                           (cache->total_cache_size / NUM_BYTES_IN_MB));
             fprintf(stderr, "Prewarm set size limit: %ldMB (%s of total cache: %ldMB)\n", rp->prewarm_set_size_limit,
                     prewarm_set_size_limit[pss].str, cache->total_cache_size / NUM_BYTES_IN_MB);
             for (pr = 0; pr < LEN(prewarm_rate_limit, ns_pair); pr++) // prewarm rate limit
             {
                 rp->prewarm_rate = prewarm_rate_limit[pr].val;
                 fprintf(stderr, "Prewarm rate limit: %s\n", prewarm_rate_limit[pr].str);
                 for (hrstc = 0; hrstc < LEN(heuristic, ns_pair); hrstc++) // heuristic
                 {
                     rp->heuristic = heuristic[hrstc].val;
                     fprintf(stderr, "Heuristic: %s\n", heuristic[hrstc].str);
                     for (scr = 0; scr < LEN(score_frerecn, ns_pair); scr++) // heuristic score
                     {
                         switch (heuristic[hrstc].val)
                         {
@@ -182,7 +177,7 @@ void exp2()
     rp->base_filename = res_basefname;
     rp->hparams.frerecn_func = func_frerecn[frfn].val;
     fprintf(stderr, "File: %s\n", res_basefname);
     pthread_create(&cache_thread, nullptr, run_exp2, (void *)rp);
     pthread_join(cache_thread, nullptr);
 }
 }
@@ -274,7 +269,7 @@ void *run_exp2(void *data)
     mkdir(rp->base_filename, 0755);
     snprintf(all_stats_fname, MAX_FILENAME_LEN, "%s/ALL-STATS.txt", rp->base_filename);
-    snprintf(diff_fname, MAX_FILENAME_LEN, "/home/skrtbhtngr/CLionProjects/presto/results/exp2/%s/%d.diff",
+    snprintf(diff_fname, MAX_FILENAME_LEN, "/home/rahul/Rnd/presto/code/results/exp2/%s/%d.diff",
              cache->wload_name, snapshot_rate);
     res_hr = new double *[num_vdisks];
@@ -282,7 +277,7 @@ void *run_exp2(void *data)
     res_hm1 = new int *[num_vdisks];
     res_hm3 = new int *[num_vdisks];
     res_hm1_miss = new long *[num_vdisks];
-    res_hm3_miss = new long *[num_vdisks];;
+    res_hm3_miss = new long *[num_vdisks];
     res_wss = new long *[num_vdisks];
     for (i = 0; i < num_vdisks; i++)
     {
@@ -341,9 +336,7 @@ void *run_exp2(void *data)
     request = new struct bio_req;
     sprintf(base_fname, PRESTO_BASEPATH
             "results/%s/%s/%d/", "exp2", cache->wload_name, snapshot_rate);
     // If pcache is present, skip the first half!
     if (pcache->cache_idx != -1 && pcache->run_ssr == snapshot_rate)
@@ -363,7 +356,6 @@ void *run_exp2(void *data)
     while (get_req(request, rq))
     {
         count += perform_lookup(cache, request);
         if (request->ts >= (plot_epoch + 1) * plot_rate)
         {
             for (i = 0; i < num_vdisks; i++)
@@ -572,7 +564,6 @@ void *run_exp2(void *data)
     count = plot_epoch = 0;
     counter = rp->moment;
     failed = false;
     while (get_req(request, rq))
     {
         count += perform_lookup(cache, request);
@@ -649,8 +640,9 @@ void *run_exp2(void *data)
     if (prewarm_set_used_total > 0)
     {
+        // fprintf(stderr, "reached here 2");
         fprintf(stderr, "Total size of objects loaded at %d second: %.2lf MB\n", counter,
                 prewarm_set_used_total / (double)NUM_BYTES_IN_MB);
         snprintf(pre_fname, MAX_FILENAME_LEN, "%s/%ds-SHARE.csv", rp->base_filename, counter);
         pre_file = fopen(pre_fname, "w+");
@@ -675,10 +667,8 @@ void *run_exp2(void *data)
     for (i = 0; i < num_vdisks; i++)
     {
         res_hr[i][plot_epoch] = cache->get_hit_ratio(i);
         res_hm1[i][plot_epoch] = cache->cache_obj_count_vdisk[POOL_SINGLE][HASHMAP1][i]
                                  + cache->cache_obj_count_vdisk[POOL_MULTI][HASHMAP1][i];
         res_hm3[i][plot_epoch] = cache->cache_obj_count_vdisk[POOL_SINGLE][HASHMAP3][i]
                                  + cache->cache_obj_count_vdisk[POOL_MULTI][HASHMAP3][i];
         res_hm1_miss[i][plot_epoch] = cache->miss_count_vdisk[HASHMAP1][i];
         res_hm3_miss[i][plot_epoch] = cache->miss_count_vdisk[HASHMAP3][i];
         res_io_count[i][plot_epoch] = cache->io_count_vdisk[i];
 ...

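For orientation, exp2() sweeps every combination of the parameter arrays above. Below is a minimal Python sketch of that grid, using the (value, label) pairs visible in this diff; the sketch is not part of the repository, and the per-heuristic score selection is simplified (the real code switches between score_recn, score_freq and score_frerecn depending on the heuristic).

from itertools import product

# (value, label) pairs mirroring the ns_pair arrays in code/exp2.cc after this commit
snapshot_rate = [(10000, "10000"), (50000, "50000"), (100000, "100000")]
prewarm_set_size_limit = [(50, "50%"), (75, "75%"), (100, "100%")]
prewarm_rate_limit = [(50, "50MBps"), (100, "100MBps"), (500, "500MBps")]
heuristic = [("K_FREQ_MEM", "K_FREQ_MEM"), ("K_RECN_MEM", "K_RECN_MEM"),
             ("K_FRERECN_MEM", "K_FRERECN_MEM")]
score_frerecn = [(1, "1%"), (5, "5%"), (10, "10%"), (15, "15%"), (20, "20%")]

# Enumerate every configuration the nested loops would visit.
runs = list(product(snapshot_rate, prewarm_set_size_limit,
                    prewarm_rate_limit, heuristic, score_frerecn))
print(len(runs), "configurations")  # 3 * 3 * 3 * 3 * 5 = 405
for (ssr, _), (_, pss), (_, pr), (_, h), (_, sc) in runs[:3]:
    print(f"snapshot_rate={ssr} set_limit={pss} rate_limit={pr} heuristic={h} score={sc}")
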
code/experiments.cc
@@ -223,6 +223,7 @@ int perform_lookup(Cache *cache, struct bio_req *request)
     delete[] hash;
     khm3 = get_hm3_key(vhm1->egroup_id);
+    assert(khm3->egroup_id != 614819);
     vhm3 = (val_hm3 *)cache->lookup(HASHMAP3, khm3, request->vdisk);
     if (vhm3 == nullptr)
     {
@@ -388,6 +389,10 @@ BitArray **analyze_snapshots(Cache *cache, int max_epochs, int ssr, long pre_siz
     case K_FREQ_MEM:
         calc_scores_freq_mem(ba[j], scores[j]);
+        // if (j==num_vdisks)
+        // {
+        //     fprintf(stderr, "score %ld", scores[j][614819]);
+        // }
         break;
     case K_RECN_MEM:
         calc_scores_rec_mem(ba[j], scores[j], (experiment == EXP1) ? max_epochs - i - 1 : i + 1);
 ...

code/gen.py
@@ -214,7 +214,7 @@ def generate(conf):
             blocks_r_hr.append(sector)
             times_r_hr.append(ts)
-            if ts > 3600:
+            if ts > 14200:
                 break
     while True:
 ...

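The only change in gen.py extends the generated-trace cutoff from 3600 s to 14200 s. As a small, hedged illustration of that kind of cutoff (the function and record layout here are hypothetical, not the script's own):

TRACE_CUTOFF_S = 14200  # value taken from this diff; previously 3600

def truncate_trace(records, cutoff=TRACE_CUTOFF_S):
    """Keep (sector, ts) records until the first timestamp past the cutoff."""
    kept = []
    for sector, ts in records:
        kept.append((sector, ts))
        if ts > cutoff:
            break  # mirrors gen.py: append, then stop once ts exceeds the cutoff
    return kept

print(truncate_trace([(10, 100), (11, 14000), (12, 14300), (13, 15000)]))
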
code/gen_main.py
@@ -70,6 +70,8 @@ def main():
     for csize in cluster_size_list:
         for cstride in cluster_stride_list:
             for hs in range(len(hotspot_list)):
+                if i >= 6:
+                    break
                 conf_template['name'] = 'syn' + str(501 + i)
                 conf_template['vdisk']['id'] = 501 + i
                 conf_template['sector']['cluster']['size'] = {'min': csize[0], 'max': csize[1]}
 ...

code/main.cc
@@ -7,7 +7,7 @@
 int Snapshot::num_epochs;
-int experiment = 1;
+int experiment = 2;
 int main()
 {
 ...

code/merge.py
@@ -3,5 +3,9 @@ import os
 if __name__ == "__main__":
     wload = "wss_iops100"
     os.chdir("traces/" + wload)
-    os.system("cat syn*.csv > temp.csv")
+    disks = [501, 502, 503, 504, 505, 506]
+    os.system("cat syn" + str(disks[0]) + ".csv > temp.csv")
+    for i in range(1, len(disks)):
+        os.system("cat syn" + str(disks[i]) + ".csv >> temp.csv")
+    # os.system("cat syn*.csv > temp.csv")
     os.system("sort --field-separator=',' -n -k 5,5 temp.csv > %sMerged.csv" % (wload))
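merge.py now concatenates the per-disk traces of the six synthetic vDisks explicitly and then sorts by the fifth comma-separated field (the sort key implied by sort -n -k 5,5). A rough pure-Python equivalent of that shell pipeline, under the same column assumption; this helper is illustrative only and not part of the repository:

import csv

def merge_traces(wload="wss_iops100", disks=(501, 502, 503, 504, 505, 506)):
    rows = []
    for d in disks:
        # read each per-disk trace, e.g. traces/wss_iops100/syn501.csv
        with open(f"traces/{wload}/syn{d}.csv", newline="") as f:
            rows.extend(csv.reader(f))
    # column 5 (index 4) is treated as the numeric sort key, matching `sort -n -k 5,5`
    rows.sort(key=lambda r: float(r[4]))
    with open(f"traces/{wload}/{wload}Merged.csv", "w", newline="") as f:
        csv.writer(f).writerows(rows)

merge_traces()
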
code/plot-exp2.py
@@ -12,26 +12,35 @@ def main(wload, target_cache_size):
     plt.rcParams.update({'font.size': 14})
     plt.rcParams.update({'mathtext.default': 'regular'})
-    heuristics = ['K_FREQ_MEM', 'K_RECN_MEM', 'K_FRERECN_MEM']
-    hname = ['k-Frequent', 'k-Recent', 'k-Frerecent']  # constrained
-    snaprates = ['10000', '50000', '100000']
-    sname = ['10 K', '50 K', '100 K']
+    # heuristics = ['K_FREQ_MEM', 'K_RECN_MEM', 'K_FRERECN_MEM']
+    # hname = ['k-Frequent', 'k-Recent', 'k-Frerecent'] # constrained
+    # snaprates = ['10000', '50000', '100000']
+    # sname = ['10 K', '50 K', '100 K']
+    heuristics = ['K_RECN_MEM']
+    hname = ['k-Recent']  # constrained
+    snaprates = ['10000']
+    sname = ['10 K']
     # scores = ['100%', '50%', '33%', '25%', '20%', '1', '2', '3', '4', '5', '1%', '5%', '10%', '15%', '20%']
     # scname = ['100\%', '50\%', '33\%', '25\%', '20\%', '1', '2', '3', '4', '5', '1\%', '5\%', '10\%', '15\%', '20\%']
-    mlimits = ['5%', '10%', '25%', '50%', '75%', '100%']
-    mname = ['5%', '10%', '25%', '50%', '75%', '100%']
+    # mlimits = ['5%', '10%', '25%', '50%', '75%', '100%']
+    # mname = ['5%', '10%', '25%', '50%', '75%', '100%']
+    mlimits = ['75%']
+    mname = ['75%']
     # mlimits = ['75%', '100%']
     # mname = ['75%', '100%']
     frerern_func = ['LNR', 'QDR']
     func_name = ['Linear', 'Quadratic']
-    prewarmrates = ['0MBps', '500MBps', '100MBps', '50MBps']
-    pratenames = ['∞', '500 MBps', '100 MBps', '50 MBps']
-    moment = 5400
+    # prewarmrates = ['0MBps','500MBps', '100MBps', '50MBps']
+    # pratenames = ['∞', '500 MBps', '100 MBps', '50 MBps']
+    prewarmrates = ['100MBps']
+    pratenames = ['100 MBps']
+    moment = 2400
     wss_window = 600
     wss_stride = 300
-    hr_windows = [5, 15, 30]
+    # hr_windows = [5, 15, 30]
+    hr_windows = [5, 10, 15]
     # wload = 'scr_merg' # <<--------------------
     # target_cache_size = '64 MiB' # <<--------------------
     wload_file = 'traces/' + wload + '.csv'
@@ -42,7 +51,7 @@ def main(wload, target_cache_size):
     ######## NOFAIL
     ##################################################################################################################
-    basedir_nop = 'results/exp2/' + wload + '.nofail/10000/'
+    basedir_nop = 'results/exp2/' + wload + '/10000/'
     x = []
     hr_wof_cache = []
@@ -132,7 +141,6 @@ def main(wload, target_cache_size):
     ##################################################################################################################
     for ssr in range(len(snaprates)):
-        break
         for mlim in range(len(mlimits)):
             for prate in range(len(prewarmrates)):
                 for hrstc in range(len(heuristics)):
@@ -263,191 +271,204 @@ def main(wload, target_cache_size):
(In this hunk the previously commented-out plotting blocks were removed; the uncommented, adjusted code added in their place is shown as additions below.)
         iops = [5.18, 15.15, 3.03, 2.22, 2.57, 1.81, 0.89, 0.39]
     elif wload == 'scr_merg':
         iops = [3.53, 0.12, 51.17, 55.22, 362.62, 0.88, 212.14, 11.97]
+    else:
+        iops = [99.16, 24.56, 49.59, 199.25, 99.54, 149.74]
     fsize_iops = [11 if x >= 200 else (
         10 if x >= 150 else (9 if x >= 100 else (8 if x >= 50 else (7 if x >= 25 else 6))))
         for x in iops]
     sc_iops = [4 * x for x in iops]
     #############################################################################################
+    plt.clf()
+    fig, ax = plt.subplots()
+    plt.grid(b=True, color='grey', linewidth=0.5, alpha=0.25, zorder=1)
+    for vd in range(len(vdisks)):
+        plt.scatter(wss_before_failure_vdisk[vd], total_prewarmed_vdisk[vd], s=50,
+                    c=colors[vd], label=classes[vd], zorder=2 + vd)
+    plt.legend(bbox_to_anchor=(1.01, 1.0), loc='upper left', fancybox=True, prop={'size': 10})
+    plt.xlabel('WSS before failure (MB)')
+    plt.ylabel('Prewarm Set Size (MB)')
+    plt.savefig(basedir_pre[:-1] + '-vdisk-corr-wss-share.png', dpi=300, bbox_inches='tight')
+    plt.close(fig)
+    plt.clf()
+    fig, ax = plt.subplots()
+    plt.grid(b=True, color='grey', linewidth=0.5, alpha=0.25, zorder=1)
+    for vd in range(len(vdisks)):
+        plt.scatter(iops[vd], total_prewarmed_vdisk[vd], s=50,
+                    c=colors[vd], label=classes[vd], zorder=2 + vd)
+    plt.legend(bbox_to_anchor=(1.01, 1.0), loc='upper left', fancybox=True, prop={'size': 10})
+    plt.xlabel('Avg. IOPS')
+    plt.ylabel('Prewarm Set Size (MB)')
+    plt.savefig(basedir_pre[:-1] + '-vdisk-corr-iops-share.png', dpi=300, bbox_inches='tight')
+    plt.close(fig)
+    plt.clf()
+    fig, ax = plt.subplots()
+    plt.grid(b=True, color='grey', linewidth=0.5, alpha=0.25, zorder=1)
+    for vd in range(len(vdisks)):
+        plt.scatter(iops[vd], wss_before_failure_vdisk[vd], s=50,
+                    c=colors[vd], label=classes[vd], zorder=2 + vd)
+    plt.legend(bbox_to_anchor=(1.01, 1.0), loc='upper left', fancybox=True, prop={'size': 10})
+    plt.xlabel('Avg. IOPS')
+    plt.ylabel('WSS before failure (MB)')
+    plt.savefig(basedir_pre[:-1] + '-vdisk-corr-iops-wss.png', dpi=300, bbox_inches='tight')
+    plt.close(fig)
+    ############################################################################################
+    plt.clf()
+    for vd in range(len(vdisks)):
+        plt.plot(x, hrs_wp_vdisk[vd], color=colors[vd], linewidth=1, label='vDisk ' + str(vd + 1),
+                 alpha=1)
+        plt.plot(x, hrs_wop_vdisk[vd], color=colors[vd], linewidth=0.6, linestyle='--', alpha=0.5)
+    plt.legend(bbox_to_anchor=(1.03, 1.0), loc='upper left', fancybox=True, prop={'size': 10})
+    plt.xlabel('Time (minutes)')
+    plt.ylabel('vDisk Hit Ratio')
+    beg_sec = 1200
+    end_sec = 3600
+    if wload == 'rreal.4hr' or wload == 'scr_merg':
+        plt.ylim([0.4, 1.0])
+    else:
+        plt.ylim([0.2, 1.0])
+    plt.xlim([beg_sec, end_sec])
+    plt.xticks(np.arange(beg_sec, end_sec + 1, 600),
+               np.arange(beg_sec, end_sec + 1, 600).astype(int))
+    plt.savefig(basedir_pre[:-1] + '-vdisk-hr.png', dpi=300, bbox_inches='tight')
+    ############################################################################################
+    plt.clf()
+    for vd in range(len(vdisks)):
+        plt.plot(x[moment:moment + 5 * 60], hrs_wop_vdisk[vd][moment:moment + 5 * 60],
+                 color=colors[vd], linewidth=1.2, label='vDisk ' + str(vd + 1), alpha=1)
+    plt.legend(bbox_to_anchor=(1.03, 1.0), loc='upper left', fancybox=True, prop={'size': 10})
+    plt.xlabel('Time (minutes)')
+    plt.ylabel('vDisk Hit Ratio')
+    beg_sec = moment
+    end_sec = moment + 5 * 60
+    if wload == 'rreal.4hr' or wload == 'scr_merg':
+        plt.ylim([0.4, 1.0])
+    else:
+        plt.ylim([0.2, 1.0])
+    plt.xlim([beg_sec, end_sec])
+    plt.xticks(np.arange(beg_sec, end_sec + 1, 60),
+               np.arange(beg_sec / 60, end_sec / 60 + 1, 1).astype(int))
+    plt.savefig(basedir_pre[:-1] + '-vdisk-hr5-wop.png', dpi=300, bbox_inches='tight')
+    plt.clf()
+    for vd in range(len(vdisks)):
+        plt.plot(x[moment:moment + 5 * 60], hrs_wp_vdisk[vd][moment:moment + 5 * 60],
+                 color=colors[vd], linewidth=1.2, label='vDisk ' + str(vd + 1), alpha=1)
+    plt.legend(bbox_to_anchor=(1.03, 1.0), loc='upper left', fancybox=True, prop={'size': 10})
+    plt.xlabel('Time (minutes)')
+    plt.ylabel('vDisk Hit Ratio')
+    beg_sec = moment
+    end_sec = moment + 5 * 60
+    if wload == 'rreal.4hr' or wload == 'scr_merg':
+        plt.ylim([0.4, 1.0])
+    else:
+        plt.ylim([0.2, 1.0])
+    plt.xlim([beg_sec, end_sec])
+    plt.xticks(np.arange(beg_sec, end_sec + 1, 60),
+               np.arange(beg_sec / 60, end_sec / 60 + 1, 1).astype(int))
+    plt.savefig(basedir_pre[:-1] + '-vdisk-hr5-wp.png', dpi=300, bbox_inches='tight')
+    ############################################################################################
+    plt.clf()
+    plt.xlabel('Time (minutes)')
+    plt.ylabel('Cache Hit Ratio')
+    plt.plot(x, hr_wof_cache, color='g', linewidth=1.3, label='no failure', zorder=1, alpha=0.75)
+    plt.plot(x, hr_wop_cache, color='b', linewidth=1.3, label='no prewarming', zorder=2)
+    plt.plot(x, hr_wp_cache, color='r', linewidth=1.3, label='with prewarming', zorder=5)
+    ty = [hr_val_before_fail] * len(x)
+    for n in range(len(x)):
+        if n > tt_hr_val_before_fail_wop or n < tt_hr_val_before_fail_wp:
+            ty[n] = 0
+    plt.plot(x, ty, color='darkgrey', linewidth=0.7, linestyle='--')
+    plt.legend(loc='center right', fancybox=True, prop={'size': 12})
+    beg_sec = 1200
+    end_sec = 3600
+    if wload == 'wload_6gb':
+        plt.ylim([0.4, 0.9])
+    elif wload == 'wload_4gb':
+        plt.ylim([0.5, 0.9])
+    elif wload == 'rreal.4hr':
+        plt.ylim([0.6, 1.0])
+    elif wload == 'scr_merg':
+        plt.ylim([0.6, 1.0])
+    else:
+        plt.ylim([0.2, 1.0])
+    plt.xlim([beg_sec, end_sec])
+    plt.xticks(np.arange(beg_sec, end_sec + 1, 300),
+               np.arange(beg_sec / 60, end_sec / 60 + 1, 5).astype(int))
+    # plt.show()
+    plt.savefig(basedir_pre[:-1] + '-hr.png', dpi=300, bbox_inches='tight')
+    ############################################################################################
+    for nn in hr_windows:
+        plt.clf()
+        plt.xlabel('Time (minutes)')
+        plt.ylabel('Cache Hit Ratio')
+        plt.plot(x[moment:moment + nn * 60], hr_wop_cache[moment:moment + nn * 60],
+                 color='b', linewidth=1.3, label='no prewarming', zorder=2)
+        plt.plot(x[moment:moment + nn * 60], hr_wp_cache[moment:moment + nn * 60],
+                 color='r', linewidth=1.3, label='with prewarming', zorder=5)
+        plt.legend(loc='lower right', fancybox=True, prop={'size': 12})
+        beg_sec = moment
+        end_sec = moment + nn * 60
+        if wload == 'wload_6gb':
+            plt.ylim([0.3, 0.9])
+        elif wload == 'wload_4gb':
+            plt.ylim([0.5, 0.9])
+        elif wload == 'rreal.4hr':
+            plt.ylim([0.6, 1.0])
+        plt.xlim([beg_sec, end_sec])
+        plt.xticks(np.arange(beg_sec, end_sec + 1, nn / 5 * 60),
+                   np.arange(beg_sec / 60, end_sec / 60 + 1, nn / 5).astype(int))
+        # plt.show()
+        plt.savefig(basedir_pre[:-1] + '-hr' + str(nn) + '.png', dpi=300, bbox_inches='tight')
+    #############################################################################################
+    gain_hr_cache = []
+    for n in range(moment, len(hr_wp_cache)):
+        gain_hr_cache.append(hr_wp_cache[n] - hr_wop_cache[n])
+    plt.clf()
+    plt.xlabel('Time (minutes)')
+    plt.ylabel('Difference in Hit Ratio')
+    plt.plot(x[moment:], gain_hr_cache, color='g', linewidth=1.3)
+    beg_sec = moment
+    end_sec = moment + 900
+    plt.ylim([0, 0.4])
+    plt.xlim([beg_sec, end_sec])
+    plt.xticks(np.arange(beg_sec, end_sec + 1, 300),
+               np.arange(beg_sec / 60, end_sec / 60 + 1, 5).astype(int))
+    plt.savefig(basedir_pre[:-1] + '-diff.png', dpi=300, bbox_inches='tight')
+    ##############################################################################################
+    mrc_window = 900
+    nx = x[moment: moment + mrc_window]
+    plt.clf()
+    if wload == 'rreal.4hr':
+        scale = 10
+    else:
+        scale = 1000
+    plt.xlabel('Time (minutes)')
+    plt.ylabel('Total Cache Misses (x' + str(scale) + ')')
+    plt.plot(nx, wop_misses[moment: moment + mrc_window], color='b', linewidth=1.3,
+             label='no prewarming', marker='d', markevery=300)
+    plt.plot(nx, wp_misses[moment: moment + mrc_window], color='r', linewidth=1.3,
+             label='with prewarming', marker='d', markevery=300)
+    plt.legend(loc='upper left', fancybox=True, prop={'size': 12})
+    beg_sec = moment
+    end_sec = moment + mrc_window
+    max_y = wop_misses[moment + mrc_window]
+    plt.ylim([0, max_y])
+    plt.yticks(np.arange(0, max_y + 1, max_y / 5),
+               np.arange(0, int(max_y / scale) + 1, (max_y / scale) / 5).astype(int))
+    plt.xlim([beg_sec, end_sec])
+    plt.xticks(np.arange(beg_sec, end_sec + 1, 300),
+               np.arange(beg_sec / 60, end_sec / 60 + 1, 5).astype(int))
+    # plt.show()
+    plt.savefig(basedir_pre[:-1] + '-misses.png', dpi=300, bbox_inches='tight')
+    break
     ##################################################################################################################
     ######## EXTRAS
@@ -745,7 +766,7 @@ def main(wload, target_cache_size):
 if __name__ == '__main__':
     # matplotlib.use('agg')
     threads = []
-    wloads = [('wload_6gb', '6 GiB'), ('wload_4gb', '4 GiB'), ('rreal.4hr', '32 MiB'), ('scr_merg', '64 MiB')]
+    wloads = [('wload_6gb', '6 GiB'), ('wload_4gb', '4 GiB'), ('real1', '6 GiB'), ('scr_merg', '64 MiB')]
     wload = wloads[2]
     # for wload in wloads:
     #     x = threading.Thread(target=main, args=(wload[0], wload[1]))
 ...

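A recurring pattern in the plotting code uncommented above is labelling an x-axis that is measured in seconds with minutes: ticks are placed at fixed second intervals (often every 300 s) and relabelled with the tick value divided by 60. A standalone sketch of that conversion follows; the data here is synthetic and the file name is a placeholder, not one of the committed plots.

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend, in the spirit of the script's commented matplotlib.use('agg')
import matplotlib.pyplot as plt

beg_sec, end_sec = 1200, 3600
x = np.arange(beg_sec, end_sec)                      # one sample per second
y = 0.6 + 0.4 * (1 - np.exp(-(x - beg_sec) / 600))   # synthetic hit-ratio-like curve

plt.plot(x, y, color="r", linewidth=1.3, label="with prewarming")
plt.xlim([beg_sec, end_sec])
# place a tick every 300 s, label it with the corresponding minute: 20, 25, ..., 60
plt.xticks(np.arange(beg_sec, end_sec + 1, 300),
           np.arange(beg_sec / 60, end_sec / 60 + 1, 5).astype(int))
plt.xlabel("Time (minutes)")
plt.ylabel("Cache Hit Ratio")
plt.legend(loc="lower right")
plt.savefig("hr-sketch.png", dpi=300, bbox_inches="tight")
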
New plot files (0 → 100644) under experiments/real1/plots/:
  75%-100MBps-K_RECN_MEM-x-diff.png                   81.1 KB
  75%-100MBps-K_RECN_MEM-x-hr.png                     115 KB
  75%-100MBps-K_RECN_MEM-x-hr10.png                   117 KB
  75%-100MBps-K_RECN_MEM-x-hr15.png                   116 KB
  75%-100MBps-K_RECN_MEM-x-hr5.png                    112 KB
  75%-100MBps-K_RECN_MEM-x-misses.png                 139 KB
  75%-100MBps-K_RECN_MEM-x-vdisk-corr-iops-share.png  121 KB
  75%-100MBps-K_RECN_MEM-x-vdisk-corr-iops-wss.png    107 KB
  75%-100MBps-K_RECN_MEM-x-vdisk-corr-wss-share.png   127 KB
  75%-100MBps-K_RECN_MEM-x-vdisk-hr.png               191 KB
  75%-100MBps-K_RECN_MEM-x-vdisk-hr5-wop.png          169 KB
  75%-100MBps-K_RECN_MEM-x-vdisk-hr5-wp.png           163 KB
experiments/real1/setup.txt (new file, 0 → 100644)

Basic Info
1) The experiment was run on a trace taken from http://iotta.snia.org/tracetypes/3
2) The above site has hourly traces of 6 disks for 8 days.
3) We used the trace from 12:00 to 13:00 on 16/02/2016.
4) Only a 1-hour trace was used (and the failure happens at the 40-minute mark) because the hit ratio rises and saturates very quickly.

Results
1) The relevant plots are included in the plots directory, with the following naming convention:
   <prewarm-set-size>-<prewarm-rate>-<heuristic>-<info>
   where <info> can be:
   hr5           -> hit ratio during the 5 minutes after failure, in the 3 cases (with pre-warming, without pre-warming, no failure)
   hr10          -> hit ratio during the 10 minutes after failure, in the 3 cases
   hr15          -> hit ratio during the 15 minutes after failure, in the 3 cases
   hr            -> hit ratio for the entire 1-hour duration, in the 3 cases
   vdisk-hr5-wop -> hit ratio (without pre-warming) for each vDisk during the 5 minutes after failure
   vdisk-hr5-wp  -> hit ratio (with pre-warming) for each vDisk during the 5 minutes after failure
   vdisk-hr      -> hit ratio (with pre-warming) for each vDisk during the entire 1-hour duration
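Since the committed plot files follow the <prewarm-set-size>-<prewarm-rate>-<heuristic>-<info> convention with an extra field between heuristic and info ('x' in these filenames, presumably an unset heuristic score), a small helper can split such a name back into its parts. This is purely illustrative and assumes that interpretation of the 'x' field:

def parse_plot_name(filename):
    """Split e.g. '75%-100MBps-K_RECN_MEM-x-vdisk-hr5-wop.png' into its fields.

    Assumes <set-size>-<rate>-<heuristic>-<score>-<info>.png, where 'x' stands
    for an unset heuristic score; the <info> field may itself contain dashes.
    """
    stem = filename.rsplit(".", 1)[0]
    set_size, rate, heuristic, rest = stem.split("-", 3)
    score, info = rest.split("-", 1)
    return {"set_size": set_size, "rate": rate, "heuristic": heuristic,
            "score": score, "info": info}

print(parse_plot_name("75%-100MBps-K_RECN_MEM-x-hr5.png"))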