SYNERG / presto

Commit 6170e62c
authored Nov 08, 2020 by RAHUL BHARDWAJ
Partially completed IOPS/WSS analysis
parent ef7374a8
Showing 6 changed files with 355 additions and 26 deletions:
code/cache.h         +39   -1
code/exp2.cc         +25   -14
code/experiments.cc  +54   -1
code/plot-exp2.py    +232  -9
code/run             +3    -0
code/util.h          +2    -1
code/cache.h
...
...
@@ -61,6 +61,8 @@ public:
  long evct_count[NUM_POOLS][NUM_HASHMAPS]; // pools X hashmaps
  long evct_count_prewarmed;                // pools X hashmaps
  long *io_count_vdisk;                     // vdisk
  double *avg_iops;
  double *wss_before_failure;
  bool prewarm_till_first_eviction; // stop prewarming once a prewarm object gets evicted
  bool prewarm_at_head;             // insert prewarming objects at front
...
...
@@ -102,9 +104,12 @@ public:
    this->singlepool = false;
    this->evct_count_prewarmed = 0;
    this->prewarm_till_first_eviction = false;
    this->prewarm_at_head = false;
    this->prewarm_at_head = true;
    this->io_count_vdisk = new long[num_vdisks];
    this->avg_iops = new double[num_vdisks];
    this->wss_before_failure = new double[num_vdisks];
    memset(this->io_count_vdisk, 0, num_vdisks * sizeof(long));
    for (i = 0; i < NUM_HASHMAPS; i++)
    {
...
...
@@ -150,6 +155,39 @@ public:
      ba3 = ba1[i] = new BitArray(1UL << HASH_CHARS_FOR_EGROUP * 4);
      oba3 = oba1[i] = new BitArray(1UL << HASH_CHARS_FOR_EGROUP * 4);
    }
    const char *tf = wload->tracefile;
    FILE *fp = fopen(tf, "r");
    assert(fp != nullptr);
    char buf[MAX_REQ_LENGTH], *word, *saveptr;
    double total_time = 0;
    while (fgets(buf, MAX_REQ_LENGTH, fp))
    {
      word = strtok_r(buf, ",", &saveptr);
      //req->type = word[0];
      word = strtok_r(nullptr, ",", &saveptr);
      int disk_id = -1;
      for (int j = 0; j < num_vdisks; j++)
      {
        if (wload->vdisks[j].vdisk_id == strtol(word, nullptr, 0))
          disk_id = j;
      }
      avg_iops[disk_id] += 1.0;
      word = strtok_r(nullptr, ",", &saveptr);
      word = strtok_r(nullptr, ",", &saveptr);
      word = strtok_r(nullptr, ",", &saveptr);
      total_time = strtod(word, nullptr);
    }
    for (i = 0; i < num_vdisks; i++)
    {
      avg_iops[i] /= total_time;
    }
    fclose(fp);
    //else if (type_bitmap == BITSET)
    //{
    //  bs1 = new BitSet *[num_vdisks];
...
...
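The trace pass added to the constructor above derives avg_iops by counting requests per vdisk and dividing by the trace's final timestamp. Below is a minimal standalone sketch of the same calculation, assuming the CSV layout implied by the parsing (type, vdisk id, two skipped fields, timestamp in seconds); the function name and containers are illustrative, not the repository's API.

// Illustrative sketch (not repository code): average IOPS per vdisk from a
// CSV trace of the form <type>,<vdisk_id>,<...>,<...>,<timestamp_seconds>.
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>

std::vector<double> estimate_avg_iops(const char *tracefile,
                                      const std::vector<long> &vdisk_ids)
{
    std::vector<double> iops(vdisk_ids.size(), 0.0);
    FILE *fp = fopen(tracefile, "r");
    if (fp == nullptr)
        return iops;

    char buf[4096], *saveptr;
    double total_time = 0;
    while (fgets(buf, sizeof(buf), fp)) {
        strtok_r(buf, ",", &saveptr);                     // field 1: request type (unused)
        char *id_str = strtok_r(nullptr, ",", &saveptr);  // field 2: vdisk id
        strtok_r(nullptr, ",", &saveptr);                 // field 3: skipped
        strtok_r(nullptr, ",", &saveptr);                 // field 4: skipped
        char *ts_str = strtok_r(nullptr, ",", &saveptr);  // field 5: timestamp (seconds)
        if (id_str == nullptr || ts_str == nullptr)
            continue;                                     // malformed line
        long id = strtol(id_str, nullptr, 0);
        for (size_t j = 0; j < vdisk_ids.size(); j++)
            if (vdisk_ids[j] == id)
                iops[j] += 1.0;                           // one more request for vdisk j
        total_time = strtod(ts_str, nullptr);             // last timestamp = trace length
    }
    fclose(fp);

    if (total_time > 0)
        for (double &v : iops)
            v /= total_time;                              // requests per second
    return iops;
}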
code/exp2.cc
...
...
@@ -56,16 +56,24 @@ void exp2()
  //     {5, "5%"},
  //     {10, "10%"},
  //     {25, "25%"},
  //     {50, "50%"}
  };
      {100, "100%"}};
  //     {50, "50%"},
      {75, "75%"}};
  //     {100, "100%"}};
  // struct ns_pair prewarm_rate_limit[] = {
  // //     // {0, "0MBps"},
  //     {20, "20MBps"},
  //     {50, "50MBps"},
  //     {100, "100MBps"}
  //     {500, "500MBps"},
  //     {10000, "10000MBps"}};
  struct ns_pair prewarm_rate_limit[] = {
      // {0, "0MBps"},
      {20, "20MBps"},
      {50, "50MBps"},
      {100, "100MBps"}
      {500, "500MBps"}};
      {10000, "10000MBps"}};
      // {15, "15MBps"},
      // {30, "30MBps"},
      {100, "100MBps"}};
      // {200, "200MBps"},
      // {1000, "1000MBps"}};
      // {60000, "6000MBps"}};
      // {500, "500MBps"}};
  struct ns_pair heuristic[] = {
...
...
@@ -105,11 +113,13 @@ void exp2()
  rp->cache = cache;
  rp->sync_reqs = true;
  rp->pcache = pcache;
  rp->part_scheme_hm1 = HM1_NO_PART;
  // rp->part_scheme_hm1 = HM1_NO_PART;
  rp->part_scheme_hm1 = HM1_WSS_SHARE;
  rp->part_scheme_hm1_hm3 = HM1_HM3_NO_PART;
  rp->trace_file = wload->tracefile;
  rp->wss_window = 600;
  rp->wss_stride = 300;
  // rp->moment = 2400;
  rp->moment = 5400;
  asprintf(&basedirname, PRESTO_BASEPATH "results/%s/%s/", "exp2", wload->name);
...
...
@@ -120,7 +130,7 @@ void exp2()
    rq = new struct sync_rq;
    rq->tfname = rp->trace_file;
    rp->rq = rq;
    pthread_create(&io_thread, nullptr, get_req_from_vdisk_sync, (void *)rq);
    pthread_create(&io_thread, nullptr, get_req_from_vdisk_sync, rq);
    pthread_join(io_thread, nullptr);
  }
  for (ssr = 0; ssr < LEN(snapshot_rate, ns_pair); ssr++) // snapshot rate
...
...
@@ -294,7 +304,7 @@ void *run_exp2(void *data)
    res_wss[i] = new long[MAX_TIME_SEC + 2 - wss_stride];
  }
  res_usage = new long[MAX_TIME_SEC + 1];
  prewarmed_evct_cnt = new long[MAX_TIME_SEC + 1];
  prewarmed_evct_cnt = new long[MAX_TIME_SEC + 1];
  res_evct = new long *[NUM_POOLS];
  res_evct[POOL_SINGLE] = new long[MAX_TIME_SEC + 1];
  res_evct[POOL_MULTI] = new long[MAX_TIME_SEC + 1];
...
...
@@ -406,6 +416,7 @@ void *run_exp2(void *data)
    if (request->ts >= (wss_stride_epoch + 1) * wss_stride)
    {
      printf("%d, %f\n", wss_stride_epoch, request->ts);
      if (request->ts >= wss_window)
      {
        wsses = cache->estimate_wss(wss_window, true);
...
...
@@ -453,7 +464,7 @@ void *run_exp2(void *data)
  max_epochs = epoch + 1;
  max_plot_epochs = plot_epoch + 1;
  max_wss_stride_epochs = wss_stride_epoch + 1;
  printf("%d\n", max_wss_stride_epochs);
  for (i = 0; i < num_vdisks; i++)
  {
    snprintf(hr_fname, MAX_FILENAME_LEN, "%sCOLD-VDISK-%d-HR.csv", base_fname, cache->vdisk_ids[i]);
...
...
@@ -474,7 +485,7 @@ void *run_exp2(void *data)
    }
    for (j = 0; j < max_wss_stride_epochs; j++)
      fprintf(wss_file, "%d,%ld\n", j, res_wss[i][j]);
    cache->wss_before_failure[i] = res_wss[i][rp->moment / rp->wss_stride - 1];
    fflush(hr_file);
    fclose(hr_file);
    fflush(obj_file);
...
...
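The new line above records wss_before_failure from the per-stride WSS series at index rp->moment / rp->wss_stride - 1. A small illustrative check of that index with the values set in exp2() (moment = 5400, wss_stride = 300); standalone, not repository code.

// Illustrative only: which stride epoch feeds wss_before_failure.
#include <cstdio>

int main()
{
    int moment = 5400;                  // failure moment in seconds, as set in exp2()
    int wss_stride = 300;               // WSS sampling stride in seconds, as set in exp2()
    int idx = moment / wss_stride - 1;  // last completed stride before the moment
    std::printf("wss_before_failure taken from stride epoch %d (window ending at %d s)\n",
                idx, (idx + 1) * wss_stride);
    return 0;                           // prints epoch 17, window ending at 5400 s
}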
@@ -598,7 +609,7 @@ prewarm_step:
  }
  while (request->ts >= counter && (rem_vdisks_prewarm > 0 || pre_size_limit >= SIZE_HM1_OBJ))
  {
    if (cache->prewarm_till_first_eviction && cache->evct_count_prewarmed > 0)
    if (cache->prewarm_till_first_eviction && cache->evct_count_prewarmed > 0)
      break;
    prewarm_set_used_total = 0;
    for (i = 0; i < num_vdisks; i++)
...
...
code/experiments.cc
...
...
@@ -452,7 +452,24 @@ BitArray **analyze_snapshots(Cache *cache, int max_epochs, int ssr, long pre_siz
      mem_limits[i] = ((double)cache->ba1[i]->onbits / total_objs) * (pre_size_limit / 2.0);
      mem_limits[i] = pre_size_limit / 2.0;
    }
  else if (part_scheme_hm1 == HM1_WSS_SHARE)
  {
    printf("Using wss share\n");
    double weight[num_vdisks];
    double total_weight = 0;
    for (i = 0; i < num_vdisks; i++)
    {
      weight[i] = cache->avg_iops[i] / cache->wss_before_failure[i];
      // printf("disk %d, %f, %f\n", i, cache->avg_iops[i], cache->wss_before_failure[i]);
      total_weight += weight[i];
    }
    for (i = 0; i < num_vdisks; i++)
    {
      printf("disk %d, %f, %f, %f\n", i, weight[i] / total_weight, cache->avg_iops[i], cache->wss_before_failure[i]);
      mem_limits[i] = (pre_size_limit * weight[i]) / (2 * total_weight);
    }
      mem_limits[i] = pre_size_limit / 2.0;
  }
  rem_mem = rem_vdisks = 0;
  memset(obj_pos, 0, sizeof(int) * (num_vdisks + 1));
  for (i = 0; i < num_vdisks; i++)
...
...
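The HM1_WSS_SHARE branch above weights each vdisk by its average IOPS divided by its working-set size just before the failure moment, then hands out half of pre_size_limit in proportion to those weights. A compact sketch of that split with illustrative names (the repository computes it inline on cache->avg_iops and cache->wss_before_failure):

// Illustrative sketch (not repository code) of the HM1_WSS_SHARE split.
#include <vector>

std::vector<double> wss_share_limits(const std::vector<double> &avg_iops,
                                     const std::vector<double> &wss_before_failure,
                                     double pre_size_limit)
{
    size_t n = avg_iops.size();
    std::vector<double> weight(n), limits(n, 0.0);
    double total_weight = 0;

    for (size_t i = 0; i < n; i++) {
        // Hot vdisks with small working sets get the largest weight.
        weight[i] = avg_iops[i] / wss_before_failure[i];
        total_weight += weight[i];
    }
    for (size_t i = 0; i < n; i++)
        // Half of the prewarm budget, divided in proportion to the weights.
        limits[i] = (pre_size_limit * weight[i]) / (2 * total_weight);
    return limits;
}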
@@ -468,6 +485,42 @@ BitArray **analyze_snapshots(Cache *cache, int max_epochs, int ssr, long pre_siz
  }
  analyze_scores(agg[i], scores[i], &obj_pos[i], SIZE_HM3_OBJ, mem_limits[i]);
  while (part_scheme_hm1 == HM1_WSS_SHARE && rem_mem > SIZE_HM1_OBJ)
  {
    //fprintf(stderr, "rem_mem: %lu, rem_vdisks: %lu\n", rem_mem, rem_vdisks);
    if (rem_mem >= rem_vdisks * SIZE_HM1_OBJ)
    {
      rem_mem_old = rem_mem;
      rem_vdisks_old = rem_vdisks;
      rem_mem = rem_vdisks = 0;
      for (i = 0; i < num_vdisks; i++)
      {
        if (mem_limits[i] == 0)
          continue;
        mem_limits[i] = (double)rem_mem_old / rem_vdisks_old;
        k = analyze_scores(agg[i], scores[i], &obj_pos[i], SIZE_HM1_OBJ, mem_limits[i]);
        if (k < SIZE_HM1_OBJ)
          rem_vdisks++;
        else
        {
          mem_limits[i] = 0;
          rem_mem += k;
        }
      }
    }
    else
    {
      for (i = 0; i < num_vdisks && rem_mem >= SIZE_HM1_OBJ; i++)
      {
        if (mem_limits[i] == 0)
          continue;
        analyze_scores(agg[i], scores[i], &obj_pos[i], SIZE_HM1_OBJ, SIZE_HM1_OBJ);
        rem_mem -= SIZE_HM1_OBJ;
      }
    }
  }
  while (part_scheme_hm1 == HM1_FAIR_SHARE && rem_mem > SIZE_HM1_OBJ)
  {
    //fprintf(stderr, "rem_mem: %lu, rem_vdisks: %lu\n", rem_mem, rem_vdisks);
...
...
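The WSS-share loop above re-balances memory a vdisk could not consume: each round pools the unused bytes reported by analyze_scores, splits the pool evenly across the vdisks that exhausted their limits, and repeats until less than one HM1 object's worth remains. A simplified sketch of that redistribution, where the hypothetical consume(i, limit) stands in for analyze_scores() and returns the bytes a vdisk left unused:

// Illustrative sketch (not repository code) of the leftover redistribution.
#include <functional>
#include <vector>

void redistribute(std::vector<double> &mem_limits, double obj_size,
                  const std::function<double(int, double)> &consume)
{
    double rem_mem = 0;   // pooled bytes the finished vdisks could not use
    long rem_vdisks = 0;  // vdisks that exhausted their limit and want more

    // First pass: give each vdisk its initial limit.
    for (size_t i = 0; i < mem_limits.size(); i++) {
        double unused = consume((int)i, mem_limits[i]);
        if (unused < obj_size)
            rem_vdisks++;                 // still hungry, keeps a nonzero limit
        else {
            rem_mem += unused;            // return the surplus to the pool
            mem_limits[i] = 0;            // done, excluded from later rounds
        }
    }
    // Re-split the pool among the hungry vdisks until it is (nearly) gone.
    while (rem_mem > obj_size && rem_vdisks > 0) {
        double share = rem_mem / rem_vdisks;
        rem_mem = 0;
        rem_vdisks = 0;
        for (size_t i = 0; i < mem_limits.size(); i++) {
            if (mem_limits[i] == 0)
                continue;                 // satisfied in an earlier round
            double unused = consume((int)i, share);
            if (unused < obj_size)
                rem_vdisks++;
            else {
                rem_mem += unused;
                mem_limits[i] = 0;
            }
        }
    }
}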
code/plot-exp2.py

(diff collapsed)
code/run (new file, mode 100644)

#!/bin/bash
cmake -H. -Bbuild
cmake --build build -- -j3
\ No newline at end of file
code/util.h
...
...
@@ -140,7 +140,8 @@ enum PartitionSchemeHM1
{
  HM1_PROP_SHARE,
  HM1_FAIR_SHARE,
  HM1_NO_PART
  HM1_NO_PART,
  HM1_WSS_SHARE,
};
enum PartitionSchemeHM1HM3
...
...