Commit de2f5889 authored by RAHUL BHARDWAJ

Prewarm at head analysis

parent 8f834207
@@ -139,9 +139,9 @@ Cache::load_dump_nopart(struct kv_analyze_all *kvs, long n, long prewarm_set_lim
             continue;
         }
         if (mem_usage[POOL_MULTI] + SIZE_HM3_OBJ <= mem_limit[POOL_MULTI])
-            fetch_into_multi(&node->data);
+            fetch_into_multi(&node->data,1);
         else if (mem_usage[POOL_SINGLE] + SIZE_HM3_OBJ <= mem_limit[POOL_SINGLE])
-            fetch_into_single(&node->data);
+            fetch_into_single(&node->data,1);
         else
         {
             *pos = n;
@@ -175,9 +175,9 @@ Cache::load_dump_nopart(struct kv_analyze_all *kvs, long n, long prewarm_set_lim
             continue;
         }
         if (mem_usage[POOL_MULTI] + SIZE_HM1_OBJ <= mem_limit[POOL_MULTI])
-            fetch_into_multi(&node->data);
+            fetch_into_multi(&node->data,1);
         else if (mem_usage[POOL_SINGLE] + SIZE_HM1_OBJ <= mem_limit[POOL_SINGLE])
-            fetch_into_single(&node->data);
+            fetch_into_single(&node->data,1);
         else
         {
             *pos = n;
@@ -260,7 +260,7 @@ int Cache::load_dump(BitArray *ba, int vdisk, HMType type, int prewarm_set_share
                 ba->unset_bit(i * 64 + j);
                 continue;
             }
-            fetch_into_multi(&node->data);
+            fetch_into_multi(&node->data,0);
             //if (type == HASHMAP1)
             //    ba1[vtl[vdisk]]->set_bit(i * 64 + j);
             //else if (type == HASHMAP3)
@@ -393,9 +393,9 @@ Cache::load_vdisk_ba(kv_analyze_type *kvs, long n, long prewarm_set_limit, long
             break;
         }
         if (kvs[k].pool == POOL_MULTI)
-            fetch_into_multi(&node->data);
+            fetch_into_multi(&node->data,0);
         else
-            fetch_into_single(&node->data);
+            fetch_into_single(&node->data,0);
     }
     *pos = k;
     return prewarm_set_limit - cursize;
...
#include "cache.h" #include "cache.h"
void Cache::fetch_into_single(Pair *val) void Cache::fetch_into_single(Pair *val,bool pre_warming)
{ {
int i, c = cache_idx; int i, c = cache_idx;
HMType type = val->type; HMType type = val->type;
...@@ -9,7 +9,7 @@ void Cache::fetch_into_single(Pair *val) ...@@ -9,7 +9,7 @@ void Cache::fetch_into_single(Pair *val)
long egroup; long egroup;
if (policy_replace == REPL_LRU) if (policy_replace == REPL_LRU)
insert_lru_list(c, lru_list[POOL_SINGLE], val, counter); insert_lru_list(c, lru_list[POOL_SINGLE], val, counter, pre_warming, this->prewarm_at_head);
//else if (policy_replace == REPL_LFU) //else if (policy_replace == REPL_LFU)
// insert_lfu_list(c, &lfu_list[POOL_SINGLE], val); // insert_lfu_list(c, &lfu_list[POOL_SINGLE], val);
...@@ -41,7 +41,7 @@ void Cache::fetch_into_single(Pair *val) ...@@ -41,7 +41,7 @@ void Cache::fetch_into_single(Pair *val)
while (mem_usage[POOL_SINGLE] > mem_limit[POOL_SINGLE]) while (mem_usage[POOL_SINGLE] > mem_limit[POOL_SINGLE])
{ {
if (policy_replace == REPL_LRU) if (policy_replace == REPL_LRU)
ev = evict_lru_list(c, lru_list[POOL_SINGLE]); ev = evict_lru_list(c, lru_list[POOL_SINGLE],0,evct_count_prewarmed);
//else if (policy_replace == REPL_LFU) //else if (policy_replace == REPL_LFU)
// ev = evict_lfu_list(c, &lfu_list[POOL_SINGLE]); // ev = evict_lfu_list(c, &lfu_list[POOL_SINGLE]);
assert(ev != nullptr); assert(ev != nullptr);
...@@ -76,7 +76,7 @@ void Cache::fetch_into_single(Pair *val) ...@@ -76,7 +76,7 @@ void Cache::fetch_into_single(Pair *val)
} }
} }
void Cache::single_to_multi(Pair *val) void Cache::single_to_multi(Pair *val, bool prewarming)
{ {
int i, c = cache_idx; int i, c = cache_idx;
HMType type = val->type; HMType type = val->type;
...@@ -86,8 +86,8 @@ void Cache::single_to_multi(Pair *val) ...@@ -86,8 +86,8 @@ void Cache::single_to_multi(Pair *val)
if (policy_replace == REPL_LRU) if (policy_replace == REPL_LRU)
{ {
delete_lru_list(c, lru_list[POOL_SINGLE], val); delete_lru_list(c, lru_list[POOL_SINGLE], val, prewarming);
insert_lru_list(c, lru_list[POOL_MULTI], val, counter); insert_lru_list(c, lru_list[POOL_MULTI], val, counter, prewarming, this->prewarm_at_head);
} }
//else if (policy_replace == REPL_LFU) //else if (policy_replace == REPL_LFU)
//{ //{
...@@ -122,7 +122,7 @@ void Cache::single_to_multi(Pair *val) ...@@ -122,7 +122,7 @@ void Cache::single_to_multi(Pair *val)
while (mem_usage[POOL_MULTI] > mem_limit[POOL_MULTI]) while (mem_usage[POOL_MULTI] > mem_limit[POOL_MULTI])
{ {
if (policy_replace == REPL_LRU) if (policy_replace == REPL_LRU)
ev = evict_lru_list(c, lru_list[POOL_MULTI]); ev = evict_lru_list(c, lru_list[POOL_MULTI],prewarming,evct_count_prewarmed);
//else if (policy_replace == REPL_LFU) //else if (policy_replace == REPL_LFU)
// ev = evict_lfu_list(c, &lfu_list[POOL_MULTI]); // ev = evict_lfu_list(c, &lfu_list[POOL_MULTI]);
assert(ev != nullptr); assert(ev != nullptr);
...@@ -158,7 +158,7 @@ void Cache::single_to_multi(Pair *val) ...@@ -158,7 +158,7 @@ void Cache::single_to_multi(Pair *val)
} }
} }
void Cache::fetch_into_multi(Pair *val) void Cache::fetch_into_multi(Pair *val, bool prewarming)
{ {
int i, c = cache_idx; int i, c = cache_idx;
HMType type = val->type; HMType type = val->type;
...@@ -167,7 +167,7 @@ void Cache::fetch_into_multi(Pair *val) ...@@ -167,7 +167,7 @@ void Cache::fetch_into_multi(Pair *val)
long egroup; long egroup;
if (policy_replace == REPL_LRU) if (policy_replace == REPL_LRU)
insert_lru_list(c, lru_list[POOL_MULTI], val, counter); insert_lru_list(c, lru_list[POOL_MULTI], val, counter, prewarming, this->prewarm_at_head);
//else if (policy_replace == REPL_LFU) //else if (policy_replace == REPL_LFU)
// insert_lfu_list(c, &lfu_list[POOL_MULTI], val); // insert_lfu_list(c, &lfu_list[POOL_MULTI], val);
...@@ -199,7 +199,7 @@ void Cache::fetch_into_multi(Pair *val) ...@@ -199,7 +199,7 @@ void Cache::fetch_into_multi(Pair *val)
while (mem_usage[POOL_MULTI] > mem_limit[POOL_MULTI]) while (mem_usage[POOL_MULTI] > mem_limit[POOL_MULTI])
{ {
if (policy_replace == REPL_LRU) if (policy_replace == REPL_LRU)
ev = evict_lru_list(c, lru_list[POOL_MULTI]); ev = evict_lru_list(c, lru_list[POOL_MULTI], prewarming,evct_count_prewarmed);
//else if (policy_replace == REPL_LFU) //else if (policy_replace == REPL_LFU)
// ev = evict_lfu_list(c, &lfu_list[POOL_MULTI]); // ev = evict_lfu_list(c, &lfu_list[POOL_MULTI]);
assert(ev != nullptr); assert(ev != nullptr);
...@@ -235,15 +235,15 @@ void Cache::fetch_into_multi(Pair *val) ...@@ -235,15 +235,15 @@ void Cache::fetch_into_multi(Pair *val)
} }
} }
void Cache::touch_pool(Pair *val, int pool) void Cache::touch_pool(Pair *val, int pool, bool prewarming)
{ {
int c = cache_idx; int c = cache_idx;
if (policy_replace == REPL_LRU) if (policy_replace == REPL_LRU)
{ {
if (lru_list[pool][REAR] != val) if (lru_list[pool][REAR] != val)
{ {
delete_lru_list(c, lru_list[pool], val); delete_lru_list(c, lru_list[pool], val, prewarming);
insert_lru_list(c, lru_list[pool], val, counter); insert_lru_list(c, lru_list[pool], val, counter, prewarming, this->prewarm_at_head);
} }
} }
//else if (policy_replace == REPL_LFU) //else if (policy_replace == REPL_LFU)
...@@ -260,20 +260,20 @@ void *Cache::lookup(HMType type, void *k, int vdisk) ...@@ -260,20 +260,20 @@ void *Cache::lookup(HMType type, void *k, int vdisk)
else if (!val->in_cache(c)) else if (!val->in_cache(c))
{ {
miss_count_vdisk[type][vtl[vdisk]]++; miss_count_vdisk[type][vtl[vdisk]]++;
fetch_into_single(val); fetch_into_single(val,0);
} }
else if (val->in_single(c)) else if (val->in_single(c))
{ {
hit_count_vdisk[POOL_SINGLE][type][vtl[vdisk]]++; hit_count_vdisk[POOL_SINGLE][type][vtl[vdisk]]++;
if (singlepool) if (singlepool)
touch_pool(val, POOL_SINGLE); touch_pool(val, POOL_SINGLE,0);
else else
single_to_multi(val); single_to_multi(val, 0);
} }
else if (val->in_multi(c)) else if (val->in_multi(c))
{ {
hit_count_vdisk[POOL_MULTI][type][vtl[vdisk]]++; hit_count_vdisk[POOL_MULTI][type][vtl[vdisk]]++;
touch_pool(val, POOL_MULTI); touch_pool(val, POOL_MULTI, 0);
} }
return val->val; return val->val;
} }
......
@@ -59,8 +59,12 @@ public:
     long *miss_count_vdisk[NUM_HASHMAPS];     // hashmaps X vdisks
     long *evct_count_vdisk_hm1[NUM_POOLS];    // pools X vdisks
     long evct_count[NUM_POOLS][NUM_HASHMAPS]; // pools X hashmaps
+    long evct_count_prewarmed;                // cumulative evictions of prewarmed objects
     long *io_count_vdisk;                     // vdisk
+    bool prewarm_till_first_eviction;         // stop prewarming once a prewarmed object gets evicted
+    bool prewarm_at_head;                     // insert prewarmed objects at the front (eviction end) of the LRU list
     BitSet **bs1;
     BitSet *bs3;
@@ -96,6 +100,9 @@ public:
         this->num_vdisks = wload->num_vdisks;
         this->wload_name = wload->name;
         this->singlepool = false;
+        this->evct_count_prewarmed = 0;
+        this->prewarm_till_first_eviction = false;
+        this->prewarm_at_head = false;
         this->io_count_vdisk = new long[num_vdisks];
         memset(this->io_count_vdisk, 0, num_vdisks * sizeof(long));
@@ -199,13 +206,13 @@ public:
     void *lookup(HMType type, void *k, int vdisk);
-    void fetch_into_single(Pair *val);
-    void single_to_multi(Pair *val);
-    void fetch_into_multi(Pair *val);
-    void touch_pool(Pair *val, int pool);
+    void fetch_into_single(Pair *val, bool prewarming);
+    void single_to_multi(Pair *val, bool prewarming);
+    void fetch_into_multi(Pair *val, bool prewarming);
+    void touch_pool(Pair *val, int pool, bool prewarming);
     long snapshot_bitmaps(int epoch, int ssr, bool inmem, int experiment);
...
@@ -29,7 +29,7 @@ void exp2()
     pthread_t cache_thread, io_thread;
-    Workload *wload = new Workload("real2"); // <-------------------
+    Workload *wload = new Workload("wss_iops100"); // <-------------------
     Hashmap *hm1 = new Hashmap(&comp_hm1, &print_hm1, nullptr);
     Hashmap *hm3 = new Hashmap(&comp_hm3, &print_hm3, nullptr);
     Cache *cache = new Cache(CACHE_A, hm1, hm3, wload, 6400); // <-------------------
@@ -56,13 +56,17 @@ void exp2()
     //    {5, "5%"},
     //    {10, "10%"},
     //    {25, "25%"},
-    //    {50, "50%"},
-        {75, "75%"}};
+    //    {50, "50%"}};
+        {100, "100%"}};
     //    {100, "100%"}};
     struct ns_pair prewarm_rate_limit[] = {
     //    {0, "0MBps"},
-    //    {50, "50MBps"},
-        {100, "100MBps"}};
+        {20, "20MBps"},
+        {50, "50MBps"},
+        {100, "100MBps"}
+        {500, "500MBps"}};
+        {10000, "10000MBps"}};
     //    {500, "500MBps"}};
     struct ns_pair heuristic[] = {
     //{K_FREQ, "K_FREQ"},
@@ -106,7 +110,7 @@ void exp2()
     rp->trace_file = wload->tracefile;
     rp->wss_window = 600;
     rp->wss_stride = 300;
-    rp->moment = 2400;
+    rp->moment = 5400;
     asprintf(&basedirname, PRESTO_BASEPATH "results/%s/%s/", "exp2", wload->name);
     if (!file_exists(basedirname))
@@ -254,7 +258,7 @@ void *run_exp2(void *data)
     int (*get_req)(struct bio_req *, void *);
     double **res_hr, res_all_hr[MAX_TIME_SEC + 1];
-    long **res_io_count, **res_hm1_miss, **res_hm3_miss, **res_wss, *res_usage, **res_evct;
+    long **res_io_count, **res_hm1_miss, **res_hm3_miss, **res_wss, *res_usage, **res_evct, *prewarmed_evct_cnt;
     int **res_hm1, **res_hm3;
     BitArray **agg;
@@ -290,6 +294,7 @@ void *run_exp2(void *data)
         res_wss[i] = new long[MAX_TIME_SEC + 2 - wss_stride];
     }
     res_usage = new long[MAX_TIME_SEC + 1];
+    prewarmed_evct_cnt = new long [MAX_TIME_SEC + 1];
     res_evct = new long *[NUM_POOLS];
     res_evct[POOL_SINGLE] = new long[MAX_TIME_SEC + 1];
     res_evct[POOL_MULTI] = new long[MAX_TIME_SEC + 1];
@@ -369,6 +374,7 @@ void *run_exp2(void *data)
     }
     res_all_hr[plot_epoch] = cache->get_hit_ratio();
     res_usage[plot_epoch] = cache->mem_usage[POOL_SINGLE] + cache->mem_usage[POOL_MULTI];
+    prewarmed_evct_cnt[plot_epoch] = cache->evct_count_prewarmed;
     res_evct[POOL_SINGLE][plot_epoch] = cache->evct_count[POOL_SINGLE][HASHMAP1] +
         cache->evct_count[POOL_SINGLE][HASHMAP3];
     res_evct[POOL_MULTI][plot_epoch] = cache->evct_count[POOL_MULTI][HASHMAP1] +
@@ -437,6 +443,7 @@ void *run_exp2(void *data)
     }
     res_all_hr[plot_epoch] = cache->get_hit_ratio();
     res_usage[plot_epoch] = cache->mem_usage[POOL_SINGLE] + cache->mem_usage[POOL_MULTI];
+    prewarmed_evct_cnt[plot_epoch] = cache->evct_count_prewarmed;
     res_evct[POOL_SINGLE][plot_epoch] = cache->evct_count[POOL_SINGLE][HASHMAP1] +
         cache->evct_count[POOL_SINGLE][HASHMAP3];
     res_evct[POOL_MULTI][plot_epoch] = cache->evct_count[POOL_MULTI][HASHMAP1] +
@@ -495,7 +502,7 @@ void *run_exp2(void *data)
     evct_file = fopen(evct_fname, "w+");
     assert(evct_file != nullptr);
     for (j = 0; j < max_plot_epochs; j++)
-        fprintf(all_hr_file, "%d,%ld,%ld\n", j, res_evct[POOL_SINGLE][j], res_evct[POOL_MULTI][j]);
+        fprintf(evct_file, "%d,%ld,%ld,%ld\n", j, res_evct[POOL_SINGLE][j], res_evct[POOL_MULTI][j], prewarmed_evct_cnt[j]);
     fflush(evct_file);
     fclose(evct_file);
@@ -577,6 +584,7 @@ prewarm_step:
     }
     res_all_hr[plot_epoch] = cache->get_hit_ratio();
     res_usage[plot_epoch] = cache->mem_usage[POOL_SINGLE] + cache->mem_usage[POOL_MULTI];
+    prewarmed_evct_cnt[plot_epoch] = cache->evct_count_prewarmed;
     res_evct[POOL_SINGLE][plot_epoch] = cache->evct_count[POOL_SINGLE][HASHMAP1] +
         cache->evct_count[POOL_SINGLE][HASHMAP3];
     res_evct[POOL_MULTI][plot_epoch] = cache->evct_count[POOL_MULTI][HASHMAP1] +
@@ -590,6 +598,8 @@ prewarm_step:
     }
     while (request->ts >= counter && (rem_vdisks_prewarm > 0 || pre_size_limit >= SIZE_HM1_OBJ))
     {
+        if (cache->prewarm_till_first_eviction && cache->evct_count_prewarmed>0)
+            break;
         prewarm_set_used_total = 0;
         for (i = 0; i < num_vdisks; i++)
             prewarm_set_used_size[i] = prewarm_set_used_objs[i] = 0;
@@ -669,6 +679,7 @@ prewarm_step:
     }
     res_all_hr[plot_epoch] = cache->get_hit_ratio();
     res_usage[plot_epoch] = cache->mem_usage[POOL_SINGLE] + cache->mem_usage[POOL_MULTI];
+    prewarmed_evct_cnt[plot_epoch] = cache->evct_count_prewarmed;
     res_evct[POOL_SINGLE][plot_epoch] = cache->evct_count[POOL_SINGLE][HASHMAP1] +
         cache->evct_count[POOL_SINGLE][HASHMAP3];
     res_evct[POOL_MULTI][plot_epoch] = cache->evct_count[POOL_MULTI][HASHMAP1] +
@@ -719,7 +730,7 @@ prewarm_step:
     evct_file = fopen(evct_fname, "w+");
     assert(evct_file != nullptr);
     for (j = 0; j < max_plot_epochs; j++)
-        fprintf(all_hr_file, "%d,%ld,%ld\n", j, res_evct[POOL_SINGLE][j], res_evct[POOL_MULTI][j]);
+        fprintf(evct_file, "%d,%ld,%ld, %ld\n", j, res_evct[POOL_SINGLE][j], res_evct[POOL_MULTI][j], prewarmed_evct_cnt[j]);
     fflush(evct_file);
     fclose(evct_file);
...
@@ -41,6 +41,8 @@ struct run_params
     int vdisk;
     int tpool;
+    bool one_disk = true;
+    string disk_name = "501";
 };
 struct mig_cache_state
...
@@ -31,6 +31,7 @@ struct CacheMetaLRU
 struct CacheMeta
 {
     unsigned char in;
+    bool inserted_by_prewarming;
     CacheMetaLRU lru;
     //CacheMetaLFU lfu;
 };
...
@@ -26,17 +26,25 @@ def main(wload, target_cache_size):
     # scname = ['100\%', '50\%', '33\%', '25\%', '20\%', '1', '2', '3', '4', '5', '1\%', '5\%', '10\%', '15\%', '20\%']
     # mlimits = ['5%', '10%', '25%', '50%', '75%', '100%']
     # mname = ['5%', '10%', '25%', '50%', '75%', '100%']
-    mlimits = ['75%']
-    mname = ['75%']
+    # mlimits = ['75%']
+    # mname = ['75%']
+    # mlimits = ['50%']
+    # mname = ['50%']
+    mlimits = ['100%']
+    mname = ['100%']
     # mlimits = ['75%', '100%']
     # mname = ['75%', '100%']
     frerern_func = ['LNR', 'QDR']
     func_name = ['Linear', 'Quadratic']
     # prewarmrates = ['0MBps','500MBps', '100MBps', '50MBps']
     # pratenames = ['∞', '500 MBps', '100 MBps', '50 MBps']
-    prewarmrates = ['100MBps']
-    pratenames = ['100 MBps']
-    moment = 2400
+    # prewarmrates = ['1000MBps']
+    # pratenames = ['1000 MBps']
+    prewarmrates = ['10000MBps']
+    pratenames = ['10000 MBps']
+    moment = 5400
     wss_window = 600
     wss_stride = 300
     # hr_windows = [5, 15, 30]
@@ -150,10 +158,19 @@ def main(wload, target_cache_size):
                 basedir_pre = basedir_nop + mlimits[mlim] + '-' + prewarmrates[prate] + '-' + \
                     heuristics[hrstc] + '-' + ((frerern_func[ffn] + '-') if (hrstc == 2) else '') + \
                     'x/'
-                # print(basedir_pre[len(basedir_nop):-1])
+                print(basedir_pre[len(basedir_nop):-1])
                 hr_wp_cache = []
                 tt_hr_val_before_fail_wp = 14400
                 avg_hr_wp_after_fail = []
+                total_evictions = []
+                evictions_due_to_load = []
+                prewarmed_objects_evictions = []
+                with open(basedir_pre + 'ALL-EVCT.csv') as prefile:
+                    lines = list(csv.reader(prefile))
+                    for line in lines:
+                        total_evictions.append(float(line[1]) + float(line[2]))
+                        prewarmed_objects_evictions.append(float(line[3]))
+                        evictions_due_to_load.append(float(line[1]) + float(line[2]) - float(line[3]))
                 with open(basedir_pre + 'ALL-HR.csv') as prefile:
                     lines = list(csv.reader(prefile))
                     for line in lines:
@@ -273,6 +290,8 @@ def main(wload, target_cache_size):
                     iops = [3.53, 0.12, 51.17, 55.22, 362.62, 0.88, 212.14, 11.97]
                 elif wload == 'real1':
                     iops = [767.01, 543.30, 945.15, 569.38 ,663.33 , 510.78]
+                elif wload == 'real2':
+                    iops = [6.6, 1.00, 2.00, 2.7, 18.57, 9.23]
                 else:
                     iops = [99.16, 24.56, 49.59, 199.25, 99.54, 149.74]
@@ -325,8 +344,8 @@ def main(wload, target_cache_size):
                 plt.ylabel('vDisk Hit Ratio')
                 # beg_sec = 4500
                 # end_sec = 9900
-                beg_sec = 1200
-                end_sec = 3600
+                beg_sec = 0
+                end_sec = 14400
                 if wload == 'rreal.4hr' or wload == 'scr_merg':
                     plt.ylim([0.4, 1.0])
                 else:
@@ -334,8 +353,8 @@ def main(wload, target_cache_size):
                 plt.xlim([beg_sec, end_sec])
                 # plt.xticks(np.arange(beg_sec + 900, end_sec - 900 + 1, 1800),
                 #            np.arange((beg_sec + 900) / 60, (end_sec - 900) / 60 + 1, 30).astype(int))
-                plt.xticks(np.arange(beg_sec, end_sec + 1, 600),
-                           np.arange(beg_sec, end_sec + 1 , 600).astype(int))
+                plt.xticks(np.arange(beg_sec, end_sec + 1, 1800),
+                           np.arange(beg_sec/60, end_sec/60 + 1 , 30).astype(int))
                 plt.savefig(basedir_pre[:-1] + '-vdisk-hr.png', dpi=300, bbox_inches='tight')
                 ############################################################################################
                 plt.clf()
@@ -387,7 +406,7 @@ def main(wload, target_cache_size):
                 plt.legend(loc='center right', fancybox=True, prop={'size': 12})
                 # beg_sec = 2400
                 # end_sec = 3600
-                beg_sec = 1200
+                beg_sec = 0
                 end_sec = 14400
                 if wload == 'wload_6gb':
                     plt.ylim([0.4, 0.9])
@@ -402,11 +421,30 @@ def main(wload, target_cache_size):
                 plt.xlim([beg_sec, end_sec])
                 # plt.xticks(np.arange(beg_sec, end_sec + 1, 1800),
                 #            np.arange(beg_sec / 60, end_sec / 60 + 1, 30).astype(int))
-                plt.xticks(np.arange(beg_sec, end_sec + 1, 2400),
-                           np.arange(beg_sec / 60, end_sec / 60 + 1, 40).astype(int))
+                plt.xticks(np.arange(beg_sec, end_sec + 1, 1800),
+                           np.arange(beg_sec / 60, end_sec / 60 + 1, 30).astype(int))
                 # plt.show()
                 plt.savefig(basedir_pre[:-1] + '-hr.png', dpi=300, bbox_inches='tight')
                 ############################################################################################
+                plt.clf()
+                plt.xlabel('Time (minutes)')
+                plt.ylabel('Cache Evictions')
+                plt.plot(x, evictions_due_to_load, color='g', linewidth=1.3, label='non_prewarmed', zorder=1, alpha=0.75)
+                plt.plot(x, prewarmed_objects_evictions, color='b', linewidth=1.3, label='prewarmed', zorder=2)
+                # plt.plot(x, hr_wp_cache, color='r', linewidth=1.3, label='with prewarming', zorder=5)
+                plt.legend(loc='center right', fancybox=True, prop={'size': 12})
+                beg_sec = 0
+                end_sec = 14400
+                # plt.ylim([0, 1000000])
+                # plt.yticks(np.arrange(0,1000000),5)
+                plt.xlim([beg_sec, end_sec])
+                # plt.xticks(np.arange(beg_sec, end_sec + 1, 1800),
+                #            np.arange(beg_sec / 60, end_sec / 60 + 1, 30).astype(int))
+                plt.xticks(np.arange(beg_sec, end_sec + 1,1800),
+                           np.arange(beg_sec / 60, end_sec / 60 + 1, 30).astype(int))
+                # plt.show()
+                plt.savefig(basedir_pre[:-1] + '-evictions.png', dpi=300, bbox_inches='tight')
+                ############################################################################################
                 for nn in hr_windows:
                     plt.clf()
                     plt.xlabel('Time (minutes)')
@@ -769,8 +807,8 @@ if __name__ == '__main__':
     # matplotlib.use('agg')
     threads = []
     wloads = [('wload_6gb', '6 GiB'), ('wload_4gb', '4 GiB'), ('real1', '6 GiB'), ('scr_merg', '64 MiB'), ('real2', '6 GiB')]
-    wload = wloads[4]
-    # wload = ('wss_iops100','6 GiB')
+    wload = wloads[2]
+    wload = ('wss_iops100','6 GiB')
     # for wload in wloads:
     #     x = threading.Thread(target=main, args=(wload[0], wload[1]))
     #     threads.append(x)
...
#include "policy.h" #include "policy.h"
void insert_lru_list(int c, Pair *list[], Pair *val, int ts) void insert_lru_list(int c, Pair *list[], Pair *val, int ts, bool prewarming, bool insert_at_head)
{ {
if(list[FRONT]) if(list[FRONT])
assert(list[FRONT]->meta[c]->lru.next); assert(list[FRONT]->meta[c]->lru.next);
val->meta[c]->inserted_by_prewarming = prewarming;
if (list[FRONT] == nullptr) if (list[FRONT] == nullptr)
{ {
val->meta[c]->lru.next = val; val->meta[c]->lru.next = val;
...@@ -17,10 +18,13 @@ void insert_lru_list(int c, Pair *list[], Pair *val, int ts) ...@@ -17,10 +18,13 @@ void insert_lru_list(int c, Pair *list[], Pair *val, int ts)
val->meta[c]->lru.ts = ts; val->meta[c]->lru.ts = ts;
list[REAR]->meta[c]->lru.next = val; list[REAR]->meta[c]->lru.next = val;
list[FRONT]->meta[c]->lru.prev = val; list[FRONT]->meta[c]->lru.prev = val;
list[REAR] = val; if (insert_at_head && prewarming)
list[FRONT] = val;
else
list[REAR] = val;
} }
void delete_lru_list(int c, Pair *list[], Pair *val) void delete_lru_list(int c, Pair *list[], Pair *val, bool prewarming)
{ {
Pair *prev, *next; Pair *prev, *next;
if (list[FRONT] == list[REAR]) if (list[FRONT] == list[REAR])
...@@ -38,7 +42,7 @@ void delete_lru_list(int c, Pair *list[], Pair *val) ...@@ -38,7 +42,7 @@ void delete_lru_list(int c, Pair *list[], Pair *val)
next->meta[c]->lru.prev = prev; next->meta[c]->lru.prev = prev;
} }
Pair *evict_lru_list(int c, Pair *list[]) Pair *evict_lru_list(int c, Pair *list[], bool prewarming, long &evct_count_prewarmed)
{ {
Pair *ev = list[FRONT]; Pair *ev = list[FRONT];
Pair *prev, *next; Pair *prev, *next;
...@@ -53,6 +57,8 @@ Pair *evict_lru_list(int c, Pair *list[]) ...@@ -53,6 +57,8 @@ Pair *evict_lru_list(int c, Pair *list[])
prev->meta[c]->lru.next = next; prev->meta[c]->lru.next = next;
next->meta[c]->lru.prev = prev; next->meta[c]->lru.prev = prev;
list[FRONT] = next; list[FRONT] = next;
if (ev->meta[c]->inserted_by_prewarming)
evct_count_prewarmed++;
return ev; return ev;
} }
......
@@ -3,11 +3,11 @@
 #include "util.h"
 #include "hashmap.h"
-void insert_lru_list(int c, Pair *list[], Pair *val, int ts);
-void delete_lru_list(int c, Pair *list[], Pair *val);
-Pair *evict_lru_list(int c, Pair *list[]);
+void insert_lru_list(int c, Pair *list[], Pair *val, int ts, bool prewarming, bool insert_at_head);
+void delete_lru_list(int c, Pair *list[], Pair *val, bool prewarming);
+Pair *evict_lru_list(int c, Pair *list[], bool prewarming,long &evct_count_prewarmed);
 //void insert_lfu_list(int c, LFUList **list, Pair *val);
...
Tried the following experiments
1) Insert the prewarmed objects at the front of the LRU queue; objects brought in by the workload itself are still inserted at the back. (A minimal sketch of this insertion policy follows this list.)
1.1) Do the entire prewarming upfront. This gives a significant boost in performance, as expected. The likely reason is that the most important prewarmed objects end up at the back of the queue, whereas with prewarming at the tail the important objects drift to the front. So the key thing to ensure is that the important objects are not evicted, not that the first eviction is pushed back: evictions start earlier, but it is the less important objects that get evicted first.
Things to try ->
a) Try a 50% prewarming size (should decrease performance). This happened as expected, and performance improved even further with a prewarming set size of 100%. So, given that we prewarm at the head, performance increases monotonically with the prewarming set size, even all the way up to 100%. This is an important observation, because earlier we were worried that a prewarming size of 100% might lead to contention with the incoming load.
b) Check on another trace. Checked on the synthetic trace: here too, inserting at the head with a 100% prewarm size far outperforms the normal algorithm.
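A minimal sketch of the head-insert policy from 1) above. It is not the simulator's actual code: the real insert_lru_list in policy.cc works on the circular Pair list and takes the prewarming and insert_at_head flags added in this commit, whereas Node and LRUList below are simplified, illustrative types.

struct Node
{
    Node *prev = nullptr, *next = nullptr;
};

struct LRUList
{
    Node *front = nullptr; // eviction end (victims are taken from here)
    Node *rear = nullptr;  // MRU end (workload inserts and hits go here)

    void insert(Node *val, bool prewarming, bool insert_at_head)
    {
        if (front == nullptr)
        {
            front = rear = val;
            return;
        }
        if (insert_at_head && prewarming)
        {
            // Prewarmed objects go to the eviction end: the objects prewarmed
            // first (the most important ones) end up deepest in the list, so
            // the least important prewarmed objects are evicted first.
            val->next = front;
            front->prev = val;
            front = val;
        }
        else
        {
            // Workload-driven inserts keep going to the rear, as before.
            val->prev = rear;
            rear->next = val;
            rear = val;
        }
    }
};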
1.2) Do the prewarming at a fixed rate (the more realistic scenario). This surprisingly performs worse than the normal algorithm. A possible explanation is that the IOPS rate is >= the prewarming rate, so the important objects are being pushed to the front. (A small sketch of how a rate limit can be viewed as a per-second prewarm budget follows this block.)
Overall observations
1) Doing the entire prewarming upfront seems to be the best option. Check whether that is feasible in practice; otherwise, test with a prewarming rate greater than the IOPS rate, which should push the important objects back and might have a similar effect.
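For the rate-limited variant in 1.2, one way to picture the mechanism is as a per-second byte budget derived from the configured MBps limit. The sketch below is only an illustration of that idea; PrewarmBudget and its members are hypothetical names, not types from the simulator, whose own bookkeeping (prewarm_set_limit, pre_size_limit in run_exp2) serves a similar purpose.

struct PrewarmBudget
{
    long bytes_per_sec;    // e.g. 100 MBps -> 100 * 1024 * 1024 bytes per simulated second
    long used_this_sec = 0;

    explicit PrewarmBudget(long mbps) : bytes_per_sec(mbps * 1024L * 1024L) {}

    // Returns true if another object of obj_size bytes may be prewarmed in the
    // current second; otherwise prewarming pauses until next_second() is called.
    bool try_consume(long obj_size)
    {
        if (used_this_sec + obj_size > bytes_per_sec)
            return false;
        used_this_sec += obj_size;
        return true;
    }

    void next_second() { used_this_sec = 0; }
};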
...
1) Try one vdisk at a time.
2) Plot the 5-minute average hit ratio while varying the prewarm rate and keeping the size fixed.
3) Plot the 5-minute average hit ratio while varying the size and keeping the rate fixed.
4) Do this for different types of datasets:
    1) WSS_IOPS
Tasks
1) Add a field in the Pair struct to indicate whether the node was inserted due to pre-warming or due to the normal load (a condensed sketch of how this commit wires that up follows below).
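Condensed view of how this commit implements the tagging (the flag actually lands on the per-cache metadata, CacheMeta, rather than on Pair itself): it is set when an entry is inserted into the LRU list and checked when an entry is evicted, which is what feeds the new evct_count_prewarmed counter and the extra column written to ALL-EVCT.csv. Meta below is a stripped-down stand-in for CacheMeta, not the repo's type.

struct Meta
{
    bool inserted_by_prewarming = false; // set by insert_lru_list()
};

inline void on_insert(Meta &m, bool prewarming)
{
    m.inserted_by_prewarming = prewarming;
}

inline void on_evict(const Meta &m, long &evct_count_prewarmed)
{
    if (m.inserted_by_prewarming)
        evct_count_prewarmed++; // cumulative; sampled once per plot epoch
}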
Results
1) On the real1 dataset:
1.1) Over the 20 minutes after failure, there are 212k evictions from single_pool.
1.2) Over the 20 minutes after failure, there are 589k evictions from multi_pool.
1.3) Over the 20 minutes after failure, there are 801k evictions in total.
1.4) Over the 20 minutes after failure, there are 201k evictions of pre-warmed objects, i.e. almost 25% of all evictions.
1.5) Almost all of the 201k evictions of pre-warmed objects happen in the first 5 minutes, so essentially no prewarmed objects remain in the cache after the first 5 minutes. This seems bad; try reducing the pre-warming size (maybe 50%?).
1.6) In the first 5 minutes, 171k of 235k evictions are of pre-warmed objects.
1.7) Another idea is to stop pre-warming once the first pre-warmed object is evicted (see the sketch after this list). Try this; the idea seems interesting, but can such a thing be done in an actual setting?
1.8) A prewarming size of 50% gives slightly better results.
1.9) Another possible idea is to fix the prewarm set size and then prewarm in reverse order, so that the important objects end up at the back of the queue.
1.10) Analyse the trace carefully for locality.
1.11) Estimate how many objects can fit in the cache.
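The "stop at first prewarmed eviction" idea from 1.7 is what the prewarm_till_first_eviction flag in this commit enables; the check added to the prewarm loop in run_exp2 boils down to the small predicate below. CacheCounters is a simplified stand-in for the relevant Cache fields, not a type from the repo.

struct CacheCounters
{
    bool prewarm_till_first_eviction = true;
    long evct_count_prewarmed = 0;
};

// Keep prewarming only while no prewarmed object has been evicted yet; once one
// is evicted, further prewarming is likely to displace data the live workload
// has already brought back into the cache.
bool should_continue_prewarming(const CacheCounters &cache)
{
    return !(cache.prewarm_till_first_eviction && cache.evct_count_prewarmed > 0);
}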
...
@@ -20,3 +20,8 @@ Key observations
1) The hit ratio 5 minutes after failure is 0.85 without pre-warming and 0.87 with pre-warming.
2) Pre-warming doesn't seem to help much here. This may be because the IOPS of the disks are very high (around 700-800), so the hit ratio recovers very quickly after failure.
Other Prewarming policies
1) I tried inserting the pre-warmed objects at the front of the LRU queue. The intuition was that the important objects would have been pushed to the back of the queue by the time prewarming is done, but the results with this strategy are worse than with the normal policy.
1) In plot_exp2.py: hrs_wop, where wop stands for "without pre-warm", and hrs_wp stands for "with prewarm".
2) Check what is in the ALL-USAGE file and in the 3rd column of the SHARE file.
3) Check how the WSS is estimated by the cache->estimate_wss function.
4) Check what is in the VDISK-OBJ.csv files.
Trying a new dataset
1) Put the merged trace file in traces/[wload_name]/[wload_name]Merged.csv.
2) In exp2.cc, change the name of the workload and rp->moment (if needed). No other configuration changes are required.
3) In plot_exp2.py, change the workload name passed from main, change the begin and end times in the plots, and change the moment (of failure).
4) Set the prewarming-related parameters, such as prewarming at head, in the constructor of the cache (a small illustrative helper follows this list).
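Step 4 above, as code: the knobs added in this commit default to false in the Cache constructor, so a run enables a policy by flipping the public flags, either in the constructor or right after the cache is built in exp2(). The helper below is only an illustrative sketch and does not exist in the repo.

#include "cache.h"

void configure_prewarm_policy(Cache *cache, bool at_head, bool till_first_eviction)
{
    cache->prewarm_at_head = at_head;                         // insert prewarmed objects at the eviction (FRONT) end
    cache->prewarm_till_first_eviction = till_first_eviction; // stop prewarming once a prewarmed object is evicted
}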
Doubts
1) Pre-warming doesn't give much benefit on the hit rate. This may be because the WSS is low, but how do we properly estimate the WSS? The current method gives only a relative estimate.
2) Check whether there is a problem due to the large disk size in real1.