Commit fff1436f authored by NILANJAN DAW

Write-through cache implemented on the client side.

Fixed client bottleneck caused by the shared channel and gRPC stub.
Added a synchronous-mode client with a client-side cache.
parent daca520b
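For context, a minimal usage sketch of the new synchronous client path. The class name CacheUsageSketch and the key/value literals are illustrative; it assumes a reachable master at ConfigConstants.HOST:PORT, as used by StorageServiceSync below.

import HpdosClient.lib.StorageModel;
import HpdosClient.lib.StorageServiceSync;
import java.util.Map;
import java.util.UUID;

public class CacheUsageSketch {
    public static void main(String[] args) {
        // One service instance (and hence one set of channels/stubs) per client
        // avoids the shared-channel bottleneck mentioned above.
        StorageServiceSync service = new StorageServiceSync(UUID.randomUUID().toString());
        service.initStorage(); // connects to the master and its read replicas

        service.create("example-key", "example-value");
        Map.Entry<StorageModel, Long> entry = service.read("example-key"); // cache miss: loaded from a replica
        service.read("example-key"); // served from the local cache
        service.update("example-key", "new-value", entry.getKey().getVersion()); // invalidates the cached entry on success
        service.read("example-key"); // miss again: reloads the updated version

        System.out.println(service.getCache().stats()); // hit/miss counters
        service.cleanup();
    }
}

Each worker thread in ClientRunnerSync builds its own StorageServiceSync in the same way, which is what removes the shared channel/stub bottleneck.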
app.name="HPDOS-Client"
app.version="0.1.4"
app.mode=rps
app.thread_count=10
app.rps=1000
app.concurrency=5
app.runtime=10
app.data_size=10
app.data_conversion_factor=B
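For example, with the values above (app.data_size=10, app.data_conversion_factor=B), createString() in ClientRunnerSync below allocates char[10/2] = char[5], i.e. a 10-byte payload, since each Java char occupies 2 bytes; a factor of K, M or G multiplies that by 1000, 1000^2 or 1000^3 respectively (e.g. data_size=10 with factor K gives 5 * 1000 = 5000 chars, roughly 10 KB).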
@@ -53,7 +53,7 @@ dependencies {
application {
// Define the main class for the application.
-mainClass = 'HpdosClient.ClientRunner'
+mainClass = 'HpdosClient.ClientRunnerSync'
}
sourceSets {
/*
* Copyright 2021 Nilanjan Daw, Synerg Lab, Department of CSE, IIT Bombay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This Java source file was generated by the Gradle 'init' task.
*/
package HpdosClient;
import HpdosClient.lib.StorageModel;
import HpdosClient.lib.StorageServiceSync;
import com.google.common.base.Stopwatch;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.*;
import java.util.concurrent.*;
public class ClientRunnerSync {
public static int parallelCount;
public static int runtime;
private final String clientID;
public static String propertiesFile;
private int cCreate, cRead, cUpdate, cDelete;
public volatile boolean experimentEnded = false; // set by main, read by the timer thread
private Queue<Long> createTime, updateTime, readTime, deleteTime;
private final Properties properties;
public ClientRunnerSync() {
clientID = UUID.randomUUID().toString();
properties = new Properties();
try {
InputStream inputStream = new FileInputStream(propertiesFile);
properties.load(inputStream);
parallelCount = Integer.parseInt((String) properties.get("app.thread_count"));
runtime = Integer.parseInt((String) properties.get("app.runtime"));
cCreate = Integer.parseInt((String) properties.get("app.cycle_create"));
cRead = Integer.parseInt((String) properties.get("app.cycle_read"));
cUpdate = Integer.parseInt((String) properties.get("app.cycle_update"));
cDelete = Integer.parseInt((String) properties.get("app.cycle_delete"));
createTime = new ConcurrentLinkedQueue<>();
updateTime = new ConcurrentLinkedQueue<>();
readTime = new ConcurrentLinkedQueue<>();
deleteTime = new ConcurrentLinkedQueue<>();
} catch (IOException e) {
e.printStackTrace();
}
}
public String getGreeting() {
return "Hello World!";
}
private String createString() {
double dataSize = Double.parseDouble((String) properties.get("app.data_size"));
dataSize /= 2.0; // each Java char is 2 bytes, so halve the target byte count
String conversionFactor = (String) properties.get("app.data_conversion_factor");
int multiplier = 1;
switch (conversionFactor) {
case "G": multiplier *= 1000; // intentional fall-through: G = 1000 * M
case "M": multiplier *= 1000; // intentional fall-through: M = 1000 * K
case "K": multiplier *= 1000; // B (or any other value) keeps multiplier = 1
}
char[] data = new char[(int)(dataSize * multiplier)];
return new String(data);
}
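// Runs create/read/update/delete cycles against a per-thread StorageServiceSync
// instance, recording per-operation latencies, until the configured runtime elapses.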
public double runExperiment(String id, long experimentStartTime) {
StorageServiceSync storageService = new StorageServiceSync(this.clientID);
storageService.initStorage();
String value = createString(), updatedValue = createString();
for (;;) {
String key = id + (int) (Math.random() * Integer.MAX_VALUE);
for (int j = 0; j < cCreate; j++) {
long timestampCreateStart = storageService.create(key, value);
createTime.add(System.currentTimeMillis() - timestampCreateStart);
}
for (int j = 0; j < cRead; j++) {
AbstractMap.Entry<StorageModel, Long> data = storageService.read(key);
readTime.add(System.currentTimeMillis() - data.getValue());
}
for (int j = 0; j < cUpdate; j++) {
AbstractMap.Entry<StorageModel, Long> data = storageService.read(key);
long timestampUpdateStart = storageService.update(key, updatedValue,
data.getKey().getVersion());
readTime.add(System.currentTimeMillis() - data.getValue());
updateTime.add(System.currentTimeMillis() - timestampUpdateStart);
}
for (int j = 0; j < cDelete; j++) {
AbstractMap.Entry<StorageModel, Long> data = storageService.read(key);
long timestampDeleteStart = storageService.delete(key, data.getKey().getVersion());
readTime.add(System.currentTimeMillis() - data.getValue());
deleteTime.add(System.currentTimeMillis() - timestampDeleteStart);
}
long currentTime = System.currentTimeMillis();
if ((currentTime - experimentStartTime) >= runtime * 1000L)
break;
}
System.out.println(storageService.getCache().stats());
storageService.cleanup();
return 0;
}
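// Prints the elapsed experiment time every 5 seconds and stops once experimentEnded is set.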
private void timerService() {
Stopwatch stopwatch = Stopwatch.createUnstarted();
stopwatch.start();
Timer timer = new Timer();
timer.scheduleAtFixedRate(new TimerTask() {
@Override
public void run() {
if (experimentEnded) {
timer.cancel();
timer.purge();
}
System.out.println("Experiment ran: " + stopwatch);
}
}, 5000, 5000);
}
private void printStatistics(double totalRuntime) {
long readQps = 0, createQps = 0, updateQps = 0, deleteQps = 0;
double avgRead = 0, avgCreate = 0, avgUpdate = 0, avgDelete = 0;
for (Long time: this.readTime) {
readQps++;
avgRead += time;
}
avgRead = readQps > 0 ? avgRead / readQps : 0; // guard against division by zero when no reads were issued
for (Long time: this.createTime) {
createQps++;
avgCreate += time;
}
avgCreate = createQps > 0 ? avgCreate / createQps : 0;
for (Long time: this.updateTime) {
updateQps++;
avgUpdate += time;
}
avgUpdate = updateQps > 0 ? avgUpdate / updateQps : 0;
for (Long time: this.deleteTime) {
deleteQps++;
avgDelete += time;
}
avgDelete = deleteQps > 0 ? avgDelete / deleteQps : 0;
double totalQps = readQps + createQps + updateQps + deleteQps;
System.out.println("Total runtime: " + totalRuntime);
System.out.println("Read: " + readQps + " Create: " + createQps
+ " Update: " + updateQps + " Delete: " + deleteQps + " Total: " + totalQps);
totalRuntime /= 1000;
System.out.println("Total QPS: " + totalQps / totalRuntime + " avg query time: " +
(totalRuntime * parallelCount / (totalQps)));
System.out.println("Read QPS: " + readQps / totalRuntime + " avg query time: " + avgRead);
System.out.println("Create QPS: " + createQps / totalRuntime + " avg query time: " + avgCreate);
System.out.println("Update QPS: " + updateQps / totalRuntime + " avg query time: " + avgUpdate);
System.out.println("Delete QPS: " + deleteQps / totalRuntime + " avg query time: " + avgDelete);
}
public static void main(String[] args) throws InterruptedException, ExecutionException {
propertiesFile = args[0];
ClientRunnerSync clientRunner = new ClientRunnerSync();
System.out.println(clientRunner.getGreeting());
System.out.println("Using Sync Server. Thread count: " + parallelCount + " runtime: " + runtime + "s");
ExecutorService executorService = Executors.newFixedThreadPool(parallelCount);
Thread.sleep(1000); // let things settle down a bit
Set<Callable<Double>> callables = new HashSet<>();
final long startTime = System.currentTimeMillis();
for (int i = 0; i < parallelCount; i++) {
int finalI = i;
callables.add(() -> clientRunner.runExperiment(Integer.toString(finalI), startTime));
}
clientRunner.timerService();
List<Future<Double>> futures = executorService.invokeAll(callables);
for (Future<Double> future: futures) {
future.get();
}
clientRunner.experimentEnded = true;
long endTime = System.currentTimeMillis();
double totalRuntime = endTime - startTime;
clientRunner.printStatistics(totalRuntime);
executorService.shutdown();
executorService.awaitTermination(2, TimeUnit.SECONDS);
}
}
/*
* Copyright 2021 Nilanjan Daw, Synerg Lab, Department of CSE, IIT Bombay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package HpdosClient.lib;
import com.google.common.util.concurrent.ListenableFuture;
import hpdos.grpc.Packet;
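// Holder pairing an in-flight ListenableFuture<Packet> with its eventual response Packet
// and a flag indicating whether the result is ready.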
public class StorageHandler {
private boolean isResultReady;
private Packet packet;
private ListenableFuture<Packet> listener;
public StorageHandler(boolean isResultReady, Packet packet, ListenableFuture<Packet> listener) {
this.isResultReady = isResultReady;
this.packet = packet;
this.listener = listener;
}
public boolean isResultReady() {
return isResultReady;
}
public void setResultReady(boolean resultReady) {
isResultReady = resultReady;
}
public Packet getPacket() {
return packet;
}
public void setPacket(Packet packet) {
this.packet = packet;
}
public ListenableFuture<Packet> getListener() {
return listener;
}
public void setListener(ListenableFuture<Packet> listener) {
this.listener = listener;
}
}
/*
* Copyright 2021 Nilanjan Daw, Synerg Lab, Department of CSE, IIT Bombay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package HpdosClient.lib;
import HpdosClient.ConfigConstants;
import HpdosClient.MessageFormat.MessageConstants;
import HpdosClient.MessageFormat.RequestBuilder;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import hpdos.grpc.*;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
public class StorageServiceSync {
private final String clientID;
private ManagedChannel masterChannel;
private ArrayList<ManagedChannel> channels;
private List<Follower> replicaSet;
private final LoadingCache<String, StorageModel> cache;
public StorageServiceSync(String clientID) {
this.clientID = clientID;
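// Read-through cache: on a miss, the loader below fetches the metadata over a randomly
// chosen channel (master or follower); update() and delete() invalidate the entry so the
// next read reloads the latest version.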
this.cache = CacheBuilder
.newBuilder()
.maximumSize(10000)
.recordStats()
.build(new CacheLoader<>() {
@Override
public StorageModel load(@Nonnull String key) throws Exception {
int index = (int) (Math.random() * channels.size());
NetworkServiceGrpc.NetworkServiceBlockingStub stub =
NetworkServiceGrpc.newBlockingStub(channels.get(index));
ArrayList<Request> request = new ArrayList<>();
request.add(RequestBuilder.buildRequest(MessageConstants.METADATA_READ,
0, 0, key, 0, MessageConstants.METADATA_ACCESS_PRIVATE, clientID, ""));
Packet requestPacket = RequestBuilder.buildPacket(request);
Packet responsePacket = stub.readMetadata(requestPacket);
Response response = responsePacket.getResponse(0);
if (response.getStatus() == MessageConstants.STATUS_OK) {
Ack ack = response.getAck();
return new StorageModel(ack.getVersion(), ack.getDataSize(), ack.getKey(),
MessageConstants.METADATA_ACCESS_PRIVATE, clientID, ack.getCrc(), ack.getValue());
} else {
throw new IOException("Resource not found");
}
}
});
}
public LoadingCache<String, StorageModel> getCache() {
return cache;
}
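// Asks the master for the current read-replica list; initStorage() opens a channel to
// each follower so cache-miss reads can be spread across replicas.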
public void retrieveFollowerList() {
NetworkServiceGrpc.NetworkServiceBlockingStub stub = NetworkServiceGrpc.newBlockingStub(this.masterChannel);
ResponseList responseList = stub.getReadReplicaList(null);
this.replicaSet = responseList.getFollowerList();
}
public void cleanup() {
for (ManagedChannel channel: this.channels)
channel.shutdown();
}
public void initStorage() {
masterChannel = ManagedChannelBuilder.
forAddress(ConfigConstants.HOST, ConfigConstants.PORT)
.usePlaintext()
.build();
channels = new ArrayList<>();
channels.add(masterChannel);
retrieveFollowerList();
for (Follower follower: replicaSet) {
ManagedChannel channel = ManagedChannelBuilder.
forAddress(follower.getIp(), follower.getPort())
.usePlaintext()
.build();
channels.add(channel);
}
}
// create a metadata block
public long create(String key, String value) {
ArrayList<Request> request = new ArrayList<>();
request.add(RequestBuilder.buildRequest(MessageConstants.METADATA_CREATE,
0, value.length(), key,
0, MessageConstants.METADATA_ACCESS_PRIVATE, this.clientID, value));
Packet packet = RequestBuilder.buildPacket(request);
long timestampCreateStart = System.currentTimeMillis();
NetworkServiceGrpc.NetworkServiceBlockingStub masterStub = NetworkServiceGrpc.newBlockingStub(masterChannel);
Packet response = masterStub.createMetadata(packet);
return timestampCreateStart;
}
// read back the metadata
public Map.Entry<StorageModel, Long> read(String key) {
long timestampReadStart = System.currentTimeMillis();
try {
StorageModel model = cache.get(key);
return new AbstractMap.SimpleEntry<>(model, timestampReadStart);
} catch (ExecutionException e) {
e.printStackTrace();
}
return new AbstractMap.SimpleEntry<>(null, timestampReadStart);
}
public long update(String key, String value, int version) {
ArrayList<Request> request = new ArrayList<>();
request.add(RequestBuilder.buildRequest(MessageConstants.METADATA_UPDATE,
version, value.length(), key,
0, MessageConstants.METADATA_ACCESS_PRIVATE, this.clientID, value));
Packet packet = RequestBuilder.buildPacket(request);
long timestampUpdateStart = System.currentTimeMillis();
NetworkServiceGrpc.NetworkServiceBlockingStub masterStub = NetworkServiceGrpc.newBlockingStub(masterChannel);
Packet responsePacket = masterStub.updateMetadata(packet);
if (responsePacket.getResponse(0).getStatus() == MessageConstants.STATUS_OK)
cache.invalidate(key);
return timestampUpdateStart;
}
public long delete(String key, int version) {
ArrayList<Request> request = new ArrayList<>();
request.add(RequestBuilder.buildRequest(MessageConstants.METADATA_DELETE,
version, 0, key,
0, MessageConstants.METADATA_ACCESS_PRIVATE, this.clientID, ""));
Packet packet = RequestBuilder.buildPacket(request);
long timestampDeleteStart = System.currentTimeMillis();
NetworkServiceGrpc.NetworkServiceBlockingStub masterStub = NetworkServiceGrpc.newBlockingStub(masterChannel);
Packet responsePacket = masterStub.deleteMetadata(packet);
if (responsePacket.getResponse(0).getStatus() == MessageConstants.STATUS_OK)
cache.invalidate(key);
return timestampDeleteStart;
}
}