Commit 41b6326b authored by Paras Garg's avatar Paras Garg

Fixed double free while deregistering memory

parent 7e25adec
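The commit title refers to the memory-deregistration path. For context, here is a minimal sketch of the idempotent-release pattern such a fix usually relies on; the `ibv_mr` members mirror `RdmaClientEndpoint`'s `_sendMr`/`_recvMr`, but the struct and function names are illustrative, not the project's actual code, and the real fix in this commit may differ in detail.

```cpp
// Hypothetical sketch: deregister each ibv_mr exactly once and null the
// pointer so a second close()/completeClose() call becomes a no-op
// instead of a double free.
#include <infiniband/verbs.h>
#include <cstdlib>

struct EndpointBuffers {
    struct ibv_mr *sendMr = nullptr;
    struct ibv_mr *recvMr = nullptr;
    char *sendBuff = nullptr;
    char *recvBuff = nullptr;
};

void releaseBuffers(EndpointBuffers &b)
{
    if (b.sendMr != nullptr) {
        ibv_dereg_mr(b.sendMr);   // deregister with the verbs library once
        b.sendMr = nullptr;       // guard against a second deregistration
    }
    if (b.recvMr != nullptr) {
        ibv_dereg_mr(b.recvMr);
        b.recvMr = nullptr;
    }
    free(b.sendBuff); b.sendBuff = nullptr;  // free(nullptr) is safe
    free(b.recvBuff); b.recvBuff = nullptr;
}
```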
...@@ -29,6 +29,7 @@ $(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp ...@@ -29,6 +29,7 @@ $(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp
#$(BINDIR)/$(TARGET): $(OBJS) #$(BINDIR)/$(TARGET): $(OBJS)
$(TARGET) : $(OBJS) $(TARGET) : $(OBJS)
mkdir -p .build
$(CXX) -o $@ $^ $(LIBS) $(CXX) -o $@ $^ $(LIBS)
@echo "Linked "$<" successfully!" @echo "Linked "$<" successfully!"
...@@ -36,6 +37,9 @@ $(TARGET) : $(OBJS) ...@@ -36,6 +37,9 @@ $(TARGET) : $(OBJS)
clean: clean:
rm -f $(OBJ_DIR)/* rm -f $(OBJ_DIR)/*
rm -f $(TARGET) rm -f $(TARGET)
rm -f *.log
rm -f YCSB/*.log
rm -f libhpdosclient.so
.PHONY: count .PHONY: count
count: count:
...@@ -47,6 +51,7 @@ count: ...@@ -47,6 +51,7 @@ count:
Jclient: $(OBJS) Jclient: $(OBJS)
mkdir -p .build
$(CXX) -o libhpdosclient.so -L/usr/local/lib -shared $^ $(LIBS) $(CXX) -o libhpdosclient.so -L/usr/local/lib -shared $^ $(LIBS)
@echo "jclient "$<" successfully!" @echo "jclient "$<" successfully!"
sudo rm -f /usr/lib/libhpdosclient.so sudo rm -f /usr/lib/libhpdosclient.so
......
Steps to build jni client Steps to build jni client
> mkdir -p .build
> make JniHeader <br> > make JniHeader <br>
> make JClient <br> > make Jclient <br>
> java -cp jsrc JClient > java -cp jsrc JClient
Running YCSB Running YCSB
> mvn compile <br>
the .build directory is now created by the Makefile (mkdir -p .build)
./bin/ycsb load hpdos -P workloads/workloadb -threads 1
mvn -pl site.ycsb:hpdos-binding -am clean package -Dcheckstyle.skip
https://medium.com/@pirogov.alexey/gdb-debug-native-part-of-java-application-c-c-libraries-and-jdk-6593af3b4f3f
To do
delete client endpoint on close
threading and hashing in the client
add more status codes: success, failure, not found, server error, client error
add cache and support for invalidation
interface the client API through endpointGroup
EndpointGroup to manage the list of servers and caches, and invalidation (see the round-robin sketch below)
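For the endpoint-group item above, the changes to RdmaClientEndpointGroup later in this commit already round-robin requests over the connected endpoints. A minimal stand-alone sketch of that dispatch follows; the class and method names are illustrative, and the non-atomic counter mirrors the current code, which is not yet thread-safe (hence the threading to-do).

```cpp
#include <utility>
#include <vector>

class RdmaClientEndpoint;  // from RdmaClientEndpoint.hpp

// Illustrative round-robin dispatch over the connected endpoints,
// mirroring the _counter/_clients logic in RdmaClientEndpointGroup.
class EndpointGroupSketch
{
    std::vector<RdmaClientEndpoint *> _clientEps;
    int _counter = 0;
    int _clients = 0;

public:
    explicit EndpointGroupSketch(std::vector<RdmaClientEndpoint *> eps)
        : _clientEps(std::move(eps)), _clients(static_cast<int>(_clientEps.size())) {}

    RdmaClientEndpoint *next()
    {
        int id = _counter;                    // endpoint chosen for this request
        _counter = (_counter + 1) % _clients; // advance for the next call
        return _clientEps[id];
    }
};
```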
./bin/ycsb shell hpdos ./bin/ycsb shell hpdos
./bin/ycsb run hpdos -P workloads/workloada ./bin/ycsb run hpdos -P workloads/workloada -threads 1
./bin/ycsb load hpdos -P workloads/workloada ./bin/ycsb load hpdos -P workloads/workloada
Options: Options:
-P file Specify workload file -P file Specify workload file
...@@ -33,6 +17,17 @@ Options: ...@@ -33,6 +17,17 @@ Options:
-target n Target ops/sec (default: unthrottled) -target n Target ops/sec (default: unthrottled)
-threads n Number of client threads (default: 1) -threads n Number of client threads (default: 1)
mvn -pl site.ycsb:hpdos-binding -am clean package -Dcheckstyle.skip
https://medium.com/@pirogov.alexey/gdb-debug-native-part-of-java-application-c-c-libraries-and-jdk-6593af3b4f3f
To do
make requestId unique (see the sketch below)
delete client endpoint on close
threading and hashing in the client
add more status codes: success, failure, not found, server error, client error
add cache and support for invalidation
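On the requestId item: the header later in this diff gives each endpoint its own `std::atomic<uint64_t> _requestId{12}`, so ids can collide across endpoints. One possible way to make them globally unique is sketched below; the function name and the bit layout are assumptions, not the project's current code.

```cpp
// Hypothetical globally unique request ids: a shared atomic sequence
// combined with the endpoint index in the upper bits, so two endpoints
// never hand out the same id.
#include <atomic>
#include <cstdint>

inline uint64_t nextRequestId(uint16_t endpointIndex)
{
    static std::atomic<uint64_t> counter{0};
    uint64_t seq = counter.fetch_add(1, std::memory_order_relaxed);
    return (static_cast<uint64_t>(endpointIndex) << 48) | (seq & 0xFFFFFFFFFFFFULL);
}
```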
# Steps to configure spdk target and linux initiator # Steps to configure spdk target and linux initiator
Build spdk using Build spdk using
https://spdk.io/doc/nvmf.html https://spdk.io/doc/nvmf.html
......
...@@ -50,13 +50,13 @@ public class HpdosClient extends DB { ...@@ -50,13 +50,13 @@ public class HpdosClient extends DB {
// Read a single record // Read a single record
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
//System.out.println("Read" + key); System.out.println("Read" + key.length());
byte res[] = jclient.get(jclient.endpointGroup, key.getBytes()); byte res[] = jclient.get(jclient.endpointGroup, key.getBytes());
if (res == null) if (res == null)
return Status.NOT_FOUND; return Status.NOT_FOUND;
if(fields == null) if(fields == null)
{ {
return Status.OK; return Status.BAD_REQUEST;
} }
Iterator<String> it = fields.iterator(); Iterator<String> it = fields.iterator();
if (it.hasNext()) if (it.hasNext())
...@@ -72,7 +72,7 @@ public class HpdosClient extends DB { ...@@ -72,7 +72,7 @@ public class HpdosClient extends DB {
// Update a single record // Update a single record
public Status update(String table, String key, Map<String, ByteIterator> values) { public Status update(String table, String key, Map<String, ByteIterator> values) {
//System.out.println("update" + key); System.out.println("update" + key.getBytes().length );
try { try {
ByteArrayOutputStream stream = new ByteArrayOutputStream(); ByteArrayOutputStream stream = new ByteArrayOutputStream();
for (ByteIterator v : values.values()) { for (ByteIterator v : values.values()) {
......
...@@ -22,10 +22,10 @@ ...@@ -22,10 +22,10 @@
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key) # Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: zipfian # Request distribution: zipfian
recordcount=200 recordcount=2000000
operationcount=200 operationcount=500000
fieldcount=1 fieldcount=1
fieldlength=200 fieldlength=2500
workload=site.ycsb.workloads.CoreWorkload workload=site.ycsb.workloads.CoreWorkload
......
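For scale, the new workload parameters imply roughly 2.5 KB of value data per record (fieldcount=1 × fieldlength=2500 bytes), so loading recordcount=2000000 writes on the order of 2,000,000 × 2,500 B ≈ 5 GB of values, plus key and header overhead.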
...@@ -11,14 +11,13 @@ ...@@ -11,14 +11,13 @@
//#include <boost/lockfree/queue.hpp> //#include <boost/lockfree/queue.hpp>
#include <queue> #include <queue>
#include <map> #include <map>
#include <mutex> #include <mutex>
#include "Buffer.hpp" #include "Buffer.hpp"
#include "Logger.hpp" #include "Logger.hpp"
#include "RdmaEndpointGroup.hpp" #include "RdmaEndpointGroup.hpp"
#include "MessageFormats.hpp" #include "MessageFormats.hpp"
#include "RdmaFuture.hpp" #include "RdmaFuture.hpp"
class RdmaClientEndpoint class RdmaClientEndpoint
{ {
...@@ -41,7 +40,7 @@ class RdmaClientEndpoint ...@@ -41,7 +40,7 @@ class RdmaClientEndpoint
int _state; int _state;
int _maxInLine; int _maxInLine;
int _timeoutMs; int _timeoutMs;
const char *_connData; const char *_connData;
char *_sendBuff = NULL; char *_sendBuff = NULL;
...@@ -49,32 +48,34 @@ class RdmaClientEndpoint ...@@ -49,32 +48,34 @@ class RdmaClientEndpoint
struct ibv_mr *_sendMr = NULL; struct ibv_mr *_sendMr = NULL;
struct ibv_mr *_recvMr = NULL; struct ibv_mr *_recvMr = NULL;
//boost::lockfree::queue<char *> *_sendBuffers; // boost::lockfree::queue<char *> *_sendBuffers;
std::queue<char*> _sendBuffers; std::queue<char *> _sendBuffers;
std::mutex _sendBuffersM; std::mutex _sendBuffersM;
std::map<uint64_t,RdmaFuture*> futures;
void completeClose(); std::map<uint64_t, RdmaFuture *> futures;
std::mutex _futureMutex;
void connect(); void connect();
void registerMemory(); void registerMemory();
void createResources(); void createQueuePairs();
public: public:
std::atomic<uint64_t> _requestId{12}; std::atomic<uint64_t> _requestId{12};
RdmaClientEndpoint(struct rdma_cm_id *id, RdmaEndpointGroup *group, int sendQueueSize, int recvQueueSize, RdmaClientEndpoint(struct rdma_cm_id *id, RdmaEndpointGroup *group, int sendQueueSize, int recvQueueSize,
int sendMsgSize, int recvMsgSize, int maxInLine, int timeout); int sendMsgSize, int recvMsgSize, int maxInLine, int timeout);
void processCmEvent(struct rdma_cm_event *event);
void connect(const char *ip, const char *port, const char *connData); void connect(const char *ip, const char *port, const char *connData);
bool isConnected(); bool isConnected();
void close(); void close();
void completeClose();
void processCmEvent(struct rdma_cm_event *event); void processSendComp(struct ibv_wc &);
void processSendComp(struct ibv_wc); void processRecvComp(struct ibv_wc &);
void processRecvComp(struct ibv_wc);
int sendMessage(const char *buffer, int size); int sendMessage(const char *buffer, int size);
RdmaFuture* put(const char *key, int keySize, const char *value, int valueSize); RdmaFuture *put(const char *key, int keySize, const char *value, int valueSize);
RdmaFuture* get(const char *key, int keySize); RdmaFuture *get(const char *key, int keySize);
RdmaFuture* deleteKey(const char *key, int keySize); RdmaFuture *deleteKey(const char *key, int keySize);
}; };
#endif #endif
\ No newline at end of file
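The future-returning calls declared above are consumed in the JNI layer further down in this diff. A minimal sketch of that calling pattern is shown here; error handling is trimmed, `fetchValue` is an illustrative name, and the behaviour of `RdmaFuture::get()` (block until the reply buffer, which starts with a MessageHeader, is available) is as the JNI code below assumes.

```cpp
#include "RdmaClientEndpoint.hpp"
#include "MessageFormats.hpp"

// Illustrative use of the asynchronous get API from a caller's side.
bool fetchValue(RdmaClientEndpoint *ep, const char *key, int keySize)
{
    RdmaFuture *future = ep->get(key, keySize);   // post the request
    if (future == nullptr)
        return false;                             // request could not be posted
    char *data = future->get();                   // block until the response arrives
    delete future;                                // release the future exactly once
    struct MessageHeader *response = (struct MessageHeader *)data;
    return response->type != MessageType::FAILURE;
}
```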
...@@ -46,7 +46,7 @@ public: ...@@ -46,7 +46,7 @@ public:
RdmaFuture* put(const char *key, int keySize, const char *value, int valueSize); RdmaFuture* put(const char *key, int keySize, const char *value, int valueSize);
RdmaFuture* get(const char *key, int keySize); RdmaFuture* get(const char *key, int keySize);
RdmaFuture* deleteKey(const char *key, int keySize); RdmaFuture* deleteKey(const char *key, int keySize);
void close();
}; };
#endif #endif
\ No newline at end of file
#ifndef __RDMACMPROCESSOR__
#define __RDMACMPROCESSOR__
#include <unordered_map>
#include <rdma/rdma_cma.h> #include <rdma/rdma_cma.h>
#include <rdma/rdma_verbs.h> #include <rdma/rdma_verbs.h>
#include <stdint.h> #include <stdint.h>
#include <thread> #include <thread>
#include <iostream> #include <iostream>
#ifndef __RDMACMPROCESSOR__
#define __RDMACMPROCESSOR__
#include "RdmaEndpointGroup.hpp" #include "RdmaEndpointGroup.hpp"
#include <unordered_map> #include "Logger.hpp"
class RdmaCmProcessor class RdmaCmProcessor
{ {
......
...@@ -12,11 +12,12 @@ ...@@ -12,11 +12,12 @@
class RdmaCqProcessor class RdmaCqProcessor
{ {
bool _stop{false};
public: public:
struct ibv_comp_channel *_compChannel; struct ibv_comp_channel *_compChannel;
struct ibv_cq *_completionQueue; struct ibv_cq *_completionQueue;
std::thread *_compQueueThread; std::thread *_compQueueThread;
std::unordered_map<uint32_t, RdmaClientEndpoint *> *_qpEndpointMap{NULL}; std::unordered_map<uint32_t, RdmaClientEndpoint *> _qpEndpointMap;
RdmaCqProcessor(ibv_context *verbs, int compQueueSize); RdmaCqProcessor(ibv_context *verbs, int compQueueSize);
struct ibv_cq *getCq(); struct ibv_cq *getCq();
......
#For commenting used # empty line are ignored sendQS=100
#comments after parameters also supported recvQS=100
# use key=value format compQS=100
#All Parameters will be taken as string sendMS=3000
# Fixed Parameters recvMS=3000
ENABLE_LOGGING=1
ENABLE_LOGGING=0 NSERVERS=2
SERVER_IP=192.168.200.20 SERVER_IP1=192.168.200.30
SERVER_PORT=1921 SERVER_PORT1=1920
EXECUTOR_POOL_SIZE=4 SERVER_IP2=192.168.200.50
\ No newline at end of file SERVER_PORT2=1920
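The new client.properties format above lists NSERVERS numbered endpoints. A small sketch of how those keys can be resolved, mirroring the `createClientEps` loop later in this diff; `PropertiesSketch` and `resolveServers` are illustrative stand-ins, not the project's Properties class.

```cpp
#include <map>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for the project's Properties class, backed by a map.
struct PropertiesSketch {
    std::map<std::string, std::string> kv;
    std::string getValue(const std::string &key) const { return kv.at(key); }
};

// Resolve "SERVER_IP1"/"SERVER_PORT1" ... up to NSERVERS entries.
std::vector<std::pair<std::string, std::string>> resolveServers(const PropertiesSketch &prop)
{
    int n = std::stoi(prop.getValue("NSERVERS"));
    std::vector<std::pair<std::string, std::string>> servers;
    for (int i = 0; i < n; i++)
        servers.emplace_back(prop.getValue("SERVER_IP" + std::to_string(i + 1)),
                             prop.getValue("SERVER_PORT" + std::to_string(i + 1)));
    return servers;
}
```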
...@@ -32,6 +32,8 @@ JNIEXPORT jlong JNICALL Java_JClient_createEndpointGroup(JNIEnv *jenv, jobject j ...@@ -32,6 +32,8 @@ JNIEXPORT jlong JNICALL Java_JClient_createEndpointGroup(JNIEnv *jenv, jobject j
JNIEXPORT void JNICALL Java_JClient_closeEndpointGroup(JNIEnv *jenv, jobject jobj, jlong jclient) JNIEXPORT void JNICALL Java_JClient_closeEndpointGroup(JNIEnv *jenv, jobject jobj, jlong jclient)
{ {
RdmaClientEndpointGroup *group = reinterpret_cast<RdmaClientEndpointGroup *>(jclient); RdmaClientEndpointGroup *group = reinterpret_cast<RdmaClientEndpointGroup *>(jclient);
group->close();
//delete group;
// group->close(); // group->close();
} }
...@@ -61,10 +63,10 @@ JNIEXPORT jbyteArray JNICALL Java_JClient_get(JNIEnv *jenv, jobject jobj, jlong ...@@ -61,10 +63,10 @@ JNIEXPORT jbyteArray JNICALL Java_JClient_get(JNIEnv *jenv, jobject jobj, jlong
/* Create Java Byte Array and send data to java side /* Create Java Byte Array and send data to java side
* after copying data from c char array to java byte array * after copying data from c char array to java byte array
*/ */
std::cout << "get future.get()\n"; //std::cout << "get future.get()\n";
char *data = future->get(); char *data = future->get();
delete future; delete future;
std::cout << "g future.get()\n"; //std::cout << "g future.get()\n";
struct MessageHeader *response = (struct MessageHeader *)data; struct MessageHeader *response = (struct MessageHeader *)data;
if (response->type == MessageType::FAILURE) if (response->type == MessageType::FAILURE)
{ {
...@@ -117,12 +119,12 @@ JNIEXPORT jint JNICALL Java_JClient_put(JNIEnv *jenv, jobject jobj, jlong jclien ...@@ -117,12 +119,12 @@ JNIEXPORT jint JNICALL Java_JClient_put(JNIEnv *jenv, jobject jobj, jlong jclien
{ {
return -1; return -1;
} }
std::cout << "put future.get()\n"; //std::cout << "put future.get()\n";
auto data = future->get(); auto data = future->get();
delete future; delete future;
std::cout << "p future.get()\n"; //std::cout << "p future.get()\n";
struct MessageHeader *response = (struct MessageHeader *)data; struct MessageHeader *response = (struct MessageHeader *)data;
std::cout<<"server response "<<response->type<<"\n"; //std::cout<<"server response "<<response->type<<"\n";
if (response->type == MessageType::SUCCESS) if (response->type == MessageType::SUCCESS)
{ {
// Currently 0 indicates success and the remaining values are failures; more status codes can be added in future // Currently 0 indicates success and the remaining values are failures; more status codes can be added in future
...@@ -153,10 +155,10 @@ JNIEXPORT jint JNICALL Java_JClient_delete(JNIEnv *jenv, jobject jobj, jlong jcl ...@@ -153,10 +155,10 @@ JNIEXPORT jint JNICALL Java_JClient_delete(JNIEnv *jenv, jobject jobj, jlong jcl
{ {
return -1; return -1;
} }
std::cout << "delete future.get()\n"; //std::cout << "delete future.get()\n";
auto data = future->get(); auto data = future->get();
delete future; delete future;
std::cout << "d future.get()\n"; //std::cout << "d future.get()\n";
struct MessageHeader *response = (struct MessageHeader *)data; struct MessageHeader *response = (struct MessageHeader *)data;
if (response->type == MessageType::FAILURE) if (response->type == MessageType::FAILURE)
{ {
......
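The comment in the get path above describes copying the C buffer into a Java byte array before returning it to the Java side. A minimal sketch of that step using the standard JNI calls is shown below; the helper name and parameters are illustrative, with `data`/`len` standing for the response payload after the MessageHeader.

```cpp
#include <jni.h>

// Illustrative helper: copy a C char buffer into a newly allocated Java byte[].
static jbyteArray toJavaByteArray(JNIEnv *jenv, const char *data, jsize len)
{
    jbyteArray out = jenv->NewByteArray(len);   // allocate byte[] on the Java heap
    if (out == nullptr)
        return nullptr;                         // allocation failed (OutOfMemoryError pending)
    jenv->SetByteArrayRegion(out, 0, len,
                             reinterpret_cast<const jbyte *>(data));  // bulk copy into the array
    return out;
}
```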
...@@ -27,9 +27,10 @@ void RdmaClientEndpointGroup::processCmEvent(struct rdma_cm_event *event) ...@@ -27,9 +27,10 @@ void RdmaClientEndpointGroup::processCmEvent(struct rdma_cm_event *event)
} }
else else
{ {
std::cout << "RdmaClientEndpointGroup : Not able to procces CM EVent"; std::ostringstream ss;
std::cout << rdma_event_str(event->event) << event->id << " "; ss<<"RdmaClientEndpointGroup : Not able to procces CM EVent" << rdma_event_str(event->event) ;
std::cout << event->listen_id << std::endl; ss << " id "<<event->id << " listen id"<< event->listen_id;
CPPLog::LOG_ERROR(ss);
} }
} }
...@@ -39,6 +40,8 @@ RdmaClientEndpoint *RdmaClientEndpointGroup::createEndpoint() ...@@ -39,6 +40,8 @@ RdmaClientEndpoint *RdmaClientEndpointGroup::createEndpoint()
RdmaClientEndpoint *endpoint = new RdmaClientEndpoint(id, this, RdmaClientEndpoint *endpoint = new RdmaClientEndpoint(id, this,
_sendQueueSize, _recvQueueSize, _sendQueueSize, _recvQueueSize,
_sendMsgSize, _recvMsgSize, _maxInLine, _timeoutMs); _sendMsgSize, _recvMsgSize, _maxInLine, _timeoutMs);
/* Setting Endpoint in cm_id context so that whenever a cm event comes we can get the endpoint
*/
id->context = (void *)endpoint; id->context = (void *)endpoint;
return endpoint; return endpoint;
} }
...@@ -47,7 +50,6 @@ struct ibv_cq *RdmaClientEndpointGroup::createCq(struct rdma_cm_id *id) ...@@ -47,7 +50,6 @@ struct ibv_cq *RdmaClientEndpointGroup::createCq(struct rdma_cm_id *id)
{ {
if (_cqProcessor == NULL) if (_cqProcessor == NULL)
{ {
std::cout << "RdmaClientEndpointGroup : Creating CQ processor" << std::endl;
_cqProcessor = new RdmaCqProcessor(id->verbs, _compQueueSize); _cqProcessor = new RdmaCqProcessor(id->verbs, _compQueueSize);
_cqProcessor->start(); _cqProcessor->start();
} }
...@@ -60,34 +62,18 @@ void RdmaClientEndpointGroup::createClientEps(Properties *prop) ...@@ -60,34 +62,18 @@ void RdmaClientEndpointGroup::createClientEps(Properties *prop)
std::cout << "clients" << _clients << "\n"; std::cout << "clients" << _clients << "\n";
for (int i = 0; i < _clients; i++) for (int i = 0; i < _clients; i++)
{ {
std::cout << "creating client for " << prop->getValue("SERVER_IP") << (i + 1); std::cout << "creating client for " << prop->getValue("SERVER_IP" + std::to_string(i + 1));
std::cout << ":" << prop->getValue("SERVER_PORT") << (i + 1) << " \n"; std::cout << ":" << prop->getValue("SERVER_PORT" + std::to_string(i + 1)) << " \n";
RdmaClientEndpoint *ep = createEndpoint(); RdmaClientEndpoint *ep = createEndpoint();
ep->connect((prop->getValue("SERVER_IP" + std::to_string(i + 1))).c_str(), ep->connect((prop->getValue("SERVER_IP" + std::to_string(i + 1))).c_str(),
(prop->getValue("SERVER_PORT" + std::to_string(i + 1))).c_str(), "sal"); (prop->getValue("SERVER_PORT" + std::to_string(i + 1))).c_str(), "sal");
_clientEps.push_back(ep); _clientEps.push_back(ep);
std::cout << "ep" << ep << std::endl;
} }
std::cout << "vec size" << _clientEps.size() << "\n";
for (int i = 0; i < _clients; i++) for (int i = 0; i < _clients; i++)
{ {
std::cout << "ep" << _clientEps[i] << std::endl; while (!_clientEps[i]->isConnected())
int timeout = 0;
do
{ {
if (_clientEps[i]->isConnected()) continue;
{
break;
}
std::cout << "timeout " << timeout << "\n"
<< _clients;
std::this_thread::sleep_for(std::chrono::milliseconds(10));
timeout += 10;
} while (timeout < 1000);
if (!_clientEps[i]->isConnected())
{
std::cout << "Client Endpoint not connected " << prop->getValue("SERVER_IP" + std::to_string(i + 1)) << "\n";
exit(1);
} }
} }
std::cout << "connected\n"; std::cout << "connected\n";
...@@ -114,4 +100,24 @@ RdmaFuture *RdmaClientEndpointGroup::deleteKey(const char *key, int keySize) ...@@ -114,4 +100,24 @@ RdmaFuture *RdmaClientEndpointGroup::deleteKey(const char *key, int keySize)
int id = _counter; int id = _counter;
_counter = (_counter + 1) % _clients; _counter = (_counter + 1) % _clients;
return _clientEps[id]->deleteKey(key, keySize); return _clientEps[id]->deleteKey(key, keySize);
}
void RdmaClientEndpointGroup::close()
{
if (_cmProcessor != nullptr)
{
_cmProcessor->close();
delete _cmProcessor;
}
if (_cqProcessor != nullptr)
{
_cqProcessor->close();
delete _cqProcessor;
}
for (auto ep : _clientEps)
{
ep->close();
ep->completeClose();
delete ep;
}
} }
\ No newline at end of file
...@@ -4,12 +4,13 @@ ...@@ -4,12 +4,13 @@
RdmaCmProcessor::RdmaCmProcessor(RdmaEndpointGroup *group) RdmaCmProcessor::RdmaCmProcessor(RdmaEndpointGroup *group)
: _endpointGroup(group) : _endpointGroup(group)
{ {
std::cout << "CMProcessor : Step 1 creating event channel" << std::endl; CPPLog::LOG_ALWAYS("CMProcessor : Step 1 creating event channel");
_eventChannel = rdma_create_event_channel(); _eventChannel = rdma_create_event_channel();
_stop = false; _stop = false;
if (_eventChannel == NULL) if (_eventChannel == NULL)
{ {
std::cout << "CMProcesor : error creating event channel"; CPPLog::LOG_ERROR("CMProcesor : error creating event channel");
exit(1);
} }
} }
...@@ -17,39 +18,44 @@ struct rdma_event_channel *RdmaCmProcessor::getEventChannel() ...@@ -17,39 +18,44 @@ struct rdma_event_channel *RdmaCmProcessor::getEventChannel()
{ {
return _eventChannel; return _eventChannel;
} }
struct rdma_cm_id *RdmaCmProcessor::createId() struct rdma_cm_id *RdmaCmProcessor::createId()
{ {
struct rdma_cm_id *id = NULL; struct rdma_cm_id *id = NULL;
int ret = rdma_create_id(_eventChannel, &id, NULL, RDMA_PS_TCP); int ret = rdma_create_id(_eventChannel, &id, NULL, RDMA_PS_TCP);
if (ret == -1) if (ret == -1)
std::cout << "CMProcesor : rdma_create_id failed" << std::endl; {
CPPLog::LOG_ERROR("CMProcesor : rdma_create_id failed");
return nullptr;
}
return id; return id;
} }
void RdmaCmProcessor::start() void RdmaCmProcessor::start()
{ {
_cmEventThread = new std::thread(&RdmaCmProcessor::processCmEvent, this); _cmEventThread = new std::thread(&RdmaCmProcessor::processCmEvent, this);
pthread_setname_np(_cmEventThread->native_handle(),"CMProcessor"); pthread_setname_np(_cmEventThread->native_handle(), "CMProcessor");
} }
void RdmaCmProcessor::processCmEvent() void RdmaCmProcessor::processCmEvent()
{ {
int ret; int ret;
struct rdma_cm_event *event; struct rdma_cm_event *event;
std::cout << "CMProcessor : starting cm processing thread" << std::endl; CPPLog::LOG_ALWAYS("CMProcessor : starting cm processing thread");
while (!_stop) while (!_stop)
{ {
ret = rdma_get_cm_event(_eventChannel, &event); ret = rdma_get_cm_event(_eventChannel, &event);
if (ret) if (ret)
{ {
std::cout << "CMProcesor : rdma_get_cm_event failed" << std::endl; CPPLog::LOG_ERROR("CMProcesor : rdma_get_cm_event failed");
continue; continue;
} }
_endpointGroup->processCmEvent(event); _endpointGroup->processCmEvent(event);
ret = rdma_ack_cm_event(event); ret = rdma_ack_cm_event(event);
if (ret) if (ret)
{ {
std::cout << "CMProcesor : rdma_ack_cm_event failed"; CPPLog::LOG_ERROR( "CMProcesor : rdma_ack_cm_event failed");
} }
} }
} }
...@@ -57,6 +63,10 @@ void RdmaCmProcessor::processCmEvent() ...@@ -57,6 +63,10 @@ void RdmaCmProcessor::processCmEvent()
void RdmaCmProcessor::close() void RdmaCmProcessor::close()
{ {
_stop = true; _stop = true;
_cmEventThread->join(); if(_cmEventThread != nullptr)
{
_cmEventThread->join();
delete _cmEventThread;
}
rdma_destroy_event_channel(_eventChannel); rdma_destroy_event_channel(_eventChannel);
} }
\ No newline at end of file
...@@ -2,46 +2,49 @@ ...@@ -2,46 +2,49 @@
RdmaCqProcessor::RdmaCqProcessor(ibv_context *verbs, int compQueueSize) RdmaCqProcessor::RdmaCqProcessor(ibv_context *verbs, int compQueueSize)
{ {
//_qpEndpointMap = new std::unordered_map<>(); CPPLog::LOG_ALWAYS("CqProcessor : Creating CQ processor");
//_qpEndpointMap = new std::unordered_map<uint32_t, RdmaClientEndpoint *>();
_qpEndpointMap = new std::unordered_map<uint32_t, RdmaClientEndpoint *>();
_compChannel = ibv_create_comp_channel(verbs); _compChannel = ibv_create_comp_channel(verbs);
if (_compChannel == NULL) if (_compChannel == NULL)
{ {
std::cout << "CqProcessr : ibv_create_comp_channel failed\n"; CPPLog::LOG_ERROR("CqProcessr : ibv_create_comp_channel failed");
return; exit(1);
} }
_completionQueue = ibv_create_cq(verbs, compQueueSize, NULL, _compChannel, 0); _completionQueue = ibv_create_cq(verbs, compQueueSize, NULL, _compChannel, 0);
if (_completionQueue == NULL) if (_completionQueue == NULL)
{ {
std::cout << "CqProcessr : ibv_create_cq failed" << std::endl; CPPLog::LOG_ERROR("CqProcessr : ibv_create_cq failed");
return; exit(1);
} }
int ret = ibv_req_notify_cq(_completionQueue, 0); int ret = ibv_req_notify_cq(_completionQueue, 0);
if (ret) if (ret)
{ {
std::cout << "CqProcessr : ibv_req_notify_cq failed\n"; CPPLog::LOG_ERROR("CqProcessr : ibv_req_notify_cq failed");
} }
} }
struct ibv_cq *RdmaCqProcessor::getCq() struct ibv_cq *RdmaCqProcessor::getCq()
{ {
return _completionQueue; return _completionQueue;
} }
void RdmaCqProcessor::registerEp(uint64_t qp,RdmaClientEndpoint* ep) void RdmaCqProcessor::registerEp(uint64_t qp, RdmaClientEndpoint *ep)
{ {
_qpEndpointMap->emplace(qp,ep); _qpEndpointMap.emplace(qp, ep);
} }
void RdmaCqProcessor::deRegisterEp(uint64_t qp) void RdmaCqProcessor::deRegisterEp(uint64_t qp)
{