Commit 41b6326b authored by Paras Garg's avatar Paras Garg

Fixed double free while deregistering memory

parent 7e25adec
......@@ -29,6 +29,7 @@ $(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp
#$(BINDIR)/$(TARGET): $(OBJS)
$(TARGET) : $(OBJS)
mkdir -p .build
$(CXX) -o $@ $^ $(LIBS)
@echo "Linked "$<" successfully!"
......@@ -36,6 +37,9 @@ $(TARGET) : $(OBJS)
clean:
rm -f $(OBJ_DIR)/*
rm -f $(TARGET)
rm -f *.log
rm -f YCSB/*.log
rm -f libhpdosclient.so
.PHONY: count
count:
......@@ -47,6 +51,7 @@ count:
Jclient: $(OBJS)
mkdir -p .build
$(CXX) -o libhpdosclient.so -L/usr/local/lib -shared $^ $(LIBS)
@echo "jclient "$<" successfully!"
sudo rm -f /usr/lib/libhpdosclient.so
......
Steps to build jni client
> mkdir -p .build
> make JniHeader <br>
> make JClient <br>
> make Jclient <br>
> java -cp jsrc JClient
Running YCSB
> mvn compile <br>
add .build to make
./bin/ycsb load hpdos -P workloads/workloadb -threads 1
mvn -pl site.ycsb:hpdos-binding -am clean package -Dcheckstyle.skip
https://medium.com/@pirogov.alexey/gdb-debug-native-part-of-java-application-c-c-libraries-and-jdk-6593af3b4f3f
to do
delete client endpoint on close
threading in client and hashing in client
add more code for success failure not found etc server error client error
add cache add support for invalidation
interface client API through endpointGroup
Endpointgroup to manage list of servers and caches, Invalidation
Running YCSB
./bin/ycsb shell hpdos
./bin/ycsb run hpdos -P workloads/workloada
./bin/ycsb run hpdos -P workloads/workloada -threads 1
./bin/ycsb load hpdos -P workloads/workloada
Options:
-P file Specify workload file
......@@ -33,6 +17,17 @@ Options:
-target n Target ops/sec (default: unthrottled)
-threads n Number of client threads (default: 1)
mvn -pl site.ycsb:hpdos-binding -am clean package -Dcheckstyle.skip
https://medium.com/@pirogov.alexey/gdb-debug-native-part-of-java-application-c-c-libraries-and-jdk-6593af3b4f3f
to do
make requestId unique
delete client endpoint on close
threading in client and hashing in client
add more code for success failure not found etc server error client error
add cache add support for invalidation
# Steps to configure spdk target and linux initiator
Build spdk using
https://spdk.io/doc/nvmf.html
......
......@@ -50,13 +50,13 @@ public class HpdosClient extends DB {
// Read a single record
public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
//System.out.println("Read" + key);
System.out.println("Read" + key.length());
byte res[] = jclient.get(jclient.endpointGroup, key.getBytes());
if (res == null)
return Status.NOT_FOUND;
if(fields == null)
{
return Status.OK;
return Status.BAD_REQUEST;
}
Iterator<String> it = fields.iterator();
if (it.hasNext())
......@@ -72,7 +72,7 @@ public class HpdosClient extends DB {
// Update a single record
public Status update(String table, String key, Map<String, ByteIterator> values) {
//System.out.println("update" + key);
System.out.println("update" + key.getBytes().length );
try {
ByteArrayOutputStream stream = new ByteArrayOutputStream();
for (ByteIterator v : values.values()) {
......
......@@ -22,10 +22,10 @@
# Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
# Request distribution: zipfian
recordcount=200
operationcount=200
recordcount=2000000
operationcount=500000
fieldcount=1
fieldlength=200
fieldlength=2500
workload=site.ycsb.workloads.CoreWorkload
......
......@@ -11,14 +11,13 @@
//#include <boost/lockfree/queue.hpp>
#include <queue>
#include <map>
#include <mutex>
#include <mutex>
#include "Buffer.hpp"
#include "Logger.hpp"
#include "RdmaEndpointGroup.hpp"
#include "MessageFormats.hpp"
#include "RdmaFuture.hpp"
class RdmaClientEndpoint
{
......@@ -41,7 +40,7 @@ class RdmaClientEndpoint
int _state;
int _maxInLine;
int _timeoutMs;
const char *_connData;
char *_sendBuff = NULL;
......@@ -49,32 +48,34 @@ class RdmaClientEndpoint
struct ibv_mr *_sendMr = NULL;
struct ibv_mr *_recvMr = NULL;
//boost::lockfree::queue<char *> *_sendBuffers;
std::queue<char*> _sendBuffers;
// boost::lockfree::queue<char *> *_sendBuffers;
std::queue<char *> _sendBuffers;
std::mutex _sendBuffersM;
std::map<uint64_t,RdmaFuture*> futures;
void completeClose();
std::map<uint64_t, RdmaFuture *> futures;
std::mutex _futureMutex;
void connect();
void registerMemory();
void createResources();
void createQueuePairs();
public:
std::atomic<uint64_t> _requestId{12};
RdmaClientEndpoint(struct rdma_cm_id *id, RdmaEndpointGroup *group, int sendQueueSize, int recvQueueSize,
int sendMsgSize, int recvMsgSize, int maxInLine, int timeout);
void processCmEvent(struct rdma_cm_event *event);
void connect(const char *ip, const char *port, const char *connData);
bool isConnected();
void close();
void completeClose();
void processCmEvent(struct rdma_cm_event *event);
void processSendComp(struct ibv_wc);
void processRecvComp(struct ibv_wc);
void processSendComp(struct ibv_wc &);
void processRecvComp(struct ibv_wc &);
int sendMessage(const char *buffer, int size);
RdmaFuture* put(const char *key, int keySize, const char *value, int valueSize);
RdmaFuture* get(const char *key, int keySize);
RdmaFuture* deleteKey(const char *key, int keySize);
RdmaFuture *put(const char *key, int keySize, const char *value, int valueSize);
RdmaFuture *get(const char *key, int keySize);
RdmaFuture *deleteKey(const char *key, int keySize);
};
#endif
\ No newline at end of file
......@@ -46,7 +46,7 @@ public:
RdmaFuture* put(const char *key, int keySize, const char *value, int valueSize);
RdmaFuture* get(const char *key, int keySize);
RdmaFuture* deleteKey(const char *key, int keySize);
void close();
};
#endif
\ No newline at end of file
#ifndef __RDMACMPROCESSOR__
#define __RDMACMPROCESSOR__
#include <unordered_map>
#include <rdma/rdma_cma.h>
#include <rdma/rdma_verbs.h>
#include <stdint.h>
#include <thread>
#include <iostream>
#ifndef __RDMACMPROCESSOR__
#define __RDMACMPROCESSOR__
#include "RdmaEndpointGroup.hpp"
#include <unordered_map>
#include "Logger.hpp"
class RdmaCmProcessor
{
......
......@@ -12,11 +12,12 @@
class RdmaCqProcessor
{
bool _stop{false};
public:
struct ibv_comp_channel *_compChannel;
struct ibv_cq *_completionQueue;
std::thread *_compQueueThread;
std::unordered_map<uint32_t, RdmaClientEndpoint *> *_qpEndpointMap{NULL};
std::unordered_map<uint32_t, RdmaClientEndpoint *> _qpEndpointMap;
RdmaCqProcessor(ibv_context *verbs, int compQueueSize);
struct ibv_cq *getCq();
......
#For commenting used # empty line are ignored
#comments after parameters also supported
# use key=value format
#All Parameters will be taken as string
# Fixed Parameters
ENABLE_LOGGING=0
SERVER_IP=192.168.200.20
SERVER_PORT=1921
EXECUTOR_POOL_SIZE=4
\ No newline at end of file
sendQS=100
recvQS=100
compQS=100
sendMS=3000
recvMS=3000
ENABLE_LOGGING=1
NSERVERS=2
SERVER_IP1=192.168.200.30
SERVER_PORT1=1920
SERVER_IP2=192.168.200.50
SERVER_PORT2=1920
......@@ -32,6 +32,8 @@ JNIEXPORT jlong JNICALL Java_JClient_createEndpointGroup(JNIEnv *jenv, jobject j
/* JNI entry point called when the Java side closes the endpoint group.
 * jclient carries the native RdmaClientEndpointGroup pointer (presumably
 * the value returned earlier by Java_JClient_createEndpointGroup — confirm
 * against the Java caller).
 * NOTE(review): the group object is never deleted here (delete is commented
 * out), so the native allocation is intentionally leaked on close — verify
 * that this is the intended ownership model.
 */
JNIEXPORT void JNICALL Java_JClient_closeEndpointGroup(JNIEnv *jenv, jobject jobj, jlong jclient)
{
RdmaClientEndpointGroup *group = reinterpret_cast<RdmaClientEndpointGroup *>(jclient);
group->close();
//delete group;
// group->close();
}
......@@ -61,10 +63,10 @@ JNIEXPORT jbyteArray JNICALL Java_JClient_get(JNIEnv *jenv, jobject jobj, jlong
/* Create Java Byte Array and send data to java side
* after copying data from c char array to java byte array
*/
std::cout << "get future.get()\n";
//std::cout << "get future.get()\n";
char *data = future->get();
delete future;
std::cout << "g future.get()\n";
//std::cout << "g future.get()\n";
struct MessageHeader *response = (struct MessageHeader *)data;
if (response->type == MessageType::FAILURE)
{
......@@ -117,12 +119,12 @@ JNIEXPORT jint JNICALL Java_JClient_put(JNIEnv *jenv, jobject jobj, jlong jclien
{
return -1;
}
std::cout << "put future.get()\n";
//std::cout << "put future.get()\n";
auto data = future->get();
delete future;
std::cout << "p future.get()\n";
//std::cout << "p future.get()\n";
struct MessageHeader *response = (struct MessageHeader *)data;
std::cout<<"server response "<<response->type<<"\n";
//std::cout<<"server response "<<response->type<<"\n";
if (response->type == MessageType::SUCCESS)
{
// Currently 0 indicates success and the remaining values indicate failure; more status codes can be added in the future
......@@ -153,10 +155,10 @@ JNIEXPORT jint JNICALL Java_JClient_delete(JNIEnv *jenv, jobject jobj, jlong jcl
{
return -1;
}
std::cout << "delete future.get()\n";
//std::cout << "delete future.get()\n";
auto data = future->get();
delete future;
std::cout << "d future.get()\n";
//std::cout << "d future.get()\n";
struct MessageHeader *response = (struct MessageHeader *)data;
if (response->type == MessageType::FAILURE)
{
......
......@@ -28,6 +28,7 @@ void RdmaClientEndpoint::connect(const char *ip, const char *port, const char *c
CPPLog::LOG_DEBUG("RdmaClientEndpoint : step2 getaddrinfo");
struct addrinfo *addr;
ret = getaddrinfo(ip, port, NULL, &addr);
std::cout << "get addr info " << ret << "\n";
if (ret)
{
CPPLog::LOG_ERROR("RdmaServerEndpointGroup : get_addr_info failed");
......@@ -35,13 +36,13 @@ void RdmaClientEndpoint::connect(const char *ip, const char *port, const char *c
CPPLog::LOG_DEBUG("RdmaClientEndpoint : step2 resolve addr");
ret = rdma_resolve_addr(_cm_id, NULL, addr->ai_addr, _timeoutMs);
std::cout << "resolve route " << ret << "\n";
if (ret)
{
CPPLog::LOG_ERROR("unable to resolve addr");
return;
}
CPPLog::LOG_DEBUG("RdmaClientEndpoint : step2 resolve addr resolved");
_state = CONN_STATE_ADDR_RESOLVED;
}
void RdmaClientEndpoint::connect()
......@@ -56,27 +57,39 @@ void RdmaClientEndpoint::connect()
conn_param.rnr_retry_count = 7;
conn_param.private_data = _connData;
conn_param.private_data_len = strlen(_connData);
rdma_connect(_cm_id, &conn_param);
int ret = rdma_connect(_cm_id, &conn_param);
if (ret == -1)
{
std::ostringstream ss;
ss << "rdma_connect error occured errno " << errno << strerror(errno);
CPPLog::LOG_ERROR(ss);
}
}
else
{
rdma_connect(_cm_id, NULL);
int ret = rdma_connect(_cm_id, NULL);
if (ret == -1)
{
std::ostringstream ss;
ss << "rdma_connect error occured errno " << errno << strerror(errno);
CPPLog::LOG_ERROR(ss);
}
}
}
// Returns true once the endpoint has reached CONN_STATE_CONNECTED
// (the state set while processing RDMA CM events during connect).
bool RdmaClientEndpoint::isConnected()
{
return _state == CONN_STATE_CONNECTED;
}
void RdmaClientEndpoint::processCmEvent(struct rdma_cm_event *event)
{
std::cout << "ClientEndpoint " << rdma_event_str(event->event) << "\n";
if (event->event == RDMA_CM_EVENT_ADDR_RESOLVED && event->id != NULL)
{
_state = CONN_STATE_ADDR_RESOLVED;
CPPLog::LOG_DEBUG("RdmaClientEndpoint : step3 resolve_route");
createResources();
createQueuePairs();
rdma_resolve_route(_cm_id, _timeoutMs);
}
else if (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED && event->id != NULL)
......@@ -95,6 +108,11 @@ void RdmaClientEndpoint::processCmEvent(struct rdma_cm_event *event)
CPPLog::LOG_DEBUG("RdmaClientEndpoint : step7 Closed");
completeClose();
}
else if (event->id != NULL && event->event == RDMA_CM_EVENT_UNREACHABLE)
{
std::cout << "Not able to connect client\n";
exit(1);
}
else
{
std::ostringstream ss;
......@@ -124,14 +142,18 @@ void RdmaClientEndpoint::completeClose()
if (_state != CONN_STATE_PARTIAL_CLOSED)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : completeClose invalid state");
//return;
// return;
}
_state = CONN_STATE_CLOSED;
// delete _sendBuffers;
rdma_dereg_mr(_sendMr);
rdma_dereg_mr(_recvMr);
delete[] _sendBuff;
delete[] _recvBuff;
if (_sendMr != nullptr)
rdma_dereg_mr(_sendMr);
if (_recvMr != nullptr)
rdma_dereg_mr(_recvMr);
if (_sendBuff != nullptr)
delete[] _sendBuff;
if (_recvBuff != nullptr)
delete[] _recvBuff;
rdma_destroy_qp(_cm_id);
rdma_destroy_id(_cm_id);
CPPLog::LOG_INFO("RdmaClientEndpoint : close connection");
......@@ -144,11 +166,11 @@ void RdmaClientEndpoint::registerMemory()
CPPLog::LOG_ERROR("RdmaClientEndpoint : createResource address not resolved");
return;
}
_sendBuff = new char[(_sendMsgSize+MessageHeaderSize) * _sendQueueSize];
_sendBuff = new char[(_sendMsgSize + MessageHeaderSize) * _sendQueueSize];
if (_sendBuff == NULL)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : sendBuff allocation failed");
return;
exit(1);
}
_sendMr = rdma_reg_msgs(_cm_id, reinterpret_cast<void *>(_sendBuff),
......@@ -156,14 +178,14 @@ void RdmaClientEndpoint::registerMemory()
if (_sendMr == NULL)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : sendMr reg failed");
return;
exit(1);
}
_recvBuff = new char[(MessageHeaderSize + _recvMsgSize) * _recvQueueSize];
if (_recvBuff == NULL)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : recvBuff allocation failed\n");
return;
exit(1);
}
_recvMr = rdma_reg_msgs(_cm_id, reinterpret_cast<void *>(_recvBuff),
......@@ -172,14 +194,18 @@ void RdmaClientEndpoint::registerMemory()
if (_recvMr == NULL)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : recvMr reg failed\n");
return;
exit(1);
}
for (int i = 0; i < _recvQueueSize; i++)
{
char *buffer = _recvBuff + i * _recvMsgSize;
rdma_post_recv(_cm_id, reinterpret_cast<void *>(buffer), reinterpret_cast<void *>(buffer),
_recvMsgSize, _recvMr);
int ret = rdma_post_recv(_cm_id, reinterpret_cast<void *>(buffer), reinterpret_cast<void *>(buffer),
_recvMsgSize, _recvMr);
if (ret == -1)
{
CPPLog::LOG_ERROR("Failed rmda_post_recv");
}
}
for (int i = 0; i < _sendQueueSize; i++)
{
......@@ -191,18 +217,19 @@ void RdmaClientEndpoint::registerMemory()
_state = CONN_STATE_RESOURCES_ALLOCATED;
}
void RdmaClientEndpoint::createResources()
void RdmaClientEndpoint::createQueuePairs()
{
if (_state != CONN_STATE_ADDR_RESOLVED)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : createResource address not resolved");
return;
exit(1);
}
_protectionDomain = ibv_alloc_pd(_cm_id->verbs);
if (_protectionDomain == NULL)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : ibv_alloc_pd failed ");
return;
exit(1);
}
struct ibv_cq *completionQueue = _group->createCq(_cm_id);
struct ibv_qp_init_attr qp_init_attr;
......@@ -229,7 +256,8 @@ void RdmaClientEndpoint::createResources()
int ret = rdma_create_qp(_cm_id, _protectionDomain, &qp_init_attr);
if (ret)
{
CPPLog::LOG_ERROR("RdmaClientEndpoint : ibv_create_cq failed");
CPPLog::LOG_ERROR("RdmaClientEndpoint : ibv_create_qp failed");
exit(1);
}
if (_cm_id->pd == NULL)
{
......@@ -245,12 +273,12 @@ int RdmaClientEndpoint::sendMessage(const char *buffer, int size)
if (size > _sendMsgSize)
{
std::ostringstream ss;
ss<<"Large Message size "<<size;
ss<<" send buffer size " << _sendMsgSize;
ss << "Large Message size " << size;
ss << " send buffer size " << _sendMsgSize;
CPPLog::LOG_ERROR(ss);
return -1;
}
std::unique_lock<std::mutex> lock(_sendBuffersM);
if (_sendBuffers.size() == 0)
return -1;
......@@ -259,15 +287,27 @@ int RdmaClientEndpoint::sendMessage(const char *buffer, int size)
lock.unlock();
memcpy(sendBuffer, buffer, size);
return rdma_post_send(_cm_id, sendBuffer, sendBuffer, size, _sendMr, 0);
int ret = rdma_post_send(_cm_id, sendBuffer, sendBuffer, size, _sendMr, 0);
if (ret == -1)
{
std::ostringstream ss;
ss << "rdma_post_send error occured errno " << errno << strerror(errno);
CPPLog::LOG_ERROR(ss);
}
return ret;
}
void RdmaClientEndpoint::processSendComp(struct ibv_wc wc)
// Send-completion handler: recycles the send buffer carried in wc.wr_id
// back into the free-buffer queue so later sends can reuse it.
// wr_id holds the buffer pointer because rdma_post_send is called with the
// send buffer as its context argument elsewhere in this file.
void RdmaClientEndpoint::processSendComp(struct ibv_wc &wc)
{
// _sendBuffersM guards the shared _sendBuffers queue against concurrent
// producers/consumers (completion thread vs. senders).
std::unique_lock<std::mutex> lock(_sendBuffersM);
if ((char *)wc.wr_id == nullptr)
{
CPPLog::LOG_ERROR("Process Send Comp got nullptr in wc_id");
return;
}
_sendBuffers.push((char *)wc.wr_id);
}
void RdmaClientEndpoint::processRecvComp(struct ibv_wc wc)
void RdmaClientEndpoint::processRecvComp(struct ibv_wc &wc)
{
char *data = new char[wc.byte_len];
memcpy(data, (void *)wc.wr_id, wc.byte_len);
......@@ -280,6 +320,7 @@ void RdmaClientEndpoint::processRecvComp(struct ibv_wc wc)
CPPLog::LOG_INFO(ss);*/
if (response->type != MessageType::INVALIDATE)
{
std::unique_lock<std::mutex> lock(_futureMutex);
auto it = futures.find(response->id);
if (it == futures.end())
{
......@@ -287,17 +328,30 @@ void RdmaClientEndpoint::processRecvComp(struct ibv_wc wc)
ss << "Recv completion for invalid id" << response->id;
CPPLog::LOG_DEBUG(ss);
}
it->second->put(data);
auto future = it->second;
futures.erase(it);
lock.unlock();
future->put(data);
}
}
RdmaFuture *RdmaClientEndpoint::put(const char *key, int keySize, const char *value, int valueSize)
{
if (keySize + valueSize + (int)MessageHeaderSize > _sendMsgSize)
{
if (keySize + valueSize > _sendMsgSize)
{
std::ostringstream ss;
ss << "Large Message size " << keySize + valueSize;
ss << " send buffer size " << _sendMsgSize;
CPPLog::LOG_ERROR(ss);
return nullptr;
}
std::unique_lock<std::mutex> lock(_sendBuffersM);
if (_sendBuffers.size() == 0)
{
CPPLog::LOG_ERROR("No Buffers available to send packet");
return nullptr;
}
char *sendBuffer = _sendBuffers.front();
_sendBuffers.pop();
lock.unlock();
......@@ -308,20 +362,39 @@ RdmaFuture *RdmaClientEndpoint::put(const char *key, int keySize, const char *va
request->valueSize = valueSize;
memcpy(sendBuffer + MessageHeaderSize, key, keySize);
memcpy(sendBuffer + MessageHeaderSize + keySize, value, valueSize);
rdma_post_send(_cm_id, sendBuffer, sendBuffer, MessageHeaderSize + keySize + valueSize,
_sendMr, 0);
int ret = rdma_post_send(_cm_id, sendBuffer, sendBuffer, MessageHeaderSize + keySize + valueSize,
_sendMr, 0);
if (ret == -1)
{
std::ostringstream ss;
ss << "rdma_post_send error occured errno " << errno << strerror(errno);
CPPLog::LOG_ERROR(ss);
return nullptr;
}
RdmaFuture *future = new RdmaFuture(request->id);
std::unique_lock<std::mutex> futureLock(_futureMutex);
futures[request->id] = future;
std::cout<<"created future\n";
futureLock.unlock();
// std::cout << "created future\n";
return future;
}
RdmaFuture *RdmaClientEndpoint::get(const char *key, int keySize)
{
if (keySize + (int)MessageHeaderSize > _sendMsgSize)
if (keySize > _sendMsgSize)
{
std::ostringstream ss;
ss << "Large Message size " << keySize;
ss << " send buffer size " << _sendMsgSize;
CPPLog::LOG_ERROR(ss);
return nullptr;
}
std::unique_lock<std::mutex> lock(_sendBuffersM);
if (_sendBuffers.size() == 0)
{
CPPLog::LOG_ERROR("No Buffers available to send packet");
return nullptr;
}
char *sendBuffer = _sendBuffers.front();
_sendBuffers.pop();
lock.unlock();
......@@ -330,19 +403,40 @@ RdmaFuture *RdmaClientEndpoint::get(const char *key, int keySize)
request->type = MessageType::GET;
request->keySize = keySize;
memcpy(sendBuffer + MessageHeaderSize, key, keySize);
rdma_post_send(_cm_id, sendBuffer, sendBuffer, MessageHeaderSize + keySize,
_sendMr, 0);
/* The context associated with the send request will be returned
through the work-completion wr_id (work request identifier) field. */
int ret = rdma_post_send(_cm_id, sendBuffer, sendBuffer, MessageHeaderSize + keySize,
_sendMr, 0);
if (ret == -1)
{
std::ostringstream ss;
ss << "rdma_post_send error occured errno " << errno << strerror(errno);
CPPLog::LOG_ERROR(ss);
return nullptr;
}
RdmaFuture *future = new RdmaFuture(request->id);
std::unique_lock<std::mutex> futureLock(_futureMutex);
futures[request->id] = future;
futureLock.unlock();
return future;
}
RdmaFuture *RdmaClientEndpoint::deleteKey(const char *key, int keySize)
{
if (keySize + (int)MessageHeaderSize > _sendMsgSize)
if (keySize > _sendMsgSize)
{
std::ostringstream ss;
ss << "Large Message size " << keySize;
ss << " send buffer size " << _sendMsgSize;
CPPLog::LOG_ERROR(ss);
return nullptr;
}
std::unique_lock<std::mutex> lock(_sendBuffersM);
if (_sendBuffers.size() == 0)
{
CPPLog::LOG_ERROR("No Buffers available to send packet");
return nullptr;
}
char *sendBuffer = _sendBuffers.front();
_sendBuffers.pop();
lock.unlock();
......@@ -351,9 +445,18 @@ RdmaFuture *RdmaClientEndpoint::deleteKey(const char *key, int keySize)
request->type = MessageType::DELETE;
request->keySize = keySize;
memcpy(sendBuffer + MessageHeaderSize, key, keySize);
rdma_post_send(_cm_id, sendBuffer, sendBuffer, MessageHeaderSize + keySize,
_sendMr, 0);
int ret = rdma_post_send(_cm_id, sendBuffer, sendBuffer, MessageHeaderSize + keySize,
_sendMr, 0);
if (ret == -1)
{
std::ostringstream ss;
ss << "rdma_post_send error occured errno " << errno << strerror(errno);
CPPLog::LOG_ERROR(ss);
return nullptr;
}
RdmaFuture *future = new RdmaFuture(request->id);
std::unique_lock<std::mutex> futureLock(_futureMutex);
futures[request->id] = future;
futureLock.unlock();
return future;
}
......@@ -27,9 +27,10 @@ void RdmaClientEndpointGroup::processCmEvent(struct rdma_cm_event *event)
}
else
{
std::cout << "RdmaClientEndpointGroup : Not able to procces CM EVent";
std::cout << rdma_event_str(event->event) << event->id << " ";
std::cout << event->listen_id << std::endl;
std::ostringstream ss;
ss<<"RdmaClientEndpointGroup : Not able to procces CM EVent" << rdma_event_str(event->event) ;
ss << " id "<<event->id << " listen id"<< event->listen_id;
CPPLog::LOG_ERROR(ss);
}
}
......@@ -39,6 +40,8 @@ RdmaClientEndpoint *RdmaClientEndpointGroup::createEndpoint()
RdmaClientEndpoint *endpoint = new RdmaClientEndpoint(id, this,
_sendQueueSize, _recvQueueSize,
_sendMsgSize, _recvMsgSize, _maxInLine, _timeoutMs);
/* Setting Endpoint in cm_id context so that whenever cm event come we can get endpoint
*/
id->context = (void *)endpoint;
return endpoint;
}
......@@ -47,7 +50,6 @@ struct ibv_cq *RdmaClientEndpointGroup::createCq(struct rdma_cm_id *id)
{
if (_cqProcessor == NULL)
{
std::cout << "RdmaClientEndpointGroup : Creating CQ processor" << std::endl;
_cqProcessor = new RdmaCqProcessor(id->verbs, _compQueueSize);
_cqProcessor->start();
}
......@@ -60,34 +62,18 @@ void RdmaClientEndpointGroup::createClientEps(Properties *prop)
std::cout << "clients" << _clients << "\n";
for (int i = 0; i < _clients; i++)
{
std::cout << "creating client for " << prop->getValue("SERVER_IP") << (i + 1);
std::cout << ":" << prop->getValue("SERVER_PORT") << (i + 1) << " \n";