SYNERG / hpdos · Commits

Commit 41b6326b, authored May 25, 2022 by Paras Garg
Parent: 7e25adec

Fixed double free while deregistering memory

Showing 15 changed files with 340 additions and 199 deletions (+340 / -199)
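Most of the actual fix lands in code/cppclient/src/RdmaClientEndpoint.cpp (whose diff is collapsed further down), so the snippet below is only a generic, illustrative sketch of the guard this kind of double-free fix usually relies on: release a registered region exactly once and null the handle so a repeated close() becomes a no-op. The struct and function names are hypothetical, not code from this repository.

#include <infiniband/verbs.h>

// Hypothetical per-endpoint buffer state, loosely mirroring _sendMr/_sendBuff.
struct RegisteredBuffer
{
    struct ibv_mr *mr = nullptr;
    char *buf = nullptr;
};

// Safe to call any number of times: a second call only sees null handles.
void releaseBuffer(RegisteredBuffer &r)
{
    if (r.mr != nullptr)
    {
        ibv_dereg_mr(r.mr); // deregister the memory region exactly once
        r.mr = nullptr;
    }
    delete[] r.buf;         // delete[] on nullptr is a harmless no-op
    r.buf = nullptr;
}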
code/cppclient/Makefile                                    +5    -0
code/cppclient/Readme.md                                   +15   -20
code/cppclient/YCSB/hpdos/src/main/java/HpdosClient.java   +3    -3
code/cppclient/YCSB/workloads/myworkload                   +3    -3
code/cppclient/header/RdmaClientEndpoint.hpp               +15   -14
code/cppclient/header/RdmaClientEndpointGroup.hpp          +1    -1
code/cppclient/header/RdmaCmProcessor.hpp                  +5    -3
code/cppclient/header/RdmaCqProcessor.hpp                  +2    -1
code/cppclient/prop.config                                 +11   -10
code/cppclient/src/JClient.cpp                             +9    -7
code/cppclient/src/RdmaClientEndpoint.cpp                  +143  -40
code/cppclient/src/RdmaClientEndpointGroup.cpp             +30   -24
code/cppclient/src/RdmaCmProcessor.cpp                     +18   -8
code/cppclient/src/RdmaCqProcessor.cpp                     +66   -53
code/cppclient/src/RdmaFuture.cpp                          +14   -12
code/cppclient/Makefile

@@ -29,6 +29,7 @@ $(OBJ_DIR)/%.o: $(SRC_DIR)/%.cpp
#$(BINDIR)/$(TARGET): $(OBJS)
$(TARGET) : $(OBJS)
	mkdir -p .build
	$(CXX) -o $@ $^ $(LIBS)
	@echo "Linked " $< " successfully!"

@@ -36,6 +37,9 @@ $(TARGET) : $(OBJS)
clean :
	rm -f $(OBJ_DIR)/*
	rm -f $(TARGET)
	rm -f *.log
	rm -f YCSB/*.log
	rm -f libhpdosclient.so
.PHONY : count
count :

@@ -47,6 +51,7 @@ count:
Jclient : $(OBJS)
	mkdir -p .build
	$(CXX) -o libhpdosclient.so -L /usr/local/lib -shared $^ $(LIBS)
	@echo "jclient " $< " successfully!"
	sudo rm -f /usr/lib/libhpdosclient.so
code/cppclient/Readme.md

Steps to build jni client
> mkdir -f .build
> make JniHeader <br>
-> make JClient <br>
+> make Jclient <br>
> java -cp jsrc JClient

Running YSCB
> mvn compile <br>
add .build to make
./bin/ycsb load hpdos -P workloads/workloadb -threads 1
mvn -pl site.ycsb:hpdos-binding -am clean package -Dcheckstyle.skip
https://medium.com/@pirogov.alexey/gdb-debug-native-part-of-java-application-c-c-libraries-and-jdk-6593af3b4f3f

to do
delete client endpoint on close
threading in client and hashing in client
add more code for success failure not found etc server error client error
add cache add support for invalidation
interface client API through endpointGroup
Endpointgroup to manage list of servers and caches, Invalidation

Running YSCB
./bin/ycsb shell hpdos
./bin/ycsb run hpdos -P workloads/workloada
./bin/ycsb run hpdos -P workloads/workloada -threads 1
./bin/ycsb load hpdos -P workloads/workloada

Options:
  -P file     Specify workload file

@@ -33,6 +17,17 @@ Options:
  -target n   Target ops/sec (default: unthrottled)
  -threads n  Number of client threads (default: 1)

mvn -pl site.ycsb:hpdos-binding -am clean package -Dcheckstyle.skip
https://medium.com/@pirogov.alexey/gdb-debug-native-part-of-java-application-c-c-libraries-and-jdk-6593af3b4f3f

to do
make requestId unique
delete client endpoint on close
threading in client and hashing in client
add more code for success failure not found etc server error client error
add cache add support for invalidation

# Steps to configure spdk target and linux initiator
Build spdk using https://spdk.io/doc/nvmf.html
code/cppclient/YCSB/hpdos/src/main/java/HpdosClient.java

@@ -50,13 +50,13 @@ public class HpdosClient extends DB {
   // Read a single record
   public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
-    //System.out.println("Read" + key);
+    System.out.println("Read" + key.length());
     byte res[] = jclient.get(jclient.endpointGroup, key.getBytes());
     if (res == null)
       return Status.NOT_FOUND;
     if (fields == null) {
-      return Status.OK;
+      return Status.BAD_REQUEST;
     }
     Iterator<String> it = fields.iterator();
     if (it.hasNext())

@@ -72,7 +72,7 @@ public class HpdosClient extends DB {
   // Update a single record
   public Status update(String table, String key, Map<String, ByteIterator> values) {
-    //System.out.println("update" + key);
+    System.out.println("update" + key.getBytes().length);
     try {
       ByteArrayOutputStream stream = new ByteArrayOutputStream();
       for (ByteIterator v : values.values()) {
code/cppclient/YCSB/workloads/myworkload

@@ -22,10 +22,10 @@
 # Default data size: 1 KB records (10 fields, 100 bytes each, plus key)
 # Request distribution: zipfian
-recordcount=200
-operationcount=200
+recordcount=2000000
+operationcount=500000
 fieldcount=1
-fieldlength=200
+fieldlength=2500
 workload=site.ycsb.workloads.CoreWorkload
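For quick experiments, the same knobs can usually be overridden per run instead of editing the workload file: the stock YCSB driver accepts -p name=value property overrides on the command line (standard YCSB behaviour, not something this commit adds), e.g.

./bin/ycsb run hpdos -P workloads/myworkload -p recordcount=2000000 -p operationcount=500000 -threads 1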
code/cppclient/header/RdmaClientEndpoint.hpp

@@ -11,14 +11,13 @@
 //#include <boost/lockfree/queue.hpp>
 #include <queue>
 #include <map>
 #include <mutex>
-#include <mutex>
 #include "Buffer.hpp"
 #include "Logger.hpp"
 #include "RdmaEndpointGroup.hpp"
 #include "MessageFormats.hpp"
 #include "RdmaFuture.hpp"

 class RdmaClientEndpoint
 {

@@ -41,7 +40,7 @@ class RdmaClientEndpoint
     int _state;
     int _maxInLine;
     int _timeoutMs;
     const char *_connData;
     char *_sendBuff = NULL;

@@ -49,32 +48,34 @@ class RdmaClientEndpoint
     struct ibv_mr *_sendMr = NULL;
     struct ibv_mr *_recvMr = NULL;
     // boost::lockfree::queue<char *> *_sendBuffers;
     std::queue<char *> _sendBuffers;
     std::mutex _sendBuffersM;
     std::map<uint64_t, RdmaFuture *> futures;
-    void completeClose();
+    std::mutex _futureMutex;
     void connect();
     void registerMemory();
-    void createResources();
+    void createQueuePairs();

 public:
     std::atomic<uint64_t> _requestId{12};
     RdmaClientEndpoint(struct rdma_cm_id *id, RdmaEndpointGroup *group, int sendQueueSize, int recvQueueSize,
                        int sendMsgSize, int recvMsgSize, int maxInLine, int timeout);
-    void processCmEvent(struct rdma_cm_event *event);
     void connect(const char *ip, const char *port, const char *connData);
     bool isConnected();
     void close();
+    void completeClose();
+    void processCmEvent(struct rdma_cm_event *event);
-    void processSendComp(struct ibv_wc);
-    void processRecvComp(struct ibv_wc);
+    void processSendComp(struct ibv_wc &);
+    void processRecvComp(struct ibv_wc &);
     int sendMessage(const char *buffer, int size);
     RdmaFuture *put(const char *key, int keySize, const char *value, int valueSize);
     RdmaFuture *get(const char *key, int keySize);
     RdmaFuture *deleteKey(const char *key, int keySize);
 };
 #endif
\ No newline at end of file
code/cppclient/header/RdmaClientEndpointGroup.hpp

@@ -46,7 +46,7 @@ public:
    RdmaFuture *put(const char *key, int keySize, const char *value, int valueSize);
    RdmaFuture *get(const char *key, int keySize);
    RdmaFuture *deleteKey(const char *key, int keySize);
    void close();
};
#endif
\ No newline at end of file
code/cppclient/header/RdmaCmProcessor.hpp

#ifndef __RDMACMPROCESSOR__
#define __RDMACMPROCESSOR__
#include <unordered_map>
#include <rdma/rdma_cma.h>
#include <rdma/rdma_verbs.h>
#include <stdint.h>
#include <thread>
#include <iostream>
#include "RdmaEndpointGroup.hpp"
#include "Logger.hpp"

class RdmaCmProcessor
{
code/cppclient/header/RdmaCqProcessor.hpp

@@ -12,11 +12,12 @@
 class RdmaCqProcessor
 {
+    bool _stop{false};

 public:
     struct ibv_comp_channel *_compChannel;
     struct ibv_cq *_completionQueue;
     std::thread *_compQueueThread;
-    std::unordered_map<uint32_t, RdmaClientEndpoint *> *_qpEndpointMap{NULL};
+    std::unordered_map<uint32_t, RdmaClientEndpoint *> _qpEndpointMap;
     RdmaCqProcessor(ibv_context *verbs, int compQueueSize);
     struct ibv_cq *getCq();
code/cppclient/prop.config

 #For commenting used # empty line are ignored
 #comments after parameters also supported
 # use key=value format
 #All Parameters will be taken as string
 # Fixed Parameters
-ENABLE_LOGGING=0
-SERVER_IP=192.168.200.20
-SERVER_PORT=1921
-EXECUTOR_POOL_SIZE=4
\ No newline at end of file
+sendQS=100
+recvQS=100
+compQS=100
+sendMS=3000
+recvMS=3000
+ENABLE_LOGGING=1
+NSERVERS=2
+SERVER_IP1=192.168.200.30
+SERVER_PORT1=1920
+SERVER_IP2=192.168.200.50
+SERVER_PORT2=1920
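prop.config is a plain key=value file with '#' comments, and the group code later looks up numbered keys such as SERVER_IP1/SERVER_PORT1 through prop->getValue(...). The project's Properties class is not part of this diff, so the reader below is only a minimal sketch of how such a file could be parsed; the class name and behaviour are assumptions.

#include <fstream>
#include <map>
#include <string>

class PropertiesSketch
{
    std::map<std::string, std::string> _values;

public:
    explicit PropertiesSketch(const std::string &path)
    {
        std::ifstream in(path);
        std::string line;
        while (std::getline(in, line))
        {
            auto hash = line.find('#');
            if (hash != std::string::npos)
                line = line.substr(0, hash); // '#' starts a comment, even after a value
            auto eq = line.find('=');
            if (eq == std::string::npos)
                continue;                    // blank or comment-only lines are ignored
            // every parameter is kept as a string, e.g. "SERVER_PORT1" -> "1920"
            _values[line.substr(0, eq)] = line.substr(eq + 1);
        }
    }

    std::string getValue(const std::string &key) const
    {
        auto it = _values.find(key);
        return it == _values.end() ? std::string() : it->second;
    }
};

// e.g. PropertiesSketch prop("prop.config");
//      prop.getValue("SERVER_IP" + std::to_string(1));   // -> "192.168.200.30"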
code/cppclient/src/JClient.cpp

@@ -32,6 +32,8 @@ JNIEXPORT jlong JNICALL Java_JClient_createEndpointGroup(JNIEnv *jenv, jobject j
 JNIEXPORT void JNICALL Java_JClient_closeEndpointGroup(JNIEnv *jenv, jobject jobj, jlong jclient)
 {
     RdmaClientEndpointGroup *group = reinterpret_cast<RdmaClientEndpointGroup *>(jclient);
     group->close();
     //delete group;
     // group->close();
 }

@@ -61,10 +63,10 @@ JNIEXPORT jbyteArray JNICALL Java_JClient_get(JNIEnv *jenv, jobject jobj, jlong
     /* Create Java Byte Array and send data to java side
      * after copying data from c char array to java byte array
      */
-    std::cout << "get future.get()\n";
+    // std::cout << "get future.get()\n";
     char *data = future->get();
     delete future;
-    std::cout << "g future.get()\n";
+    // std::cout << "g future.get()\n";
     struct MessageHeader *response = (struct MessageHeader *)data;
     if (response->type == MessageType::FAILURE)
     {

@@ -117,12 +119,12 @@ JNIEXPORT jint JNICALL Java_JClient_put(JNIEnv *jenv, jobject jobj, jlong jclien
     {
         return -1;
     }
-    std::cout << "put future.get()\n";
+    // std::cout << "put future.get()\n";
     auto data = future->get();
     delete future;
-    std::cout << "p future.get()\n";
+    // std::cout << "p future.get()\n";
     struct MessageHeader *response = (struct MessageHeader *)data;
-    std::cout << "server response " << response->type << "\n";
+    // std::cout<<"server response "<<response->type<<"\n";
     if (response->type == MessageType::SUCCESS)
     {
         // Currently 0 indicate succces and remaing are failure more status code can be added in future

@@ -153,10 +155,10 @@ JNIEXPORT jint JNICALL Java_JClient_delete(JNIEnv *jenv, jobject jobj, jlong jcl
     {
         return -1;
     }
-    std::cout << "delete future.get()\n";
+    // std::cout << "delete future.get()\n";
     auto data = future->get();
     delete future;
-    std::cout << "d future.get()\n";
+    // std::cout << "d future.get()\n";
     struct MessageHeader *response = (struct MessageHeader *)data;
     if (response->type == MessageType::FAILURE)
     {
code/cppclient/src/RdmaClientEndpoint.cpp

(This diff is collapsed in the captured page and its contents are not shown: +143 / -40.)
code/cppclient/src/RdmaClientEndpointGroup.cpp

@@ -27,9 +27,10 @@ void RdmaClientEndpointGroup::processCmEvent(struct rdma_cm_event *event)
     }
     else
     {
-        std::cout << "RdmaClientEndpointGroup : Not able to procces CM EVent";
-        std::cout << rdma_event_str(event->event) << event->id << " ";
-        std::cout << event->listen_id << std::endl;
+        std::ostringstream ss;
+        ss << "RdmaClientEndpointGroup : Not able to procces CM EVent" << rdma_event_str(event->event);
+        ss << " id " << event->id << " listen id" << event->listen_id;
+        CPPLog::LOG_ERROR(ss);
     }
 }

@@ -39,6 +40,8 @@ RdmaClientEndpoint *RdmaClientEndpointGroup::createEndpoint()
     RdmaClientEndpoint *endpoint = new RdmaClientEndpoint(id, this, _sendQueueSize, _recvQueueSize,
                                                           _sendMsgSize, _recvMsgSize, _maxInLine, _timeoutMs);
     /* Setting Endpoint in cm_id context so that whenever cm event come we can get endpoint */
     id->context = (void *)endpoint;
     return endpoint;
 }

@@ -47,7 +50,6 @@ struct ibv_cq *RdmaClientEndpointGroup::createCq(struct rdma_cm_id *id)
 {
     if (_cqProcessor == NULL)
     {
-        std::cout << "RdmaClientEndpointGroup : Creating CQ processor" << std::endl;
         _cqProcessor = new RdmaCqProcessor(id->verbs, _compQueueSize);
         _cqProcessor->start();
     }

@@ -60,34 +62,18 @@ void RdmaClientEndpointGroup::createClientEps(Properties *prop)
     std::cout << "clients" << _clients << "\n";
     for (int i = 0; i < _clients; i++)
     {
-        std::cout << "creating client for " << prop->getValue("SERVER_IP") << (i + 1);
-        std::cout << ":" << prop->getValue("SERVER_PORT") << (i + 1) << "\n";
+        std::cout << "creating client for " << prop->getValue("SERVER_IP" + std::to_string(i + 1));
+        std::cout << ":" << prop->getValue("SERVER_PORT" + std::to_string(i + 1)) << "\n";
         RdmaClientEndpoint *ep = createEndpoint();
         ep->connect((prop->getValue("SERVER_IP" + std::to_string(i + 1))).c_str(),
                     (prop->getValue("SERVER_PORT" + std::to_string(i + 1))).c_str(), "sal");
         _clientEps.push_back(ep);
         std::cout << "ep" << ep << std::endl;
     }
     std::cout << "vec size" << _clientEps.size() << "\n";
     for (int i = 0; i < _clients; i++)
     {
         std::cout << "ep" << _clientEps[i] << std::endl;
         int timeout = 0;
-        do
+        while (!_clientEps[i]->isConnected())
         {
-            if (_clientEps[i]->isConnected())
-            {
-                break;
-            }
             std::cout << "timeout " << timeout << "\n" << _clients;
             std::this_thread::sleep_for(std::chrono::milliseconds(10));
             timeout += 10;
         }
-        while (timeout < 1000);
         if (!_clientEps[i]->isConnected())
         {
             std::cout << "Client Endpoint not connected " << prop->getValue("SERVER_IP" + std::to_string(i + 1)) << "\n";
-            exit(1);
+            continue;
         }
     }
     std::cout << "connected\n";

@@ -114,4 +100,24 @@ RdmaFuture *RdmaClientEndpointGroup::deleteKey(const char *key, int keySize)
     int id = _counter;
     _counter = (_counter + 1) % _clients;
     return _clientEps[id]->deleteKey(key, keySize);
 }
+void RdmaClientEndpointGroup::close()
+{
+    if (_cmProcessor != nullptr)
+    {
+        _cmProcessor->close();
+        delete _cmProcessor;
+    }
+    if (_cqProcessor != nullptr)
+    {
+        _cqProcessor->close();
+        delete _cqProcessor;
+    }
+    for (auto ep : _clientEps)
+    {
+        ep->close();
+        ep->completeClose();
+        delete ep;
+    }
+}
\ No newline at end of file
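The group's put/get/deleteKey calls fan requests out over _clientEps with a plain rotating counter (_counter = (_counter + 1) % _clients). If several threads issue requests at once that counter can race; a thread-safe variant, shown below purely as a sketch and not as this commit's code, would use std::atomic.

#include <atomic>
#include <cstddef>
#include <utility>
#include <vector>

// Sketch of a thread-safe round-robin picker in the spirit of
// RdmaClientEndpointGroup's _counter; the commit itself keeps a plain int.
template <typename Endpoint>
class RoundRobinSketch
{
    std::vector<Endpoint *> _eps;
    std::atomic<std::size_t> _next{0};

public:
    explicit RoundRobinSketch(std::vector<Endpoint *> eps) : _eps(std::move(eps)) {}

    Endpoint *pick()
    {
        // fetch_add hands every caller a distinct ticket even under concurrency
        std::size_t id = _next.fetch_add(1, std::memory_order_relaxed);
        return _eps[id % _eps.size()];
    }
};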
code/cppclient/src/RdmaCmProcessor.cpp

@@ -4,12 +4,13 @@
 RdmaCmProcessor::RdmaCmProcessor(RdmaEndpointGroup *group)
     : _endpointGroup(group)
 {
-    std::cout << "CMProcessor : Step 1 creating event channel" << std::endl;
+    CPPLog::LOG_ALWAYS("CMProcessor : Step 1 creating event channel");
     _eventChannel = rdma_create_event_channel();
+    _stop = false;
     if (_eventChannel == NULL)
     {
-        std::cout << "CMProcesor : error creating event channel";
+        CPPLog::LOG_ERROR("CMProcesor : error creating event channel");
         exit(1);
     }
 }

@@ -17,39 +18,44 @@ struct rdma_event_channel *RdmaCmProcessor::getEventChannel()
 {
     return _eventChannel;
 }
 struct rdma_cm_id *RdmaCmProcessor::createId()
 {
     struct rdma_cm_id *id = NULL;
     int ret = rdma_create_id(_eventChannel, &id, NULL, RDMA_PS_TCP);
     if (ret == -1)
-        std::cout << "CMProcesor : rdma_create_id failed" << std::endl;
+    {
+        CPPLog::LOG_ERROR("CMProcesor : rdma_create_id failed");
+        return nullptr;
+    }
     return id;
 }
 void RdmaCmProcessor::start()
 {
     _cmEventThread = new std::thread(&RdmaCmProcessor::processCmEvent, this);
     pthread_setname_np(_cmEventThread->native_handle(), "CMProcessor");
 }
 void RdmaCmProcessor::processCmEvent()
 {
     int ret;
     struct rdma_cm_event *event;
-    std::cout << "CMProcessor : starting cm processing thread" << std::endl;
+    CPPLog::LOG_ALWAYS("CMProcessor : starting cm processing thread");
     while (!_stop)
     {
         ret = rdma_get_cm_event(_eventChannel, &event);
         if (ret)
         {
-            std::cout << "CMProcesor : rdma_get_cm_event failed" << std::endl;
+            CPPLog::LOG_ERROR("CMProcesor : rdma_get_cm_event failed");
             continue;
         }
         _endpointGroup->processCmEvent(event);
         ret = rdma_ack_cm_event(event);
         if (ret)
         {
-            std::cout << "CMProcesor : rdma_ack_cm_event failed";
+            CPPLog::LOG_ERROR("CMProcesor : rdma_ack_cm_event failed");
         }
     }
 }

@@ -57,6 +63,10 @@ void RdmaCmProcessor::processCmEvent()
 void RdmaCmProcessor::close()
 {
     _stop = true;
-    _cmEventThread->join();
+    if (_cmEventThread != nullptr)
+    {
+        _cmEventThread->join();
+        delete _cmEventThread;
+    }
     rdma_destroy_event_channel(_eventChannel);
 }
\ No newline at end of file
code/cppclient/src/RdmaCqProcessor.cpp

@@ -2,46 +2,49 @@
 RdmaCqProcessor::RdmaCqProcessor(ibv_context *verbs, int compQueueSize)
 {
-    //_qpEndpointMap = new std::unordered_map<>();
-    _qpEndpointMap = new std::unordered_map<uint32_t, RdmaClientEndpoint *>();
+    CPPLog::LOG_ALWAYS("RdmaClientEndpointGroup : Creating CQ processor");
+    //_qpEndpointMap = new std::unordered_map<uint32_t, RdmaClientEndpoint *>();
     _compChannel = ibv_create_comp_channel(verbs);
     if (_compChannel == NULL)
     {
-        std::cout << "CqProcessr : ibv_create_comp_channel failed\n";
-        return;
+        CPPLog::LOG_ERROR("CqProcessr : ibv_create_comp_channel failed");
+        exit(1);
     }
     _completionQueue = ibv_create_cq(verbs, compQueueSize, NULL, _compChannel, 0);
     if (_completionQueue == NULL)
     {
-        std::cout << "CqProcessr : ibv_create_cq failed" << std::endl;
-        return;
+        CPPLog::LOG_ERROR("CqProcessr : ibv_create_cq failed");
+        exit(1);
     }
     int ret = ibv_req_notify_cq(_completionQueue, 0);
     if (ret)
     {
-        std::cout << "CqProcessr : ibv_req_notify_cq failed\n";
+        CPPLog::LOG_ERROR("CqProcessr : ibv_req_notify_cq failed");
     }
 }
 struct ibv_cq *RdmaCqProcessor::getCq()
 {
     return _completionQueue;
 }
 void RdmaCqProcessor::registerEp(uint64_t qp, RdmaClientEndpoint *ep)
 {
-    _qpEndpointMap->emplace(qp, ep);
+    _qpEndpointMap.emplace(qp, ep);
 }
+void RdmaCqProcessor::deRegisterEp(uint64_t qp)
+{
+    _qpEndpointMap.erase(qp);
+}
 void RdmaCqProcessor::start()
 {
-    std::cout << "CqProcessr : starting process CQ events" << std::endl;
+    CPPLog::LOG_ALWAYS("CqProcessr : starting process CQ events");
     _compQueueThread = new std::thread(&RdmaCqProcessor::processCQEvents, this);
     pthread_setname_np(_compQueueThread->native_handle(), "compQueueThread");
 }
 void RdmaCqProcessor::processCQEvents()
 {
     int ret = 0;

@@ -49,26 +52,26 @@ void RdmaCqProcessor::processCQEvents()
     void *context;
     const int nevent = 10;
     struct ibv_wc wc_array[nevent];
-    while (1)
+    while (_stop != true)
     {
         ret = ibv_get_cq_event(_compChannel, &cq, &context);
         if (ret == -1)
         {
-            std::cout << "CqProcessr : ibv_get_cq_event failed\n";
+            CPPLog::LOG_ERROR("CqProcessr : ibv_get_cq_event failed");
             close();
         }
         ibv_ack_cq_events(cq, 1);
         ret = ibv_req_notify_cq(_completionQueue, 0);
         if (ret)
         {
-            std::cout << "CqProcessr : ibv_req_notify_cq failed\n";
+            CPPLog::LOG_ERROR("CqProcessr : ibv_req_notify_cq failed");
             close();
         }
         ret = ibv_poll_cq(cq, nevent, wc_array);
         if (ret < 0)
         {
-            std::cout << "CqProcessr : ibv_poll_cq failed\n";
+            CPPLog::LOG_ERROR("CqProcessr : ibv_poll_cq failed");
             close();
         }
         if (ret == 0)

@@ -78,44 +81,54 @@ void RdmaCqProcessor::processCQEvents()
     }
 }
 inline void RdmaCqProcessor::dispatchCqEvents(ibv_wc wc[], int size)
 {
     for (int i = 0; i < size; i++)
     {
         if (wc[i].status != IBV_WC_SUCCESS)
         {
             std::cout << "RdmaCqProcessor : failed work completion : " << ibv_wc_status_str(wc[i].status)
                       << " on qp " << wc[i].qp_num << std::endl;
             return;
         }
-        auto it = _qpEndpointMap->find(wc[i].qp_num);
-        if (it == _qpEndpointMap->end())
-        {
-            std::cout << "RdmaCqProcessor : endpoint not registered for qp num" << std::endl;
+        auto it = _qpEndpointMap.find(wc[i].qp_num);
+        if (it == _qpEndpointMap.end())
+        {
+            CPPLog::LOG_ALWAYS("RdmaCqProcessor : endpoint not registered for qp num");
             return;
         }
         switch (wc[i].opcode)
         {
         case IBV_WC_SEND:
             it->second->processSendComp(wc[i]);
             break;
         case IBV_WC_RECV:
             it->second->processRecvComp(wc[i]);
             break;
         case IBV_WC_RDMA_WRITE:
             std::cout << "rdma write completion\n";
             break;
         case IBV_WC_RDMA_READ:
             std::cout << "rdma read completion\n";
             break;
         default:
             std::cout << "RdmaCqProcessor : invalid opcode" << std::endl;
             break;
         }
     }
 }
+void RdmaCqProcessor::close()
+{
+    _stop = true;
+    if (_compQueueThread != nullptr)
+    {
+        _compQueueThread->join();
+        delete _compQueueThread;
+    }
+    if (_completionQueue != nullptr)
+        ibv_destroy_cq(_completionQueue);
+    if (_compChannel != nullptr)
+        ibv_destroy_comp_channel(_compChannel);
+}
\ No newline at end of file
code/cppclient/src/RdmaFuture.cpp

@@ -3,42 +3,44 @@
 int RdmaFuture::DONE = 2;
 int RdmaFuture::PENDING = 1;
 RdmaFuture::RdmaFuture(uint64_t id)
-    : _requestId(id), state(PENDING), _data(nullptr)
-{}
+    : _requestId(id), _data(nullptr)
+{
+    state = PENDING;
+}
 char *RdmaFuture::get()
 {
     // std::cout << (unsigned)state << std::endl;
     std::unique_lock<std::mutex> lock(stateMutex);
     while (state != DONE)
     {
-        std::cout << "getting data\n";
-        stateCv.wait(lock);
+        //std::cout << "getting data\n";
+        stateCv.wait(lock, [&] { return state == DONE; });
     }
     // [this](){return state!=DONE;});
     // lock.unlock();
     // stateCv.wait(stateMutex, [](state != DONE;));
     // std::cout<<"get"<<std::endl;
     return _data;
 }
 char *RdmaFuture::wait_for(int timeout)
 {
     std::unique_lock<std::mutex> lock(stateMutex);
     if (state == DONE)
         return _data;
     // lock.unlock();
     // add wait logic
     return nullptr;
 }
 void RdmaFuture::put(char *data)
 {
     std::unique_lock<std::mutex> lock(stateMutex);
-    std::cout << "putting data\n";
+    //std::cout << "putting data\n";
     _data = data;
     state = DONE;
     // lock.unlock();
     stateCv.notify_one();
     // std::cout << "got data current state" <<data<< (unsigned)state;
     // std::unique_lock<std::mutex> lock(stateMutex);
     // std::cout << "updated" << (unsigned)state;
 }
\ No newline at end of file
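RdmaFuture::wait_for still returns nullptr behind an "add wait logic" TODO. A bounded wait on the same mutex / condition-variable pair could look like the sketch below; the class is self-contained and illustrative, not the repository's RdmaFuture.

#include <chrono>
#include <condition_variable>
#include <mutex>

// Sketch of a bounded wait in the style of RdmaFuture::wait_for; the member
// names mirror the diff above, but the timed wait itself is an assumption.
class FutureSketch
{
    std::mutex stateMutex;
    std::condition_variable stateCv;
    bool done = false;
    char *_data = nullptr;

public:
    char *wait_for(int timeoutMs)
    {
        std::unique_lock<std::mutex> lock(stateMutex);
        // wait_for returns false if the predicate is still unsatisfied after the timeout
        if (stateCv.wait_for(lock, std::chrono::milliseconds(timeoutMs),
                             [&] { return done; }))
            return _data;
        return nullptr; // caller can retry or treat this as a timeout
    }

    void put(char *data)
    {
        {
            std::lock_guard<std::mutex> lock(stateMutex);
            _data = data;
            done = true;
        }
        stateCv.notify_one();
    }
};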