SYNERG / xanadu · Commit 10c76714
Authored Feb 13, 2020 by Naman Dixit
Added Kafka support
Parent: 64418d8e

Showing 8 changed files with 551 additions and 889 deletions (+551, −889)
resource_manager/build.linux               +9    −11
resource_manager/src/arbiter/arbiter.c     +172  −522
resource_manager/src/arbiter/socket.c      +0    −70
resource_manager/src/common/kafka.h        +188  −0
resource_manager/src/grunt/grunt.c         +114  −173
resource_manager/src/grunt/socket.c        +0    −27
resource_manager/src/test/test.c           +67   −85
resource_manager/version.linux             +1    −1
resource_manager/build.linux

@@ -38,8 +38,7 @@ fi
 # For Address Sanitizer: -fsanitize=address -fno-omit-frame-pointer
 # Memory Sanitizer     : -fsanitize=memory -fno-optimize-sibling-calls -fno-omit-frame-pointer -fsanitize-memory-track-origins
-ArbiterCompilerFlags="-iquote /code/include -iquote ${ProjectRoot}/src \
-                      -iquote ${ProjectRoot}/src/common \
+ArbiterCompilerFlags="-iquote ${ProjectRoot}/src/common \
                       -g3 -O0 -fno-strict-aliasing -fwrapv -msse2 \
                       "
 ArbiterLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
@@ -47,10 +46,10 @@ ArbiterLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
                       -D_POSIX_C_SOURCE=200809L -D_DEFAULT_SOURCE"
 ArbiterWarningFlags="-Weverything -Wpedantic -pedantic-errors -Werror \
                      -Wno-c++98-compat -Wno-gnu-statement-expression \
-                     -Wno-bad-function-cast -Wno-unused-function \
-                     -Wno-padded "
+                     -Wno-bad-function-cast -Wno-used-but-marked-unused \
+                     -Wno-padded \
+                     -Wno-gnu-zero-variadic-macro-arguments "
 ArbiterLinkerFlags="-o ${ArbiterTargetPath} \
-                   -static-libgcc -lm -pthread \
+                   -static-libgcc -lm -pthread -lrdkafka \
                    -Wl,-rpath=\${ORIGIN} -Wl,-z,origin -Wl,--enable-new-dtags"
 ${Compiler} ${ArbiterCompilerFlags} ${ArbiterLanguageFlags} ${ArbiterWarningFlags} \
@@ -70,8 +69,7 @@ fi
 # For Address Sanitizer: -fsanitize=address -fno-omit-frame-pointer
 # Memory Sanitizer     : -fsanitize=memory -fno-optimize-sibling-calls -fno-omit-frame-pointer -fsanitize-memory-track-origins
-GruntCompilerFlags="-iquote /code/include -iquote ${ProjectRoot}/src \
-                    -iquote ${ProjectRoot}/src/common \
+GruntCompilerFlags="-iquote ${ProjectRoot}/src/common \
                     -g3 -O0 -fno-strict-aliasing -fwrapv -msse2 \
                     "
 GruntLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
@@ -79,10 +77,10 @@ GruntLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
                     -D_POSIX_C_SOURCE=200809L -D_DEFAULT_SOURCE"
 GruntWarningFlags="-Weverything -Wpedantic -pedantic-errors -Werror \
                    -Wno-c++98-compat -Wno-gnu-statement-expression \
-                   -Wno-bad-function-cast -Wno-unused-function \
+                   -Wno-bad-function-cast -Wno-used-but-marked-unused \
                    -Wno-padded "
 GruntLinkerFlags="-o ${GruntTargetPath} \
-                  -static-libgcc -lm -pthread \
+                  -static-libgcc -lm -pthread -lrdkafka \
                   -Wl,-rpath=\${ORIGIN} -Wl,-z,origin -Wl,--enable-new-dtags"
 ${Compiler} ${GruntCompilerFlags} ${GruntLanguageFlags} ${GruntWarningFlags} \
@@ -111,10 +109,10 @@ TestLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
                    -D_POSIX_C_SOURCE=200809L -D_DEFAULT_SOURCE"
 TestWarningFlags="-Weverything -Wpedantic -pedantic-errors -Werror \
                   -Wno-c++98-compat -Wno-gnu-statement-expression \
-                  -Wno-bad-function-cast -Wno-unused-function \
+                  -Wno-bad-function-cast -Wno-used-but-marked-unused \
                   -Wno-padded "
 TestLinkerFlags="-o ${TestTargetPath} \
-                 -static-libgcc -lm -pthread \
+                 -static-libgcc -lm -pthread -lrdkafka \
                  -Wl,-rpath=\${ORIGIN} -Wl,-z,origin -Wl,--enable-new-dtags"
 ${Compiler} ${TestCompilerFlags} ${TestLanguageFlags} ${TestWarningFlags} \
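The functional change in the build script is the extra -lrdkafka in each LinkerFlags variable; the warning-flag shuffle (and the new -Wno-gnu-zero-variadic-macro-arguments for the arbiter) presumably accommodates the variadic logMessage macro introduced below. As a quick linkage check (a hypothetical snippet, not part of the commit), a one-file program printing the librdkafka version builds with the same flag:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void)
{
    /* rd_kafka_version_str() needs no broker; it only proves that
     * the library resolved at link time. */
    printf("librdkafka %s\n", rd_kafka_version_str());
    return 0;
}

/* cc check.c -lrdkafka && ./a.out */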
resource_manager/src/arbiter/arbiter.c

@@ -3,6 +3,8 @@
  * Notice: © Copyright 2020 Naman Dixit
  */
 
+#define logMessage(s, ...) printf(s "\n", ##__VA_ARGS__)
+
 #include "nlib/nlib.h"
 
 #include <stdio.h>
@@ -15,88 +17,59 @@
 #include <fcntl.h>
 #include <sys/time.h>
 #include <arpa/inet.h>
+#include <assert.h>
+#include <signal.h>
+#include <librdkafka/rdkafka.h>
 
-#define MAX_SOCKET_CONNECTIONS_REQUEST 64
-#define MAX_EPOLL_EVENTS 1024
-
-typedef struct Input_Resume {
-    Char *buffer;
-    Size buffer_len;
-    Size buffer_cap;
-    U32 buffer_expected_len;
-    Byte size_bytes[4];
-    Size size_bytes_count;
-    int fd;
-    B32 initialized;
-} Input_Resume;
-
-typedef struct Output_Resume {
-    Char *buffer;
-    Size buffer_pos;
-    Size buffer_len;
-    U64 msg_id_hash;
-    Sint fd;
-    Byte _pad[4];
-} Output_Resume;
-
-typedef struct Input_Output {
-    Input_Resume *ir;
-    Output_Resume *ors;
-} Input_Output;
-
 typedef struct Grunt {
-    Char *ip;
+    Char *id;
     Sint memory;
 } Grunt;
 
 typedef struct Grunt_Survey {
-    Char **ips;
-    U16 *ports;
+    Grunt **grunt_ptrs;
     U64 milli_passed;
     U64 milli_last;
-    Sint id;
-    Sint dispatcher_socket;
+    Char *txn_id;
 } Grunt_Survey;
 
 typedef struct Command {
     enum Command_Kind {
         Command_NONE,
-        Command_REQUEST_EXTERNAL,
-        Command_REQUEST_INTERNAL,
-        Command_RESPONSE_INTERNAL,
-        Command_RESPONSE_EXTERNAL,
+        Command_REQUEST_DISPATCHER_2_ARBITER,
+        Command_RESPONSE_ARBITER_2_DISPATCHER,
+        Command_REQUEST_ARBITER_2_GRUNT,
+        Command_RESPONSE_GRUNT_2_ARBITER,
+        Command_HEARTBEAT_GRUNT_2_ARBITER,
     } kind;
 
-    Sint id;
+    Char *txn_id;
 
     union {
         struct {
             Sint memory;
-        } reqex;
+        } req_d2a;
         struct {
-            Sint memory;
-            Sint dispatcher_socket;
-        } reqin;
+            Char **grunt_ids;
+        } res_a2d;
         struct {
-            Char **ips;
-            Sint dispatcher_socket;
-        } resin;
+            Sint memory;
+        } req_a2g;
         struct {
-            Char **ips;
-            Sint dispatcher_socket;
-            U16 port;
-        } resex;
+            Char *id;
+        } res_g2a;
         struct {
+            Char *id;
             Sint memory;
-        } heartbeat;
+        } beat_g2a;
     };
 } Command;
@@ -111,337 +84,149 @@ typedef struct Command {
 # pragma clang diagnostic pop
 # endif
 
-#include "socket.c"
+#include "kafka.h"
 #include "time.c"
 
+global_variable volatile sig_atomic_t global_keep_running = 1;
+
+internal_function
+void signalHandlerSIGINT (int _)
+{
+    (void)_;
+    global_keep_running = 0;
+}
+
 Sint main (Sint argc, Char *argv[])
 {
     unused_variable(argc);
     unused_variable(argv);
 
-    Sint sock_fd_external = socketCreateListener("9526");
-    Sint sock_fd_internal = socketCreateListener("9527");
-
-    // NOTE(naman): Create an epoll instance, with no flags set.
-    int epoll_fd = epoll_create1(0);
-    if (epoll_fd < 0) {
-        perror("epoll_create1");
-        exit(-1);
-    }
-
-    // NOTE(naman): Add the socket's file descriptor to the epoll set
-    struct epoll_event accept_event_internal = {.data.fd = sock_fd_internal, .events = EPOLLIN};
-    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock_fd_internal, &accept_event_internal) < 0) {
-        perror("epoll_ctl EPOLL_CTL_ADD");
-        exit(-1);
-    }
-
-    struct epoll_event accept_event_external = {.data.fd = sock_fd_external, .events = EPOLLIN};
-    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock_fd_external, &accept_event_external) < 0) {
-        perror("epoll_ctl EPOLL_CTL_ADD");
-        exit(-1);
-    }
-
-    // NOTE(naman): Allocate memory for epoll array
-    struct epoll_event *events = calloc(MAX_EPOLL_EVENTS, sizeof(struct epoll_event));
-    if (events == NULL) {
-        fprintf(stderr, "Unable to allocate memory for epoll_events");
-        exit(-1);
-    }
+    signal(SIGINT, signalHandlerSIGINT);
 
     Command *commands = NULL;
-    Hash_Table commands_pending = htCreate(0);
     Grunt *grunts = NULL;
-    Hash_Table socket_map = htCreate(0);
-    Hash_Table io_map = htCreate(0);
     Hash_Table grunt_map = htCreate(0);
     Hash_Table grunt_survey_map = htCreate(0);
 
-    while (true) {
-        // NOTE(naman): Get the fd's that are ready
-        int nready = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 100);
-
-        for (int i = 0; i < nready; i++) {
-            if (events[i].events & EPOLLERR) {
-                perror("epoll_wait returned EPOLLERR");
-                exit(-1);
-            }
-
-            if ((events[i].data.fd == sock_fd_internal) ||
-                (events[i].data.fd == sock_fd_external)) {
-                Sint sock_fd = events[i].data.fd;
-                // NOTE(naman): A new grunt is connecting.
-                struct sockaddr_in peer_addr = {0};
-                socklen_t peer_addr_len = sizeof(peer_addr);
-
-                int accept_fd = accept(sock_fd, (struct sockaddr *)&peer_addr, &peer_addr_len);
-                if (accept_fd < 0) {
-                    if (errno == EAGAIN || errno == EWOULDBLOCK) {
-                        // This can happen due to the nonblocking socket mode; in this
-                        // case don't do anything, but print a notice (since these events
-                        // are extremely rare and interesting to observe...)
-                        fprintf(stderr, "accept() returned %s\n",
-                                errno == EAGAIN ? "EAGAIN" : "EWOULDBLOCK");
-                    } else {
-                        perror("accept");
-                        exit(-1);
-                    }
-                } else {
-                    printf("Log: Connection made: client_fd=%d\n", accept_fd);
-
-                    // NOTE(naman): Set the socket as non-blocking
-                    int sock_flags = fcntl(sock_fd, F_GETFL, 0);
-                    if (sock_flags == -1) {
-                        perror("fcntl F_GETFL");
-                        exit(-1);
-                    }
-                    if (fcntl(sock_fd, F_SETFL, sock_flags | O_NONBLOCK) == -1) {
-                        perror("fcntl F_SETFL O_NONBLOCK");
-                        exit(-1);
-                    }
-
-                    // NOTE(naman): Add the new file descriptor to the epoll set
-                    struct epoll_event event = {.data.fd = accept_fd, .events = EPOLLIN};
-                    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, accept_fd, &event) < 0) {
-                        perror("epoll_ctl EPOLL_CTL_ADD");
-                        exit(-1);
-                    }
-
-                    if (events[i].data.fd == sock_fd_internal) {
-                        htInsert(&socket_map, (U64)accept_fd, Socket_Kind_INTERNAL);
-
-                        struct in_addr ip_addr = peer_addr.sin_addr;
-                        char str[INET_ADDRSTRLEN];
-                        inet_ntop(AF_INET, &ip_addr, str, INET_ADDRSTRLEN);
-
-                        Grunt *grunt = calloc(1, sizeof(*grunt));
-                        grunt->ip = strdup(str);
-                        htInsert(&grunt_map, (U64)accept_fd, (U64)grunt);
-                    } else if (events[i].data.fd == sock_fd_external) {
-                        htInsert(&socket_map, (U64)accept_fd, Socket_Kind_EXTERNAL);
-                    }
-
-                    Input_Output *io = calloc(1, sizeof(*io));
-                    htInsert(&io_map, (U64)accept_fd, (Uptr)io);
-                }
-            } else {
-                // A peer socket is ready.
-                if (events[i].events & EPOLLIN) {
-                    // Ready for reading.
-                    int fd = events[i].data.fd;
-                    Socket_Kind socket_kind = (Socket_Kind)htLookup(&socket_map, (U64)fd);
-                    Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
-
-                    if (io->ir == NULL) {
-                        io->ir = calloc(1, sizeof(*io->ir));
-                        io->ir->buffer_cap = MiB(1);
-                        io->ir->buffer = calloc(io->ir->buffer_cap, sizeof(*(io->ir->buffer)));
-                        io->ir->fd = fd;
-                    }
-
-                    Input_Resume *ir = io->ir;
-
-                    if (ir->initialized == false) {
-                        long len = read(fd,
-                                        (Char *)ir->size_bytes + ir->size_bytes_count,
-                                        4 - ir->size_bytes_count);
-
-                        if (len == 0) {
-                            close(fd);
-                            sbufDelete(io->ors);
-                            free(io->ir);
-                            free(io);
-                            htRemove(&io_map, (U64)fd);
-                            htRemove(&socket_map, (U64)fd);
-                            if (socket_kind == Socket_Kind_INTERNAL) {
-                                htRemove(&grunt_map, (U64)fd);
-                            }
-                            continue;
-                        }
-
-                        ir->size_bytes_count += (Size)len;
-
-                        if (ir->size_bytes_count == 4) {
-                            ir->initialized = true;
-                            ir->buffer_expected_len = (U32)((ir->size_bytes[3] << 0U) |
-                                                            (ir->size_bytes[2] << 8U) |
-                                                            (ir->size_bytes[1] << 16U) |
-                                                            (ir->size_bytes[0] << 24U));
-                        }
-                    } else {
-                        long len = read(fd,
-                                        ir->buffer + ir->buffer_len,
-                                        ir->buffer_expected_len - ir->buffer_len);
-                        ir->buffer_len += (Size)len;
-
-                        if (ir->buffer_expected_len == ir->buffer_len) {
-                            // char *json_printed = cJSON_Print(cJSON_Parse(ir->buffer));
-                            const Char *json_error = NULL;
-                            cJSON *root = cJSON_ParseWithOpts(ir->buffer, &json_error, true);
-                            B32 prepare_for_output = true;
-
-                            if (root == NULL) {
-                                // TODO(naman): Error
-                            } else {
-                                if (socket_kind == Socket_Kind_EXTERNAL) {
-                                    printf("Recieved: REQUEST EXTERNAL:\n%s\n",
-                                           cJSON_Print(cJSON_Parse(ir->buffer)));
-                                    Command c = {.kind = Command_REQUEST_INTERNAL};
-                                    c.id = cJSON_GetObjectItem(root, "id")->valueint;
-                                    Sint memory = cJSON_GetObjectItem(root, "memory")->valueint;
-
-                                    Char **ips = NULL;
-                                    for (Size j = 0; j < grunt_map.slot_count; j++) {
-                                        if (grunt_map.keys[j] != 0) {
-                                            Grunt *g = (Grunt *)grunt_map.values[j];
-                                            if (g->memory >= memory) {
-                                                c.kind = Command_RESPONSE_EXTERNAL;
-                                                sbufAdd(ips, g->ip);
-                                            }
-                                        }
-                                    }
-
-                                    if (c.kind == Command_REQUEST_INTERNAL) {
-                                        c.reqin.dispatcher_socket = fd;
-                                        c.reqin.memory = memory;
-                                    } else if (c.kind == Command_RESPONSE_EXTERNAL) {
-                                        c.resex.ips = ips;
-                                        c.reqin.dispatcher_socket = fd;
-                                    }
-
-                                    sbufAdd(commands, c);
-                                    htInsert(&commands_pending, hashInteger((U64)c.id), true);
-                                } else if (socket_kind == Socket_Kind_INTERNAL) {
-                                    Char *type = cJSON_GetObjectItem(root, "type")->valuestring;
-
-                                    if (strcmp(type, "response") == 0) {
-                                        printf("Recieved: RESPONSE INTERNAL:\n%s\n",
-                                               cJSON_Print(cJSON_Parse(ir->buffer)));
-                                        B32 success = (B32)(cJSON_GetObjectItem(root, "success")->valueint);
-
-                                        if (success) {
-                                            Command c = {.kind = Command_RESPONSE_EXTERNAL};
-                                            c.id = cJSON_GetObjectItem(root, "id")->valueint;
-                                            c.resex.dispatcher_socket =
-                                                cJSON_GetObjectItem(root, "dispatcher_socket")->valueint;
-
-                                            Grunt_Survey *gs =
-                                                (Grunt_Survey *)htLookup(&grunt_survey_map,
-                                                                         hashInteger((U64)c.id));
-                                            if (gs != NULL) {
-                                                Grunt *g = (Grunt *)htLookup(&grunt_map, (U64)fd);
-                                                sbufAdd(gs->ips, g->ip);
-
-                                                U64 milli_new = timeMilli();
-                                                gs->milli_passed += milli_new - gs->milli_last;
-                                                gs->milli_last = milli_new;
-
-                                                if (gs->milli_passed >= 1000) {
-                                                    htRemove(&grunt_survey_map,
-                                                             hashInteger((U64)c.id));
-                                                    c.resex.ips = gs->ips;
-                                                    sbufAdd(commands, c);
-                                                }
-                                            }
-                                        }
-                                    } else if (strcmp(type, "heartbeat") == 0) {
-                                        printf("Recieved: HEARTBEAT:\n%s\n",
-                                               cJSON_Print(cJSON_Parse(ir->buffer)));
-                                        Grunt *grunt = (Grunt *)htLookup(&grunt_map, (U64)fd);
-                                        grunt->memory = cJSON_GetObjectItem(root, "memory")->valueint;
-                                        prepare_for_output = false;
-                                    }
-                                }
-                            }
-
-                            free(ir->buffer);
-                            free(ir);
-                            io->ir = NULL;
-
-                            if (prepare_for_output) {
-                                struct epoll_event event = {.data.fd = fd,
-                                                            .events = EPOLLIN | EPOLLOUT};
-                                if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
-                                    perror("epoll_ctl EPOLL_CTL_MOD");
-                                    exit(-1);
-                                }
-                            }
-                        }
-                    }
-                } else if (events[i].events & EPOLLOUT) {
-                    // Writing into fd in which we previously were not able to finish writing to
-                    int fd = events[i].data.fd;
-                    Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
-
-                    if (sbufElemin(io->ors) == 0) {
-                        // fprintf(stderr, "hmLookup returned NULL\n");
-                        // NOTE(naman): We haven't popped the Result yet, first go do that.
-                        continue;
-                    } else {
-                        Output_Resume *or = &io->ors[0];
-                        Char *output = or->buffer;
-                        Size output_len = or->buffer_len;
-                        Size output_pos = or->buffer_pos;
-
-                        ssize_t nsent = write(or->fd, output + output_pos, output_len - output_pos);
-
-                        if (nsent == -1) {
-                            if (errno == EAGAIN || errno == EWOULDBLOCK) {
-                                // Try next time
-                            } else {
-                                perror("write() failed\n");
-                                // TODO(naman): Error
-                                exit(-1);
-                            }
-                        } else if ((Size)nsent < (output_len - output_pos)) {
-                            or->buffer_pos += (Size)nsent;
-                        } else {
-                            sbufUnsortedRemove(io->ors, 0);
-                            struct epoll_event event = {.data.fd = fd, .events = EPOLLIN};
-                            if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, or->fd, &event) < 0) {
-                                perror("epoll_ctl EPOLL_CTL_MOD");
-                                exit(-1);
-                            }
-                            sbufDelete(output);
-                        }
-                    }
-                }
-            }
-        }
+    sbufAdd(grunts, (Grunt){0}); // SInce 0 index out of hash table will be invalid
+
+    Kafka kafka = {0};
+
+    kafkaCreateWriter(&kafka, "10.129.6.5:9092");
+    kafkaCreateReader(&kafka, "10.129.6.5:9092");
+
+    rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);
+
+    rd_kafka_topic_t *topic_req_d2a  = kafkaSubscribe(&kafka, kafka_reader_topics,
+                                                      "REQUEST_DISPATCHER_2_ARBITER");
+    rd_kafka_topic_t *topic_join_g2a = kafkaSubscribe(&kafka, kafka_reader_topics,
+                                                      "JOIN_GRUNT_2_ARBITER");
+    rd_kafka_topic_t *topic_res_g2a  = kafkaSubscribe(&kafka, kafka_reader_topics,
+                                                      "RESPONSE_GRUNT_2_ARBITER");
+    rd_kafka_topic_t *topic_beat_g2a = kafkaSubscribe(&kafka, kafka_reader_topics,
+                                                      "HEARTBEAT_GRUNT_2_ARBITER");
+
+    rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader,
+                                                                     kafka_reader_topics);
+    rd_kafka_topic_partition_list_destroy(kafka_reader_topics);
+
+    if (kafka_reader_topics_err) {
+        fprintf(stderr, "Subscribe failed: %s\n",
+                rd_kafka_err2str(kafka_reader_topics_err));
+        rd_kafka_destroy(kafka.reader);
+        return -1;
+    }
+
+    while (global_keep_running) {
+        rd_kafka_message_t *kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 100);
+
+        if (kafka_message_read != NULL) {
+            if (kafka_message_read->err) {
+                /* Consumer error: typically just informational. */
+                fprintf(stderr, "Consumer error: %s\n",
+                        rd_kafka_message_errstr(kafka_message_read));
+            } else {
+                /* Proper message */
+                /* fprintf(stderr, */
+                /*         "Received message on %s [%d] " */
+                /*         "at offset %"PRId64": \n%s\n", */
+                /*         rd_kafka_topic_name(kafka_message_read->rkt), */
+                /*         (int)kafka_message_read->partition, kafka_message_read->offset, */
+                /*         cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload))); */
+
+                const char *json_error = NULL;
+                cJSON *root = cJSON_ParseWithOpts(kafka_message_read->payload, &json_error, true);
+
+                if (kafka_message_read->rkt == topic_req_d2a) {
+                    Command c = {.kind = Command_REQUEST_ARBITER_2_GRUNT};
+                    c.txn_id = cJSON_GetObjectItem(root, "id")->valuestring;
+                    Sint memory = cJSON_GetObjectItem(root, "memory")->valueint;
+
+                    logMessage("Request D2A:\tid: %s = ([memory] = %d)", c.txn_id, memory);
+
+                    Char **grunt_ids = NULL;
+
+                    for (Size j = 0; j < sbufElemin(grunts); j++) {
+                        Grunt g = grunts[j];
+                        if (g.memory >= memory) {
+                            c.kind = Command_RESPONSE_ARBITER_2_DISPATCHER;
+                            sbufAdd(grunt_ids, g.id);
+                        }
+                    }
+
+                    if (c.kind == Command_REQUEST_ARBITER_2_GRUNT) {
+                        c.req_a2g.memory = memory;
+                        Grunt_Survey *gs = calloc(1, sizeof(*gs));
+                        htInsert(&grunt_survey_map, hashString(c.txn_id), (Uptr)gs);
+                        gs->milli_last = timeMilli();
+                        gs->txn_id = c.txn_id;
+                    } else if (c.kind == Command_RESPONSE_ARBITER_2_DISPATCHER) {
+                        c.res_a2d.grunt_ids = grunt_ids;
+                    }
+
+                    sbufAdd(commands, c);
+                } else if (kafka_message_read->rkt == topic_join_g2a) {
+                    Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
+                    Grunt grunt = {.id = id};
+
+                    logMessage("Join G2A:\tid: %s", id);
+
+                    if (htLookup(&grunt_map, hashString(id)) == 0) {
+                        sbufAdd(grunts, grunt);
+                        htInsert(&grunt_map, hashString(id), sbufElemin(grunts) - 1);
+                    }
+                } else if (kafka_message_read->rkt == topic_res_g2a) {
+                    Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
+                    B32 success = (B32)(cJSON_GetObjectItem(root, "success")->valueint);
+
+                    logMessage("Response G2A:\tid: %s = %s", id, success ? "succeded" : "failed");
+
+                    if (success) {
+                        Grunt_Survey *gs = (Grunt_Survey *)htLookup(&grunt_survey_map,
+                                                                    hashString(id));
+                        if (gs != NULL) { // If it has not been already removed
+                            Grunt *g = &grunts[htLookup(&grunt_map, hashString(id))];
+                            sbufAdd(gs->grunt_ptrs, g);
+                        }
+                    }
+                } else if (kafka_message_read->rkt == topic_beat_g2a) {
+                    Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
+
+                    logMessage("Beat G2A:\tid: %s", id);
+
+                    U64 index = htLookup(&grunt_map, hashString(id));
+                    if (index != 0) { // Prevent any left over message
+                        grunts[index].memory = cJSON_GetObjectItem(root, "memory")->valueint;
+                    }
+                }
+            }
+
+            rd_kafka_message_destroy(kafka_message_read);
+        }
 
         for (Size i = 0; i < grunt_survey_map.slot_count; i++) {
@@ -451,15 +236,15 @@ Sint main (Sint argc, Char *argv[])
             U64 milli_new = timeMilli();
             gs->milli_passed += milli_new - gs->milli_last;
             gs->milli_last = milli_new;
 
             if (gs->milli_passed >= 1000) {
                 htRemove(&grunt_survey_map,
-                         hashInteger((U64)gs->id));
+                         hashString(gs->txn_id));
 
-                Command c = {.kind = Command_RESPONSE_EXTERNAL};
-                c.id = gs->id;
-                c.resex.dispatcher_socket = gs->dispatcher_socket;
-                c.resex.ips = gs->ips;
+                Command c = {.kind = Command_RESPONSE_ARBITER_2_DISPATCHER};
+                c.txn_id = gs->txn_id;
+                for (Size k = 0; k < sbufElemin(gs->grunt_ptrs); k++) {
+                    sbufAdd(c.res_a2d.grunt_ids, gs->grunt_ptrs[k]->id);
+                }
 
                 sbufAdd(commands, c);
             }
@@ -469,185 +254,50 @@ Sint main (Sint argc, Char *argv[])
         for (Size j = 0; j < sbufElemin(commands); j++) {
             Command c = commands[j];
 
-            if (c.kind == Command_REQUEST_INTERNAL) {
-                for (Size i = 0; i < socket_map.slot_count; i++) {
-                    if ((socket_map.values[i] == Socket_Kind_INTERNAL) &&
-                        (socket_map.keys[i] != 0)) {
-                        Char *output = NULL;
-                        sbufPrint(output, "    ");
-                        sbufPrint(output, "{\n\"id\": %d", c.id);
-                        sbufPrint(output, ",\n\"dispatcher_socket\": %d", c.reqin.dispatcher_socket);
-                        sbufPrint(output, ",\n\"memory\": %d\n", c.reqin.memory);
-                        sbufPrint(output, "\n}\n");
-
-                        printf("Sending: REQUEST INTERNAL:\n%s\n",
-                               cJSON_Print(cJSON_Parse(output + 4)));
-
-                        Size output_len = strlen(output);
-#if defined(ENDIAN_LITTLE)
-                        U32 json_len = (U32)output_len - 4;
-                        U32 json_len_be = swap_endian(json_len);
-                        output[0] = ((Char*)&json_len_be)[0];
-                        output[1] = ((Char*)&json_len_be)[1];
-                        output[2] = ((Char*)&json_len_be)[2];
-                        output[3] = ((Char*)&json_len_be)[3];
-#endif
-
-                        Grunt_Survey *gs = (Grunt_Survey *)htLookup(&grunt_survey_map,
-                                                                    hashInteger((U64)c.id));
-                        if (gs == NULL) {
-                            gs = calloc(1, sizeof(*gs));
-                            htInsert(&grunt_survey_map, hashInteger((U64)c.id), (U64)gs);
-                        }
-                        gs->milli_last = timeMilli();
-                        gs->id = c.id;
-                        gs->dispatcher_socket = c.reqin.dispatcher_socket;
-
-                        Sint fd = (Sint)socket_map.keys[i];
-                        ssize_t nsent = write(fd, output, output_len);
-
-                        if (nsent == -1) {
-                            if (errno == EAGAIN || errno == EWOULDBLOCK) {
-                                Output_Resume or = {0};
-                                or.fd = fd;
-                                or.buffer = output;
-                                or.buffer_pos = 0;
-                                or.buffer_len = output_len;
-
-                                Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
-                                sbufAdd(io->ors, or);
-
-                                struct epoll_event event = {.data.fd = fd,
-                                                            .events = EPOLLIN | EPOLLOUT};
-                                if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
-                                    perror("epoll_ctl EPOLL_CTL_MOD");
-                                    exit(-1);
-                                }
-                            } else {
-                                perror("write() failed");
-                                exit(-1);
-                            }
-                        } else if ((Size)nsent < output_len) {
-                            Output_Resume or = {0};
-                            or.fd = fd;
-                            or.buffer = output;
-                            or.buffer_pos = (Size)nsent;
-                            or.buffer_len = output_len;
-
-                            Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
-                            sbufAdd(io->ors, or);
-
-                            struct epoll_event event = {.data.fd = fd,
-                                                        .events = EPOLLIN | EPOLLOUT};
-                            if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
-                                perror("epoll_ctl EPOLL_CTL_MOD");
-                                exit(-1);
-                            }
-                        } else {
-                            struct epoll_event event = {.data.fd = fd, .events = EPOLLIN};
-                            if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
-                                perror("epoll_ctl EPOLL_CTL_MOD");
-                                exit(-1);
-                            }
-                            sbufDelete(output);
-                        }
-                    }
-                }
-            } else if (c.kind == Command_RESPONSE_EXTERNAL) {
-                if (htLookup(&commands_pending, hashInteger((U64)c.id))) {
-                    htRemove(&commands_pending, hashInteger((U64)c.id));
-
-                    Sint fd = c.resex.dispatcher_socket;
-
-                    Char *output = NULL;
-                    sbufPrint(output, "    ");
-                    sbufPrint(output, "{\n\"id\": %d", c.id);
-                    sbufPrint(output, ",\n\"ip\": [");
-                    for (Size k = 0; k < sbufElemin(c.resex.ips); k++) {
-                        sbufPrint(output, "\"%s\"", c.resex.ips[k]);
-                        if (k < sbufElemin(c.resex.ips) - 1) {
-                            sbufPrint(output, ",");
-                        }
-                    }
-                    sbufPrint(output, "]");
-                    sbufPrint(output, "\n}");
-
-                    Size output_len = strlen(output);
-
-                    printf("Sending: RESPONSE EXTERNAL:\n%s\n",
-                           cJSON_Print(cJSON_Parse(output + 4)));
-
-#if defined(ENDIAN_LITTLE)
-                    U32 json_len = (U32)output_len - 4;
-                    U32 json_len_be = swap_endian(json_len);
-                    output[0] = ((Char*)&json_len_be)[0];
-                    output[1] = ((Char*)&json_len_be)[1];
-                    output[2] = ((Char*)&json_len_be)[2];
-                    output[3] = ((Char*)&json_len_be)[3];
-#endif
-
-                    ssize_t nsent = write(fd, output, output_len);
-
-                    if (nsent == -1) {
-                        if (errno == EAGAIN || errno == EWOULDBLOCK) {
-                            Output_Resume or = {0};
-                            or.fd = fd;
-                            or.buffer = output;
-                            or.buffer_pos = 0;
-                            or.buffer_len = output_len;
-
-                            Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
-                            sbufAdd(io->ors, or);
-
-                            struct epoll_event event = {.data.fd = fd,
-                                                        .events = EPOLLIN | EPOLLOUT};
-                            if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
-                                perror("epoll_ctl EPOLL_CTL_MOD");
-                                exit(-1);
-                            }
-                        } else {
-                            perror("write() failed");
-                            exit(-1);
-                        }
-                    } else if ((Size)nsent < output_len) {
-                        Output_Resume or = {0};
-                        or.fd = fd;
-                        or.buffer = output;
-                        or.buffer_pos = (Size)nsent;
-                        or.buffer_len = output_len;
-
-                        Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
-                        sbufAdd(io->ors, or);
-
-                        struct epoll_event event = {.data.fd = fd,
-                                                    .events = EPOLLIN | EPOLLOUT};
-                        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
-                            perror("epoll_ctl EPOLL_CTL_MOD");
-                            exit(-1);
-                        }
-                    } else {
-                        struct epoll_event event = {.data.fd = fd, .events = EPOLLIN};
-                        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
-                            perror("epoll_ctl EPOLL_CTL_MOD");
-                            exit(-1);
-                        }
-                        sbufDelete(output);
-                    }
-                }
-            }
-        }
-
-        sbufDelete(commands);
-    }
-}
+            Char *output = NULL;
+            Char *topic = NULL;
+
+            if (c.kind == Command_REQUEST_ARBITER_2_GRUNT) {
+                topic = "REQUEST_ARBITER_2_GRUNT";
+                sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
+                sbufPrint(output, ",\n\"memory\": %d\n", c.req_a2g.memory);
+                sbufPrint(output, "\n}\n");
+            } else if (c.kind == Command_RESPONSE_ARBITER_2_DISPATCHER) {
+                topic = "RESPONSE_ARBITER_2_DISPATCHER";
+                sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
+                sbufPrint(output, ",\n\"grunts\": [");
+                for (Size k = 0; k < sbufElemin(c.res_a2d.grunt_ids); k++) {
+                    sbufPrint(output, "\"%s\"", c.res_a2d.grunt_ids[k]);
+                    if (k < sbufElemin(c.res_a2d.grunt_ids) - 1) {
+                        sbufPrint(output, ",");
+                    }
+                }
+                sbufPrint(output, "]");
+                sbufPrint(output, "\n}");
+            }
+
+            if (output != NULL) {
+                printf("Sending to %s\n%s\n", topic, output);
+                if (!kafkaWrite(kafka.writer, topic, "rm_arbiter", output)) {
+                    return -1;
+                }
+            }
+
+            sbufDelete(output);
+        }
+
+        sbufDelete(commands);
+    }
+
+    for (Size i = 0; i < sbufElemin(kafka.topics); i++) {
+        rd_kafka_topic_destroy(kafka.topics[i]);
+    }
+    rd_kafka_consumer_close(kafka.reader);
+    rd_kafka_destroy(kafka.reader);
+    for (Size i = 0; i < sbufElemin(kafka.queues); i++) {
+        rd_kafka_queue_destroy(kafka.queues[i]);
+    }
+    rd_kafka_destroy(kafka.writer);
+
+    return 0;
+}
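The rewritten main loop replaces the epoll readiness dispatch with a single rd_kafka_consumer_poll() and routes each message by comparing kafka_message_read->rkt against the topic handles returned from kafkaSubscribe(). An equivalent routing shape keyed by topic name instead of by handle, shown here as a hypothetical standalone sketch rather than anything in the commit:

#include <string.h>
#include <librdkafka/rdkafka.h>

static void routeMessage (rd_kafka_message_t *msg)
{
    /* rd_kafka_topic_name() recovers the topic string from the message,
     * so routing does not depend on which rd_kafka_topic_t handle the
     * subscription happened to create. */
    const char *topic = rd_kafka_topic_name(msg->rkt);

    if (strcmp(topic, "REQUEST_DISPATCHER_2_ARBITER") == 0) {
        /* dispatcher asked for grunts with enough memory */
    } else if (strcmp(topic, "JOIN_GRUNT_2_ARBITER") == 0) {
        /* a new grunt announced itself */
    } else if (strcmp(topic, "RESPONSE_GRUNT_2_ARBITER") == 0) {
        /* a grunt accepted or declined a request */
    } else if (strcmp(topic, "HEARTBEAT_GRUNT_2_ARBITER") == 0) {
        /* refresh the grunt's advertised memory */
    }
}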
resource_manager/src/arbiter/socket.c · deleted (100644 → 0)

/*
 * Creator: Naman Dixit
 * Notice: © Copyright 2020 Naman Dixit
 */

typedef enum Socket_Kind {
    Socket_Kind_NONE,
    Socket_Kind_INTERNAL,
    Socket_Kind_EXTERNAL,
} Socket_Kind;

internal_function
Sint socketCreateListener (Char *port)
{
    printf("Openiing socket on port %s\n", port);

    // NOTE(naman): Create a socket for IPv4 and TCP.
    Sint sock_fd = socket(AF_INET, SOCK_STREAM, 0);
    if (sock_fd < 0) {
        perror("ERROR opening socket");
        exit(-1);
    }

    // NOTE(naman): This helps avoid spurious EADDRINUSE when the previous instance of this
    // server died.
    int opt = 1;
    if (setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
        perror("setsockopt");
        exit(-1);
    }

    // NOTE(naman): Get actual internet address to bind to using IPv4 and TCP,
    // and listening passively
    struct addrinfo hints = {.ai_family = AF_INET,
                             .ai_socktype = SOCK_STREAM,
                             .ai_flags = AI_PASSIVE};
    struct addrinfo *addrinfo = NULL;
    Sint s = getaddrinfo(NULL, port, &hints, &addrinfo);
    if (s != 0) {
        fprintf(stderr, "Error: getaddrinfo: %s\n", gai_strerror(s));
        exit(-1);
    }

    // NOTE(naman): Assign an address to the socket
    if (bind(sock_fd, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0) {
        perror("bind()");
        exit(-1);
    }

    // NOTE(naman): Start listening for incoming connections
    if (listen(sock_fd, MAX_SOCKET_CONNECTIONS_REQUEST) != 0) {
        perror("listen()");
        exit(-1);
    }

    // NOTE(naman): Set the socket as non-blocking
    int flags = fcntl(sock_fd, F_GETFL, 0);
    if (flags == -1) {
        perror("fcntl F_GETFL");
        exit(-1);
    }
    if (fcntl(sock_fd, F_SETFL, flags | O_NONBLOCK) == -1) {
        perror("fcntl F_SETFL O_NONBLOCK");
        exit(-1);
    }

    printf("Log: Waiting for connection on port %s...\n", port);

    return sock_fd;
}
resource_manager/src/common/kafka.h · new file (0 → 100644)

/*
 * Creator: Naman Dixit
 * Notice: © Copyright 2020 Naman Dixit
 */

typedef struct Kafka {
    rd_kafka_t *writer;
    rd_kafka_t *reader;
    rd_kafka_queue_t **queues;
    rd_kafka_topic_t **topics;
} Kafka;

header_function
int kafkaCreateTopic (Kafka *kafka, Char *topic,
                      Sint num_partitions, Sint replication_factor)
{
    char errstr[256];

    rd_kafka_NewTopic_t *new_topic = rd_kafka_NewTopic_new(topic,
                                                           num_partitions,
                                                           replication_factor,
                                                           errstr, sizeof(errstr));
    if (!new_topic) {
        fprintf(stderr, "Failed to create NewTopic object: %s\n", errstr);
        return -1;
    }

    /* Use a temporary queue for the asynchronous Admin result */
    rd_kafka_queue_t *queue = rd_kafka_queue_new(kafka->writer);
    sbufAdd(kafka->queues, queue);

    /* Asynchronously create topic, result will be available on queue */
    rd_kafka_CreateTopics(kafka->writer, &new_topic, 1, NULL, queue);

    rd_kafka_NewTopic_destroy(new_topic);

    /* Wait for result event */
    rd_kafka_event_t *event = rd_kafka_queue_poll(queue, 15 * 1000);
    if (!event) {
        /* There will eventually be a result, after operation
         * and request timeouts, but in this example we'll only
         * wait 15s to avoid stalling too long when cluster
         * is not available. */
        fprintf(stderr, "No create topics result in 15s\n");
        return -1;
    }

    if (rd_kafka_event_error(event)) {
        /* Request-level failure */
        fprintf(stderr, "Create topics request failed: %s\n",
                rd_kafka_event_error_string(event));
        rd_kafka_event_destroy(event);
        return -1;
    }

    /* Extract the result type from the event. */
    const rd_kafka_CreateTopics_result_t *result = rd_kafka_event_CreateTopics_result(event);
    assert(result); /* Since we're using a dedicated queue we know this is
                     * a CreateTopics result type. */

    /* Extract the per-topic results from the result type. */
    size_t result_topics_count;
    const rd_kafka_topic_result_t **result_topics =
        rd_kafka_CreateTopics_result_topics(result, &result_topics_count);
    assert(result_topics && result_topics_count == 1);

    int return_value = 0;

    if (rd_kafka_topic_result_error(result_topics[0]) ==
        RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS) {
        fprintf(stderr, "Topic %s already exists\n",
                rd_kafka_topic_result_name(result_topics[0]));
    } else if (rd_kafka_topic_result_error(result_topics[0])) {
        fprintf(stderr, "Failed to create topic %s: %s\n",
                rd_kafka_topic_result_name(result_topics[0]),
                rd_kafka_topic_result_error_string(result_topics[0]));
        return_value = -1;
    } else {
        fprintf(stderr, "Topic %s successfully created\n",
                rd_kafka_topic_result_name(result_topics[0]));
    }

    rd_kafka_event_destroy(event);

    return return_value;
}

header_function
rd_kafka_t* kafkaCreateWriter (Kafka *kafka, Char *address)
{
    char errstr[512] = {0};

    printf("Creating writer conf\n");
    rd_kafka_conf_t *kafka_writer_conf = rd_kafka_conf_new();
    rd_kafka_conf_set_dr_msg_cb(kafka_writer_conf, NULL);

    printf("Creating writer\n");
    kafka->writer = rd_kafka_new(RD_KAFKA_PRODUCER, kafka_writer_conf,
                                 errstr, sizeof(errstr));
    if (!kafka->writer) {
        fprintf(stderr, "Failed to create producer: %s\n", errstr);
        rd_kafka_conf_destroy(kafka_writer_conf);
        return NULL;
    }

    printf("Ading brokers to writer\n");
    rd_kafka_brokers_add(kafka->writer, address);

#define CREATE_TOPIC(s)                                 \
    do {                                                \
        if (kafkaCreateTopic(kafka, s, 1, 1) == -1) {   \
            rd_kafka_destroy(kafka->writer);            \
            return NULL;                                \
        }                                               \
    } while (0)

    CREATE_TOPIC("REQUEST_DISPATCHER_2_ARBITER"); //
    CREATE_TOPIC("RESPONSE_ARBITER_2_DISPATCHER");
    CREATE_TOPIC("REQUEST_ARBITER_2_GRUNT");
    CREATE_TOPIC("RESPONSE_GRUNT_2_ARBITER"); //
    CREATE_TOPIC("JOIN_GRUNT_2_ARBITER"); //
    CREATE_TOPIC("HEARTBEAT_GRUNT_2_ARBITER"); //

#undef CREATE_TOPIC

    return kafka->writer;
}

header_function
rd_kafka_t* kafkaCreateReader (Kafka *kafka, Char *address)
{
    char errstr[512] = {0};

    rd_kafka_conf_t *kafka_reader_conf = rd_kafka_conf_new();
    rd_kafka_conf_set(kafka_reader_conf, "group.id", "cloud-example-c", NULL, 0);

    /* If there is no committed offset for this group, start reading
     * partitions from the beginning. */
    rd_kafka_conf_set(kafka_reader_conf, "auto.offset.reset", "earliest", NULL, 0);

    /* Disable ERR__PARTITION_EOF when reaching end of partition. */
    rd_kafka_conf_set(kafka_reader_conf, "enable.partition.eof", "false", NULL, 0);

    kafka->reader = rd_kafka_new(RD_KAFKA_CONSUMER, kafka_reader_conf,
                                 errstr, sizeof(errstr));
    if (!kafka->reader) {
        fprintf(stderr, "Failed to create consumer: %s\n", errstr);
        rd_kafka_conf_destroy(kafka_reader_conf);
        return NULL;
    }

    rd_kafka_brokers_add(kafka->reader, address);
    rd_kafka_poll_set_consumer(kafka->reader);

    return kafka->reader;
}

header_function
rd_kafka_topic_t* kafkaSubscribe (Kafka *kafka,
                                  rd_kafka_topic_partition_list_t *topics, Char *topic)
{
    rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);
    rd_kafka_topic_t *topic_result = rd_kafka_topic_new(kafka->reader, topic, NULL);
    sbufAdd(kafka->topics, topic_result);
    printf("Subscribe to %s\n", topic);
    return topic_result;
}

header_function
B32 kafkaWrite (rd_kafka_t *kafka_writer, Char *topic, Char *user, Char *msg)
{
    int delivery_counter = 0;

    rd_kafka_resp_err_t err = rd_kafka_producev(
        kafka_writer,
        RD_KAFKA_V_TOPIC(topic),
        RD_KAFKA_V_KEY(user, strlen(user)),
        RD_KAFKA_V_VALUE(msg, strlen(msg)),
        /* producev() will make a copy of the message
         * value (the key is always copied), so we
         * can reuse the same json buffer on the
         * next iteration. */
        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
        RD_KAFKA_V_OPAQUE(&delivery_counter),
        RD_KAFKA_V_END);

    if (err) {
        fprintf(stderr, "Produce failed: %s\n", rd_kafka_err2str(err));
        return false;
    }

    return true;
}
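Taken together, kafka.h wraps librdkafka behind four calls: kafkaCreateWriter (producer plus admin-API topic creation), kafkaCreateReader (consumer in group "cloud-example-c"), kafkaSubscribe (accumulate topics into a partition list), and kafkaWrite (keyed produce). A minimal sketch of the intended call order, as hypothetical glue rather than code from the commit, assuming the project's nlib.h is included first and the broker at 10.129.6.5:9092 that is hard-coded elsewhere in this commit:

#include <stdio.h>
#include <librdkafka/rdkafka.h>
#include "nlib/nlib.h"   /* Char, B32, sbuf* (project assumption) */
#include "kafka.h"

int main (void)
{
    Kafka kafka = {0};

    /* Producer first: kafkaCreateWriter also creates the six topics. */
    if (kafkaCreateWriter(&kafka, "10.129.6.5:9092") == NULL) { return -1; }
    if (kafkaCreateReader(&kafka, "10.129.6.5:9092") == NULL) { return -1; }

    /* Accumulate subscriptions, then hand the list to the consumer once. */
    rd_kafka_topic_partition_list_t *topics = rd_kafka_topic_partition_list_new(1);
    kafkaSubscribe(&kafka, topics, "HEARTBEAT_GRUNT_2_ARBITER");
    if (rd_kafka_subscribe(kafka.reader, topics)) { return -1; }
    rd_kafka_topic_partition_list_destroy(topics);

    /* Keyed produce; the payload is copied, so the buffer may be reused. */
    kafkaWrite(kafka.writer, "HEARTBEAT_GRUNT_2_ARBITER", "demo", "{\"memory\": 1024}");

    /* Block up to 5s for our own message to come back. */
    rd_kafka_message_t *msg = rd_kafka_consumer_poll(kafka.reader, 5000);
    if (msg != NULL) {
        if (!msg->err) { printf("%.*s\n", (int)msg->len, (char *)msg->payload); }
        rd_kafka_message_destroy(msg);
    }

    rd_kafka_consumer_close(kafka.reader);
    rd_kafka_destroy(kafka.reader);
    rd_kafka_destroy(kafka.writer);
    return 0;
}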
resource_manager/src/grunt/grunt.c

@@ -18,8 +18,9 @@
 #include <netinet/in.h>
 #include <sys/epoll.h>
 #include <sys/time.h>
+#include <assert.h>
+#include <signal.h>
+#include <librdkafka/rdkafka.h>
 
-#define MAX_EPOLL_EVENTS 1024
 
 # if defined(COMPILER_CLANG)
 # pragma clang diagnostic push
@@ -32,121 +33,87 @@
 # pragma clang diagnostic pop
 # endif
 
-typedef struct Command {
-    Sint id;
-    Sint dispatcher_socket;
+typedef struct Resources {
     Sint memory;
+} Resources;
+
+typedef struct Command {
+    Char *txn_id;
+    Resources res;
 } Command;
 
+#include "kafka.h"
 #include "time.c"
-#include"socket.c"
+
+global_variable volatile sig_atomic_t global_keep_running = 1;
+
+internal_function
+void signalHandlerSIGINT (int _)
+{
+    (void)_;
+    global_keep_running = 0;
+}
 
 int main(int argc, char** argv)
 {
     unused_variable(argc);
     unused_variable(argv);
 
-    Char *port = "9527";
-    Sint sock_fd = socket(AF_INET, SOCK_STREAM, 0);
-
-    struct addrinfo hints = {.ai_family = AF_INET,
-                             .ai_socktype = SOCK_STREAM};
-    struct addrinfo *result = NULL;
-    Sint s = getaddrinfo(NULL, port, &hints, &result);
-    if (s != 0) {
-        fprintf(stderr, "Error: getaddrinfo: %s\n", gai_strerror(s));
-        exit(-1);
-    }
-
-    // NOTE(naman): Create an epoll instance, with no flags set.
-    int epoll_fd = epoll_create1(0);
-    if (epoll_fd < 0) {
-        perror("epoll_create1");
-        exit(-1);
-    }
-
-    // NOTE(naman): Add the socket's file descriptor to the epoll set
-    struct epoll_event accept_event = {.data.fd = sock_fd,
-                                       .events = EPOLLIN};
-    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock_fd, &accept_event) < 0) {
-        perror("epoll_ctl EPOLL_CTL_ADD");
-        exit(-1);
-    }
-
-    // NOTE(naman): Allocate memory for epoll array
-    struct epoll_event *events = calloc(MAX_EPOLL_EVENTS, sizeof(struct epoll_event));
-    if (events == NULL) {
-        fprintf(stderr, "Unable to allocate memory for epoll_events");
-        exit(-1);
-    }
-
-    while (connect(sock_fd, result->ai_addr, result->ai_addrlen) == -1) {
-        fprintf(stderr, "Error: Couldn't connect on port %s, trying again in one second...\n",
-                port);
-        sleep(1);
-    }
-
-    printf("Log: Starting communication with server on port %s...\n", port);
+    signal(SIGINT, signalHandlerSIGINT);
+
+    Kafka kafka = {0};
+
+    kafka.writer = kafkaCreateWriter(&kafka, "10.129.6.5:9092");
+    kafka.reader = kafkaCreateReader(&kafka, "10.129.6.5:9092");
+
+    rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);
+    rd_kafka_topic_t *topic_req_a2g = kafkaSubscribe(&kafka, kafka_reader_topics,
+                                                     "REQUEST_ARBITER_2_GRUNT");
+    unused_variable(topic_req_a2g);
+
+    rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader,
+                                                                     kafka_reader_topics);
+    rd_kafka_topic_partition_list_destroy(kafka_reader_topics);
+
+    if (kafka_reader_topics_err) {
+        fprintf(stderr, "Subscribe failed: %s\n",
+                rd_kafka_err2str(kafka_reader_topics_err));
+        rd_kafka_destroy(kafka.reader);
+        return -1;
+    }
+
+    Char *join_msg = NULL;
+    sbufPrint(join_msg, "{\"id\": \"my-machine\"");
+    sbufPrint(join_msg, "\n}\n");
+    if (!kafkaWrite(kafka.writer, "JOIN_GRUNT_2_ARBITER", "rm_grunt", join_msg)) {
+        return -1;
+    }
 
     U64 time_begin = timeMilli();
     U64 time_accum = 0;
 
-    while (true) {
-        // NOTE(naman): Get the fd's that are ready
-        int nready = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 1000);
-
-        for (int i = 0; i < nready; i++) {
-            if (events[i].events & EPOLLERR) {
-                perror("epoll_wait returned EPOLLERR");
-                exit(-1);
-            }
-
-            if (events[i].events & EPOLLIN) {
-                B32 initialized = false;
-                Size buffer_len = 0;
-                Size buffer_cap = MiB(1);
-                U32 buffer_expected_len = 0;
-                Char *buffer = calloc(buffer_cap, sizeof(*buffer));
-                Char size_bytes[4] = {0};
-                Size size_bytes_count = 0;
-                Command *c = NULL;
-
-                while (true) {
-                    if (initialized == false) {
-                        long len = read(sock_fd,
-                                        (Char*)size_bytes + size_bytes_count,
-                                        4 - size_bytes_count);
-                        if (len == 0) {
-                            perror("read() returned zero");
-                            exit(-1);
-                        }
-
-                        size_bytes_count += (Size)len;
-
-                        if (size_bytes_count == 4) {
-                            initialized = true;
-                            buffer_expected_len = (U32)((size_bytes[3] << 0U) |
-                                                        (size_bytes[2] << 8U) |
-                                                        (size_bytes[1] << 16U) |
-                                                        (size_bytes[0] << 24U));
-                        }
-                        continue;
-                    }
-
-                    long len = read(sock_fd,
-                                    buffer + buffer_len,
-                                    buffer_expected_len - buffer_len);
-                    buffer_len += (Size)len;
-
-                    if (buffer_expected_len == buffer_len) {
-                        printf("%.*s", (int)buffer_len, buffer);
-                        const Char *json_error = NULL;
-                        cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
+    while (global_keep_running) {
+        rd_kafka_message_t *kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 100);
+
+        B32 command_found = false;
+        Command c = {0};
+
+        if (kafka_message_read != NULL) {
+            if (kafka_message_read->err) {
+                /* Consumer error: typically just informational. */
+                fprintf(stderr, "Consumer error: %s\n",
+                        rd_kafka_message_errstr(kafka_message_read));
+            } else {
+                fprintf(stderr,
+                        "Received message on %s [%d] "
+                        "at offset %"PRId64": \n%s\n",
+                        rd_kafka_topic_name(kafka_message_read->rkt),
+                        (int)kafka_message_read->partition, kafka_message_read->offset,
+                        cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload)));
+
+                char *buffer = (char *)kafka_message_read->payload;
+                const Char *json_error = NULL;
+                cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
@@ -154,18 +121,12 @@ int main(int argc, char** argv)
                 if (root == NULL) {
                     // TODO(naman): Error
                 } else {
-                    c = calloc(1, sizeof(*c));
-                    c->id = cJSON_GetObjectItem(root, "id")->valueint;
-                    c->dispatcher_socket = cJSON_GetObjectItem(root, "dispatcher_socket")->valueint;
-                    c->memory = cJSON_GetObjectItem(root, "memory")->valueint;
+                    command_found = true;
+                    c.txn_id = cJSON_GetObjectItem(root, "id")->valuestring;
+                    c.res.memory = cJSON_GetObjectItem(root, "memory")->valueint;
                 }
 
-                free(buffer);
-                break;
+                rd_kafka_message_destroy(kafka_message_read);
             }
         }
 
         int memory = 0;
@@ -173,42 +134,31 @@ int main(int argc, char** argv)
        FILE *meminfo = fopen("/proc/meminfo", "r");
         Char line[256] = {0};
         while (fgets(line, sizeof(line), meminfo)) {
-            if (sscanf(line, "MemTotal: %d kB", &memory) == 1) {
+            if (sscanf(line, "MemAvailable: %d kB", &memory) == 1) {
                 fclose(meminfo);
                 break;
             }
         }
 
+        if (command_found) {
             Char *output = NULL;
-            sbufPrint(output, "    ");
-            sbufPrint(output, "{\n\"id\": %d", c->id);
-            sbufPrint(output, ",\n\"dispatcher_socket\": %d", c->dispatcher_socket);
-            sbufPrint(output, ",\n\"type\": \"response\"");
+            sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
 
-            if (memory >= c->memory) {
+            if (memory >= c.res.memory) {
                 sbufPrint(output, ",\n\"success\": %d\n", 1);
+                // TODO(naman): Add port
+                // sbufPrint(output, ",\n\"port\": %d\n", port);
            } else {
                 sbufPrint(output, ",\n\"success\": %d\n", 0);
             }
 
             sbufPrint(output, "\n}\n");
 
-            Size output_len = strlen(output);
-#if defined(ENDIAN_LITTLE)
-            U32 json_len = (U32)output_len - 4;
-            U32 json_len_be = swap_endian(json_len);
-            output[0] = ((Char*)&json_len_be)[0];
-            output[1] = ((Char*)&json_len_be)[1];
-            output[2] = ((Char*)&json_len_be)[2];
-            output[3] = ((Char*)&json_len_be)[3];
-#endif
-            socketWrite(output, output_len, sock_fd);
-        }
-
-        { // Send a heartbeat message if it is time to do so
+            if (!kafkaWrite(kafka.writer, "RESPONSE_GRUNT_2_ARBITER", "rm_grunt", output)) {
+                return -1;
+            }
+        } else {
+            // Send a heartbeat message if it is time to do so
             U64 time_new = timeMilli();
             U64 time_passed = time_new - time_begin;
             time_begin = time_new;
@@ -217,38 +167,29 @@ int main(int argc, char** argv)
             if (time_accum >= 1000) {
                 time_accum = 0;
 
+                int memory = 0;
+                FILE *meminfo = fopen("/proc/meminfo", "r");
+                Char line[256] = {0};
+                while (fgets(line, sizeof(line), meminfo)) {
+                    if (sscanf(line, "MemTotal: %d kB", &memory) == 1) {
+                        fclose(meminfo);
+                        break;
+                    }
+                }
+
                 Char *output = NULL;
-                sbufPrint(output, "    ");
-                sbufPrint(output, "{");
-                sbufPrint(output, "\n\"type\": \"heartbeat\"");
+                sbufPrint(output, "{\"id\": \"my-machine\"");
                 sbufPrint(output, ",\n\"memory\": %d", memory);
                 sbufPrint(output, "\n}\n");
 
-                Size output_len = strlen(output);
-#if defined(ENDIAN_LITTLE)
-                U32 json_len = (U32)output_len - 4;
-                U32 json_len_be = swap_endian(json_len);
-                output[0] = ((Char*)&json_len_be)[0];
-                output[1] = ((Char*)&json_len_be)[1];
-                output[2] = ((Char*)&json_len_be)[2];
-                output[3] = ((Char*)&json_len_be)[3];
-#endif
-                socketWrite(output, output_len, sock_fd);
+                if (!kafkaWrite(kafka.writer, "HEARTBEAT_GRUNT_2_ARBITER", "rm_grunt", output)) {
+                    return -1;
+                }
             }
         }
     }
+
+    for (Size i = 0; i < sbufElemin(kafka.topics); i++) {
+        rd_kafka_topic_destroy(kafka.topics[i]);
+    }
+    rd_kafka_consumer_close(kafka.reader);
+    rd_kafka_destroy(kafka.reader);
+    for (Size i = 0; i < sbufElemin(kafka.queues); i++) {
+        rd_kafka_queue_destroy(kafka.queues[i]);
+    }
+    rd_kafka_destroy(kafka.writer);
+
+    return 0;
 }
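The grunt assembles its JSON payloads by string concatenation with sbufPrint. Since cJSON is already linked for parsing, the same heartbeat could be built structurally instead; this is a hypothetical alternative, not what the commit does:

#include <stdio.h>
#include "cJSON.h"

/* Build {"id": "...", "memory": N} without hand-escaping quotes;
 * the caller frees the returned string. */
static char * heartbeatJSON (const char *id, int memory_kb)
{
    cJSON *root = cJSON_CreateObject();
    cJSON_AddStringToObject(root, "id", id);
    cJSON_AddNumberToObject(root, "memory", memory_kb);

    char *msg = cJSON_PrintUnformatted(root);
    cJSON_Delete(root);
    return msg;
}

Trading a few allocations for correctness under arbitrary id strings seems like a reasonable swap once ids stop being the fixed "my-machine" placeholder.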
resource_manager/src/grunt/socket.c · deleted (100644 → 0)

/*
 * Creator: Naman Dixit
 * Notice: © Copyright 2020 Naman Dixit
 */

internal_function
void socketWrite (Char *output, Size output_len, int sock_fd)
{
    ssize_t nsent = 0;
    Size output_cursor = 0;

    while (true) {
        nsent = write(sock_fd, output + output_cursor, output_len - output_cursor);

        if (nsent == -1) {
            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                continue;
            } else {
                exit(-1);
            }
        } else if ((Size)nsent < output_len) {
            output_cursor += (Size)nsent;
        } else {
            break;
        }
    }
}
resource_manager/src/test/test.c

@@ -18,6 +18,10 @@
 #include <netdb.h>
 #include <unistd.h>
 #include <ctype.h>
+#include <assert.h>
+#include <signal.h>
+#include <librdkafka/rdkafka.h>
 
 # if defined(COMPILER_CLANG)
 # pragma clang diagnostic push
@@ -30,6 +34,7 @@
 # pragma clang diagnostic pop
 # endif
 
+#include "kafka.h"
 
 int main(int argc, char** argv)
 {
@@ -40,93 +45,70 @@ int main(int argc, char** argv)
     Sint memory_required = (Sint)strtol(argv[1], NULL, 10);
 
-    Char *port = "9526";
-    Sint sock_fd = socket(AF_INET, SOCK_STREAM, 0);
-
-    struct addrinfo hints = {.ai_family = AF_INET,
-                             .ai_socktype = SOCK_STREAM};
-    struct addrinfo *result = NULL;
-    Sint s = getaddrinfo(NULL, port, &hints, &result);
-    if (s != 0) {
-        fprintf(stderr, "Error: getaddrinfo: %s\n", gai_strerror(s));
-        exit(-1);
-    }
-
-    while (connect(sock_fd, result->ai_addr, result->ai_addrlen) == -1) {
-        fprintf(stderr, "Error: Couldn't connect on port %s, trying again in one second...\n",
-                port);
-        sleep(1);
-    }
-
-    printf("Log: Starting communication with server on port %s...\n", port);
+    Kafka kafka = {0};
+
+    kafka.writer = kafkaCreateWriter(&kafka, "10.129.6.5:9092");
+    kafka.reader = kafkaCreateReader(&kafka, "10.129.6.5:9092");
+
+    rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);
+    kafkaSubscribe(&kafka, kafka_reader_topics, "RESPONSE_ARBITER_2_DISPATCHER");
+
+    rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader,
+                                                                     kafka_reader_topics);
+    rd_kafka_topic_partition_list_destroy(kafka_reader_topics);
+    if (kafka_reader_topics_err) {
+        fprintf(stderr, "Subscribe failed: %s\n",
+                rd_kafka_err2str(kafka_reader_topics_err));
+        rd_kafka_destroy(kafka.reader);
+        return -1;
+    }
 
     Char *output = NULL;
-    sbufPrint(output, "    ");
-    sbufPrint(output, "{\n\"id\": %d", (Sint)time(NULL));
+    Sint id = (Sint)time(NULL);
+    sbufPrint(output, "{\n\"id\": \"%d\"", id);
     sbufPrint(output, ",\n\"memory\": %d", memory_required);
     sbufPrint(output, "\n}\n");
 
-    Size output_len = strlen(output);
-
-    printf("Sending to Arbiter:\n%s\n", cJSON_Print(cJSON_Parse(output + 4)));
-
-#if defined(ENDIAN_LITTLE)
-    U32 json_len = (U32)output_len - 4;
-    U32 json_len_be = swap_endian(json_len);
-    output[0] = ((Char*)&json_len_be)[0];
-    output[1] = ((Char*)&json_len_be)[1];
-    output[2] = ((Char*)&json_len_be)[2];
-    output[3] = ((Char*)&json_len_be)[3];
-#endif
-
-    write(sock_fd, output, output_len);
-
-    {
-        cJSON *array = NULL;
-
-        B32 initialized = false;
-        Size buffer_len = 0;
-        Size buffer_cap = MiB(1);
-        U32 buffer_expected_len = 0;
-        Char *buffer = calloc(buffer_cap, sizeof(*buffer));
-        Char size_bytes[4] = {0};
-        Size size_bytes_count = 0;
-
-        while (true) {
-            if (initialized == false) {
-                long len = read(sock_fd,
-                                (Char*)size_bytes + size_bytes_count,
-                                4 - size_bytes_count);
-                if (len == 0) {
-                    perror("read() returned zero");
-                    exit(-1);
-                }
-
-                size_bytes_count += (Size)len;
-
-                if (size_bytes_count == 4) {
-                    initialized = true;
-                    buffer_expected_len = (U32)((size_bytes[3] << 0U) |
-                                                (size_bytes[2] << 8U) |
-                                                (size_bytes[1] << 16U) |
-                                                (size_bytes[0] << 24U));
-                }
-                continue;
-            }
-
-            long len = read(sock_fd,
-                            buffer + buffer_len,
-                            buffer_expected_len - buffer_len);
-            buffer_len += (Size)len;
-
-            if (buffer_expected_len == buffer_len) {
-                printf("Recieved: Final Response:\n%s\n", cJSON_Print(cJSON_Parse(buffer)));
-                const Char *json_error = NULL;
-                cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
+    printf("Sending to Arbiter:\n%s\n", cJSON_Print(cJSON_Parse(output)));
+
+    if (output != NULL) {
+        if (!kafkaWrite(kafka.writer, "REQUEST_DISPATCHER_2_ARBITER", "rm_test", output)) {
+            return -1;
+        }
+    }
+
+    rd_kafka_message_t *kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 0);
+
+    while (true) {
+        if (kafka_message_read != NULL) {
+            const Char *json_error = NULL;
+            cJSON *root = cJSON_ParseWithOpts((char *)kafka_message_read->payload,
+                                              &json_error, true);
+            Sint id_now = atoi(cJSON_GetObjectItem(root, "id")->valuestring);
+            if (id_now == id) {
+                break;
+            } else {
+                printf("Found a cranky old message: %d\n", id_now);
+                rd_kafka_message_destroy(kafka_message_read);
+            }
+        }
+        kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 0);
+    }
+
+    if (kafka_message_read != NULL) {
+        if (kafka_message_read->err) {
+            /* Consumer error: typically just informational. */
+            fprintf(stderr, "Consumer error: %s\n",
+                    rd_kafka_message_errstr(kafka_message_read));
+        } else {
+            fprintf(stderr,
+                    "Received message on %s [%d] "
+                    "at offset %"PRId64":\n%s\n",
+                    rd_kafka_topic_name(kafka_message_read->rkt),
+                    (int)kafka_message_read->partition, kafka_message_read->offset,
+                    cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload)));
+
+            char *buffer = (char *)kafka_message_read->payload;
+            const Char *json_error = NULL;
+            cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
@@ -134,14 +116,14 @@ int main(int argc, char** argv)
             if (root == NULL) {
                 // TODO(naman): Error
             } else {
-                array = cJSON_GetObjectItem(root, "ip");
-            }
-
-            cJSON *elem = NULL;
-            cJSON_ArrayForEach(elem, array) {
-                printf("%s\n", elem->valuestring);
-            }
-
-            free(buffer);
-            break;
+                cJSON *array = cJSON_GetObjectItem(root, "id");
+
+                cJSON *elem = NULL;
+                cJSON_ArrayForEach(elem, array) {
+                    printf("%s\n", elem->valuestring);
+                }
+            }
+        }
+        rd_kafka_message_destroy(kafka_message_read);
     }
 
     return 0;
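The test client now correlates its request with the arbiter's reply by parsing the "id" field and discarding anything older ("cranky old message"), but the poll with a 0 ms timeout busy-spins. A blocking variant of the same stale-message filter, offered as a hypothetical helper rather than code from the commit:

#include <stdlib.h>
#include <librdkafka/rdkafka.h>
#include "cJSON.h"

/* Poll until a message whose "id" matches ours arrives, discarding
 * leftovers from earlier runs of the shared consumer group.
 * The caller destroys the returned message. */
static rd_kafka_message_t * pollForTxn (rd_kafka_t *reader, int txn_id)
{
    while (1) {
        rd_kafka_message_t *msg = rd_kafka_consumer_poll(reader, 100);
        if (msg == NULL) { continue; }
        if (msg->err) { rd_kafka_message_destroy(msg); continue; }

        int id = -1;
        cJSON *root = cJSON_Parse((char *)msg->payload);
        if (root != NULL) {
            cJSON *idf = cJSON_GetObjectItem(root, "id");
            if (idf != NULL && idf->valuestring != NULL) { id = atoi(idf->valuestring); }
            cJSON_Delete(root);
        }

        if (id == txn_id) { return msg; }
        rd_kafka_message_destroy(msg);  /* a cranky old message */
    }
}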
resource_manager/version.linux

@@ -1 +1 @@
-368
+623