Commit 10c76714 authored by Naman Dixit's avatar Naman Dixit

Added Kafka support

parent 64418d8e
......@@ -38,8 +38,7 @@ fi
# For Address Sanitizer: -fsanitize=address -fno-omit-frame-pointer
# Memory Sanitizer : -fsanitize=memory -fno-optimize-sibling-calls -fno-omit-frame-pointer -fsanitize-memory-track-origins
ArbiterCompilerFlags="-iquote /code/include -iquote ${ProjectRoot}/src \
-iquote ${ProjectRoot}/src/common \
ArbiterCompilerFlags="-iquote ${ProjectRoot}/src/common \
-g3 -O0 -fno-strict-aliasing -fwrapv -msse2 \
"
ArbiterLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
......@@ -47,10 +46,10 @@ ArbiterLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
-D_POSIX_C_SOURCE=200809L -D_DEFAULT_SOURCE"
ArbiterWarningFlags="-Weverything -Wpedantic -pedantic-errors -Werror \
-Wno-c++98-compat -Wno-gnu-statement-expression \
-Wno-bad-function-cast -Wno-unused-function \
-Wno-padded "
-Wno-bad-function-cast -Wno-used-but-marked-unused \
-Wno-padded -Wno-gnu-zero-variadic-macro-arguments "
ArbiterLinkerFlags="-o ${ArbiterTargetPath} \
-static-libgcc -lm -pthread \
-static-libgcc -lm -pthread -lrdkafka \
-Wl,-rpath=\${ORIGIN} -Wl,-z,origin -Wl,--enable-new-dtags"
${Compiler} ${ArbiterCompilerFlags} ${ArbiterLanguageFlags} ${ArbiterWarningFlags} \
......@@ -70,8 +69,7 @@ fi
# For Address Sanitizer: -fsanitize=address -fno-omit-frame-pointer
# Memory Sanitizer : -fsanitize=memory -fno-optimize-sibling-calls -fno-omit-frame-pointer -fsanitize-memory-track-origins
GruntCompilerFlags="-iquote /code/include -iquote ${ProjectRoot}/src \
-iquote ${ProjectRoot}/src/common \
GruntCompilerFlags="-iquote ${ProjectRoot}/src/common \
-g3 -O0 -fno-strict-aliasing -fwrapv -msse2 \
"
GruntLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
......@@ -79,10 +77,10 @@ GruntLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
-D_POSIX_C_SOURCE=200809L -D_DEFAULT_SOURCE"
GruntWarningFlags="-Weverything -Wpedantic -pedantic-errors -Werror \
-Wno-c++98-compat -Wno-gnu-statement-expression \
-Wno-bad-function-cast -Wno-unused-function \
-Wno-bad-function-cast -Wno-used-but-marked-unused \
-Wno-padded "
GruntLinkerFlags="-o ${GruntTargetPath} \
-static-libgcc -lm -pthread \
-static-libgcc -lm -pthread -lrdkafka \
-Wl,-rpath=\${ORIGIN} -Wl,-z,origin -Wl,--enable-new-dtags"
${Compiler} ${GruntCompilerFlags} ${GruntLanguageFlags} ${GruntWarningFlags} \
......@@ -111,10 +109,10 @@ TestLanguageFlags="--std=c11 -DBUILD_INTERNAL -DBUILD_SLOW -DBUILD_DEBUG \
-D_POSIX_C_SOURCE=200809L -D_DEFAULT_SOURCE"
TestWarningFlags="-Weverything -Wpedantic -pedantic-errors -Werror \
-Wno-c++98-compat -Wno-gnu-statement-expression \
-Wno-bad-function-cast -Wno-unused-function \
-Wno-bad-function-cast -Wno-used-but-marked-unused \
-Wno-padded "
TestLinkerFlags="-o ${TestTargetPath} \
-static-libgcc -lm -pthread \
-static-libgcc -lm -pthread -lrdkafka \
-Wl,-rpath=\${ORIGIN} -Wl,-z,origin -Wl,--enable-new-dtags"
${Compiler} ${TestCompilerFlags} ${TestLanguageFlags} ${TestWarningFlags} \
......
......@@ -3,6 +3,8 @@
* Notice: © Copyright 2020 Naman Dixit
*/
#define logMessage(s, ...) printf(s "\n", ##__VA_ARGS__)
#include "nlib/nlib.h"
#include <stdio.h>
......@@ -15,88 +17,59 @@
#include <fcntl.h>
#include <sys/time.h>
#include <arpa/inet.h>
#define MAX_SOCKET_CONNECTIONS_REQUEST 64
#define MAX_EPOLL_EVENTS 1024
// Resumable read state for one length-prefixed message arriving on a
// non-blocking socket, so a short read can continue on the next epoll wakeup.
typedef struct Input_Resume {
Char *buffer;            // payload bytes received so far
Size buffer_len;         // number of payload bytes already read into buffer
Size buffer_cap;         // allocated capacity of buffer
U32 buffer_expected_len; // total payload length decoded from the 4-byte prefix
Byte size_bytes[4];      // big-endian length prefix, accumulated byte by byte
Size size_bytes_count;   // prefix bytes read so far (0..4)
int fd;                  // socket this in-progress read belongs to
B32 initialized;         // set once all 4 prefix bytes are in and buffer_expected_len is valid
} Input_Resume;
// Resumable write state: a serialized message partially flushed to a
// non-blocking socket, continued when epoll reports the fd writable again.
typedef struct Output_Resume {
Char *buffer;     // full serialized message to send
Size buffer_pos;  // bytes already written
Size buffer_len;  // total bytes to write
U64 msg_id_hash;  // NOTE(review): presumably hash of the message id — not used in visible code; confirm
Sint fd;          // destination socket
Byte _pad[4];     // explicit struct padding
} Output_Resume;
// Per-connection pairing of the in-progress read with queued pending writes.
typedef struct Input_Output {
Input_Resume *ir;   // current partial read (NULL when idle)
Output_Resume *ors; // stretchy-buffer queue of writes awaiting EPOLLOUT
} Input_Output;
#include <assert.h>
#include <signal.h>
#include <librdkafka/rdkafka.h>
// A worker node ("grunt") known to the arbiter.
typedef struct Grunt {
Char *ip;     // peer IP string (strdup'd when the connection is accepted)
Char *id;     // identifier the grunt reported in its JOIN message
Sint memory;  // last reported available memory (updated by heartbeats)
} Grunt;
// In-flight survey of grunts for one dispatcher request: responses are
// collected until ~1000 ms have elapsed, then flushed as a single response.
typedef struct Grunt_Survey {
Char **ips;             // stretchy buffer of candidate grunt IPs (socket path)
Grunt **grunt_ptrs;     // stretchy buffer of grunts that answered successfully
U16 *ports;             // NOTE(review): never written in visible code — confirm whether still needed
U64 milli_passed;       // milliseconds accumulated since the survey started
U64 milli_last;         // timestamp (ms) of the last accumulation step
Sint id;                // numeric request id (legacy socket path)
Sint dispatcher_socket; // requesting dispatcher's fd (legacy socket path)
Char *txn_id;           // transaction id of the originating request (Kafka path)
} Grunt_Survey;
typedef struct Command {
enum Command_Kind {
Command_NONE,
Command_REQUEST_EXTERNAL,
Command_REQUEST_INTERNAL,
Command_RESPONSE_INTERNAL,
Command_RESPONSE_EXTERNAL,
Command_HEARTBEAT,
Command_REQUEST_DISPATCHER_2_ARBITER,
Command_RESPONSE_ARBITER_2_DISPATCHER,
Command_REQUEST_ARBITER_2_GRUNT,
Command_RESPONSE_GRUNT_2_ARBITER,
Command_HEARTBEAT_GRUNT_2_ARBITER,
} kind;
Sint id;
Char *txn_id;
union {
struct {
Sint memory;
} reqex;
} req_d2a;
struct {
Sint memory;
Sint dispatcher_socket;
} reqin;
Char **grunt_ids;
} res_a2d;
struct {
Char **ips;
Sint dispatcher_socket;
} resin;
Sint memory;
} req_a2g;
struct {
Char **ips;
Sint dispatcher_socket;
} resex;
Char *id;
U16 port;
} res_g2a;
struct {
Char *id;
Sint memory;
} heartbeat;
} beat_g2a;
};
} Command;
......@@ -111,337 +84,149 @@ typedef struct Command {
# pragma clang diagnostic pop
# endif
#include "socket.c"
#include "kafka.h"
#include "time.c"
global_variable volatile sig_atomic_t global_keep_running = 1;
// SIGINT handler: request a clean shutdown by clearing the flag that the
// main loop polls each iteration. Writing a volatile sig_atomic_t is the
// only async-signal-safe action taken here.
internal_function
void signalHandlerSIGINT (int signal_number)
{
    (void)signal_number; // only SIGINT is registered, so the value is irrelevant
    global_keep_running = 0;
}
Sint main (Sint argc, Char *argv[])
{
unused_variable(argc);
unused_variable(argv);
Sint sock_fd_external = socketCreateListener("9526");
Sint sock_fd_internal = socketCreateListener("9527");
signal(SIGINT, signalHandlerSIGINT);
// NOTE(naman): Create an epoll instance, with no flags set.
int epoll_fd = epoll_create1(0);
if (epoll_fd < 0) {
perror("epoll_create1");
exit(-1);
}
// NOTE(naman): Add the socket's file descriptor to the epoll set
struct epoll_event accept_event_internal = {.data.fd = sock_fd_internal,
.events = EPOLLIN};
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock_fd_internal, &accept_event_internal) < 0) {
perror("epoll_ctl EPOLL_CTL_ADD");
exit(-1);
}
struct epoll_event accept_event_external = {.data.fd = sock_fd_external,
.events = EPOLLIN};
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock_fd_external, &accept_event_external) < 0) {
perror("epoll_ctl EPOLL_CTL_ADD");
exit(-1);
}
// NOTE(naman): Allocate memory for epoll array
struct epoll_event* events = calloc(MAX_EPOLL_EVENTS, sizeof(struct epoll_event));
if (events == NULL) {
fprintf(stderr, "Unable to allocate memory for epoll_events");
exit(-1);
}
Command *commands = NULL;
Hash_Table commands_pending = htCreate(0);
Hash_Table socket_map = htCreate(0);
Hash_Table io_map = htCreate(0);
Command *commands = NULL;
Grunt *grunts = NULL;
Hash_Table grunt_map = htCreate(0);
Hash_Table grunt_survey_map = htCreate(0);
while (true) {
// NOTE(naman): Get the fd's that are ready
int nready = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 100);
sbufAdd(grunts, (Grunt){0}); // SInce 0 index out of hash table will be invalid
for (int i = 0; i < nready; i++) {
if (events[i].events & EPOLLERR) {
perror("epoll_wait returned EPOLLERR");
exit(-1);
}
Kafka kafka = {0};
if ((events[i].data.fd == sock_fd_internal) ||
(events[i].data.fd == sock_fd_external)) {
Sint sock_fd = events[i].data.fd;
// NOTE(naman): A new grunt is connecting.
struct sockaddr_in peer_addr = {0};
socklen_t peer_addr_len = sizeof(peer_addr);
int accept_fd = accept(sock_fd, (struct sockaddr*)&peer_addr,
&peer_addr_len);
if (accept_fd < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
// This can happen due to the nonblocking socket mode; in this
// case don't do anything, but print a notice (since these events
// are extremely rare and interesting to observe...)
fprintf(stderr, "accept() returned %s\n",
errno == EAGAIN ? "EAGAIN" : "EWOULDBLOCK");
} else {
perror("accept");
exit(-1);
}
} else {
printf("Log: Connection made: client_fd=%d\n", accept_fd);
// NOTE(naman): Set the socket as non-blocking
int sock_flags = fcntl(sock_fd, F_GETFL, 0);
if (sock_flags == -1) {
perror("fcntl F_GETFL");
exit(-1);
}
kafkaCreateWriter(&kafka, "10.129.6.5:9092");
kafkaCreateReader(&kafka, "10.129.6.5:9092");
if (fcntl(sock_fd, F_SETFL, sock_flags | O_NONBLOCK) == -1) {
perror("fcntl F_SETFL O_NONBLOCK");
exit(-1);
}
rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);
// NOTE(naman): Add the new file descriptor to the epoll set
struct epoll_event event = {.data.fd = accept_fd,
.events = EPOLLIN};
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, accept_fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_ADD");
exit(-1);
}
rd_kafka_topic_t *topic_req_d2a = kafkaSubscribe(&kafka, kafka_reader_topics,
"REQUEST_DISPATCHER_2_ARBITER");
rd_kafka_topic_t *topic_join_g2a = kafkaSubscribe(&kafka, kafka_reader_topics,
"JOIN_GRUNT_2_ARBITER");
rd_kafka_topic_t *topic_res_g2a = kafkaSubscribe(&kafka, kafka_reader_topics,
"RESPONSE_GRUNT_2_ARBITER");
rd_kafka_topic_t *topic_beat_g2a = kafkaSubscribe(&kafka, kafka_reader_topics,
"HEARTBEAT_GRUNT_2_ARBITER");
if (events[i].data.fd == sock_fd_internal) {
htInsert(&socket_map, (U64)accept_fd, Socket_Kind_INTERNAL);
rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader,
kafka_reader_topics);
rd_kafka_topic_partition_list_destroy(kafka_reader_topics);
struct in_addr ip_addr = peer_addr.sin_addr;
char str[INET_ADDRSTRLEN];
inet_ntop(AF_INET, &ip_addr, str, INET_ADDRSTRLEN);
if (kafka_reader_topics_err) {
fprintf(stderr, "Subscribe failed: %s\n",
rd_kafka_err2str(kafka_reader_topics_err));
rd_kafka_destroy(kafka.reader);
return -1;
}
Grunt *grunt = calloc(1, sizeof(*grunt));
grunt->ip = strdup(str);
htInsert(&grunt_map, (U64)accept_fd, (U64)grunt);
} else if (events[i].data.fd == sock_fd_external) {
htInsert(&socket_map, (U64)accept_fd, Socket_Kind_EXTERNAL);
}
while (global_keep_running) {
// NOTE(naman): Get the fd's that are ready
rd_kafka_message_t *kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 100);
Input_Output *io = calloc(1, sizeof(*io));
htInsert(&io_map, (U64)accept_fd, (Uptr)io);
}
if (kafka_message_read != NULL) {
if (kafka_message_read->err) {
/* Consumer error: typically just informational. */
fprintf(stderr, "Consumer error: %s\n",
rd_kafka_message_errstr(kafka_message_read));
} else {
// A peer socket is ready.
if (events[i].events & EPOLLIN) {
// Ready for reading.
int fd = events[i].data.fd;
/* Proper message */
/* fprintf(stderr, */
/* "Received message on %s [%d] " */
/* "at offset %"PRId64": \n%s\n", */
/* rd_kafka_topic_name(kafka_message_read->rkt), */
/* (int)kafka_message_read->partition, kafka_message_read->offset, */
/* cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload))); */
Socket_Kind socket_kind = (Socket_Kind)htLookup(&socket_map, (U64)fd);
const char *json_error = NULL;
cJSON *root = cJSON_ParseWithOpts(kafka_message_read->payload, &json_error, true);
Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
if (kafka_message_read->rkt == topic_req_d2a) {
Command c = {.kind = Command_REQUEST_ARBITER_2_GRUNT};
c.txn_id = cJSON_GetObjectItem(root, "id")->valuestring;
Sint memory = cJSON_GetObjectItem(root, "memory")->valueint;
if (io->ir == NULL) {
io->ir = calloc(1, sizeof(*io->ir));
io->ir->buffer_cap = MiB(1);
io->ir->buffer = calloc(io->ir->buffer_cap,
sizeof(*(io->ir->buffer)));
io->ir->fd = fd;
}
logMessage("Request D2A:\tid: %s = ([memory] = %d)", c.txn_id, memory);
Input_Resume *ir = io->ir;
Char **grunt_ids = NULL;
if (ir->initialized == false) {
long len = read(fd,
(Char *)ir->size_bytes + ir->size_bytes_count,
4 - ir->size_bytes_count);
for (Size j = 0; j < sbufElemin(grunts); j++) {
Grunt g = grunts[j];
if (g.memory >= memory) {
c.kind = Command_RESPONSE_ARBITER_2_DISPATCHER;
sbufAdd(grunt_ids, g.id);
}
}
if (len == 0) {
close(fd);
sbufDelete(io->ors);
free(io->ir);
free(io);
if (c.kind == Command_REQUEST_ARBITER_2_GRUNT) {
c.req_a2g.memory = memory;
Grunt_Survey *gs = calloc(1, sizeof(*gs));
htInsert(&grunt_survey_map, hashString(c.txn_id), (Uptr)gs);
htRemove(&io_map, (U64)fd);
htRemove(&socket_map, (U64)fd);
gs->milli_last = timeMilli();
gs->txn_id = c.txn_id;
} else if (c.kind == Command_RESPONSE_ARBITER_2_DISPATCHER) {
c.res_a2d.grunt_ids = grunt_ids;
sbufAdd(commands, c);
}
} else if (kafka_message_read->rkt == topic_join_g2a) {
Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
Grunt grunt = {.id = id};
if (socket_kind == Socket_Kind_INTERNAL) {
htRemove(&grunt_map, (U64)fd);
}
logMessage("Join G2A:\tid: %s", id);
continue;
}
if (htLookup(&grunt_map, hashString(id)) == 0) {
sbufAdd(grunts, grunt);
htInsert(&grunt_map, hashString(id), sbufElemin(grunts) - 1);
}
} else if (kafka_message_read->rkt == topic_res_g2a) {
Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
B32 success = (B32)(cJSON_GetObjectItem(root, "success")->valueint);
ir->size_bytes_count += (Size)len;
logMessage("Response G2A:\tid: %s = %s", id, success ? "succeded" : "failed");
if (ir->size_bytes_count == 4) {
ir->initialized = true;
if (success) {
Grunt_Survey *gs = (Grunt_Survey *)htLookup(&grunt_survey_map,
hashString(id));
ir->buffer_expected_len = (U32)((ir->size_bytes[3] << 0U) |
(ir->size_bytes[2] << 8U) |
(ir->size_bytes[1] << 16U) |
(ir->size_bytes[0] << 24U));
if (gs != NULL) { // If it has not been already removed
Grunt *g = &grunts[htLookup(&grunt_map, hashString(id))];
sbufAdd(gs->grunt_ptrs, g);
}
}
} else if (kafka_message_read->rkt == topic_beat_g2a) {
Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
continue;
} else {
long len = read(fd,
ir->buffer + ir->buffer_len,
ir->buffer_expected_len - ir->buffer_len);
logMessage("Beat G2A:\tid: %s", id);
ir->buffer_len += (Size)len;
if (ir->buffer_expected_len == ir->buffer_len) {
// char *json_printed = cJSON_Print(cJSON_Parse(ir->buffer));
const Char *json_error = NULL;
cJSON *root = cJSON_ParseWithOpts(ir->buffer, &json_error, true);
B32 prepare_for_output = true;
if (root == NULL) {
// TODO(naman): Error
} else {
if (socket_kind == Socket_Kind_EXTERNAL) {
printf("Recieved: REQUEST EXTERNAL:\n%s\n",
cJSON_Print(cJSON_Parse(ir->buffer)));
Command c = {.kind = Command_REQUEST_INTERNAL};
c.id = cJSON_GetObjectItem(root, "id")->valueint;
Sint memory = cJSON_GetObjectItem(root, "memory")->valueint;
Char **ips = NULL;
for (Size j = 0; j < grunt_map.slot_count; j++) {
if (grunt_map.keys[j] != 0) {
Grunt *g = (Grunt *)grunt_map.values;
if (g->memory >= memory) {
c.kind = Command_RESPONSE_EXTERNAL;
sbufAdd(ips, g->ip);
}
}
}
if (c.kind == Command_REQUEST_INTERNAL) {
c.reqin.dispatcher_socket = fd;
c.reqin.memory = memory;
} else if (c.kind == Command_RESPONSE_EXTERNAL) {
c.resex.ips = ips;
c.reqin.dispatcher_socket = fd;
}
sbufAdd(commands, c);
htInsert(&commands_pending, hashInteger((U64)c.id), true);
} else if (socket_kind == Socket_Kind_INTERNAL) {
Char *type = cJSON_GetObjectItem(root, "type")->valuestring;
if (strcmp(type, "response") == 0) {
printf("Recieved: RESPONSE INTERNAL:\n%s\n",
cJSON_Print(cJSON_Parse(ir->buffer)));
B32 success = (B32)(cJSON_GetObjectItem(root,
"success")->valueint);
if (success) {
Command c = {.kind = Command_RESPONSE_EXTERNAL};
c.id = cJSON_GetObjectItem(root, "id")->valueint;
c.resex.dispatcher_socket = cJSON_GetObjectItem(root,
"dispatcher_socket")->valueint;
Grunt_Survey *gs =
(Grunt_Survey *)htLookup(&grunt_survey_map,
hashInteger((U64)c.id));
if (gs != NULL) {
Grunt *g = (Grunt *)htLookup(&grunt_map, (U64)fd);
sbufAdd(gs->ips, g->ip);
U64 milli_new = timeMilli();
gs->milli_passed += milli_new - gs->milli_last;
gs->milli_last = milli_new;
if (gs->milli_passed >= 1000) {
htRemove(&grunt_survey_map,
hashInteger((U64)c.id));
c.resex.ips = gs->ips;
sbufAdd(commands, c);
}
}
}
} else if (strcmp(type, "heartbeat") == 0) {
printf("Recieved: HEARTBEAT:\n%s\n",
cJSON_Print(cJSON_Parse(ir->buffer)));
Grunt *grunt = (Grunt *)htLookup(&grunt_map, (U64)fd);
grunt->memory = cJSON_GetObjectItem(root, "memory")->valueint;
prepare_for_output = false;
}
}
}
free(ir->buffer);
free(ir);
io->ir = NULL;
if (prepare_for_output) {
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN | EPOLLOUT};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
}
}
}
} else if (events[i].events & EPOLLOUT) {
// Writing into fd in which we previously were not able to finish writing to
int fd = events[i].data.fd;
Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
if (sbufElemin(io->ors) == 0) {
// fprintf(stderr, "hmLookup returned NULL\n");
// NOTE(naman): We haven't popped the Result yet, first go do that.
continue;
} else {
Output_Resume *or = &io->ors[0];
Char *output = or->buffer;
Size output_len = or->buffer_len;
Size output_pos = or->buffer_pos;
ssize_t nsent = write(or->fd, output + output_pos, output_len - output_pos);
if (nsent == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
// Try next time
} else {
perror("write() failed\n");
exit(-1);
}
} else if ((Size)nsent < (output_len - output_pos)) {
or->buffer_pos += (Size)nsent;
} else {
sbufUnsortedRemove(io->ors, 0);
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD,
or->fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
sbufDelete(output);
}
U64 index = htLookup(&grunt_map, hashString(id));
if (index != 0) { // Prevent any left over message
grunts[index].memory = cJSON_GetObjectItem(root, "memory")->valueint;
}
} else {
// TODO(naman): Error
}
}
rd_kafka_message_destroy(kafka_message_read);
}
for (Size i = 0; i < grunt_survey_map.slot_count; i++) {
......@@ -451,15 +236,15 @@ Sint main (Sint argc, Char *argv[])
U64 milli_new = timeMilli();
gs->milli_passed += milli_new - gs->milli_last;
gs->milli_last = milli_new;
if (gs->milli_passed >= 1000) {
htRemove(&grunt_survey_map,
hashInteger((U64)gs->id));
htRemove(&grunt_survey_map, hashString(gs->txn_id));
Command c = {.kind = Command_RESPONSE_EXTERNAL};
c.id = gs->id;
c.resex.dispatcher_socket = gs->dispatcher_socket;
c.resex.ips = gs->ips;
Command c = {.kind = Command_RESPONSE_ARBITER_2_DISPATCHER};
c.txn_id = gs->txn_id;
for (Size k = 0; k < sbufElemin(gs->grunt_ptrs); k++) {
sbufAdd(c.res_a2d.grunt_ids, gs->grunt_ptrs[k]->id);
}
sbufAdd(commands, c);
}
......@@ -469,185 +254,50 @@ Sint main (Sint argc, Char *argv[])
for (Size j = 0; j < sbufElemin(commands); j++) {
Command c = commands[j];
if (c.kind == Command_REQUEST_INTERNAL) {
for (Size i = 0; i < socket_map.slot_count; i++) {
if ((socket_map.values[i] == Socket_Kind_INTERNAL) &&
(socket_map.keys[i] != 0)) {
Char *output = NULL;
sbufPrint(output, " ");
sbufPrint(output, "{\n\"id\": %d", c.id);
sbufPrint(output, ",\n\"dispatcher_socket\": %d", c.reqin.dispatcher_socket);
sbufPrint(output, ",\n\"memory\": %d\n", c.reqin.memory);
Char *output = NULL;
Char *topic = NULL;
sbufPrint(output, "\n}\n");
if (c.kind == Command_REQUEST_ARBITER_2_GRUNT) {
topic = "REQUEST_ARBITER_2_GRUNT";
printf("Sending: REQUEST INTERNAL:\n%s\n",
cJSON_Print(cJSON_Parse(output + 4)));
sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
sbufPrint(output, ",\n\"memory\": %d\n", c.req_a2g.memory);
sbufPrint(output, "\n}\n");
} else if (c.kind == Command_RESPONSE_ARBITER_2_DISPATCHER) {
topic = "RESPONSE_ARBITER_2_DISPATCHER";
Size output_len = strlen(output);
#if defined(ENDIAN_LITTLE)
U32 json_len = (U32)output_len - 4;
U32 json_len_be = swap_endian(json_len);
output[0] = ((Char*)&json_len_be)[0];
output[1] = ((Char*)&json_len_be)[1];
output[2] = ((Char*)&json_len_be)[2];
output[3] = ((Char*)&json_len_be)[3];
#endif
Grunt_Survey *gs =(Grunt_Survey *) htLookup(&grunt_survey_map,
hashInteger((U64)c.id));
if (gs == NULL) {
gs = calloc(1, sizeof(*gs));
htInsert(&grunt_survey_map, hashInteger((U64)c.id), (U64)gs);
}
gs->milli_last = timeMilli();
gs->id = c.id;
gs->dispatcher_socket = c.reqin.dispatcher_socket;
Sint fd = (Sint)socket_map.keys[i];
ssize_t nsent = write(fd, output, output_len);
if (nsent == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
Output_Resume or = {0};
or.fd = fd;
or.buffer = output;
or.buffer_pos = 0;
or.buffer_len = output_len;
Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
sbufAdd(io->ors, or);
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN | EPOLLOUT};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
} else {
perror("write() failed");
exit(-1);
}
} else if ((Size)nsent < output_len) {
Output_Resume or = {0};
or.fd = fd;
or.buffer = output;
or.buffer_pos = (Size)nsent;
or.buffer_len = output_len;
Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
sbufAdd(io->ors, or);
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN | EPOLLOUT};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
} else {
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD,
fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
sbufDelete(output);
}
sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
sbufPrint(output, ",\n\"grunts\": [");
for (Size k = 0; k < sbufElemin(c.res_a2d.grunt_ids); k++) {
sbufPrint(output, "\"%s\"", c.res_a2d.grunt_ids[k]);
if (k < sbufElemin(c.res_a2d.grunt_ids) - 1) {
sbufPrint(output, ",");
}
}
} else if (c.kind == Command_RESPONSE_EXTERNAL) {
if (htLookup(&commands_pending, hashInteger((U64)c.id))) {
htRemove(&commands_pending, hashInteger((U64)c.id));
Sint fd = c.resex.dispatcher_socket;
Char *output = NULL;
sbufPrint(output, " ");
sbufPrint(output, "{\n\"id\": %d", c.id);
sbufPrint(output, ",\n\"ip\": [");
for (Size k = 0; k < sbufElemin(c.resex.ips); k++) {
sbufPrint(output, "\"%s\"", c.resex.ips[k]);
if (k < sbufElemin(c.resex.ips) - 1) {
sbufPrint(output, ",");
}
}
sbufPrint(output, "]");
sbufPrint(output, "\n}");
Size output_len = strlen(output);
printf("Sending: RESPONSE EXTERNAL:\n%s\n",
cJSON_Print(cJSON_Parse(output + 4)));
#if defined(ENDIAN_LITTLE)
U32 json_len = (U32)output_len - 4;
U32 json_len_be = swap_endian(json_len);
output[0] = ((Char*)&json_len_be)[0];
output[1] = ((Char*)&json_len_be)[1];
output[2] = ((Char*)&json_len_be)[2];
output[3] = ((Char*)&json_len_be)[3];
#endif
ssize_t nsent = write(fd, output, output_len);
if (nsent == -1) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
Output_Resume or = {0};
or.fd = fd;
or.buffer = output;
or.buffer_pos = 0;
or.buffer_len = output_len;
Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
sbufAdd(io->ors, or);
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN | EPOLLOUT};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
} else {
perror("write() failed");
exit(-1);
}
} else if ((Size)nsent < output_len) {
Output_Resume or = {0};
or.fd = fd;
or.buffer = output;
or.buffer_pos = (Size)nsent;
or.buffer_len = output_len;
Input_Output *io = (Input_Output *)htLookup(&io_map, (U64)fd);
sbufAdd(io->ors, or);
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN | EPOLLOUT};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
} else {
struct epoll_event event = {.data.fd = fd,
.events = EPOLLIN};
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD,
fd, &event) < 0) {
perror("epoll_ctl EPOLL_CTL_MOD");
exit(-1);
}
sbufPrint(output, "]");
sbufPrint(output, "\n}");
}
sbufDelete(output);
}
if (output != NULL) {
printf("Sending to %s\n%s\n", topic, output);
if (!kafkaWrite(kafka.writer, topic, "rm_arbiter", output)) {
return -1;
}
}
sbufDelete(output);
sbufDelete(commands);
}
}
sbufDelete(commands);
for (Size i = 0; i < sbufElemin(kafka.topics); i++) {
rd_kafka_topic_destroy(kafka.topics[i]);
}
rd_kafka_consumer_close(kafka.reader);
rd_kafka_destroy(kafka.reader);
for (Size i = 0; i < sbufElemin(kafka.queues); i++) {
rd_kafka_queue_destroy(kafka.queues[i]);
}
rd_kafka_destroy(kafka.writer);
return 0;
}
/*
* Creator: Naman Dixit
* Notice: © Copyright 2020 Naman Dixit
*/
// Classifies an accepted connection by the listener it arrived on:
// internal (grunts, port 9527) vs external (dispatchers, port 9526).
typedef enum Socket_Kind {
Socket_Kind_NONE,      // zero value doubles as the hash-table miss sentinel
Socket_Kind_INTERNAL,  // accepted on the internal listener
Socket_Kind_EXTERNAL,  // accepted on the external listener
} Socket_Kind;
// Create a non-blocking IPv4/TCP listening socket bound to `port`.
// Exits the process on any failure; returns the listening fd on success.
internal_function
Sint socketCreateListener (Char *port)
{
    printf("Opening socket on port %s\n", port);

    // NOTE(naman): Create a socket for IPv4 and TCP.
    Sint sock_fd = socket(AF_INET, SOCK_STREAM, 0);
    if (sock_fd < 0) {
        perror("ERROR opening socket");
        exit(-1);
    }

    // NOTE(naman): This helps avoid spurious EADDRINUSE when the previous
    // instance of this server died.
    int opt = 1;
    if (setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
        perror("setsockopt");
        exit(-1);
    }

    // NOTE(naman): Get actual internet address to bind to using IPv4 and TCP,
    // and listening passively
    struct addrinfo hints = {.ai_family = AF_INET,
                             .ai_socktype = SOCK_STREAM,
                             .ai_flags = AI_PASSIVE};
    struct addrinfo *addrinfo = NULL;
    Sint s = getaddrinfo(NULL, port, &hints, &addrinfo);
    if (s != 0) {
        fprintf(stderr, "Error: getaddrinfo: %s\n", gai_strerror(s));
        exit(-1);
    }

    // NOTE(naman): Assign an address to the socket
    if (bind(sock_fd, addrinfo->ai_addr, addrinfo->ai_addrlen) != 0) {
        perror("bind()");
        exit(-1);
    }

    // FIX: the addrinfo list was previously leaked; release it once bound.
    freeaddrinfo(addrinfo);

    // NOTE(naman): Start listening for incoming connections
    if (listen(sock_fd, MAX_SOCKET_CONNECTIONS_REQUEST) != 0) {
        perror("listen()");
        exit(-1);
    }

    // NOTE(naman): Set the socket as non-blocking
    int flags = fcntl(sock_fd, F_GETFL, 0);
    if (flags == -1) {
        perror("fcntl F_GETFL");
        exit(-1);
    }

    if (fcntl(sock_fd, F_SETFL, flags | O_NONBLOCK) == -1) {
        perror("fcntl F_SETFL O_NONBLOCK");
        exit(-1);
    }

    printf("Log: Waiting for connection on port %s...\n", port);

    return sock_fd;
}
/*
* Creator: Naman Dixit
* Notice: © Copyright 2020 Naman Dixit
*/
// Shared Kafka state: one producer ("writer"), one consumer ("reader"),
// plus stretchy buffers tracking admin queues and topic handles so they
// can all be destroyed at shutdown.
typedef struct Kafka {
rd_kafka_t *writer;          // producer handle (created by kafkaCreateWriter)
rd_kafka_t *reader;          // consumer handle (created by kafkaCreateReader)
rd_kafka_queue_t **queues;   // admin-result queues allocated in kafkaCreateTopic (sbuf)
rd_kafka_topic_t **topics;   // topic handles allocated in kafkaSubscribe (sbuf)
} Kafka;
// Synchronously ensure `topic` exists on the cluster via the librdkafka
// Admin API: issue an async CreateTopics request on a dedicated queue,
// then block up to 15 s for its result event.
// Returns 0 on success or if the topic already exists, -1 on failure.
// NOTE(review): the per-call result queue is appended to kafka->queues and
// only destroyed at program shutdown — confirm that is intentional.
header_function
int kafkaCreateTopic (Kafka *kafka, Char *topic,
Sint num_partitions, Sint replication_factor)
{
char errstr[256];
rd_kafka_NewTopic_t *new_topic = rd_kafka_NewTopic_new(topic,
num_partitions,
replication_factor,
errstr, sizeof(errstr));
if (!new_topic) {
fprintf(stderr, "Failed to create NewTopic object: %s\n",
errstr);
return -1;
}
/* Use a temporary queue for the asynchronous Admin result */
rd_kafka_queue_t *queue = rd_kafka_queue_new(kafka->writer);
sbufAdd(kafka->queues, queue);
/* Asynchronously create topic, result will be available on queue */
rd_kafka_CreateTopics(kafka->writer, &new_topic, 1, NULL, queue);
rd_kafka_NewTopic_destroy(new_topic);
/* Wait for result event */
rd_kafka_event_t *event = rd_kafka_queue_poll(queue, 15*1000);
if (!event) {
/* There will eventually be a result, after operation
* and request timeouts, but in this example we'll only
* wait 15s to avoid stalling too long when cluster
* is not available. */
fprintf(stderr, "No create topics result in 15s\n");
return -1;
}
if (rd_kafka_event_error(event)) {
/* Request-level failure */
fprintf(stderr, "Create topics request failed: %s\n",
rd_kafka_event_error_string(event));
rd_kafka_event_destroy(event);
return -1;
}
/* Extract the result type from the event. */
const rd_kafka_CreateTopics_result_t *result = rd_kafka_event_CreateTopics_result(event);
assert(result); /* Since we're using a dedicated queue we know this is
* a CreateTopics result type. */
/* Extract the per-topic results from the result type. */
size_t result_topics_count;
const rd_kafka_topic_result_t **result_topics = rd_kafka_CreateTopics_result_topics(result,
&result_topics_count);
assert(result_topics && result_topics_count == 1);
int return_value = 0;
/* Topic-level result: "already exists" is treated as success. */
if (rd_kafka_topic_result_error(result_topics[0]) ==
RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS) {
fprintf(stderr, "Topic %s already exists\n",
rd_kafka_topic_result_name(result_topics[0]));
} else if (rd_kafka_topic_result_error(result_topics[0])) {
fprintf(stderr, "Failed to create topic %s: %s\n",
rd_kafka_topic_result_name(result_topics[0]),
rd_kafka_topic_result_error_string(result_topics[0]));
return_value = -1;
} else {
fprintf(stderr, "Topic %s successfully created\n",
rd_kafka_topic_result_name(result_topics[0]));
}
rd_kafka_event_destroy(event);
return return_value;
}
// Create the Kafka producer handle, point it at `address`, and ensure every
// topic the system uses exists (creating any that are missing).
// Returns the producer on success; on failure returns NULL with
// kafka->writer reset to NULL.
header_function
rd_kafka_t* kafkaCreateWriter (Kafka *kafka, Char *address)
{
    char errstr[512] = {0};

    printf("Creating writer conf\n");
    rd_kafka_conf_t *kafka_writer_conf = rd_kafka_conf_new();
    rd_kafka_conf_set_dr_msg_cb(kafka_writer_conf, NULL);

    printf("Creating writer\n");
    kafka->writer = rd_kafka_new(RD_KAFKA_PRODUCER, kafka_writer_conf,
                                 errstr, sizeof(errstr));
    if (!kafka->writer) {
        fprintf(stderr, "Failed to create producer: %s\n", errstr);
        // rd_kafka_new only takes ownership of the conf on success.
        rd_kafka_conf_destroy(kafka_writer_conf);
        return NULL;
    }

    printf("Adding brokers to writer\n");
    // FIX: the return value was previously ignored; 0 brokers added means the
    // address string was unusable and nothing downstream can work.
    if (rd_kafka_brokers_add(kafka->writer, address) == 0) {
        fprintf(stderr, "No valid brokers could be added from: %s\n", address);
        rd_kafka_destroy(kafka->writer);
        kafka->writer = NULL;
        return NULL;
    }

    // FIX: also NULL out kafka->writer after destroying it, so callers that
    // inspect the struct instead of the return value don't see a dangling handle.
#define CREATE_TOPIC(s)                                 \
    do {                                                \
        if (kafkaCreateTopic(kafka, s, 1, 1) == -1) {   \
            rd_kafka_destroy(kafka->writer);            \
            kafka->writer = NULL;                       \
            return NULL;                                \
        }                                               \
    } while (0)

    CREATE_TOPIC("REQUEST_DISPATCHER_2_ARBITER");
    CREATE_TOPIC("RESPONSE_ARBITER_2_DISPATCHER");
    CREATE_TOPIC("REQUEST_ARBITER_2_GRUNT");
    CREATE_TOPIC("RESPONSE_GRUNT_2_ARBITER");
    CREATE_TOPIC("JOIN_GRUNT_2_ARBITER");
    CREATE_TOPIC("HEARTBEAT_GRUNT_2_ARBITER");

#undef CREATE_TOPIC

    return kafka->writer;
}
// Create the Kafka consumer handle, configure it for the subscribe API,
// and point it at `address`. Returns the consumer, or NULL on failure.
header_function
rd_kafka_t* kafkaCreateReader (Kafka *kafka, Char *address)
{
    char errstr[512] = {0};

    rd_kafka_conf_t *kafka_reader_conf = rd_kafka_conf_new();

    // Consumer configuration:
    //  group.id             — required by the high-level subscribe API.
    //  auto.offset.reset    — if there is no committed offset for this group,
    //                         start reading partitions from the beginning.
    //  enable.partition.eof — disable ERR__PARTITION_EOF at end of partition.
    struct { const char *key; const char *value; } conf_kvs[] = {
        { "group.id",             "cloud-example-c" },
        { "auto.offset.reset",    "earliest"        },
        { "enable.partition.eof", "false"           },
    };

    // FIX: rd_kafka_conf_set errors were previously discarded (NULL errstr);
    // report them and fail instead of silently running misconfigured.
    for (Size i = 0; i < sizeof(conf_kvs)/sizeof(conf_kvs[0]); i++) {
        if (rd_kafka_conf_set(kafka_reader_conf,
                              conf_kvs[i].key, conf_kvs[i].value,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
            fprintf(stderr, "Failed to set %s: %s\n", conf_kvs[i].key, errstr);
            rd_kafka_conf_destroy(kafka_reader_conf);
            return NULL;
        }
    }

    kafka->reader = rd_kafka_new(RD_KAFKA_CONSUMER, kafka_reader_conf,
                                 errstr, sizeof(errstr));
    if (!kafka->reader) {
        fprintf(stderr, "Failed to create consumer: %s\n", errstr);
        // rd_kafka_new only takes ownership of the conf on success.
        rd_kafka_conf_destroy(kafka_reader_conf);
        return NULL;
    }

    rd_kafka_brokers_add(kafka->reader, address);
    // Route all partition/rebalance events to the consumer queue so that
    // rd_kafka_consumer_poll() serves everything.
    rd_kafka_poll_set_consumer(kafka->reader);

    return kafka->reader;
}
// Register interest in `topic`: append it to the partition list that will
// later be handed to rd_kafka_subscribe(), create a topic handle on the
// reader for identifying inbound messages, and remember that handle so it
// can be destroyed at shutdown. Returns the new topic handle.
header_function
rd_kafka_topic_t* kafkaSubscribe (Kafka *kafka,
                                  rd_kafka_topic_partition_list_t* topics, Char *topic)
{
    rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);

    rd_kafka_topic_t *handle = rd_kafka_topic_new(kafka->reader, topic, NULL);
    sbufAdd(kafka->topics, handle);

    printf("Subscribe to %s\n", topic);

    return handle;
}
// Enqueue `msg` for asynchronous delivery to `topic`, keyed by `user`.
// librdkafka copies both key and value, so the caller may free or reuse
// them immediately. Returns true if the message was enqueued, false if
// produce failed (error printed to stderr). Delivery itself is async and
// is NOT confirmed by this function.
header_function
B32 kafkaWrite (rd_kafka_t *kafka_writer, Char *topic, Char *user, Char *msg)
{
    rd_kafka_resp_err_t err = rd_kafka_producev(
        kafka_writer,
        RD_KAFKA_V_TOPIC(topic),
        RD_KAFKA_V_KEY(user, strlen(user)),
        /* producev() copies the value (RD_KAFKA_MSG_F_COPY), so the same
         * buffer can be reused for the next message. */
        RD_KAFKA_V_VALUE(msg, strlen(msg)),
        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
        /* FIX: previously the address of a stack-local counter was passed as
         * the per-message opaque; any delivery report fires after this
         * function returns, when that pointer would dangle. No callback
         * consumes the opaque (dr_msg_cb is NULL), so pass NULL. */
        RD_KAFKA_V_OPAQUE(NULL),
        RD_KAFKA_V_END);

    if (err) {
        fprintf(stderr, "Produce failed: %s\n",
                rd_kafka_err2str(err));
        return false;
    }

    return true;
}
......@@ -18,8 +18,9 @@
#include <netinet/in.h>
#include <sys/epoll.h>
#include <sys/time.h>
#define MAX_EPOLL_EVENTS 1024
#include <assert.h>
#include <signal.h>
#include <librdkafka/rdkafka.h>
# if defined(COMPILER_CLANG)
# pragma clang diagnostic push
......@@ -32,183 +33,132 @@
# pragma clang diagnostic pop
# endif
typedef struct Command {
Sint id;
Sint dispatcher_socket;
// Resource requirements attached to a request.
typedef struct Resources {
Sint memory;  // requested memory (units per the request payload)
} Resources;
// A pending piece of work parsed from a Kafka message, identified by the
// transaction id of the originating request.
typedef struct Command {
Char *txn_id;   // transaction id correlating request and response
Resources res;  // resources the request asks for
} Command;
#include "kafka.h"
#include "time.c"
#include"socket.c"
global_variable volatile sig_atomic_t global_keep_running = 1;
// SIGINT handler: clears the shutdown flag polled by the main loop so the
// process can exit cleanly and release its Kafka handles.
internal_function
void signalHandlerSIGINT (int _)
{
(void)_; // parameter unused; only SIGINT is registered for this handler
global_keep_running = 0; // volatile sig_atomic_t write is async-signal-safe
}
int main(int argc, char** argv)
{
unused_variable(argc);
unused_variable(argv);
Char *port = "9527";
signal(SIGINT, signalHandlerSIGINT);
Sint sock_fd = socket(AF_INET, SOCK_STREAM, 0);
Kafka kafka = {0};
struct addrinfo hints = {.ai_family = AF_INET,
.ai_socktype = SOCK_STREAM};
struct addrinfo *result = NULL;
Sint s = getaddrinfo(NULL, port, &hints, &result);
if (s != 0) {
fprintf(stderr, "Error: getaddrinfo: %s\n", gai_strerror(s));
exit(-1);
}
kafka.writer = kafkaCreateWriter(&kafka, "10.129.6.5:9092");
kafka.reader = kafkaCreateReader(&kafka, "10.129.6.5:9092");
// NOTE(naman): Create an epoll instance, with no flags set.
int epoll_fd = epoll_create1(0);
if (epoll_fd < 0) {
perror("epoll_create1");
exit(-1);
}
rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);
// NOTE(naman): Add the socket's file descriptor to the epoll set
struct epoll_event accept_event = {.data.fd = sock_fd,
.events = EPOLLIN};
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock_fd, &accept_event) < 0) {
perror("epoll_ctl EPOLL_CTL_ADD");
exit(-1);
}
rd_kafka_topic_t *topic_req_a2g = kafkaSubscribe(&kafka, kafka_reader_topics,
"REQUEST_ARBITER_2_GRUNT");
unused_variable(topic_req_a2g);
// NOTE(naman): Allocate memory for epoll array
struct epoll_event* events = calloc(MAX_EPOLL_EVENTS, sizeof(struct epoll_event));
if (events == NULL) {
fprintf(stderr, "Unable to allocate memory for epoll_events");
exit(-1);
}
rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader, kafka_reader_topics);
rd_kafka_topic_partition_list_destroy(kafka_reader_topics);
while (connect(sock_fd, result->ai_addr, result->ai_addrlen) == -1) {
fprintf(stderr, "Error: Couldn't connect on port %s, trying again in one second...\n", port);
sleep(1);
if (kafka_reader_topics_err) {
fprintf(stderr, "Subscribe failed: %s\n",
rd_kafka_err2str(kafka_reader_topics_err));
rd_kafka_destroy(kafka.reader);
return -1;
}
printf("Log: Starting communication with server on port %s...\n", port);
Char *join_msg = NULL;
sbufPrint(join_msg, "{\"id\": \"my-machine\"");
sbufPrint(join_msg, "\n}\n");
if (!kafkaWrite(kafka.writer, "JOIN_GRUNT_2_ARBITER", "rm_grunt", join_msg)) {
return -1;
}
U64 time_begin = timeMilli();
U64 time_accum = 0;
while (true) {
while (global_keep_running) {
// NOTE(naman): Get the fd's that are ready
int nready = epoll_wait(epoll_fd, events, MAX_EPOLL_EVENTS, 1000);
for (int i = 0; i < nready; i++) {
if (events[i].events & EPOLLERR) {
perror("epoll_wait returned EPOLLERR");
exit(-1);
rd_kafka_message_t *kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 100);
B32 command_found = false;
Command c = {0};
if (kafka_message_read != NULL) {
if (kafka_message_read->err) {
/* Consumer error: typically just informational. */
fprintf(stderr, "Consumer error: %s\n",
rd_kafka_message_errstr(kafka_message_read));
} else {
fprintf(stderr,
"Received message on %s [%d] "
"at offset %"PRId64": \n%s\n",
rd_kafka_topic_name(kafka_message_read->rkt),
(int)kafka_message_read->partition, kafka_message_read->offset,
cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload)));
char *buffer = (char *)kafka_message_read->payload;
const Char *json_error = NULL;
cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
if (root == NULL) {
// TODO(naman): Error
} else {
command_found = true;
c.txn_id = cJSON_GetObjectItem(root, "id")->valuestring;
c.res.memory = cJSON_GetObjectItem(root, "memory")->valueint;
}
}
rd_kafka_message_destroy(kafka_message_read);
}
if (events[i].events & EPOLLIN) {
B32 initialized = false;
Size buffer_len = 0;
Size buffer_cap = MiB(1);
U32 buffer_expected_len = 0;
Char *buffer = calloc(buffer_cap, sizeof(*buffer));
Char size_bytes[4] = {0};
Size size_bytes_count = 0;
Command *c = NULL;
while (true) {
if (initialized == false) {
long len = read(sock_fd,
(Char*)size_bytes + size_bytes_count,
4 - size_bytes_count);
if (len == 0) {
perror("read() returned zero");
exit(-1);
}
size_bytes_count += (Size)len;
if (size_bytes_count == 4) {
initialized = true;
buffer_expected_len = (U32)((size_bytes[3] << 0U) |
(size_bytes[2] << 8U) |
(size_bytes[1] << 16U) |
(size_bytes[0] << 24U));
}
continue;
} else {
long len = read(sock_fd,
buffer + buffer_len,
buffer_expected_len - buffer_len);
buffer_len += (Size)len;
if (buffer_expected_len == buffer_len) {
cJSON_Print(cJSON_Parse(buffer));
printf("%.*s", (int)buffer_len, buffer);
const Char *json_error = NULL;
cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
if (root == NULL) {
// TODO(naman): Error
} else {
c = calloc(1, sizeof(*c));
c->id = cJSON_GetObjectItem(root, "id")->valueint;
c->dispatcher_socket = cJSON_GetObjectItem(root, "dispatcher_socket")->valueint;
c->memory = cJSON_GetObjectItem(root, "memory")->valueint;
}
int memory = 0;
free(buffer);
break;
}
}
}
FILE *meminfo = fopen("/proc/meminfo", "r");
Char line[256] = {0};
while(fgets(line, sizeof(line), meminfo)) {
if (sscanf(line, "MemAvailable: %d kB", &memory) == 1) {
fclose(meminfo);
break;
}
}
int memory = 0;
if (command_found) {
Char *output = NULL;
FILE *meminfo = fopen("/proc/meminfo", "r");
Char line[256] = {0};
while(fgets(line, sizeof(line), meminfo)) {
if (sscanf(line, "MemTotal: %d kB", &memory) == 1) {
fclose(meminfo);
break;
}
}
sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
Char *output = NULL;
if (memory >= c.res.memory) {
sbufPrint(output, ",\n\"success\": %d\n", 1);
// TODO(naman): Add port
// sbufPrint(output, ",\n\"port\": %d\n", port);
} else {
sbufPrint(output, ",\n\"success\": %d\n", 0);
}
sbufPrint(output, " ");
sbufPrint(output, "{\n\"id\": %d", c->id);
sbufPrint(output, ",\n\"dispatcher_socket\": %d", c->dispatcher_socket);
sbufPrint(output, ",\n\"type\": \"response\"");
sbufPrint(output, "\n}\n");
if (memory >= c->memory) {
sbufPrint(output, ",\n\"success\": %d\n", 1);
} else {
sbufPrint(output, ",\n\"success\": %d\n", 0);
}
sbufPrint(output, "\n}\n");
Size output_len = strlen(output);
#if defined(ENDIAN_LITTLE)
U32 json_len = (U32)output_len - 4;
U32 json_len_be = swap_endian(json_len);
output[0] = ((Char*)&json_len_be)[0];
output[1] = ((Char*)&json_len_be)[1];
output[2] = ((Char*)&json_len_be)[2];
output[3] = ((Char*)&json_len_be)[3];
#endif
socketWrite(output, output_len, sock_fd);
if (!kafkaWrite(kafka.writer, "RESPONSE_GRUNT_2_ARBITER", "rm_grunt", output)) {
return -1;
}
}
{ // Send a heartbeat message if it is time to do so
} else { // Send a heartbeat message if it is time to do so
U64 time_new = timeMilli();
U64 time_passed = time_new - time_begin;
time_begin = time_new;
......@@ -217,38 +167,29 @@ int main(int argc, char** argv)
if (time_accum >= 1000) {
time_accum = 0;
int memory = 0;
FILE *meminfo = fopen("/proc/meminfo", "r");
Char line[256] = {0};
while(fgets(line, sizeof(line), meminfo)) {
if (sscanf(line, "MemTotal: %d kB", &memory) == 1) {
fclose(meminfo);
break;
}
}
Char *output = NULL;
sbufPrint(output, " ");
sbufPrint(output, "{");
sbufPrint(output, " \n\"type\": \"heartbeat\"");
sbufPrint(output, "{\"id\": \"my-machine\"");
sbufPrint(output, ",\n\"memory\": %d", memory);
sbufPrint(output, "\n}\n");
Size output_len = strlen(output);
#if defined(ENDIAN_LITTLE)
U32 json_len = (U32)output_len - 4;
U32 json_len_be = swap_endian(json_len);
output[0] = ((Char*)&json_len_be)[0];
output[1] = ((Char*)&json_len_be)[1];
output[2] = ((Char*)&json_len_be)[2];
output[3] = ((Char*)&json_len_be)[3];
#endif
socketWrite(output, output_len, sock_fd);
if (!kafkaWrite(kafka.writer, "HEARTBEAT_GRUNT_2_ARBITER", "rm_grunt", output)) {
return -1;
}
}
}
}
for (Size i = 0; i < sbufElemin(kafka.topics); i++) {
rd_kafka_topic_destroy(kafka.topics[i]);
}
rd_kafka_consumer_close(kafka.reader);
rd_kafka_destroy(kafka.reader);
for (Size i = 0; i < sbufElemin(kafka.queues); i++) {
rd_kafka_queue_destroy(kafka.queues[i]);
}
rd_kafka_destroy(kafka.writer);
return 0;
}
/*
* Creator: Naman Dixit
* Notice: © Copyright 2020 Naman Dixit
*/
// Write the whole `output` buffer to `sock_fd`, looping over partial
// writes. Exits the process on any unrecoverable write error.
//
// Fix over the previous version: the partial-write check compared the
// bytes sent against the TOTAL length (`nsent < output_len`) instead of
// the remaining length. Once the cursor reached the end, write() was
// called with a zero count, returned 0, and the loop spun forever.
internal_function
void socketWrite (Char *output, Size output_len,
                  int sock_fd)
{
    Size output_cursor = 0;

    while (output_cursor < output_len) {
        ssize_t nsent = write(sock_fd,
                              output + output_cursor,
                              output_len - output_cursor);

        if (nsent == -1) {
            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
                // Transient condition (non-blocking socket busy, or
                // interrupted by a signal): retry the same write.
                continue;
            } else {
                exit(-1);
            }
        }

        output_cursor += (Size)nsent;
    }
}
......@@ -18,6 +18,10 @@
#include <netdb.h>
#include <unistd.h>
#include <ctype.h>
#include <assert.h>
#include <signal.h>
#include <librdkafka/rdkafka.h>
# if defined(COMPILER_CLANG)
# pragma clang diagnostic push
......@@ -30,6 +34,7 @@
# pragma clang diagnostic pop
# endif
#include "kafka.h"
int main(int argc, char** argv)
{
......@@ -40,108 +45,85 @@ int main(int argc, char** argv)
Sint memory_required = (Sint)strtol(argv[1], NULL, 10);
Char *port = "9526";
Kafka kafka = {0};
Sint sock_fd = socket(AF_INET, SOCK_STREAM, 0);
kafka.writer = kafkaCreateWriter(&kafka, "10.129.6.5:9092");
kafka.reader = kafkaCreateReader(&kafka, "10.129.6.5:9092");
struct addrinfo hints = {.ai_family = AF_INET,
.ai_socktype = SOCK_STREAM};
struct addrinfo *result = NULL;
Sint s = getaddrinfo(NULL, port, &hints, &result);
if (s != 0) {
fprintf(stderr, "Error: getaddrinfo: %s\n", gai_strerror(s));
exit(-1);
}
rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);
while (connect(sock_fd, result->ai_addr, result->ai_addrlen) == -1) {
fprintf(stderr, "Error: Couldn't connect on port %s, trying again in one second...\n", port);
sleep(1);
}
kafkaSubscribe(&kafka, kafka_reader_topics, "RESPONSE_ARBITER_2_DISPATCHER");
rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader, kafka_reader_topics);
rd_kafka_topic_partition_list_destroy(kafka_reader_topics);
printf("Log: Starting communication with server on port %s...\n", port);
if (kafka_reader_topics_err) {
fprintf(stderr, "Subscribe failed: %s\n",
rd_kafka_err2str(kafka_reader_topics_err));
rd_kafka_destroy(kafka.reader);
return -1;
}
Char *output = NULL;
sbufPrint(output, " ");
sbufPrint(output, "{\n\"id\": %d", (Sint)time(NULL));
Sint id = (Sint)time(NULL);
sbufPrint(output, "{\n\"id\": \"%d\"", id);
sbufPrint(output, ",\n\"memory\": %d", memory_required);
sbufPrint(output, "\n}\n");
Size output_len = strlen(output);
printf("Sending to Arbiter:\n%s\n",
cJSON_Print(cJSON_Parse(output + 4)));
#if defined(ENDIAN_LITTLE)
U32 json_len = (U32)output_len - 4;
U32 json_len_be = swap_endian(json_len);
output[0] = ((Char*)&json_len_be)[0];
output[1] = ((Char*)&json_len_be)[1];
output[2] = ((Char*)&json_len_be)[2];
output[3] = ((Char*)&json_len_be)[3];
#endif
write(sock_fd, output, output_len);
{
cJSON *array = NULL;
B32 initialized = false;
Size buffer_len = 0;
Size buffer_cap = MiB(1);
U32 buffer_expected_len = 0;
Char *buffer = calloc(buffer_cap, sizeof(*buffer));
Char size_bytes[4] = {0};
Size size_bytes_count = 0;
while (true) {
if (initialized == false) {
long len = read(sock_fd,
(Char*)size_bytes + size_bytes_count,
4 - size_bytes_count);
if (len == 0) {
perror("read() returned zero");
exit(-1);
}
size_bytes_count += (Size)len;
if (size_bytes_count == 4) {
initialized = true;
printf("Sending to Arbiter:\n%s\n", cJSON_Print(cJSON_Parse(output)));
buffer_expected_len = (U32)((size_bytes[3] << 0U) |
(size_bytes[2] << 8U) |
(size_bytes[1] << 16U) |
(size_bytes[0] << 24U));
}
if (output != NULL) {
if (!kafkaWrite(kafka.writer, "REQUEST_DISPATCHER_2_ARBITER", "rm_test", output)) {
return -1;
}
}
continue;
rd_kafka_message_t *kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 0);
while (true) {
if (kafka_message_read != NULL) {
const Char *json_error = NULL;
cJSON *root = cJSON_ParseWithOpts((char *)kafka_message_read->payload, &json_error, true);
Sint id_now = atoi(cJSON_GetObjectItem(root, "id")->valuestring);
if (id_now == id) {
break;
} else {
long len = read(sock_fd,
buffer + buffer_len,
buffer_expected_len - buffer_len);
buffer_len += (Size)len;
if (buffer_expected_len == buffer_len) {
printf("Recieved: Final Response:\n%s\n",
cJSON_Print(cJSON_Parse(buffer)));
const Char *json_error = NULL;
cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
if (root == NULL) {
// TODO(naman): Error
} else {
array = cJSON_GetObjectItem(root, "ip");
}
printf("Found a cranky old message: %d\n", id_now);
rd_kafka_message_destroy(kafka_message_read);
}
}
kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 0);
}
free(buffer);
break;
if (kafka_message_read != NULL) {
if (kafka_message_read->err) {
/* Consumer error: typically just informational. */
fprintf(stderr, "Consumer error: %s\n",
rd_kafka_message_errstr(kafka_message_read));
} else {
fprintf(stderr,
"Received message on %s [%d] "
"at offset %"PRId64": \n%s\n",
rd_kafka_topic_name(kafka_message_read->rkt),
(int)kafka_message_read->partition, kafka_message_read->offset,
cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload)));
char *buffer = (char *)kafka_message_read->payload;
const Char *json_error = NULL;
cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
if (root == NULL) {
// TODO(naman): Error
} else {
cJSON *array = cJSON_GetObjectItem(root, "id");
cJSON *elem = NULL;
cJSON_ArrayForEach(elem, array) {
printf("%s\n", elem->valuestring);
}
}
}
rd_kafka_message_destroy(kafka_message_read);
}
return 0;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment