SYNERG / xanadu · Commits

Commit b2c7f892 · authored Feb 17, 2020 by Nilanjan Daw
Merge remote-tracking branch 'origin/master'

Parents: ca680d11, 626ea0c0
Showing 3 changed files with 113 additions and 64 deletions (+113 −64):

    resource_manager/src/arbiter/arbiter.c    +73  −35
    resource_manager/src/common/kafka.h        +0  −16
    resource_manager/src/grunt/grunt.c        +40  −13
resource_manager/src/arbiter/arbiter.c  (view file @ b2c7f892)
@@ -31,14 +31,14 @@ typedef struct Grunt_Survey {
     U16 *ports;

     U64 milli_passed;
     U64 milli_last;

-    Char *txn_id;
+    Char *resource_id;
 } Grunt_Survey;

 typedef struct Command {
     enum Command_Kind {
         Command_NONE,
-        Command_REQUEST_DISPATCHER_2_ARBITER,
-        Command_RESPONSE_ARBITER_2_DISPATCHER,
+        Command_REQUEST_DM_2_ARBITER,
+        Command_RESPONSE_ARBITER_2_DM,
         Command_REQUEST_ARBITER_2_GRUNT,
         Command_RESPONSE_GRUNT_2_ARBITER,
@@ -46,7 +46,7 @@ typedef struct Command {
         Command_HEARTBEAT_GRUNT_2_ARBITER,
     } kind;

-    Char *txn_id;
+    Char *resource_id;

     union {
         struct {
@@ -113,18 +113,38 @@ Sint main (Sint argc, Char *argv[])
     Kafka kafka = {0};
     kafkaCreateWriter(&kafka, "10.129.6.5:9092");

+#define CREATE_TOPIC(s)                                    \
+    do {                                                   \
+        if (kafkaCreateTopic(&kafka, s, 1, 1) == -1) {     \
+            rd_kafka_destroy(kafka.writer);                \
+            return -1;                                     \
+        }                                                  \
+    } while (0)
+
+    CREATE_TOPIC("REQUEST_DM_2_RM");      //
+    CREATE_TOPIC("RESPONSE_RM_2_DM");
+    CREATE_TOPIC("REQUEST_RM_2_RD");
+    CREATE_TOPIC("RESPONSE_RD_2_RM");     //
+    CREATE_TOPIC("JOIN_RD_2_RM");         //
+    CREATE_TOPIC("HEARTBEAT_RD_2_RM");    //
+    CREATE_TOPIC("LOG_COMMON");           //
+
+#undef CREATE_TOPIC

     kafkaCreateReader(&kafka, "10.129.6.5:9092");

     rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);

-    rd_kafka_topic_t *topic_req_d2a  = kafkaSubscribe(&kafka, kafka_reader_topics, "REQUEST_DISPATCHER_2_ARBITER");
+    rd_kafka_topic_t *topic_req_dm2a = kafkaSubscribe(&kafka, kafka_reader_topics, "REQUEST_DM_2_RM");
-    rd_kafka_topic_t *topic_join_g2a = kafkaSubscribe(&kafka, kafka_reader_topics, "JOIN_GRUNT_2_ARBITER");
+    rd_kafka_topic_t *topic_join_g2a = kafkaSubscribe(&kafka, kafka_reader_topics, "JOIN_RD_2_RM");
-    rd_kafka_topic_t *topic_res_g2a  = kafkaSubscribe(&kafka, kafka_reader_topics, "RESPONSE_GRUNT_2_ARBITER");
+    rd_kafka_topic_t *topic_res_g2a  = kafkaSubscribe(&kafka, kafka_reader_topics, "RESPONSE_RD_2_RM");
-    rd_kafka_topic_t *topic_beat_g2a = kafkaSubscribe(&kafka, kafka_reader_topics, "HEARTBEAT_GRUNT_2_ARBITER");
+    rd_kafka_topic_t *topic_beat_g2a = kafkaSubscribe(&kafka, kafka_reader_topics, "HEARTBEAT_RD_2_RM");
     rd_kafka_topic_t *topic_log      = kafkaSubscribe(&kafka, kafka_reader_topics, "LOG_COMMON");

     rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader, kafka_reader_topics);
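Note (illustrative, not part of the commit): the CREATE_TOPIC block above can also be written as a plain loop over the topic names. A minimal sketch, assuming it sits in the same main() and that kafkaCreateTopic keeps the (Kafka *, name, partitions, replication) signature and -1-on-failure convention shown in this diff:

    static const char *topics[] = {
        "REQUEST_DM_2_RM", "RESPONSE_RM_2_DM", "REQUEST_RM_2_RD",
        "RESPONSE_RD_2_RM", "JOIN_RD_2_RM", "HEARTBEAT_RD_2_RM", "LOG_COMMON",
    };

    for (Size i = 0; i < sizeof(topics) / sizeof(topics[0]); i++) {
        // Same failure handling as the macro: tear down the writer and bail out.
        if (kafkaCreateTopic(&kafka, (Char *)topics[i], 1, 1) == -1) {
            rd_kafka_destroy(kafka.writer);
            return -1;
        }
    }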
@@ -158,20 +178,22 @@ Sint main (Sint argc, Char *argv[])
             const char *json_error = NULL;
             cJSON *root = cJSON_ParseWithOpts(kafka_message_read->payload, &json_error, true);

-            if (kafka_message_read->rkt == topic_req_d2a) {
-                Command c = {.kind = Command_REQUEST_ARBITER_2_GRUNT};
-                c.txn_id = cJSON_GetObjectItem(root, "id")->valuestring;
+            if (kafka_message_read->rkt == topic_req_dm2a) {
+                Command c = {.kind = Command_REQUEST_DM_2_ARBITER};
+                c.resource_id = strdup(cJSON_GetObjectItem(root, "resource_id")->valuestring);

                 // TODO(naman): Add any new resource fields here
                 Sint memory = cJSON_GetObjectItem(root, "memory")->valueint;

-                logMessage("Request D2A:\tid: %s = ([memory] = %d)", c.txn_id, memory);
+                logMessage("Request DM2RM:\tid: %s = ([memory] = %d)", c.resource_id, memory);

                 Char **grunt_ids = NULL;

                 for (Size j = 0; j < sbufElemin(grunts); j++) {
                     Grunt g = grunts[j];
                     if (g.memory >= memory) {
-                        c.kind = Command_RESPONSE_ARBITER_2_DISPATCHER;
+                        c.kind = Command_RESPONSE_ARBITER_2_DM;
                         sbufAdd(grunt_ids, g.id);
                     }
                 }
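For reference, a request that this handler would accept needs a string "resource_id" and an integer "memory". A minimal producer-side sketch using cJSON (illustrative only; the dispatcher code is not part of this diff, and the field values below are made up):

    #include <stdio.h>
    #include <stdlib.h>
    #include "cJSON.h"   /* include path may differ per build */

    int main(void)
    {
        /* Build {"resource_id": ..., "memory": ...} as read by the arbiter above. */
        cJSON *req = cJSON_CreateObject();
        cJSON_AddStringToObject(req, "resource_id", "res-42");  /* made-up id */
        cJSON_AddNumberToObject(req, "memory", 512);            /* made-up amount */

        char *payload = cJSON_PrintUnformatted(req);
        printf("%s\n", payload);   /* this string would be published on REQUEST_DM_2_RM */

        free(payload);
        cJSON_Delete(req);
        return 0;
    }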
@@ -179,16 +201,16 @@ Sint main (Sint argc, Char *argv[])
                 if (c.kind == Command_REQUEST_ARBITER_2_GRUNT) {
                     c.req_a2g.memory = memory;

                     Grunt_Survey *gs = calloc(1, sizeof(*gs));
-                    htInsert(&grunt_survey_map, hashString(c.txn_id), (Uptr)gs);
+                    htInsert(&grunt_survey_map, hashString(c.resource_id), (Uptr)gs);

                     gs->milli_last = timeMilli();
-                    gs->txn_id = c.txn_id;
-                } else if (c.kind == Command_RESPONSE_ARBITER_2_DISPATCHER) {
+                    gs->resource_id = c.resource_id;
+                } else if (c.kind == Command_RESPONSE_ARBITER_2_DM) {
                     c.res_a2d.grunt_ids = grunt_ids;
                     sbufAdd(commands, c);
                 }
             } else if (kafka_message_read->rkt == topic_join_g2a) {
-                Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
+                Char *id = strdup(cJSON_GetObjectItem(root, "node_id")->valuestring);
                 Grunt grunt = {.id = id};

                 logMessage("Join G2A:\tid: %s", id);
@@ -198,32 +220,45 @@ Sint main (Sint argc, Char *argv[])
                     htInsert(&grunt_map, hashString(id), sbufElemin(grunts) - 1);
                 }
             } else if (kafka_message_read->rkt == topic_res_g2a) {
-                Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
-                B32 success = (B32)(cJSON_GetObjectItem(root, "success")->valueint);
+                Char *node_id = cJSON_GetObjectItem(root, "node_id")->valuestring;
+                Char *resource_id = cJSON_GetObjectItem(root, "resource_id")->valuestring;
+                B32 success = (B32)cJSON_IsTrue(cJSON_GetObjectItem(root, "success"));

-                logMessage("Response G2A:\tid: %s = %s", id, success ? "succeded" : "failed");
+                logMessage("Response G2A:\tid: %s (%s) = %s", resource_id, node_id, success ? "succeded" : "failed");

                 if (success) {
-                    Grunt_Survey *gs = (Grunt_Survey *)htLookup(&grunt_survey_map, hashString(id));
+                    Grunt_Survey *gs = (Grunt_Survey *)htLookup(&grunt_survey_map, hashString(resource_id));

                     if (gs != NULL) { // If it has not been already removed
-                        Grunt *g = &grunts[htLookup(&grunt_map, hashString(id))];
+                        Grunt *g = &grunts[htLookup(&grunt_map, hashString(node_id))];
                         sbufAdd(gs->grunt_ptrs, g);
                     }
                 }
             } else if (kafka_message_read->rkt == topic_beat_g2a) {
-                Char *id = cJSON_GetObjectItem(root, "id")->valuestring;
+                Char *id = cJSON_GetObjectItem(root, "node_id")->valuestring;

                 logMessage("Beat G2A:\tid: %s", id);

                 U64 index = htLookup(&grunt_map, hashString(id));
                 if (index != 0) { // Prevent any left over message
                     // TODO(naman): Add any new resource fields here
                     grunts[index].memory = cJSON_GetObjectItem(root, "memory")->valueint;
                 }
             } else if (kafka_message_read->rkt == topic_log) {
                 Char *node_id = cJSON_GetObjectItem(root, "node_id")->valuestring;
                 Char *resource_id = cJSON_GetObjectItem(root, "resource_id")->valuestring;
                 Char *function_id = cJSON_GetObjectItem(root, "function_id")->valuestring;
                 unused_variable(node_id);
                 unused_variable(resource_id);
                 unused_variable(function_id);
             } else {
                 // TODO(naman): Error
             }

             cJSON_Delete(root);
         }

         rd_kafka_message_destroy(kafka_message_read);
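A standalone sketch of the parsing the updated response handler does: "success" is now read as a JSON boolean via cJSON_IsTrue instead of an integer via valueint. The payload literal and id values below are illustrative only; real messages come from grunt.c.

    #include <stdio.h>
    #include "cJSON.h"   /* include path may differ per build */

    int main(void)
    {
        /* Shaped like the messages published on RESPONSE_RD_2_RM. */
        const char *payload =
            "{ \"node_id\": \"node-0\", \"resource_id\": \"res-42\", \"success\": true }";

        cJSON *root = cJSON_Parse(payload);
        if (root == NULL) return 1;

        const char *node_id     = cJSON_GetObjectItem(root, "node_id")->valuestring;
        const char *resource_id = cJSON_GetObjectItem(root, "resource_id")->valuestring;
        int success             = cJSON_IsTrue(cJSON_GetObjectItem(root, "success"));

        printf("Response:\tid: %s (%s) = %s\n",
               resource_id, node_id, success ? "succeeded" : "failed");

        cJSON_Delete(root);
        return 0;
    }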
@@ -237,16 +272,18 @@ Sint main (Sint argc, Char *argv[])
             gs->milli_passed += milli_new - gs->milli_last;
             gs->milli_last = milli_new;

             if (gs->milli_passed >= 1000) {
-                htRemove(&grunt_survey_map, hashString(gs->txn_id));
-                Command c = {.kind = Command_RESPONSE_ARBITER_2_DISPATCHER};
-                c.txn_id = gs->txn_id;
+                Command c = {.kind = Command_RESPONSE_ARBITER_2_DM};
+                c.resource_id = gs->resource_id;

                 for (Size k = 0; k < sbufElemin(gs->grunt_ptrs); k++) {
                     sbufAdd(c.res_a2d.grunt_ids, gs->grunt_ptrs[k]->id);
                 }

                 sbufAdd(commands, c);

                 free(gs->grunt_ptrs);
                 free(gs->ports);
+                htRemove(&grunt_survey_map, hashString(gs->resource_id));
             }
         }
     }
@@ -260,14 +297,14 @@ Sint main (Sint argc, Char *argv[])
             if (c.kind == Command_REQUEST_ARBITER_2_GRUNT) {
                 topic = "REQUEST_ARBITER_2_GRUNT";

-                sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
+                sbufPrint(output, "{\n\"resource_id\": \"%s\"", c.resource_id);
                 sbufPrint(output, ",\n\"memory\": %d\n", c.req_a2g.memory);
                 sbufPrint(output, "\n}\n");
-            } else if (c.kind == Command_RESPONSE_ARBITER_2_DISPATCHER) {
+            } else if (c.kind == Command_RESPONSE_ARBITER_2_DM) {
                 topic = "RESPONSE_ARBITER_2_DISPATCHER";

-                sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
-                sbufPrint(output, ",\n\"grunts\": [");
+                sbufPrint(output, "{\n\"id\": \"%s\"", c.resource_id);
+                sbufPrint(output, ",\n\"nodes\": [");

                 for (Size k = 0; k < sbufElemin(c.res_a2d.grunt_ids); k++) {
                     sbufPrint(output, "\"%s\"", c.res_a2d.grunt_ids[k]);
                     if (k < sbufElemin(c.res_a2d.grunt_ids) - 1) {
@@ -280,12 +317,13 @@ Sint main (Sint argc, Char *argv[])
             if (output != NULL) {
                 printf("Sending to %s\n%s\n", topic, output);

-                if (!kafkaWrite(kafka.writer, topic, "rm_arbiter", output)) {
+                if (!kafkaWrite(kafka.writer, topic, "resource_manager", output)) {
                     return -1;
                 }

                 sbufDelete(output);
-                sbufDelete(commands);
+                free(c.resource_id);
+                sbufUnsortedRemove(commands, j);
             }
         }
resource_manager/src/common/kafka.h  (view file @ b2c7f892)

@@ -106,22 +106,6 @@ rd_kafka_t* kafkaCreateWriter (Kafka *kafka, Char *address)
     printf("Ading brokers to writer\n");
     rd_kafka_brokers_add(kafka->writer, address);

-#define CREATE_TOPIC(s)                                  \
-    do {                                                 \
-        if (kafkaCreateTopic(kafka, s, 1, 1) == -1) {    \
-            rd_kafka_destroy(kafka->writer);             \
-            return NULL;                                 \
-        }                                                \
-    } while (0)
-
-    CREATE_TOPIC("REQUEST_DISPATCHER_2_ARBITER");   //
-    CREATE_TOPIC("RESPONSE_ARBITER_2_DISPATCHER");
-    CREATE_TOPIC("REQUEST_ARBITER_2_GRUNT");
-    CREATE_TOPIC("RESPONSE_GRUNT_2_ARBITER");       //
-    CREATE_TOPIC("JOIN_GRUNT_2_ARBITER");           //
-    CREATE_TOPIC("HEARTBEAT_GRUNT_2_ARBITER");      //
-
-#undef CREATE_TOPIC

     return kafka->writer;
 }
resource_manager/src/grunt/grunt.c  (view file @ b2c7f892)
@@ -56,20 +56,44 @@ void signalHandlerSIGINT (int _)
 int main(int argc, char** argv)
 {
-    unused_variable(argc);
-    unused_variable(argv);
+    Char *node_name = NULL;
+
+    if (argc > 1) {
+        node_name = argv[1];
+    } else {
+        Char hostname[1024] = {0};
+        gethostname(hostname, 1023);
+        sbufPrint(node_name, "%s", hostname);
+    }

     signal(SIGINT, signalHandlerSIGINT);

     Kafka kafka = {0};
     kafka.writer = kafkaCreateWriter(&kafka, "10.129.6.5:9092");

+#define CREATE_TOPIC(s)                                    \
+    do {                                                   \
+        if (kafkaCreateTopic(&kafka, s, 1, 1) == -1) {     \
+            rd_kafka_destroy(kafka.writer);                \
+            return -1;                                     \
+        }                                                  \
+    } while (0)
+
+    CREATE_TOPIC("REQUEST_DM_2_RM");      //
+    CREATE_TOPIC("RESPONSE_RM_2_DM");
+    CREATE_TOPIC("REQUEST_RM_2_RD");
+    CREATE_TOPIC("RESPONSE_RD_2_RM");     //
+    CREATE_TOPIC("JOIN_RD_2_RM");         //
+    CREATE_TOPIC("HEARTBEAT_RD_2_RM");    //
+    CREATE_TOPIC("LOG_COMMON");           //

     kafka.reader = kafkaCreateReader(&kafka, "10.129.6.5:9092");

     rd_kafka_topic_partition_list_t *kafka_reader_topics = rd_kafka_topic_partition_list_new(1);

-    rd_kafka_topic_t *topic_req_a2g = kafkaSubscribe(&kafka, kafka_reader_topics, "REQUEST_ARBITER_2_GRUNT");
+    rd_kafka_topic_t *topic_req_a2g = kafkaSubscribe(&kafka, kafka_reader_topics, "REQUEST_RM_2_RD");
     unused_variable(topic_req_a2g);

     rd_kafka_resp_err_t kafka_reader_topics_err = rd_kafka_subscribe(kafka.reader, kafka_reader_topics);
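The read side of this loop is not visible in the hunk above. As a rough sketch of how a subscribed librdkafka consumer is typically drained (the helper name is hypothetical; the project's own polling code may differ):

    #include <librdkafka/rdkafka.h>

    /* Hypothetical helper: drain one message from an already-subscribed consumer.
     * Returns 1 if a request arrived on the watched topic, 0 otherwise. */
    static int pollRequestOnce(rd_kafka_t *reader, rd_kafka_topic_t *topic_req_a2g)
    {
        rd_kafka_message_t *msg = rd_kafka_consumer_poll(reader, 100 /* ms */);
        if (msg == NULL) {
            return 0;   /* nothing within the timeout */
        }

        int got_request = 0;
        if (msg->err == RD_KAFKA_RESP_ERR_NO_ERROR && msg->rkt == topic_req_a2g) {
            /* msg->payload (msg->len bytes) carries the JSON sent on REQUEST_RM_2_RD */
            got_request = 1;
        }

        rd_kafka_message_destroy(msg);
        return got_request;
    }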
@@ -83,10 +107,10 @@ int main(int argc, char** argv)
     }

     Char *join_msg = NULL;
-    sbufPrint(join_msg, "{\"id\": \"my-machine\"");
+    sbufPrint(join_msg, "{\"node_id\": \"%s\"", node_name);
     sbufPrint(join_msg, "\n}\n");

-    if (!kafkaWrite(kafka.writer, "JOIN_GRUNT_2_ARBITER", "rm_grunt", join_msg)) {
+    if (!kafkaWrite(kafka.writer, "JOIN_RD_2_RM", "resource_daemon", join_msg)) {
         return -1;
     }
@@ -122,7 +146,7 @@ int main(int argc, char** argv)
                 // TODO(naman): Error
             } else {
                 command_found = true;
-                c.txn_id = cJSON_GetObjectItem(root, "id")->valuestring;
+                c.txn_id = cJSON_GetObjectItem(root, "resource_id")->valuestring;
                 c.res.memory = cJSON_GetObjectItem(root, "memory")->valueint;
             }
         }
@@ -144,22 +168,25 @@ int main(int argc, char** argv)
             if (command_found) {
                 Char *output = NULL;

-                sbufPrint(output, "{\n\"id\": \"%s\"", c.txn_id);
+                sbufPrint(output, "{\n\"node_id\": \"%s\"", node_name);
+                sbufPrint(output, ",\n\"resource_id\": \"%s\"", c.txn_id);

                 if (memory >= c.res.memory) {
-                    sbufPrint(output, ",\n\"success\": %d\n", 1);
+                    sbufPrint(output, ",\n\"success\": true\n");
                     // TODO(naman): Add port
                     // sbufPrint(output, ",\n\"port\": %d\n", port);
                 } else {
-                    sbufPrint(output, ",\n\"success\": %d\n", 0);
+                    sbufPrint(output, ",\n\"success\": false\n");
                 }

                 sbufPrint(output, "\n}\n");

-                if (!kafkaWrite(kafka.writer, "RESPONSE_GRUNT_2_ARBITER", "rm_grunt", output)) {
+                if (!kafkaWrite(kafka.writer, "RESPONSE_RD_2_RM", "resource_daemon", output)) {
                     return -1;
                 }
-            } else {
-                // Send a heartbeat message if it is time to do so
+            }
+
+            {
+                // Send a heartbeat message if it is time to do so
                 U64 time_new = timeMilli();
                 U64 time_passed = time_new - time_begin;
                 time_begin = time_new;
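Concatenating the sbufPrint calls above, a successful reply serializes roughly as below. The id values are made-up examples; only the field names and layout follow the code in this hunk.

    /* Illustrative only: bytes written to RESPONSE_RD_2_RM when
     * node_name == "node-0", c.txn_id == "res-42" and enough memory is free. */
    static const char example_response[] =
        "{\n"
        "\"node_id\": \"node-0\",\n"
        "\"resource_id\": \"res-42\",\n"
        "\"success\": true\n"
        "\n}\n";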
@@ -170,12 +197,12 @@ int main(int argc, char** argv)
                 Char *output = NULL;

-                sbufPrint(output, "{\"id\": \"my-machine\"");
+                sbufPrint(output, "{\"node_id\": \"%s\"", node_name);
                 sbufPrint(output, ",\n\"memory\": %d", memory);
                 sbufPrint(output, "\n}\n");

-                if (!kafkaWrite(kafka.writer, "HEARTBEAT_GRUNT_2_ARBITER", "rm_grunt", output)) {
+                if (!kafkaWrite(kafka.writer, "HEARTBEAT_RD_2_RM", "resource_daemon", output)) {
                     return -1;
                 }