SYNERG / xanadu / Commits

Commit 5a17b288
authored Mar 05, 2020 by Naman Dixit
Added timestamps, started implementing the instrumentation dumper
parent b849dcc9
Showing 1 changed file with 72 additions and 56 deletions
resource_system/src/grunt/grunt.c  +72 -56  (view file @ 5a17b288)
@@ -55,17 +55,8 @@ typedef struct Thread_Manager_Command {
         Thread_Manager_Command_DOCKER_DESTROY,
     } kind;

-    Char *id;
-
-    union {
-        struct {
-            Sint placeholder;
-        } docker_create;
-        struct {
-            Sint placeholder;
-        } docker_destroy;
-    };
+    Char *entity_id;
+    Char *resource_id;
 } Thread_Manager_Command;

 typedef struct JSON_Print_Command {
@@ -158,17 +149,23 @@ int main(int argc, char** argv)
         return -1;
     }

-    Char *join_msg = NULL;
-    sbufPrint(join_msg, "{\"node_id\": \"%s\"", node_name);
-    sbufPrint(join_msg, "\n}\n");
+    {
+        Sint timestamp = (Sint)time(0);
+
+        Char *join_msg = NULL;
+        sbufPrint(join_msg, "{\"node_id\": \"%s\"", node_name);
+        sbufPrint(join_msg, ",\n\"timestamp\": %d\n", timestamp);
+        sbufPrint(join_msg, "\n}\n");

-    if (!kafkaWrite(kafka.writer, "JOIN_RD_2_RM", "resource_daemon", join_msg)) {
-        return -1;
+        if (!kafkaWrite(kafka.writer, "JOIN_RD_2_RM", "resource_daemon", join_msg)) {
+            return -1;
+        }
     }

     U64 time_begin = timeMilli();
     U64 time_accum = 0;
+    Sint time_of_launch = (Sint)time(0);

     while (global_keep_running) {
         // NOTE(naman): Get the fd's that are ready
         rd_kafka_message_t *kafka_message_read = rd_kafka_consumer_poll(kafka.reader, 100);
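The join message above now carries a Unix-epoch "timestamp" alongside "node_id", and time_of_launch records when this grunt started so that older messages can be skipped later in the loop. A minimal sketch of what that payload ends up looking like, built with plain snprintf instead of the repository's sbufPrint stretchy-buffer helper (build_join_msg and the fixed-size buffer are illustrative, not project API):

/* Illustrative only: reproduces the JOIN payload format built above, using
 * plain snprintf instead of the repository's sbufPrint helper. The function
 * name build_join_msg and the fixed buffer size are assumptions. */
#include <stdio.h>
#include <time.h>

static int build_join_msg(char *buf, size_t len, const char *node_name)
{
    int timestamp = (int)time(0); /* same Unix-epoch stamp used in the diff */

    /* Concatenation of the three sbufPrint calls above:
     *   {"node_id": "<node>",
     *   "timestamp": <seconds>
     *
     *   }
     */
    return snprintf(buf, len, "{\"node_id\": \"%s\",\n\"timestamp\": %d\n\n}\n",
                    node_name, timestamp);
}

int main(void)
{
    char msg[256];
    build_join_msg(msg, sizeof msg, "node-0"); /* "node-0" is a made-up node id */
    puts(msg);
    return 0;
}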
@@ -177,23 +174,31 @@ int main(int argc, char** argv)
         Command c = {0};

         if (kafka_message_read != NULL) {
+            fprintf(stderr,
+                    "Received message on %s [%d] "
+                    "at offset %" PRId64 ":\n%s\n",
+                    rd_kafka_topic_name(kafka_message_read->rkt),
+                    (int)kafka_message_read->partition,
+                    kafka_message_read->offset,
+                    cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload)));
+
+            char *buffer = (char *)kafka_message_read->payload;
+
+            const Char *json_error = NULL;
+            cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
+
+            if ((cJSON_GetObjectItem(root, "timestamp") == NULL) ||
+                (cJSON_GetObjectItem(root, "timestamp")->valueint) < time_of_launch) {
+                printf("Ignoring : %s\n", buffer);
+                cJSON_Delete(root);
+                rd_kafka_message_destroy(kafka_message_read);
+                continue;
+            }
+
             if (kafka_message_read->err) {
                 /* Consumer error: typically just informational. */
                 fprintf(stderr, "Consumer error: %s\n",
                         rd_kafka_message_errstr(kafka_message_read));
             } else if (kafka_message_read->rkt == topic_req_a2g) {
-                fprintf(stderr,
-                        "Received message on %s [%d] "
-                        "at offset %" PRId64 ":\n%s\n",
-                        rd_kafka_topic_name(kafka_message_read->rkt),
-                        (int)kafka_message_read->partition,
-                        kafka_message_read->offset,
-                        cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload)));
-
-                char *buffer = (char *)kafka_message_read->payload;
-
-                const Char *json_error = NULL;
-                cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
-
                 if (root == NULL) {
                     // TODO(naman): Error
                 } else {
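The hunk above parses each consumed payload once at the top of the loop and skips anything whose "timestamp" field is missing or older than time_of_launch. A self-contained sketch of just that filter, assuming only the cJSON library and time(); is_stale() and the sample payload are illustrative names, not code from the repository:

/* Illustrative sketch of the staleness filter introduced above. Only cJSON
 * (header path may differ per install) and time() are assumed; is_stale()
 * and the sample payload are not from the repository. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <cjson/cJSON.h>

static bool is_stale(const char *payload, int time_of_launch)
{
    cJSON *root = cJSON_Parse(payload);
    if (root == NULL) {
        return true; /* unparseable payloads get ignored too */
    }

    cJSON *ts = cJSON_GetObjectItem(root, "timestamp");
    bool stale = (ts == NULL) || (ts->valueint < time_of_launch);

    cJSON_Delete(root);
    return stale;
}

int main(void)
{
    int time_of_launch = (int)time(0);
    const char *payload = "{\"node_id\": \"node-0\", \"timestamp\": 0}";

    if (is_stale(payload, time_of_launch)) {
        printf("Ignoring : %s\n", payload); /* mirrors the log line in the diff */
    }
    return 0;
}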
@@ -202,37 +207,43 @@ int main(int argc, char** argv)
                     c.res.memory = cJSON_GetObjectItem(root, "memory")->valueint;
                 }
             } else if (kafka_message_read->rkt == topic_rej_a2g) {
-                if (!kafkaWrite(kafka.writer, "JOIN_RD_2_RM", "resource_daemon", join_msg)) {
+                Sint timestamp = (Sint)time(0);
+
+                Char *rejoin_msg = NULL;
+                sbufPrint(rejoin_msg, "{\"node_id\": \"%s\"", node_name);
+                sbufPrint(rejoin_msg, ",\n\"timestamp\": %d\n", timestamp);
+                sbufPrint(rejoin_msg, "\n}\n");
+
+                if (!kafkaWrite(kafka.writer, "JOIN_RD_2_RM", "resource_daemon", rejoin_msg)) {
                     return -1;
                 }
             } else if (kafka_message_read->rkt == topic_log) {
-                fprintf(stderr,
-                        "Received message on %s [%d] "
-                        "at offset %" PRId64 ":\n%s\n",
-                        rd_kafka_topic_name(kafka_message_read->rkt),
-                        (int)kafka_message_read->partition,
-                        kafka_message_read->offset,
-                        cJSON_Print(cJSON_Parse((char *)kafka_message_read->payload)));
-
-                char *buffer = (char *)kafka_message_read->payload;
-
-                const Char *json_error = NULL;
-                cJSON *root = cJSON_ParseWithOpts(buffer, &json_error, true);
-
-                if (root == NULL) {
-                    // TODO(naman): Error
-                } else {
-                    Char *node_id = cJSON_GetObjectItem(root, "node_id")->valuestring;
-
-                    if (strequal(node_id, node_name)) {
-                        // FIXME(naman): Fix this placeholder
-                        Thread_Manager_Command tmc = {0};
-                        tmCommandEnqueue(tmc);
-
-                        /* "resource_id": "logical-entity-id", */
-                        /* "function_id": "unique-function-id", */
-                        /* "timestamp" : "iso-8601-timestamp", */
-                        /* "reason": "deployment"/"termination", */
-                        /* "status": true/false // Only valid if reason==deployment; */
-                    }
-                }
+                cJSON *msg_type_json = cJSON_GetObjectItem(root, "message_type");
+
+                if (msg_type_json == NULL) {
+                    if (strequal(msg_type_json->valuestring, "deployment_launch")) {
+                        Char *node_id = cJSON_GetObjectItem(root, "node_id")->valuestring;
+
+                        if (strequal(node_id, node_name)) {
+                            Char *resource_id = cJSON_GetObjectItem(root, "resource_id")->valuestring;
+                            Char *entity_id = cJSON_GetObjectItem(root, "entity_id")->valuestring;
+                            Char *entity_type = cJSON_GetObjectItem(root, "entity_type")->valuestring;
+
+                            Thread_Manager_Command tmc = {.entity_id = strdup(entity_id),
+                                                          .resource_id = strdup(resource_id)};
+                            B32 add_command = false;
+
+                            if (strequal(entity_type, "docker")) {
+                                tmc.kind = Thread_Manager_Command_DOCKER_CREATE;
+                                add_command = true;
+                            }
+
+                            if (add_command) {
+                                tmCommandEnqueue(tmc);
+                            } else {
+                                free(tmc.entity_id);
+                                free(tmc.resource_id);
+                            }
+                        }
+                    }
+                }
             }

             rd_kafka_message_destroy(kafka_message_read);
         }
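In the rewritten topic_log handler above, the strings copied into the Thread_Manager_Command with strdup() are owned by the command: they are handed to tmCommandEnqueue() when the entity type is recognised, and freed on the spot otherwise. A stripped-down sketch of that ownership rule, with stand-in types and a stub queue in place of the repository's definitions:

/* Stand-alone sketch of the strdup-then-enqueue-or-free ownership rule used
 * in the topic_log handler. Command_Sketch and enqueue_stub are stand-ins,
 * not the repository's Thread_Manager_Command or tmCommandEnqueue. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    int   kind;        /* stands in for Thread_Manager_Command_DOCKER_CREATE etc. */
    char *entity_id;   /* owned: strdup'ed copy of the parsed JSON field */
    char *resource_id; /* owned: strdup'ed copy of the parsed JSON field */
} Command_Sketch;

static void enqueue_stub(Command_Sketch c)
{
    /* A real queue would take ownership; this stub consumes and frees here. */
    printf("enqueued entity=%s resource=%s\n", c.entity_id, c.resource_id);
    free(c.entity_id);
    free(c.resource_id);
}

int main(void)
{
    const char *entity_type = "docker"; /* made-up input */

    Command_Sketch cmd = {.entity_id = strdup("entity-42"),
                          .resource_id = strdup("resource-7")};

    bool add_command = false;
    if (strcmp(entity_type, "docker") == 0) {
        cmd.kind = 1; /* "create container" in this sketch */
        add_command = true;
    }

    if (add_command) {
        enqueue_stub(cmd);   /* ownership passes to the queue */
    } else {
        free(cmd.entity_id); /* nothing consumed the copies: free them */
        free(cmd.resource_id);
    }
    return 0;
}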
@@ -252,8 +263,11 @@ int main(int argc, char** argv)
         if (command_found) {
             Char *output = NULL;

+            Sint timestamp = (Sint)time(0);
+
             sbufPrint(output, "{\n\"node_id\": \"%s\"", node_name);
             sbufPrint(output, ",\n\"resource_id\": \"%s\"", c.txn_id);
+            sbufPrint(output, ",\n\"timestamp\": %d\n", timestamp);

             if (memory >= c.res.memory) {
                 sbufPrint(output, ",\n\"success\": true\n");
@@ -290,8 +304,10 @@ int main(int argc, char** argv)
             time_accum = 0;

             Char *output = NULL;
+            Sint timestamp = (Sint)time(0);

             sbufPrint(output, "{\"node_id\": \"%s\"", node_name);
+            sbufPrint(output, ",\n\"timestamp\": %d\n", timestamp);
             sbufPrint(output, ",\n\"memory\": %d", memory);
             sbufPrint(output, "\n}\n");