Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
X
xanadu
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Analytics
Analytics
Repository
Value Stream
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Commits
Open sidebar
SYNERG
xanadu
Commits
e349248d
Commit
e349248d
authored
Mar 07, 2020
by
Nilanjan Daw
Browse files
Options
Browse Files
Download
Plain Diff
Merge remote-tracking branch 'origin/master'
parents
936b0965
8172c839
Changes
14
Hide whitespace changes
Inline
Side-by-side
Showing
14 changed files
with
512 additions
and
232 deletions
+512
-232
.gitmodules
.gitmodules
+3
-0
config.ini
config.ini
+11
-0
grunt
grunt
+0
-0
readme.md
readme.md
+20
-14
resource_system/src/arbiter/arbiter.c
resource_system/src/arbiter/arbiter.c
+87
-97
resource_system/src/arbiter/command.c
resource_system/src/arbiter/command.c
+34
-0
resource_system/src/arbiter/conf.c
resource_system/src/arbiter/conf.c
+48
-0
resource_system/src/arbiter/grunt_manager.c
resource_system/src/arbiter/grunt_manager.c
+58
-0
resource_system/src/common/inih
resource_system/src/common/inih
+1
-0
resource_system/src/grunt/conf.c
resource_system/src/grunt/conf.c
+45
-0
resource_system/src/grunt/grunt.c
resource_system/src/grunt/grunt.c
+179
-112
resource_system/src/grunt/instrument_docker.c
resource_system/src/grunt/instrument_docker.c
+12
-2
resource_system/src/grunt/thread_manager.c
resource_system/src/grunt/thread_manager.c
+11
-7
resource_system/src/test/test.c
resource_system/src/test/test.c
+3
-0
No files found.
.gitmodules
View file @
e349248d
...
@@ -4,3 +4,6 @@
...
@@ -4,3 +4,6 @@
[submodule "resource_manager/src/common/nlib"]
[submodule "resource_manager/src/common/nlib"]
path = resource_system/src/common/nlib
path = resource_system/src/common/nlib
url = https://github.com/namandixit/nlib
url = https://github.com/namandixit/nlib
[submodule "resource_system/src/common/inih"]
path = resource_system/src/common/inih
url = https://github.com/benhoyt/inih
config.ini
0 → 100644
View file @
e349248d
[Kafka]
Address
=
10.129.6.5:9092
[Arbiter]
MessageReadGap
=
10
GruntTimeToDie
=
10000
GruntResponseWaitTime
=
100
[Grunt]
MessageReadGap
=
10
HeartbeatGap
=
1000
\ No newline at end of file
grunt
deleted
100755 → 0
View file @
936b0965
File deleted
readme.md
View file @
e349248d
...
@@ -23,7 +23,7 @@ The Dispatch Manager (DM) sends a request to the Resource Manager (RM), detailin
...
@@ -23,7 +23,7 @@ The Dispatch Manager (DM) sends a request to the Resource Manager (RM), detailin
```
javascript
```
javascript
{
{
"
resource_id
"
:
"
unique-transaction-id
"
,
"
resource_id
"
:
"
unique-transaction-id
"
,
"
timestamp
"
:
"
iso-8601-
timestamp
"
,
"
timestamp
"
:
"
time(2) compatible
timestamp
"
,
"
memory
"
:
1024
,
// in MiB
"
memory
"
:
1024
,
// in MiB
...
// Any other resources
...
// Any other resources
}
}
...
@@ -34,7 +34,7 @@ Format:
...
@@ -34,7 +34,7 @@ Format:
```
javascript
```
javascript
{
{
"
resource_id
"
:
"
unique-transaction-id
"
,
"
resource_id
"
:
"
unique-transaction-id
"
,
"
timestamp
"
:
"
iso-8601-
timestamp
"
,
"
timestamp
"
:
"
time(2) compatible
timestamp
"
,
"
grunts
"
:
[
"
grunts
"
:
[
{
node_id
:
some
unique
ID
,
port
:
port
address
},
...
{
node_id
:
some
unique
ID
,
port
:
port
address
},
...
]
// List of machine IDs
]
// List of machine IDs
...
@@ -44,10 +44,13 @@ Format:
...
@@ -44,10 +44,13 @@ Format:
Once the runtime entity has been launched (or the launch has failed), the Executor sends back a status message on the
`LOG_COMMON`
topic.
Once the runtime entity has been launched (or the launch has failed), the Executor sends back a status message on the
`LOG_COMMON`
topic.
```
javascript
```
javascript
{
{
"
node_id
"
:
"
unique-machine-id
"
,
"
message_type
"
:
"
deployment_launch
"
,
"
node_id
"
:
"
unique-machine-id
"
,
"
entity_id
"
:
"
handle for the actual container/VM/etc.
"
,
"
entity_type
"
:
"
docker/libvirt/etc.
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
timestamp
"
:
"
iso-8601-
timestamp
"
,
"
timestamp
"
:
"
time(2) compatible
timestamp
"
,
"
reason
"
:
"
deployment
"
/
"
termination
"
"
reason
"
:
"
deployment
"
/
"
termination
"
"
status
"
:
true
/
false
// Only valid if reason==deployment
"
status
"
:
true
/
false
// Only valid if reason==deployment
}
}
...
@@ -57,28 +60,31 @@ Instrumentation data is also sent on the `LOG_COMMON` topic. This data is sent f
...
@@ -57,28 +60,31 @@ Instrumentation data is also sent on the `LOG_COMMON` topic. This data is sent f
and whoever needs the data is allowed to read it. Each message is required to have at least three fields:
`node_id`
,
`resource_id`
and
`function_id`
.
and whoever needs the data is allowed to read it. Each message is required to have at least three fields:
`node_id`
,
`resource_id`
and
`function_id`
.
```
javascript
```
javascript
{
// Example message from Executor
{
// Example message from Executor
"
node_id
"
:
"
unique-machine-id
"
,
"
message_type
"
:
"
instrumentation
"
,
"
node_id
"
:
"
unique-machine-id
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
timestamp
"
:
"
iso-8601-
timestamp
"
,
"
timestamp
"
:
"
time(2) compatible
timestamp
"
,
"
cpu
"
:
343
,
// in MHz
"
cpu
"
:
343
,
// in MHz
"
memory
"
:
534
,
// in MiB
"
memory
"
:
534
,
// in MiB
"
network
"
:
234
// in KBps
"
network
"
:
234
// in KBps
}
}
{
// Example message from reverse proxy
{
// Example message from reverse proxy
"
node_id
"
:
"
unique-machine-id
"
,
"
message_type
"
:
"
instrumentation
"
,
"
node_id
"
:
"
unique-machine-id
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
timestamp
"
:
"
iso-8601-
timestamp
"
,
"
timestamp
"
:
"
time(2) compatible
timestamp
"
,
"
average_fn_time
"
:
23
// in ms
"
average_fn_time
"
:
23
// in ms
}
}
{
// Example message from dispatch manager
{
// Example message from dispatch manager
"
node_id
"
:
"
unique-machine-id
"
,
"
message_type
"
:
"
instrumentation
"
,
"
node_id
"
:
"
unique-machine-id
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
resource_id
"
:
"
logical-entity-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
function_id
"
:
"
unique-function-id
"
,
"
timestamp
"
:
"
iso-8601-
timestamp
"
,
"
timestamp
"
:
"
time(2) compatible
timestamp
"
,
"
coldstart_time
"
"
coldstart_time
"
}
}
```
```
...
@@ -223,7 +229,7 @@ resources being tracked by RDs on each machine. This data is cached by the RM.
...
@@ -223,7 +229,7 @@ resources being tracked by RDs on each machine. This data is cached by the RM.
```
javascript
```
javascript
{
{
"node_id": "unique-machine-id",
"node_id": "unique-machine-id",
"timestamp" : "
iso-8601-
timestamp",
"timestamp" : "
time(2) compatible
timestamp",
"memory": 1024, // in MiB
"memory": 1024, // in MiB
... // Any other resources
... // Any other resources
}
}
...
@@ -246,7 +252,7 @@ DM on topic `RESPONSE_RM_2_DM`.
...
@@ -246,7 +252,7 @@ DM on topic `RESPONSE_RM_2_DM`.
```
javascript
```
javascript
{
{
"resource_id": "unique-transaction-id",
"resource_id": "unique-transaction-id",
"timestamp" : "
iso-8601-
timestamp",
"timestamp" : "
time(2) compatible
timestamp",
// "port": 2343 --- NOT IMPLEMENTED YET
// "port": 2343 --- NOT IMPLEMENTED YET
"nodes": ["a", "b", ...] // List of unique machine IDs
"nodes": ["a", "b", ...] // List of unique machine IDs
}
}
...
@@ -258,7 +264,7 @@ Format:
...
@@ -258,7 +264,7 @@ Format:
```
javascript
```
javascript
{
{
"resource_id": "unique-transaction-id",
"resource_id": "unique-transaction-id",
"timestamp" : "
iso-8601-
timestamp",
"timestamp" : "
time(2) compatible
timestamp",
"memory": 1024, // in MiB
"memory": 1024, // in MiB
... // Any other resources
... // Any other resources
}
}
...
@@ -269,7 +275,7 @@ The RDs receive this message and send back whether or not they satisfy the const
...
@@ -269,7 +275,7 @@ The RDs receive this message and send back whether or not they satisfy the const
{
{
"node_id": "unique-machine-id",
"node_id": "unique-machine-id",
"resource_id": "unique-transaction-id",
"resource_id": "unique-transaction-id",
"timestamp" : "
iso-8601-
timestamp",
"timestamp" : "
time(2) compatible
timestamp",
"success" : 0/1 // 0 = fail, 1 = success
"success" : 0/1 // 0 = fail, 1 = success
}
}
```
```
...
...
resource_system/src/arbiter/arbiter.c
View file @
e349248d
...
@@ -4,6 +4,7 @@
...
@@ -4,6 +4,7 @@
*/
*/
#define logMessage(s, ...) printf(s "\n", ##__VA_ARGS__)
#define logMessage(s, ...) printf(s "\n", ##__VA_ARGS__)
#define logError(s, ...) fprintf(stderr, s "\n", ##__VA_ARGS__)
#include "nlib/nlib.h"
#include "nlib/nlib.h"
...
@@ -21,83 +22,11 @@
...
@@ -21,83 +22,11 @@
#include <signal.h>
#include <signal.h>
#include <librdkafka/rdkafka.h>
#include <librdkafka/rdkafka.h>
typedef
struct
Grunt
{
#include "kafka.h"
Char
*
id
;
#include "time.c"
Sint
memory
;
#include "conf.c"
B32
rejoin_asked
;
#include "command.c"
Sint
time_to_die
;
// in ms
#include "grunt_manager.c"
}
Grunt
;
typedef
struct
Grunt_Survey
{
Char
**
grunt_ids
;
U16
*
ports
;
U64
milli_passed
;
U64
milli_last
;
Char
*
resource_id
;
}
Grunt_Survey
;
typedef
struct
Command
{
enum
Command_Kind
{
Command_NONE
,
Command_RESPONSE_ARBITER_2_DM
,
Command_REQUEST_ARBITER_2_GRUNT
,
Command_REJOIN_ARBITER_2_GRUNT
,
}
kind
;
Char
*
resource_id
;
union
{
struct
{
Char
**
grunt_ids
;
}
res_a2d
;
struct
{
Sint
memory
;
}
req_a2g
;
struct
{
Char
*
grunt_id
;
}
rejoin_a2g
;
};
}
Command
;
typedef
struct
Grunt_Tracker
{
Grunt
*
grunts
;
Size
*
free_list
;
Hash_Table
map
;
}
Grunt_Tracker
;
internal_function
void
gruntTrackBegin
(
Grunt_Tracker
*
t
,
Grunt
g
)
{
if
(
t
->
grunts
==
NULL
)
{
t
->
map
=
htCreate
(
0
);
sbufAdd
(
t
->
grunts
,
(
Grunt
){
0
});
// SInce 0 index out of hash table will be invalid
}
Size
insertion_index
=
0
;
if
(
sbufElemin
(
t
->
free_list
)
>
0
)
{
t
->
grunts
[
t
->
free_list
[
0
]]
=
g
;
insertion_index
=
t
->
free_list
[
0
];
sbufUnsortedRemove
(
t
->
free_list
,
0
);
}
else
{
sbufAdd
(
t
->
grunts
,
g
);
insertion_index
=
sbufElemin
(
t
->
grunts
)
-
1
;
}
htInsert
(
&
t
->
map
,
hashString
(
g
.
id
),
insertion_index
);
}
internal_function
void
gruntTrackEnd
(
Grunt_Tracker
*
t
,
Char
*
grunt_id
)
{
Size
index
=
htLookup
(
&
t
->
map
,
hashString
(
grunt_id
));
sbufAdd
(
t
->
free_list
,
index
);
free
(
t
->
grunts
[
index
].
id
);
t
->
grunts
[
index
]
=
(
Grunt
){
0
};
htRemove
(
&
t
->
map
,
index
);
}
# if defined(COMPILER_CLANG)
# if defined(COMPILER_CLANG)
# pragma clang diagnostic push
# pragma clang diagnostic push
...
@@ -110,8 +39,16 @@ void gruntTrackEnd (Grunt_Tracker *t, Char *grunt_id)
...
@@ -110,8 +39,16 @@ void gruntTrackEnd (Grunt_Tracker *t, Char *grunt_id)
# pragma clang diagnostic pop
# pragma clang diagnostic pop
# endif
# endif
#include "kafka.h"
# if defined(COMPILER_CLANG)
#include "time.c"
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wreserved-id-macro"
# pragma clang diagnostic ignored "-Wcast-qual"
# endif
#include "inih/ini.h"
#include "inih/ini.c"
# if defined(COMPILER_CLANG)
# pragma clang diagnostic pop
# endif
global_variable
volatile
sig_atomic_t
global_keep_running
=
1
;
global_variable
volatile
sig_atomic_t
global_keep_running
=
1
;
...
@@ -129,6 +66,20 @@ Sint main (Sint argc, Char *argv[])
...
@@ -129,6 +66,20 @@ Sint main (Sint argc, Char *argv[])
signal
(
SIGINT
,
signalHandlerSIGINT
);
signal
(
SIGINT
,
signalHandlerSIGINT
);
Configuration
conf
=
{
0
};
{
// Default config values
conf
.
kafka_address
=
"10.129.6.5:9092"
;
conf
.
message_read_gap
=
10
;
conf
.
grunt_time_to_die
=
10000
;
conf
.
grunt_response_wait_time
=
100
;
}
if
(
ini_parse
(
"config.ini"
,
confCallback
,
&
conf
)
<
0
)
{
printf
(
"Can't load 'config.ini'
\n
"
);
return
-
1
;
}
Command
*
commands
=
NULL
;
Command
*
commands
=
NULL
;
Grunt_Tracker
gt
=
{
0
};
Grunt_Tracker
gt
=
{
0
};
...
@@ -136,7 +87,7 @@ Sint main (Sint argc, Char *argv[])
...
@@ -136,7 +87,7 @@ Sint main (Sint argc, Char *argv[])
Kafka
kafka
=
{
0
};
Kafka
kafka
=
{
0
};
kafkaCreateWriter
(
&
kafka
,
"10.129.6.5:9092"
);
kafkaCreateWriter
(
&
kafka
,
conf
.
kafka_address
);
#define CREATE_TOPIC(s) \
#define CREATE_TOPIC(s) \
do { \
do { \
...
@@ -152,11 +103,12 @@ Sint main (Sint argc, Char *argv[])
...
@@ -152,11 +103,12 @@ Sint main (Sint argc, Char *argv[])
CREATE_TOPIC
(
"RESPONSE_RD_2_RM"
);
//
CREATE_TOPIC
(
"RESPONSE_RD_2_RM"
);
//
CREATE_TOPIC
(
"JOIN_RD_2_RM"
);
//
CREATE_TOPIC
(
"JOIN_RD_2_RM"
);
//
CREATE_TOPIC
(
"HEARTBEAT_RD_2_RM"
);
//
CREATE_TOPIC
(
"HEARTBEAT_RD_2_RM"
);
//
CREATE_TOPIC
(
"JOIN_ACK_RM_2_RD"
);
//
CREATE_TOPIC
(
"REJOIN_RM_2_RD"
);
//
CREATE_TOPIC
(
"REJOIN_RM_2_RD"
);
//
CREATE_TOPIC
(
"LOG_COMMON"
);
//
CREATE_TOPIC
(
"LOG_COMMON"
);
//
#undef CREATE_TOPIC
#undef CREATE_TOPIC
kafkaCreateReader
(
&
kafka
,
"10.129.6.5:9092"
);
kafkaCreateReader
(
&
kafka
,
conf
.
kafka_address
);
rd_kafka_topic_partition_list_t
*
kafka_reader_topics
=
rd_kafka_topic_partition_list_new
(
1
);
rd_kafka_topic_partition_list_t
*
kafka_reader_topics
=
rd_kafka_topic_partition_list_new
(
1
);
...
@@ -173,31 +125,42 @@ Sint main (Sint argc, Char *argv[])
...
@@ -173,31 +125,42 @@ Sint main (Sint argc, Char *argv[])
rd_kafka_resp_err_t
kafka_reader_topics_err
=
rd_kafka_subscribe
(
kafka
.
reader
,
rd_kafka_resp_err_t
kafka_reader_topics_err
=
rd_kafka_subscribe
(
kafka
.
reader
,
kafka_reader_topics
);
kafka_reader_topics
);
logMessage
(
"Subscription finished
\n
"
);
fflush
(
stdout
);
rd_kafka_topic_partition_list_destroy
(
kafka_reader_topics
);
rd_kafka_topic_partition_list_destroy
(
kafka_reader_topics
);
if
(
kafka_reader_topics_err
)
{
if
(
kafka_reader_topics_err
)
{
fprintf
(
stderr
,
"Subscribe failed: %s
\n
"
,
logError
(
"Subscribe failed: %s
\n
"
,
rd_kafka_err2str
(
kafka_reader_topics_err
));
rd_kafka_err2str
(
kafka_reader_topics_err
));
rd_kafka_destroy
(
kafka
.
reader
);
rd_kafka_destroy
(
kafka
.
reader
);
return
-
1
;
return
-
1
;
}
}
Sint
time_of_launch
=
(
Sint
)
time
(
0
);
U64
time_passed_last
=
timeMilli
();
while
(
global_keep_running
)
{
while
(
global_keep_running
)
{
// NOTE(naman): Get the fd's that are ready
// NOTE(naman): Get the fd's that are ready
rd_kafka_message_t
*
kafka_message_read
=
rd_kafka_consumer_poll
(
kafka
.
reader
,
100
);
rd_kafka_message_t
*
kafka_message_read
=
rd_kafka_consumer_poll
(
kafka
.
reader
,
conf
.
message_read_gap
);
U64
time_passed_now
=
timeMilli
();
U64
time_passed
=
time_passed_now
-
time_passed_last
;
time_passed_last
=
time_passed_now
;
for
(
Size
j
=
0
;
j
<
gt
.
map
.
slot_count
;
j
++
)
{
for
(
Size
j
=
0
;
j
<
gt
.
map
.
slot_count
;
j
++
)
{
if
(
gt
.
map
.
values
[
j
]
!=
0
)
{
if
(
gt
.
map
.
keys
[
j
]
!=
0
)
{
Grunt
g
=
gt
.
grunts
[
gt
.
map
.
values
[
j
]];
gt
.
grunts
[
gt
.
map
.
values
[
j
]].
time_to_die
-=
time_passed
;
g
.
time_to_die
-=
100
;
}
}
}
}
if
(
kafka_message_read
!=
NULL
)
{
if
(
kafka_message_read
!=
NULL
)
{
if
(
kafka_message_read
->
err
)
{
if
(
kafka_message_read
->
err
)
{
/* Consumer error: typically just informational. */
/* Consumer error: typically just informational. */
fprintf
(
stderr
,
"Consumer error: %s
\n
"
,
logError
(
"Consumer error: %s
\n
"
,
rd_kafka_message_errstr
(
kafka_message_read
));
rd_kafka_message_errstr
(
kafka_message_read
));
}
else
{
}
else
{
/* Proper message */
/* Proper message */
/* fprintf(stderr, */
/* fprintf(stderr, */
...
@@ -210,13 +173,20 @@ Sint main (Sint argc, Char *argv[])
...
@@ -210,13 +173,20 @@ Sint main (Sint argc, Char *argv[])
const
char
*
json_error
=
NULL
;
const
char
*
json_error
=
NULL
;
cJSON
*
root
=
cJSON_ParseWithOpts
(
kafka_message_read
->
payload
,
&
json_error
,
true
);
cJSON
*
root
=
cJSON_ParseWithOpts
(
kafka_message_read
->
payload
,
&
json_error
,
true
);
if
((
cJSON_GetObjectItem
(
root
,
"timestamp"
)
==
NULL
)
||
(
cJSON_GetObjectItem
(
root
,
"timestamp"
)
->
valueint
)
<
time_of_launch
)
{
logMessage
(
"Ignoring : %s
\n
"
,
kafka_message_read
->
payload
);
cJSON_Delete
(
root
);
rd_kafka_message_destroy
(
kafka_message_read
);
continue
;
}
if
(
kafka_message_read
->
rkt
==
topic_req_dm2a
)
{
if
(
kafka_message_read
->
rkt
==
topic_req_dm2a
)
{
Command
c
=
{.
kind
=
Command_RESPONSE_ARBITER_2_DM
};
Command
c
=
{.
kind
=
Command_RESPONSE_ARBITER_2_DM
};
c
.
resource_id
=
strdup
(
cJSON_GetObjectItem
(
root
,
"resource_id"
)
->
valuestring
);
c
.
resource_id
=
strdup
(
cJSON_GetObjectItem
(
root
,
"resource_id"
)
->
valuestring
);
// TODO(naman): Add any new resource fields here
// TODO(naman): Add any new resource fields here
Sint
memory
=
cJSON_GetObjectItem
(
root
,
"memory"
)
->
valueint
;
Sint
memory
=
cJSON_GetObjectItem
(
root
,
"memory"
)
->
valueint
;
logMessage
(
"Request DM2RM:
\t
id: %s = ([memory] = %d)"
,
logMessage
(
"Request DM2RM:
\t
id: %s = ([memory] = %d)"
,
c
.
resource_id
,
memory
);
c
.
resource_id
,
memory
);
...
@@ -245,20 +215,24 @@ Sint main (Sint argc, Char *argv[])
...
@@ -245,20 +215,24 @@ Sint main (Sint argc, Char *argv[])
}
}
}
else
if
(
kafka_message_read
->
rkt
==
topic_join_g2a
)
{
}
else
if
(
kafka_message_read
->
rkt
==
topic_join_g2a
)
{
Char
*
id
=
strdup
(
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
);
Char
*
id
=
strdup
(
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
);
Grunt
grunt
=
{.
id
=
id
,
.
time_to_die
=
1000
};
Grunt
grunt
=
{.
id
=
strdup
(
id
),
.
time_to_die
=
conf
.
grunt_time_to_die
};
logMessage
(
"Join G2A:
\t
id: %s"
,
id
);
logMessage
(
"Join G2A:
\t
id: %s"
,
id
);
if
(
htLookup
(
&
gt
.
map
,
hashString
(
id
))
==
0
)
{
if
(
htLookup
(
&
gt
.
map
,
hashString
(
id
))
==
0
)
{
gruntTrackBegin
(
&
gt
,
grunt
);
gruntTrackBegin
(
&
gt
,
grunt
);
}
}
Command
c
=
{.
kind
=
Command_JOIN_ACK_ARBITER_2_GRUNT
,
.
join_ack_a2g
.
grunt_id
=
strdup
(
id
)};
sbufAdd
(
commands
,
c
);
}
else
if
(
kafka_message_read
->
rkt
==
topic_beat_g2a
)
{
}
else
if
(
kafka_message_read
->
rkt
==
topic_beat_g2a
)
{
Char
*
id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
Char
*
id
=
strdup
(
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
)
;
U64
index
=
htLookup
(
&
gt
.
map
,
hashString
(
id
));
U64
index
=
htLookup
(
&
gt
.
map
,
hashString
(
id
));
if
(
index
!=
0
)
{
// Prevent any left over message
if
(
index
!=
0
)
{
// Prevent any left over message
// TODO(naman): Add any new resource fields here
// TODO(naman): Add any new resource fields here
gt
.
grunts
[
index
].
time_to_die
=
1000
;
gt
.
grunts
[
index
].
time_to_die
=
conf
.
grunt_time_to_die
;
gt
.
grunts
[
index
].
memory
=
cJSON_GetObjectItem
(
root
,
"memory"
)
->
valueint
;
gt
.
grunts
[
index
].
memory
=
cJSON_GetObjectItem
(
root
,
"memory"
)
->
valueint
;
logMessage
(
"Beat G2A:
\t
id: %s (Memory %d)"
,
id
,
gt
.
grunts
[
index
].
memory
);
logMessage
(
"Beat G2A:
\t
id: %s (Memory %d)"
,
id
,
gt
.
grunts
[
index
].
memory
);
...
@@ -269,7 +243,7 @@ Sint main (Sint argc, Char *argv[])
...
@@ -269,7 +243,7 @@ Sint main (Sint argc, Char *argv[])
Command
c
=
{.
kind
=
Command_REJOIN_ARBITER_2_GRUNT
,
Command
c
=
{.
kind
=
Command_REJOIN_ARBITER_2_GRUNT
,
.
rejoin_a2g
.
grunt_id
=
id
};
.
rejoin_a2g
.
grunt_id
=
id
};
sbufAdd
(
commands
,
c
);
sbufAdd
(
commands
,
c
);
logMessage
(
"Beat G2A:
\t
id: %s (UNNOWN)"
,
id
);
logMessage
(
"Beat G2A:
\t
id: %s (UNNOWN)"
,
c
.
rejoin_a2g
.
grunt_
id
);
}
}
}
else
if
(
kafka_message_read
->
rkt
==
topic_res_g2a
)
{
}
else
if
(
kafka_message_read
->
rkt
==
topic_res_g2a
)
{
Char
*
node_id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
Char
*
node_id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
...
@@ -292,7 +266,7 @@ Sint main (Sint argc, Char *argv[])
...
@@ -292,7 +266,7 @@ Sint main (Sint argc, Char *argv[])
Char
*
node_id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
Char
*
node_id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
Char
*
resource_id
=
cJSON_GetObjectItem
(
root
,
"resource_id"
)
->
valuestring
;
Char
*
resource_id
=
cJSON_GetObjectItem
(
root
,
"resource_id"
)
->
valuestring
;
Char
*
function_id
=
cJSON_GetObjectItem
(
root
,
"function_id"
)
->
valuestring
;
Char
*
function_id
=
cJSON_GetObjectItem
(
root
,
"function_id"
)
->
valuestring
;
printf
(
"
%s
\n
"
,
cJSON_Print
(
cJSON_Parse
((
char
*
)
kafka_message_read
->
payload
)));
logMessage
(
"Log:
%s
\n
"
,
cJSON_Print
(
cJSON_Parse
((
char
*
)
kafka_message_read
->
payload
)));
unused_variable
(
node_id
);
unused_variable
(
node_id
);
unused_variable
(
resource_id
);
unused_variable
(
resource_id
);
unused_variable
(
function_id
);
unused_variable
(
function_id
);
...
@@ -311,6 +285,7 @@ Sint main (Sint argc, Char *argv[])
...
@@ -311,6 +285,7 @@ Sint main (Sint argc, Char *argv[])
Size
index
=
gt
.
map
.
values
[
j
];
Size
index
=
gt
.
map
.
values
[
j
];
Grunt
g
=
gt
.
grunts
[
index
];
Grunt
g
=
gt
.
grunts
[
index
];
if
(
g
.
time_to_die
<=
0
)
{
if
(
g
.
time_to_die
<=
0
)
{
logMessage
(
"Deleting grunt: %s
\n
"
,
g
.
id
);
gruntTrackEnd
(
&
gt
,
g
.
id
);
gruntTrackEnd
(
&
gt
,
g
.
id
);
}
}
}
}
...
@@ -323,7 +298,7 @@ Sint main (Sint argc, Char *argv[])
...
@@ -323,7 +298,7 @@ Sint main (Sint argc, Char *argv[])
U64
milli_new
=
timeMilli
();
U64
milli_new
=
timeMilli
();
gs
->
milli_passed
+=
milli_new
-
gs
->
milli_last
;
gs
->
milli_passed
+=
milli_new
-
gs
->
milli_last
;
gs
->
milli_last
=
milli_new
;
gs
->
milli_last
=
milli_new
;
if
(
gs
->
milli_passed
>=
1000
)
{
if
(
gs
->
milli_passed
>=
conf
.
grunt_response_wait_time
)
{
Command
c
=
{.
kind
=
Command_RESPONSE_ARBITER_2_DM
};
Command
c
=
{.
kind
=
Command_RESPONSE_ARBITER_2_DM
};
c
.
resource_id
=
gs
->
resource_id
;
c
.
resource_id
=
gs
->
resource_id
;
...
@@ -347,6 +322,8 @@ Sint main (Sint argc, Char *argv[])
...
@@ -347,6 +322,8 @@ Sint main (Sint argc, Char *argv[])
for
(
Size
j
=
0
;
j
<
sbufElemin
(
commands
);
j
++
)
{
for
(
Size
j
=
0
;
j
<
sbufElemin
(
commands
);
j
++
)
{
Command
c
=
commands
[
j
];
Command
c
=
commands
[
j
];
Sint
timestamp
=
(
Sint
)
time
(
0
);
Char
*
output
=
NULL
;
Char
*
output
=
NULL
;
Char
*
topic
=
NULL
;
Char
*
topic
=
NULL
;
...
@@ -354,12 +331,14 @@ Sint main (Sint argc, Char *argv[])
...
@@ -354,12 +331,14 @@ Sint main (Sint argc, Char *argv[])
topic
=
"REQUEST_RM_2_RD"
;
topic
=
"REQUEST_RM_2_RD"
;
sbufPrint
(
output
,
"{
\n\"
resource_id
\"
:
\"
%s
\"
"
,
c
.
resource_id
);
sbufPrint
(
output
,
"{
\n\"
resource_id
\"
:
\"
%s
\"
"
,
c
.
resource_id
);
sbufPrint
(
output
,
",
\n\"
memory
\"
: %d
\n
"
,
c
.
req_a2g
.
memory
);
sbufPrint
(
output
,
",
\n\"
memory
\"
: %d"
,
c
.
req_a2g
.
memory
);
sbufPrint
(
output
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
sbufPrint
(
output
,
"
\n
}
\n
"
);
sbufPrint
(
output
,
"
\n
}
\n
"
);
}
else
if
(
c
.
kind
==
Command_RESPONSE_ARBITER_2_DM
)
{
}
else
if
(
c
.
kind
==
Command_RESPONSE_ARBITER_2_DM
)
{
topic
=
"RESPONSE_RM_2_DM"
;
topic
=
"RESPONSE_RM_2_DM"
;
sbufPrint
(
output
,
"{
\n\"
resource_id
\"
:
\"
%s
\"
"
,
c
.
resource_id
);
sbufPrint
(
output
,
"{
\n\"
resource_id
\"
:
\"
%s
\"
"
,
c
.
resource_id
);
sbufPrint
(
output
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
sbufPrint
(
output
,
",
\n\"
nodes
\"
: ["
);
sbufPrint
(
output
,
",
\n\"
nodes
\"
: ["
);
for
(
Size
k
=
0
;
k
<
sbufElemin
(
c
.
res_a2d
.
grunt_ids
);
k
++
)
{
for
(
Size
k
=
0
;
k
<
sbufElemin
(
c
.
res_a2d
.
grunt_ids
);
k
++
)
{
sbufPrint
(
output
,
"
\"
%s
\"
"
,
c
.
res_a2d
.
grunt_ids
[
k
]);
sbufPrint
(
output
,
"
\"
%s
\"
"
,
c
.
res_a2d
.
grunt_ids
[
k
]);
...
@@ -369,11 +348,22 @@ Sint main (Sint argc, Char *argv[])
...
@@ -369,11 +348,22 @@ Sint main (Sint argc, Char *argv[])
}
}
sbufPrint
(
output
,
"]"
);
sbufPrint
(
output
,
"]"
);
sbufPrint
(
output
,
"
\n
}"
);
sbufPrint
(
output
,
"
\n
}"
);
}
else
if
(
c
.
kind
==
Command_JOIN_ACK_ARBITER_2_GRUNT
)
{
topic
=
"JOIN_ACK_RM_2_RD"
;
sbufPrint
(
output
,
"{
\n\"
node_id
\"
:
\"
%s
\"
"
,
c
.
join_ack_a2g
.
grunt_id
);
sbufPrint
(
output
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
sbufPrint
(
output
,
"
\n
}"
);
free
(
c
.
join_ack_a2g
.
grunt_id
);
}
else
if
(
c
.
kind
==
Command_REJOIN_ARBITER_2_GRUNT
)
{
}
else
if
(
c
.
kind
==
Command_REJOIN_ARBITER_2_GRUNT
)
{
topic
=
"REJOIN_RM_2_RD"
;
topic
=
"REJOIN_RM_2_RD"
;
sbufPrint
(
output
,
"{
\n\"
node_id
\"
:
\"
%s
\"
"
,
c
.
rejoin_a2g
.
grunt_id
);
sbufPrint
(
output
,
"{
\n\"
node_id
\"
:
\"
%s
\"
"
,
c
.
rejoin_a2g
.
grunt_id
);
sbufPrint
(
output
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
sbufPrint
(
output
,
"
\n
}"
);
sbufPrint
(
output
,
"
\n
}"
);
free
(
c
.
rejoin_a2g
.
grunt_id
);
}
}
if
(
output
!=
NULL
)
{
if
(
output
!=
NULL
)
{
...
...
resource_system/src/arbiter/command.c
0 → 100644
View file @
e349248d
/*
* Creator: Naman Dixit
* Notice: © Copyright 2020 Naman Dixit
*/
// A queued outgoing message for the arbiter's Kafka writer loop.
// `kind` selects the destination topic and which union member is valid.
typedef struct Command {
    enum Command_Kind {
        Command_NONE,
        Command_RESPONSE_ARBITER_2_DM,     // reply to Dispatch Manager (topic RESPONSE_RM_2_DM)
        Command_REQUEST_ARBITER_2_GRUNT,   // resource request to grunts (topic REQUEST_RM_2_RD)
        Command_JOIN_ACK_ARBITER_2_GRUNT,  // acknowledge a grunt's join (topic JOIN_ACK_RM_2_RD)
        Command_REJOIN_ARBITER_2_GRUNT,    // ask an unknown grunt to rejoin (topic REJOIN_RM_2_RD)
    } kind;

    Char *resource_id;  // transaction id echoed back to the requester

    union {
        struct {
            Char **grunt_ids;  // ids of grunts that satisfied the request
        } res_a2d;
        struct {
            Sint memory;       // requested memory (presumably MiB per README -- confirm)
        } req_a2g;
        struct {
            Char *grunt_id;    // target grunt; owned, freed after send
        } rejoin_a2g;
        struct {
            Char *grunt_id;    // target grunt; owned, freed after send
        } join_ack_a2g;
    };
} Command;
resource_system/src/arbiter/conf.c
0 → 100644
View file @
e349248d
/*
* Creator: Naman Dixit
* Notice: © Copyright 2020 Naman Dixit
*/
// Arbiter runtime configuration, populated with defaults in main() and then
// overridden from config.ini via ini_parse()/confCallback().
typedef struct Configuration {
    Char *kafka_address;             // broker "host:port" ([Kafka] Address)
    U64 grunt_response_wait_time;    // ms to gather grunt responses before replying to DM
    Sint message_read_gap;           // ms timeout passed to rd_kafka_consumer_poll()
    Sint grunt_time_to_die;          // ms without a heartbeat before a grunt is dropped
} Configuration;
# if defined(COMPILER_CLANG)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wincompatible-pointer-types-discards-qualifiers"
# endif
// ini_parse() handler for the arbiter: copies recognized [Arbiter] and
// [Kafka] keys into the Configuration passed as `user`.
// Returns 1 on success (including sections we ignore), 0 on an unknown key
// inside a recognized section, which makes ini_parse() report an error.
internal_function
Sint confCallback (void *user, const Char *section, const Char *name, const Char *value)
{
    Configuration *cfg = (Configuration *)user;

    if (strequal(section, "Arbiter")) {
        if (strequal(name, "MessageReadGap")) {
            cfg->message_read_gap = atoi(value);
            return 1;
        }
        if (strequal(name, "GruntTimeToDie")) {
            cfg->grunt_time_to_die = atoi(value);
            return 1;
        }
        if (strequal(name, "GruntResponseWaitTime")) {
            cfg->grunt_response_wait_time = strtoul(value, NULL, 10);
            return 1;
        }
        return 0; /* unknown name inside [Arbiter] */
    }

    if (strequal(section, "Kafka")) {
        if (strequal(name, "Address")) {
            cfg->kafka_address = strdup(value); /* owned copy; value dies with the parser */
            return 1;
        }
        return 0; /* unknown name inside [Kafka] */
    }

    return 1; /* unrecognized sections are ignored, not errors */
}
# if defined(COMPILER_CLANG)
# pragma clang diagnostic pop
# endif
resource_system/src/arbiter/grunt_manager.c
0 → 100644
View file @
e349248d
/*
* Creator: Naman Dixit
* Notice: © Copyright 2020 Naman Dixit
*/
// One tracked worker node ("grunt") as seen by the arbiter.
typedef struct Grunt {
    Char *id;           // node_id from the join message; owned, freed on untrack
    Sint memory;        // last reported free memory (README says MiB -- confirm)
    B32 rejoin_asked;   // set once a rejoin request has been queued
    Sint time_to_die;   // in ms; refreshed by heartbeats, decremented each loop
} Grunt;

// In-flight survey of grunt responses for one resource request.
typedef struct Grunt_Survey {
    Char **grunt_ids;    // grunts that answered so far
    U16 *ports;          // NOTE(review): populated/used elsewhere -- not visible here
    U64 milli_passed;    // ms accumulated while waiting for responses
    U64 milli_last;      // timestamp of the previous accumulation step
    Char *resource_id;   // transaction this survey belongs to
} Grunt_Survey;

// Registry of live grunts: a stretchy buffer of slots, a free list of
// vacated slot indices, and a hash map from hashString(id) to slot index.
typedef struct Grunt_Tracker {
    Grunt *grunts;
    Size *free_list;
    Hash_Table map;
} Grunt_Tracker;
// Registers grunt `g` in tracker `t`, reusing a vacated slot when one is
// available. Slot 0 is reserved as the "invalid" sentinel because
// htLookup() yields 0 for keys that are absent from the map.
internal_function
void gruntTrackBegin (Grunt_Tracker *t, Grunt g)
{
    if (t->grunts == NULL) {
        // Lazy init: create the map and burn slot 0 with a zeroed Grunt,
        // since index 0 out of the hash table means "not found".
        t->map = htCreate(0);
        sbufAdd(t->grunts, (Grunt){0});
    }

    Size insertion_index = 0;

    if (sbufElemin(t->free_list) > 0) {
        // Reuse the first free slot and drop it from the free list.
        t->grunts[t->free_list[0]] = g;
        insertion_index = t->free_list[0];
        sbufUnsortedRemove(t->free_list, 0);
    } else {
        // No free slot: append to the stretchy buffer.
        sbufAdd(t->grunts, g);
        insertion_index = sbufElemin(t->grunts) - 1;
    }

    // Map the id's hash to the slot for O(1) lookup by grunt id.
    htInsert(&t->map, hashString(g.id), insertion_index);
}
// Removes the grunt identified by `grunt_id` from tracker `t`, freeing the
// owned id string and recycling its slot through the free list.
//
// Fix: guard against a failed lookup. htLookup() returns 0 when the key is
// absent (slot 0 is the reserved sentinel -- see gruntTrackBegin); without
// the guard an unknown id pushed index 0 onto the free list, so a later
// gruntTrackBegin would hand out the sentinel slot as a live slot and
// corrupt the "not found" invariant.
internal_function
void gruntTrackEnd (Grunt_Tracker *t, Char *grunt_id)
{
    U64 hash = hashString(grunt_id);
    Size index = htLookup(&t->map, hash);

    if (index == 0) {
        // Unknown grunt id: nothing to untrack.
        return;
    }

    sbufAdd(t->free_list, index);   // recycle the slot
    free(t->grunts[index].id);      // id was strdup'd at join time
    t->grunts[index] = (Grunt){0};
    htRemove(&t->map, hash);
}
inih
@
35121712
Subproject commit 351217124ddb3e3fe2b982248a04c672350bb0af
resource_system/src/grunt/conf.c
0 → 100644
View file @
e349248d
/*
* Creator: Naman Dixit
* Notice: © Copyright 2020 Naman Dixit
*/
// Grunt runtime configuration, populated with defaults in main() and then
// overridden from config.ini via ini_parse()/confCallback().
typedef struct Configuration {
    Char *kafka_address;    // broker "host:port" ([Kafka] Address)
    U64 heartbeat_gap;      // ms between heartbeats sent to the arbiter
    Sint message_read_gap;  // ms poll gap for the Kafka reader ([Grunt] MessageReadGap)
} Configuration;
# if defined(COMPILER_CLANG)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wincompatible-pointer-types-discards-qualifiers"
# endif
// ini_parse() handler for the grunt: copies recognized [Grunt] and [Kafka]
// keys into the Configuration passed as `user`.
// Returns 1 on success (including sections we ignore), 0 on an unknown key
// inside a recognized section, which makes ini_parse() report an error.
internal_function
Sint confCallback (void *user, const Char *section, const Char *name, const Char *value)
{
    Configuration *cfg = (Configuration *)user;

    if (strequal(section, "Grunt")) {
        if (strequal(name, "MessageReadGap")) {
            cfg->message_read_gap = atoi(value);
            return 1;
        }
        if (strequal(name, "HeartbeatGap")) {
            cfg->heartbeat_gap = strtoul(value, NULL, 10);
            return 1;
        }
        return 0; /* unknown name inside [Grunt] */
    }

    if (strequal(section, "Kafka")) {
        if (strequal(name, "Address")) {
            cfg->kafka_address = strdup(value); /* owned copy; value dies with the parser */
            return 1;
        }
        return 0; /* unknown name inside [Kafka] */
    }

    return 1; /* unrecognized sections are ignored, not errors */
}
# if defined(COMPILER_CLANG)
# pragma clang diagnostic pop
# endif
resource_system/src/grunt/grunt.c
View file @
e349248d
...
@@ -34,6 +34,17 @@
...
@@ -34,6 +34,17 @@
# pragma clang diagnostic pop
# pragma clang diagnostic pop
# endif
# endif
# if defined(COMPILER_CLANG)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wreserved-id-macro"
# pragma clang diagnostic ignored "-Wcast-qual"
# endif
#include "inih/ini.h"
#include "inih/ini.c"
# if defined(COMPILER_CLANG)
# pragma clang diagnostic pop
# endif
typedef
struct
Resources
{
typedef
struct
Resources
{
Sint
memory
;
Sint
memory
;
}
Resources
;
}
Resources
;
...
@@ -45,6 +56,7 @@ typedef struct Command {
...
@@ -45,6 +56,7 @@ typedef struct Command {
#include "kafka.h"
#include "kafka.h"
#include "time.c"
#include "time.c"
#include "conf.c"
#include "command.h"
#include "command.h"
...
@@ -55,17 +67,8 @@ typedef struct Thread_Manager_Command {
...
@@ -55,17 +67,8 @@ typedef struct Thread_Manager_Command {
Thread_Manager_Command_DOCKER_DESTROY
,
Thread_Manager_Command_DOCKER_DESTROY
,
}
kind
;
}
kind
;
Char
*
id
;
Char
*
entity_id
;
Char
*
resource_id
;
union
{
struct
{
Sint
placeholder
;
}
docker_create
;
struct
{
Sint
placeholder
;
}
docker_destroy
;
};
}
Thread_Manager_Command
;
}
Thread_Manager_Command
;
typedef
struct
JSON_Print_Command
{
typedef
struct
JSON_Print_Command
{
...
@@ -110,6 +113,19 @@ int main(int argc, char** argv)
...
@@ -110,6 +113,19 @@ int main(int argc, char** argv)
signal
(
SIGINT
,
signalHandlerSIGINT
);
signal
(
SIGINT
,
signalHandlerSIGINT
);
Configuration
conf
=
{
0
};
{
// Default config values
conf
.
kafka_address
=
"10.129.6.5:9092"
;
conf
.
message_read_gap
=
10
;
conf
.
heartbeat_gap
=
1000
;
}
if
(
ini_parse
(
"config.ini"
,
confCallback
,
&
conf
)
<
0
)
{
printf
(
"Can't load 'config.ini'
\n
"
);
return
-
1
;
}
tmCommandInit
();
tmCommandInit
();
instrumentCommandInit
();
instrumentCommandInit
();
...
@@ -118,7 +134,7 @@ int main(int argc, char** argv)
...
@@ -118,7 +134,7 @@ int main(int argc, char** argv)
Kafka
kafka
=
{
0
};
Kafka
kafka
=
{
0
};
kafka
.
writer
=
kafkaCreateWriter
(
&
kafka
,
"10.129.6.5:9092"
);
kafka
.
writer
=
kafkaCreateWriter
(
&
kafka
,
conf
.
kafka_address
);
#define CREATE_TOPIC(s) \
#define CREATE_TOPIC(s) \
do { \
do { \
...
@@ -134,23 +150,34 @@ int main(int argc, char** argv)
...
@@ -134,23 +150,34 @@ int main(int argc, char** argv)
CREATE_TOPIC
(
"RESPONSE_RD_2_RM"
);
//
CREATE_TOPIC
(
"RESPONSE_RD_2_RM"
);
//
CREATE_TOPIC
(
"JOIN_RD_2_RM"
);
//
CREATE_TOPIC
(
"JOIN_RD_2_RM"
);
//
CREATE_TOPIC
(
"HEARTBEAT_RD_2_RM"
);
//
CREATE_TOPIC
(
"HEARTBEAT_RD_2_RM"
);
//
CREATE_TOPIC
(
"JOIN_ACK_RM_2_RD"
);
//
CREATE_TOPIC
(
"REJOIN_RM_2_RD"
);
//
CREATE_TOPIC
(
"REJOIN_RM_2_RD"
);
//
CREATE_TOPIC
(
"LOG_COMMON"
);
//
CREATE_TOPIC
(
"LOG_COMMON"
);
//
kafka
.
reader
=
kafkaCreateReader
(
&
kafka
,
"10.129.6.5:9092"
);
kafka
.
reader
=
kafkaCreateReader
(
&
kafka
,
conf
.
kafka_address
);
rd_kafka_topic_partition_list_t
*
kafka_reader_topics
=
rd_kafka_topic_partition_list_new
(
1
);
rd_kafka_topic_partition_list_t
*
kafka_reader_topics
=
rd_kafka_topic_partition_list_new
(
1
);
rd_kafka_topic_t
*
topic_req_a2g
=
kafkaSubscribe
(
&
kafka
,
kafka_reader_topics
,
rd_kafka_topic_t
*
topic_req_a2g
=
kafkaSubscribe
(
&
kafka
,
kafka_reader_topics
,
"REQUEST_RM_2_RD"
);
"REQUEST_RM_2_RD"
);
rd_kafka_topic_t
*
topic_jac_a2g
=
kafkaSubscribe
(
&
kafka
,
kafka_reader_topics
,
"JOIN_ACK_RM_2_RD"
);
rd_kafka_topic_t
*
topic_rej_a2g
=
kafkaSubscribe
(
&
kafka
,
kafka_reader_topics
,
rd_kafka_topic_t
*
topic_rej_a2g
=
kafkaSubscribe
(
&
kafka
,
kafka_reader_topics
,
"REJOIN_RM_2_RD"
);
"REJOIN_RM_2_RD"
);
rd_kafka_topic_t
*
topic_log
=
kafkaSubscribe
(
&
kafka
,
kafka_reader_topics
,
rd_kafka_topic_t
*
topic_log
=
kafkaSubscribe
(
&
kafka
,
kafka_reader_topics
,
"LOG_COMMON"
);
"LOG_COMMON"
);
rd_kafka_resp_err_t
kafka_reader_topics_err
=
rd_kafka_subscribe
(
kafka
.
reader
,
kafka_reader_topics
);
rd_kafka_resp_err_t
kafka_reader_topics_err
=
rd_kafka_subscribe
(
kafka
.
reader
,
kafka_reader_topics
);
printf
(
"Subscription finished
\n
"
);
fflush
(
stdout
);
rd_kafka_topic_partition_list_destroy
(
kafka_reader_topics
);
rd_kafka_topic_partition_list_destroy
(
kafka_reader_topics
);
printf
(
"Partition list destroyed
\n
"
);
fflush
(
stdout
);
if
(
kafka_reader_topics_err
)
{
if
(
kafka_reader_topics_err
)
{
fprintf
(
stderr
,
"Subscribe failed: %s
\n
"
,
fprintf
(
stderr
,
"Subscribe failed: %s
\n
"
,
rd_kafka_err2str
(
kafka_reader_topics_err
));
rd_kafka_err2str
(
kafka_reader_topics_err
));
...
@@ -158,146 +185,186 @@ int main(int argc, char** argv)
...
@@ -158,146 +185,186 @@ int main(int argc, char** argv)
return
-
1
;
return
-
1
;
}
}
Char
*
join_msg
=
NULL
;
{
sbufPrint
(
join_msg
,
"{
\"
node_id
\"
:
\"
%s
\"
"
,
node_name
);
Sint
timestamp
=
(
Sint
)
time
(
0
);
sbufPrint
(
join_msg
,
"
\n
}
\n
"
);
Char
*
join_msg
=
NULL
;
sbufPrint
(
join_msg
,
"{
\"
node_id
\"
:
\"
%s
\"
"
,
node_name
);
sbufPrint
(
join_msg
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
sbufPrint
(
join_msg
,
"
\n
}
\n
"
);
if
(
!
kafkaWrite
(
kafka
.
writer
,
"JOIN_RD_2_RM"
,
"resource_daemon"
,
join_msg
))
{
if
(
!
kafkaWrite
(
kafka
.
writer
,
"JOIN_RD_2_RM"
,
"resource_daemon"
,
join_msg
))
{
return
-
1
;
return
-
1
;
}
}
}
B32
join_successful
=
false
;
U64
time_begin
=
timeMilli
();
U64
time_begin
=
timeMilli
();
U64
time_accum
=
0
;
U64
time_accum
=
0
;
Sint
time_of_launch
=
(
Sint
)
time
(
0
);
while
(
global_keep_running
)
{
while
(
global_keep_running
)
{
// NOTE(naman): Get the fd's that are ready
// NOTE(naman): Get the fd's that are ready
rd_kafka_message_t
*
kafka_message_read
=
rd_kafka_consumer_poll
(
kafka
.
reader
,
100
);
rd_kafka_message_t
*
kafka_message_read
=
rd_kafka_consumer_poll
(
kafka
.
reader
,
conf
.
message_read_gap
);
B32
command_found
=
false
;
B32
command_found
=
false
;
Command
c
=
{
0
};
Command
c
=
{
0
};
if
(
kafka_message_read
!=
NULL
)
{
if
(
kafka_message_read
!=
NULL
)
{
fprintf
(
stderr
,
"Received message on %s [%d] "
"at offset %"
PRId64
":
\n
%s
\n
"
,
rd_kafka_topic_name
(
kafka_message_read
->
rkt
),
(
int
)
kafka_message_read
->
partition
,
kafka_message_read
->
offset
,
cJSON_Print
(
cJSON_Parse
((
char
*
)
kafka_message_read
->
payload
)));
char
*
buffer
=
(
char
*
)
kafka_message_read
->
payload
;
const
Char
*
json_error
=
NULL
;
cJSON
*
root
=
cJSON_ParseWithOpts
(
buffer
,
&
json_error
,
true
);
if
((
cJSON_GetObjectItem
(
root
,
"timestamp"
)
==
NULL
)
||
(
cJSON_GetObjectItem
(
root
,
"timestamp"
)
->
valueint
)
<
time_of_launch
)
{
printf
(
"Ignoring : %s
\n
"
,
buffer
);
cJSON_Delete
(
root
);
rd_kafka_message_destroy
(
kafka_message_read
);
continue
;
}
if
(
kafka_message_read
->
err
)
{
if
(
kafka_message_read
->
err
)
{
/* Consumer error: typically just informational. */
/* Consumer error: typically just informational. */
fprintf
(
stderr
,
"Consumer error: %s
\n
"
,
fprintf
(
stderr
,
"Consumer error: %s
\n
"
,
rd_kafka_message_errstr
(
kafka_message_read
));
rd_kafka_message_errstr
(
kafka_message_read
));
}
else
if
(
kafka_message_read
->
rkt
==
topic_req_a2g
)
{
}
else
if
(
kafka_message_read
->
rkt
==
topic_jac_a2g
)
{
fprintf
(
stderr
,
join_successful
=
true
;
"Received message on %s [%d] "
}
else
if
(
join_successful
)
{
"at offset %"
PRId64
":
\n
%s
\n
"
,
if
(
kafka_message_read
->
rkt
==
topic_req_a2g
)
{
rd_kafka_topic_name
(
kafka_message_read
->
rkt
),
if
(
root
==
NULL
)
{
(
int
)
kafka_message_read
->
partition
,
kafka_message_read
->
offset
,
// TODO(naman): Error
cJSON_Print
(
cJSON_Parse
((
char
*
)
kafka_message_read
->
payload
)));
}
else
{
command_found
=
true
;
char
*
buffer
=
(
char
*
)
kafka_message_read
->
payload
;
c
.
txn_id
=
cJSON_GetObjectItem
(
root
,
"resource_id"
)
->
valuestring
;
c
.
res
.
memory
=
cJSON_GetObjectItem
(
root
,
"memory"
)
->
valueint
;
const
Char
*
json_error
=
NULL
;
}
cJSON
*
root
=
cJSON_ParseWithOpts
(
buffer
,
&
json_error
,
true
);
}
else
if
(
kafka_message_read
->
rkt
==
topic_rej_a2g
)
{
Char
*
node_id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
if
(
root
==
NULL
)
{
// TODO(naman): Error
}
else
{
command_found
=
true
;
c
.
txn_id
=
cJSON_GetObjectItem
(
root
,
"resource_id"
)
->
valuestring
;
c
.
res
.
memory
=
cJSON_GetObjectItem
(
root
,
"memory"
)
->
valueint
;
}
}
else
if
(
kafka_message_read
->
rkt
==
topic_rej_a2g
)
{
if
(
!
kafkaWrite
(
kafka
.
writer
,
"JOIN_RD_2_RM"
,
"resource_daemon"
,
join_msg
))
{
return
-
1
;
}
}
else
if
(
kafka_message_read
->
rkt
==
topic_log
)
{
fprintf
(
stderr
,
"Received message on %s [%d] "
"at offset %"
PRId64
":
\n
%s
\n
"
,
rd_kafka_topic_name
(
kafka_message_read
->
rkt
),
(
int
)
kafka_message_read
->
partition
,
kafka_message_read
->
offset
,
cJSON_Print
(
cJSON_Parse
((
char
*
)
kafka_message_read
->
payload
)));
char
*
buffer
=
(
char
*
)
kafka_message_read
->
payload
;
if
(
strequal
(
node_id
,
node_name
))
{
join_successful
=
false
;
const
Char
*
json_error
=
NULL
;
Sint
timestamp
=
(
Sint
)
time
(
0
);
cJSON
*
root
=
cJSON_ParseWithOpts
(
buffer
,
&
json_error
,
true
);
Char
*
rejoin_msg
=
NULL
;
sbufPrint
(
rejoin_msg
,
"{
\"
node_id
\"
:
\"
%s
\"
"
,
node_name
);
sbufPrint
(
rejoin_msg
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
sbufPrint
(
rejoin_msg
,
"
\n
}
\n
"
);
if
(
root
==
NULL
)
{
if
(
!
kafkaWrite
(
kafka
.
writer
,
"JOIN_RD_2_RM"
,
"resource_daemon"
,
rejoin_msg
))
{
// TODO(naman): Error
return
-
1
;
}
else
{
}
Char
*
node_id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
}
if
(
strequal
(
node_id
,
node_name
))
{
}
else
if
(
kafka_message_read
->
rkt
==
topic_log
)
{
// FIXME(naman): Fix this placeholder
cJSON
*
msg_type_json
=
cJSON_GetObjectItem
(
root
,
"message_type"
);
Thread_Manager_Command
tmc
=
{
0
};
if
(
msg_type_json
==
NULL
)
{
tmCommandEnqueue
(
tmc
);
if
(
strequal
(
msg_type_json
->
valuestring
,
"deployment_launch"
))
{
/* "resource_id": "logical-entity-id", */
Char
*
node_id
=
cJSON_GetObjectItem
(
root
,
"node_id"
)
->
valuestring
;
/* "function_id": "unique-function-id", */
if
(
strequal
(
node_id
,
node_name
))
{
/* "timestamp" : "iso-8601-timestamp", */
Char
*
resource_id
=
cJSON_GetObjectItem
(
root
,
"resource_id"
)
->
valuestring
;
/* "reason": "deployment"/"termination", */
Char
*
entity_id
=
cJSON_GetObjectItem
(
root
,
"entity_id"
)
->
valuestring
;
/* "status": true/false // Only valid if reason==deployment; */
Char
*
entity_type
=
cJSON_GetObjectItem
(
root
,
"entity_type"
)
->
valuestring
;
Thread_Manager_Command
tmc
=
{.
entity_id
=
strdup
(
entity_id
),
.
resource_id
=
strdup
(
resource_id
)};
B32
add_command
=
false
;
if
(
strequal
(
entity_type
,
"docker"
))
{
tmc
.
kind
=
Thread_Manager_Command_DOCKER_CREATE
;
add_command
=
true
;
}
if
(
add_command
)
{
tmCommandEnqueue
(
tmc
);
}
else
{
free
(
tmc
.
entity_id
);
free
(
tmc
.
resource_id
);
}
}
}
}
}
}
}
}
}
rd_kafka_message_destroy
(
kafka_message_read
);
rd_kafka_message_destroy
(
kafka_message_read
);
}
}
int
memory
=
0
;
if
(
join_successful
)
{
int
memory
=
0
;
FILE
*
meminfo
=
fopen
(
"/proc/meminfo"
,
"r"
);
FILE
*
meminfo
=
fopen
(
"/proc/meminfo"
,
"r"
);
Char
line
[
256
]
=
{
0
};
Char
line
[
256
]
=
{
0
};
while
(
fgets
(
line
,
sizeof
(
line
),
meminfo
))
{
while
(
fgets
(
line
,
sizeof
(
line
),
meminfo
))
{
if
(
sscanf
(
line
,
"MemAvailable: %d kB"
,
&
memory
)
==
1
)
{
if
(
sscanf
(
line
,
"MemAvailable: %d kB"
,
&
memory
)
==
1
)
{
fclose
(
meminfo
);
fclose
(
meminfo
);
break
;
break
;
}
}
}
}
memory
/=
1024
;
memory
/=
1024
;
if
(
command_found
)
{
if
(
command_found
)
{
Char
*
output
=
NULL
;
Char
*
output
=
NULL
;
sbufPrint
(
output
,
"{
\n\"
node_id
\"
:
\"
%s
\"
"
,
node_name
);
Sint
timestamp
=
(
Sint
)
time
(
0
);
sbufPrint
(
output
,
",
\n\"
resource_id
\"
:
\"
%s
\"
"
,
c
.
txn_id
);
if
(
memory
>=
c
.
res
.
memory
)
{
sbufPrint
(
output
,
"{
\n\"
node_id
\"
:
\"
%s
\"
"
,
node_name
);
sbufPrint
(
output
,
",
\n\"
success
\"
: true
\n
"
);
sbufPrint
(
output
,
",
\n\"
resource_id
\"
:
\"
%s
\"
"
,
c
.
txn_id
);
// TODO(naman): Add port
sbufPrint
(
output
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
// sbufPrint(output, ",\n\"port\": %d\n", port);
}
else
{
sbufPrint
(
output
,
",
\n\"
success
\"
: false
\n
"
);
}
sbufPrint
(
output
,
"
\n
}
\n
"
);
if
(
memory
>=
c
.
res
.
memory
)
{
sbufPrint
(
output
,
",
\n\"
success
\"
: true
\n
"
);
// TODO(naman): Add port
// sbufPrint(output, ",\n\"port\": %d", port);
}
else
{
sbufPrint
(
output
,
",
\n\"
success
\"
: false
\n
"
);
}
sbufPrint
(
output
,
"
\n
}
\n
"
);
if
(
!
kafkaWrite
(
kafka
.
writer
,
"RESPONSE_RD_2_RM"
,
"resource_daemon"
,
output
))
{
if
(
!
kafkaWrite
(
kafka
.
writer
,
"RESPONSE_RD_2_RM"
,
"resource_daemon"
,
output
))
{
return
-
1
;
return
-
1
;
}
}
}
}
{
{
JSON_Print_Command
command
=
{
0
};
JSON_Print_Command
command
=
{
0
};
while
(
instrumentCommandDequeue
(
&
command
))
{
while
(
instrumentCommandDequeue
(
&
command
))
{
// TODO(naman): Enable this after proper testing
// TODO(naman): Enable this after proper testing
/* if (!kafkaWrite(kafka.writer, command.topic, "resource_daemon", command.msg)) { */
/* if (!kafkaWrite(kafka.writer, command.topic, "resource_daemon", command.msg)) { */
/* return -1; */
/* return -1; */
/* } */
/* } */
}
}
}
}
{
// Send a heartbeat message if it is time to do so
{
// Send a heartbeat message if it is time to do so
U64
time_new
=
timeMilli
();
U64
time_new
=
timeMilli
();
U64
time_passed
=
time_new
-
time_begin
;
U64
time_passed
=
time_new
-
time_begin
;
time_begin
=
time_new
;
time_begin
=
time_new
;
time_accum
+=
time_passed
;
time_accum
+=
time_passed
;
if
(
time_accum
>=
1000
)
{
if
(
time_accum
>=
conf
.
heartbeat_gap
)
{
time_accum
=
0
;
time_accum
=
0
;
Char
*
output
=
NULL
;
Char
*
output
=
NULL
;
Sint
timestamp
=
(
Sint
)
time
(
0
);
sbufPrint
(
output
,
"{
\"
node_id
\"
:
\"
%s
\"
"
,
node_name
);
sbufPrint
(
output
,
"{
\"
node_id
\"
:
\"
%s
\"
"
,
node_name
);
sbufPrint
(
output
,
",
\n\"
memory
\"
: %d"
,
memory
);
sbufPrint
(
output
,
",
\n\"
timestamp
\"
: %d"
,
timestamp
);
sbufPrint
(
output
,
",
\n\"
memory
\"
: %d"
,
memory
);
sbufPrint
(
output
,
"
\n
}
\n
"
);
sbufPrint
(
output
,
"
\n
}
\n
"
);
if
(
!
kafkaWrite
(
kafka
.
writer
,
"HEARTBEAT_RD_2_RM"
,
"resource_daemon"
,
output
))
{
if
(
!
kafkaWrite
(
kafka
.
writer
,
"HEARTBEAT_RD_2_RM"
,
"resource_daemon"
,
output
))
{
return
-
1
;
return
-
1
;
}
}
}
}
}
}
}
...
...
resource_system/src/grunt/instrument_docker.c
View file @
e349248d
...
@@ -7,12 +7,22 @@ internal_function
...
@@ -7,12 +7,22 @@ internal_function
noreturn
noreturn
void
*
dockerProcessLoop
(
void
*
arg
)
void
*
dockerProcessLoop
(
void
*
arg
)
{
{
unused_variable
(
arg
);
pthread_setcanceltype
(
PTHREAD_CANCEL_DEFERRED
,
NULL
);
pthread_setcanceltype
(
PTHREAD_CANCEL_DEFERRED
,
NULL
);
while
(
true
)
{
while
(
true
)
{
// TODO(naman): Get data
// TODO(naman): Get data
Char
*
data_cmd
=
NULL
;
sbufPrint
(
data_cmd
,
"docker stats %s"
,
(
Char
*
)
arg
);
FILE
*
data_file
=
popen
(
data_cmd
,
"r"
);
fseek
(
data_file
,
0
,
SEEK_END
);
long
size
=
ftell
(
data_file
);
fseek
(
data_file
,
0
,
SEEK_SET
);
Char
*
data
=
calloc
((
Size
)
size
+
1
,
sizeof
(
*
data
));
fread
(
data
,
1
,
(
Size
)
size
+
1
,
data_file
);
fclose
(
data_file
);
Char
*
json
=
NULL
;
Char
*
json
=
NULL
;
Char
*
output
=
NULL
;
Char
*
output
=
NULL
;
...
...
resource_system/src/grunt/thread_manager.c
View file @
e349248d
...
@@ -5,7 +5,8 @@
...
@@ -5,7 +5,8 @@
typedef
struct
Thread
{
typedef
struct
Thread
{
pthread_t
thread
;
pthread_t
thread
;
Char
*
id
;
Char
*
entity_id
;
Char
*
resource_id
;
}
Thread
;
}
Thread
;
typedef
struct
Thread_Tracker
{
typedef
struct
Thread_Tracker
{
...
@@ -33,7 +34,7 @@ void threadTrackBegin (Thread_Tracker *t, Thread th)
...
@@ -33,7 +34,7 @@ void threadTrackBegin (Thread_Tracker *t, Thread th)
insertion_index
=
sbufElemin
(
t
->
threads
)
-
1
;
insertion_index
=
sbufElemin
(
t
->
threads
)
-
1
;
}
}
htInsert
(
&
t
->
map
,
hashString
(
th
.
id
),
insertion_index
);
htInsert
(
&
t
->
map
,
hashString
(
th
.
resource_
id
),
insertion_index
);
}
}
internal_function
internal_function
...
@@ -41,7 +42,8 @@ void threadTrackEnd (Thread_Tracker *t, Char *thread_id)
...
@@ -41,7 +42,8 @@ void threadTrackEnd (Thread_Tracker *t, Char *thread_id)
{
{
Size
index
=
htLookup
(
&
t
->
map
,
hashString
(
thread_id
));
Size
index
=
htLookup
(
&
t
->
map
,
hashString
(
thread_id
));
sbufAdd
(
t
->
free_list
,
index
);
sbufAdd
(
t
->
free_list
,
index
);
free
(
t
->
threads
[
index
].
id
);
free
(
t
->
threads
[
index
].
resource_id
);
free
(
t
->
threads
[
index
].
entity_id
);
t
->
threads
[
index
]
=
(
Thread
){
0
};
t
->
threads
[
index
]
=
(
Thread
){
0
};
htRemove
(
&
t
->
map
,
index
);
htRemove
(
&
t
->
map
,
index
);
}
}
...
@@ -61,16 +63,18 @@ void* tmProcessLoop (void *arg)
...
@@ -61,16 +63,18 @@ void* tmProcessLoop (void *arg)
switch
(
command
.
kind
)
{
switch
(
command
.
kind
)
{
case
Thread_Manager_Command_DOCKER_CREATE
:
{
case
Thread_Manager_Command_DOCKER_CREATE
:
{
pthread_t
thread
;
pthread_t
thread
;
pthread_create
(
&
thread
,
NULL
,
&
dockerProcessLoop
,
NULL
);
pthread_create
(
&
thread
,
NULL
,
&
dockerProcessLoop
,
command
.
entity_id
);
threadTrackBegin
(
&
tt
,
(
Thread
){.
id
=
command
.
id
,
.
thread
=
thread
});
threadTrackBegin
(
&
tt
,
(
Thread
){.
entity_id
=
command
.
entity_id
,
.
resource_id
=
command
.
resource_id
,
.
thread
=
thread
});
}
break
;
}
break
;
case
Thread_Manager_Command_DOCKER_DESTROY
:
{
case
Thread_Manager_Command_DOCKER_DESTROY
:
{
Size
index
=
htLookup
(
&
tt
.
map
,
hashString
(
command
.
id
));
Size
index
=
htLookup
(
&
tt
.
map
,
hashString
(
command
.
resource_
id
));
pthread_t
thread
=
tt
.
threads
[
index
].
thread
;
pthread_t
thread
=
tt
.
threads
[
index
].
thread
;
pthread_cancel
(
thread
);
pthread_cancel
(
thread
);
pthread_join
(
thread
,
NULL
);
pthread_join
(
thread
,
NULL
);
threadTrackEnd
(
&
tt
,
command
.
id
);
threadTrackEnd
(
&
tt
,
command
.
resource_
id
);
}
break
;
}
break
;
case
Thread_Manager_Command_NONE
:
{
case
Thread_Manager_Command_NONE
:
{
...
...
resource_system/src/test/test.c
View file @
e349248d
...
@@ -85,16 +85,19 @@ int main(int argc, char** argv)
...
@@ -85,16 +85,19 @@ int main(int argc, char** argv)
Sint
id
=
(
Sint
)
time
(
NULL
);
Sint
id
=
(
Sint
)
time
(
NULL
);
sbufPrint
(
output
,
"{
\n\"
resource_id
\"
:
\"
%d
\"
"
,
id
);
sbufPrint
(
output
,
"{
\n\"
resource_id
\"
:
\"
%d
\"
"
,
id
);
sbufPrint
(
output
,
",
\n\"
timestamp
\"
: %d"
,
id
);
sbufPrint
(
output
,
",
\n\"
memory
\"
: %d"
,
memory_required
);
sbufPrint
(
output
,
",
\n\"
memory
\"
: %d"
,
memory_required
);
sbufPrint
(
output
,
"
\n
}
\n
"
);
sbufPrint
(
output
,
"
\n
}
\n
"
);
printf
(
"Sending to Arbiter:
\n
%s
\n
"
,
cJSON_Print
(
cJSON_Parse
(
output
)));
printf
(
"Sending to Arbiter:
\n
%s
\n
"
,
cJSON_Print
(
cJSON_Parse
(
output
)));
printf
(
"%ld
\n
"
,
time
(
0
));
if
(
output
!=
NULL
)
{
if
(
output
!=
NULL
)
{
if
(
!
kafkaWrite
(
kafka
.
writer
,
"REQUEST_DM_2_RM"
,
"rm_test"
,
output
))
{
if
(
!
kafkaWrite
(
kafka
.
writer
,
"REQUEST_DM_2_RM"
,
"rm_test"
,
output
))
{
return
-
1
;
return
-
1
;
}
}
}
}
printf
(
"%ld
\n
"
,
time
(
0
));
rd_kafka_message_t
*
kafka_message_read
=
rd_kafka_consumer_poll
(
kafka
.
reader
,
10
);
rd_kafka_message_t
*
kafka_message_read
=
rd_kafka_consumer_poll
(
kafka
.
reader
,
10
);
while
(
true
)
{
while
(
true
)
{
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment