SYNERG / xanadu

Commit 1e2d3ca4, authored Apr 19, 2020 by Naman Dixit

Merge on pull

Parents: 85c0cfb7, 0198dbc7
Showing 24 changed files with 1071 additions and 178 deletions (+1071 / -178)
.gitignore (+1, -0)
dispatch_system/constants.json (+20, -8)
dispatch_system/dispatch_daemon/config.json (+1, -1)
dispatch_system/dispatch_daemon/execute.js (+9, -6)
dispatch_system/dispatch_daemon/index.js (+4, -4)
dispatch_system/dispatch_daemon/lib.js (+41, -21)
dispatch_system/dispatch_daemon/package.json (+1, -0)
dispatch_system/dispatch_manager/explicit_chain_handler.js (+426, -0)
dispatch_system/dispatch_manager/index.js (+148, -121)
dispatch_system/dispatch_manager/lib.js (+76, -10)
dispatch_system/dispatch_manager/metrics.js (+199, -0)
dispatch_system/dispatch_manager/operator.js (+10, -0)
dispatch_system/dispatch_manager/package.json (+1, -0)
dispatch_system/dispatch_manager/repository/worker_env/env.js (+18, -7)
dispatch_system/log_listener.js (+20, -0)
dispatch_system/prometheus.yml (+38, -0)
dispatch_system/setup_topics.js (+15, -0)
docker-compose.yml (+41, -0)
hybrid_serverless-1.png (+0, -0)
kafka-docker-1.1.0.tar.gz (+0, -0)
local_experiments/Coldstart latency for function chains.png (+0, -0)
local_experiments/function_chain.drawio (+1, -0)
local_experiments/function_chain.png (+0, -0)
readme.md (+1, -0)
.gitignore

@@ -6,3 +6,4 @@ firecracker*
 secrets.json
 resource_system/bin/**
 resource_system/version.linux
+local_experiments/
dispatch_system/constants.json

 {
-    "registry_url": "10.129.6.5:5000/",
+    "registry_url": "localhost:5000/",
     "master_port": 8080,
     "master_address": "localhost",
-    "kafka_host": "10.129.6.5:9092",
     "grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt",
-    "log_channel": "LOG_COMMON",
-    "couchdb_host": "localhost:5984",
-    "function_db_name": "serverless",
+    "couchdb_host": "10.129.6.5:5984",
+    "couchdb_db_name": "serverless",
+    "metrics_db_name": "metrics",
+    "implicit_chain_db_name": "implicit_chain",
+    "network": {
+        "network_bridge": "hybrid_kafka-serverless",
+        "internal": {
+            "kafka_host": "kafka:9092"
+        },
+        "external": {
+            "kafka_host": "localhost:29092"
+        }
+    },
     "topics": {
         "request_dm_2_rm": "request",
         "heartbeat": "heartbeat",
         "deployed": "deployed",
         "remove_worker": "removeWorker",
         "response_rm_2_dm": "RESPONSE_RM_2_DM_DUMMY",
-        "hscale": "hscale"
+        "hscale": "hscale",
+        "log_channel": "LOG_COMMON"
     },
     "autoscalar_metrics": {
         "open_request_threshold": 100
     },
-    "speculative_deployment": true
-}
\ No newline at end of file
+    "speculative_deployment": false,
+    "JIT_deployment": false,
+    "id_size": 20
+}
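The new "network" block separates how the Kafka broker is reached from inside the Docker bridge network ("kafka:9092" on the hybrid_kafka-serverless bridge) and from the host (the published "localhost:29092" listener). A minimal sketch of how the two addresses are meant to be picked, consistent with the execute.js changes below (the helper name is illustrative, not part of the commit):

    const constants = require('./constants.json')

    // Containers join the hybrid_kafka-serverless bridge and can resolve the broker
    // by its service name; isolates and plain processes run on the host and have to
    // use the externally published listener instead.
    function kafkaHostFor(runtime) {
        return (runtime === "container")
            ? constants.network.internal.kafka_host   // "kafka:9092"
            : constants.network.external.kafka_host   // "localhost:29092"
    }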
dispatch_system/dispatch_daemon/config.json

-{"id": "192.168.31.51", "master_node": "10.129.6.5"}
\ No newline at end of file
+{"id": "192.168.0.105", "master_node": "192.168.0.105"}
\ No newline at end of file
dispatch_system/dispatch_daemon/execute.js

@@ -17,7 +17,7 @@ function runIsolate(local_repository, metadata) {
     return new Promise((resolve, reject) => {
         const worker = new Worker(filename, {
-            argv: [resource_id, functionHash, port, "isolate"],
+            argv: [resource_id, functionHash, port, "isolate", constants.network.external.kafka_host],
             resourceLimits: {
                 maxOldGenerationSizeMb: memory
             }

@@ -43,7 +43,8 @@ function runProcess(local_repository, metadata) {
     return new Promise((resolve, reject) => {
         let timeStart = Date.now()
-        const process = spawn('node', [filename, resource_id, functionHash, port, "process", `--max-old-space-size=${memory}`]);
+        const process = spawn('node', [filename, resource_id, functionHash, port, "process",
+            constants.network.external.kafka_host, `--max-old-space-size=${memory}`]);
         process.stdout.on('data', (data) => {
             console.log(`stdout: ${data}`);
             let timeDifference = Math.ceil((Date.now() - timeStart))

@@ -91,8 +92,9 @@ function runContainer(metadata) {
             if (code != 0)
                 reject("error")
             else {
-                const process = spawn('docker', ["run", "--rm", "-p", `${port}:${port}`, "--name", resource_id, registry_url + imageName,
-                    resource_id, imageName, port, "container"]);
+                const process = spawn('docker', ["run", "--rm", `--network=${constants.network.network_bridge}`, "-p", `${port}:${port}`,
+                    "--name", resource_id, registry_url + imageName,
+                    resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
                 let result = "";
                 // timeStart = Date.now()
                 process.stdout.on('data', (data) => {

@@ -118,8 +120,9 @@ function runContainer(metadata) {
             }
             else {
                 logger.info("container starting at port", port);
-                const process = spawn('docker', ["run", "--rm", "-p", `${port}:${port}`, "--name", resource_id,
-                    registry_url + imageName, resource_id, imageName, port, "container"]);
+                const process = spawn('docker', ["run", "--rm", `--network=${constants.network.network_bridge}`,
+                    "-p", `${port}:${port}`, "--name", resource_id,
+                    registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
                 let result = "";
                 // timeStart = Date.now()
                 process.stdout.on('data', (data) => {
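All three launch paths now hand the worker a Kafka bootstrap address as an extra positional argument: the external listener for isolates and processes, the internal one for containers. A minimal sketch of the worker-side parsing this implies, assuming the entrypoint reads its parameters positionally in the order shown above (the updated env.js is not expanded in this view, so the names here are illustrative):

    // [resource_id, functionHash, port, runtime, kafka_host] as passed by execute.js
    const [resourceId, functionHash, port, runtime, kafkaHost] = process.argv.slice(2)

    const kafka = require('kafka-node')
    const client = new kafka.KafkaClient({ kafkaHost, autoConnect: true })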
dispatch_system/dispatch_daemon/index.js

@@ -11,7 +11,7 @@ const fs = require('fs')
 const fetch = require('node-fetch');
 let metadataDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
-metadataDB = metadataDB + "/" + constants.couchdb_db_name + "/"
+metadataDB = metadataDB + "/" + constants.function_db_name + "/"
 const kafka = require('kafka-node')
 const logger = libSupport.logger

@@ -21,7 +21,7 @@ const host_url = "http://" + constants.master_address + ":" + constants.master_p
 let Producer = kafka.Producer,
     client = new kafka.KafkaClient({
-        kafkaHost: constants.kafka_host,
+        kafkaHost: constants.network.external.kafka_host,
         autoConnect: true
     }),
     producer = new Producer(client),

@@ -77,9 +77,9 @@ libSupport.makeTopic(node_id).then(() => {
     /**
      * download and start grunt
      */
-    libSupport.download(constants.grunt_host, "grunt").then(() => {
+    libSupport.download(constants.grunt_host, "grunt", false).then(() => {
         logger.info("Downloaded grunt binary from repository")
-        fs.chmod('grunt', 0o555, (err) => {
+        fs.chmod('grunt', 0o755, (err) => {
             logger.info("grunt made executable. Starting grunt")
             let grunt = spawn('./grunt', [node_id])
             grunt.stdout.on('data', data => {
dispatch_system/dispatch_daemon/lib.js

-const http = require('http');
+const fetch = require('node-fetch');
 const fs = require('fs');
 const process = require('process')
 const { spawnSync } = require('child_process');

@@ -30,7 +30,7 @@ function makeTopic(id) {
     console.log("Using Primary IP", id, "as topic");
     let client = new kafka.KafkaClient({
-        kafkaHost: constants.kafka_host,
+        kafkaHost: constants.network.external.kafka_host,
         autoConnect: true
     }),
     Producer = kafka.Producer,

@@ -50,28 +50,48 @@ function makeTopic(id) {
     })
 }

-var download = function (url, dest, cb) {
-    return new Promise((resolve, reject) => {
-        console.log(url);
-        if (!fs.existsSync(dest)) {
-            var file = fs.createWriteStream(dest);
-            var request = https.get(url, function (response) {
-                response.pipe(file);
-                file.on('finish', function () {
-                    file.close(cb); // close() is async, call cb after close completes.
-                    resolve();
-                });
-            }).on('error', function (err) { // Handle errors
-                fs.unlink(dest); // Delete the file async. (But we don't check the result)
-                if (cb) cb(err.message);
-                reject(err);
-            });
-        } else {
-            resolve();
-        }
-    })
-};
+// var download = function (url, dest, check = true, cb) {
+//     return new Promise((resolve, reject) => {
+//         console.log(url);
+//         if (!check || !fs.existsSync(dest)) {
+//             var file = fs.createWriteStream(dest);
+//             var request = https.get(url, function (response) {
+//                 response.pipe(file);
+//                 file.on('finish', function () {
+//                     file.close(cb); // close() is async, call cb after close completes.
+//                     resolve();
+//                 });
+//             }).on('error', function (err) { // Handle errors
+//                 fs.unlink(dest); // Delete the file async. (But we don't check the result)
+//                 logger.error("download failed" + err.message);
+//                 if (cb) cb(err.message);
+//                 reject(err);
+//             });
+//         } else {
+//             resolve();
+//         }
+//     })
+// };
+
+const download = (async (url, path, check = true) => {
+    if (!check || !fs.existsSync(path)) {
+        console.log(url);
+        const res = await fetch(url);
+        const fileStream = fs.createWriteStream(path);
+        await new Promise((resolve, reject) => {
+            res.body.pipe(fileStream);
+            res.body.on("error", (err) => {
+                reject(err);
+            });
+            fileStream.on("finish", function () {
+                resolve();
+            });
+        });
+    }
+}
+);

 function makeid(length) {
     var result = '';
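The callback-style https.get downloader is replaced by a promise-based helper built on node-fetch (hence the new dependency below), and it resolves only after the write stream emits finish, so callers can chmod and spawn the file as soon as the await returns. A small await-style sketch, equivalent to the .then() chain in dispatch_daemon/index.js above (assuming download is exported from this module and fs/spawn are already required by the daemon):

    async function fetchGrunt(node_id) {
        await download(constants.grunt_host, "grunt", false)  // check = false: always re-download
        await fs.promises.chmod("grunt", 0o755)               // make the binary executable
        return spawn("./grunt", [node_id])
    }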
dispatch_system/dispatch_daemon/package.json

@@ -16,6 +16,7 @@
     "kafka-node": "^5.0.0",
     "morgan": "^1.9.1",
     "mqtt": "^3.0.0",
+    "node-fetch": "^2.6.0",
     "redis": "^2.8.0",
     "request": "^2.88.2",
     "winston": "^3.2.1"
dispatch_system/dispatch_manager/explicit_chain_handler.js (new file)

This diff is collapsed.
dispatch_system/dispatch_manager/index.js

This diff is collapsed.
dispatch_system/dispatch_manager/lib.js

@@ -4,10 +4,23 @@ const rp = require('request-promise');
 const fetch = require('node-fetch');
 const winston = require('winston')
 const constants = require('.././constants.json')
+const secrets = require('./secrets.json')
+const metrics = require('./metrics')
 const { createLogger, format, transports } = winston;
 const heap = require('heap')
+let kafka = require('kafka-node'),
+    Producer = kafka.Producer,
+    client = new kafka.KafkaClient({
+        kafkaHost: constants.network.external.kafka_host,
+        autoConnect: true
+    }),
+    producer = new Producer(client)
+
+let implicitChainDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
+implicitChainDB = implicitChainDB + "/" + constants.implicit_chain_db_names + "/"
 /**
  * Generates unique IDs of arbitrary length
  * @param {Length of the ID} length

@@ -45,6 +58,14 @@ function generateExecutor(functionPath, functionHash) {
     return hash
 }

+/**
+ * Reverse proxy to take user requests and forward them to appropriate workers using a loadbalacer
+ * @param {JSON} req the user request to be forwarded to the worker
+ * @param {JSON} res Object to use to return the response to the user
+ * @param {Map} functionToResource Function to resource Map
+ * @param {Map} resourceMap Map from resource ID to resource metadata
+ * @param {Map} functionBranchTree Holds the function path's and related probability distribution
+ */
 function reverseProxy(req, res, functionToResource, resourceMap, functionBranchTree) {
     branchChainPredictor(req, resourceMap, functionToResource, functionBranchTree)
     return new Promise((resolve, reject) => {

@@ -54,6 +75,7 @@ function reverseProxy(req, res, functionToResource, resourceMap, functionBranchT
         * Bypass deployment pipeline if resource available
         */
        let functionHeap = functionToResource.get(id)
+       // loadbalancing by choosing worker with lowest load
        let forwardTo = functionHeap[0]
        let resource = resourceMap.get(forwardTo.resource_id)
        // logger.info(`Choosing resource ${JSON.stringify(forwardTo.resource_id)}` +

@@ -62,7 +84,7 @@ function reverseProxy(req, res, functionToResource, resourceMap, functionBranchT
        // logger.info("Request received at reverseproxy. Forwarding to: " + url);
        forwardTo.open_request_count += 1
-       heap.heapify(functionHeap, compare)
+       heap.heapify(functionHeap, compare) // maintain loadbalancer by heapifying the Map
        // logger.info(functionHeap);
        var options = {

@@ -71,22 +93,23 @@ function reverseProxy(req, res, functionToResource, resourceMap, functionBranchT
            body: req.body,
            json: true // Automatically stringifies the body to JSON
        };
-       // console.log(options);
        rp(options)
            .then(function (parsedBody) {
+               let serviceTime = Date.now() - res.timestamp
+
                res.json(parsedBody)
                forwardTo.open_request_count -= 1
                heap.heapify(functionHeap, compare)
+
+               metrics.collectMetrics({type: res.start, value: serviceTime, functionHash: req.params.id, runtime})
                resolve()
            })
            .catch(function (err) {
                forwardTo.open_request_count -= 1
                heap.heapify(functionHeap, compare)
-               logger.error("error" + err.error.errno);
+               logger.error("error" + err);
                res.json(err.message).status(err.statusCode)
                resolve()
            });

@@ -109,7 +132,6 @@ function getPort(usedPort) {
     return port
 }
-
 const logger = winston.createLogger({
     level: 'info',
     format: winston.format.combine(

@@ -252,17 +274,61 @@ function viterbi(functionBranchTree) {
                 path.push({node: maxSibling, probability: maxProb})
                 siblings = new Map()
             }
-            if (path.length > 0)
-                console.log("path", path);
+            // if (path.length > 0)
+            //     console.log("path", path);
             metadata.mle_path = path
+            if (path.length > 1) {
+                let payload = {
+                    method: 'put',
+                    body: JSON.stringify(path),
+                    headers: { 'Content-Type': 'application/json' }
+                }
+                fetch(implicitChainDB + functionHash, payload)
+            }
         }
     });
 }

+function logBroadcast(message, resource_id, resourceMap) {
+    return new Promise((resolve, reject) => {
+        try {
+            message.timestamp = Date.now()
+            if (resource_id && resourceMap.has(resource_id)) {
+                let resource = resourceMap.get(resource_id)
+                message.resource_id = resource_id
+                message.node_id = resource.node_id
+                message.runtime = resource.runtime
+                message.function_id = resource.functionHash
+            }
+            let log = [{
+                topic: constants.topics.log_channel,
+                messages: JSON.stringify(message),
+                partition: 0
+            }]
+            producer.send(log, () => {
+                resolve()
+            })
+        } catch (err) {
+            console.log(err);
+            reject()
+        }
+    })
+}
+
+async function fetchData(url, data = null) {
+    let res
+    if (data === undefined || data === null)
+        res = await fetch(url)
+    else
+        res = await fetch(url, data)
+    return await res.json()
+}
+
 module.exports = {
     makeid, generateExecutor, reverseProxy,
     getPort, logger, compare,
-    viterbi
+    viterbi, logBroadcast, fetchData, metrics,
+    producer
 }
\ No newline at end of file
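The comments added to reverseProxy spell out the load-balancing scheme: functionToResource maps each function to a heap of its workers ordered by open_request_count, so functionHeap[0] is the least-loaded worker, and every increment or decrement of a worker's open request count is followed by heap.heapify(functionHeap, compare) to restore that ordering. The compare function itself is not part of this diff; a sketch consistent with that behaviour (illustrative only, a min-ordering on open requests):

    function compare(a, b) {
        return a.open_request_count - b.open_request_count
    }

    // With this ordering, forwardTo = functionHeap[0] always selects the worker with
    // the fewest outstanding requests before the request is proxied to it.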
dispatch_system/dispatch_manager/metrics.js (new file)

'use strict';

const constants = require('.././constants.json');
const secrets = require('./secrets.json')
const fetch = require('node-fetch');
const util = require('util')
const prom = require('prom-client');
const Registry = prom.Registry;
const register = new Registry();
const alpha = 0.99

let log_channel = constants.topics.log_channel,
    metrics = { }

const intervalCollector = prom.collectDefaultMetrics({ prefix: 'xanadu', timeout: 5000, register });
const workerCountMetric = new prom.Gauge({ name: "worker_count", help: "worker count" });
const warmstartMetric = new prom.Histogram({ name: "warmstart", help: "warm start latency" });
const coldstartMetric = new prom.Histogram({ name: "coldstart", help: "cold start latency" });
const starttimeMetric = new prom.Histogram({ name: "starttime", help: "worker start times" });
const requestMetric = new prom.Summary({ name: "requests", help: "request RTT times",
    percentiles: [0.01, 0.05, 0.5, 0.9, 0.95, 0.99, 0.999] });

register.registerMetric(workerCountMetric);
register.registerMetric(warmstartMetric);
register.registerMetric(coldstartMetric);
register.registerMetric(starttimeMetric);
register.registerMetric(requestMetric);

let metricsDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
metricsDB = metricsDB + "/" + constants.metrics_db_name + "/"

let kafka = require('kafka-node'),
    Producer = kafka.Producer,
    client = new kafka.KafkaClient({
        kafkaHost: constants.network.external.kafka_host,
        autoConnect: true
    }),
    producer = new Producer(client)

/**
 * Function called to report metric data related to functions
 * @param {JSON} metric
 */
function collectMetrics(metric) {
    /**
     * If metrics for a new function comes in,
     * provision required structure for the function
     */
    if (!(metric.functionHash in metrics)) {
        metrics[metric.functionHash] = {}
    }
    if (!(metric.runtime in metrics[metric.functionHash])) {
        metrics[metric.functionHash][metric.runtime] = {
            shortterm: {
                coldstart: 0,
                coldstart_total_request: 0,
                warm_total_request: 0,
                scale_count: 0,
                warmstart: 0,
                worker_count: 0,
                starttime: 0
            }
        }
    }
    if (metric.type === 'coldstart') {
        metrics[metric.functionHash][metric.runtime].shortterm.coldstart += metric.value
        metrics[metric.functionHash][metric.runtime].shortterm.coldstart_total_request += 1
        coldstartMetric.observe(metric.value)
        requestMetric.observe(metric.value)
    } else if (metric.type === 'warmstart') {
        metrics[metric.functionHash][metric.runtime].shortterm.warmstart += metric.value
        metrics[metric.functionHash][metric.runtime].shortterm.warm_total_request += 1
        warmstartMetric.observe(metric.value)
        requestMetric.observe(metric.value)
    } else if (metric.type === 'scale') {
        metrics[metric.functionHash][metric.runtime].shortterm.worker_count = metric.value
        workerCountMetric.set(metric.value)
        if (metric.starttime !== undefined) {
            metrics[metric.functionHash][metric.runtime].shortterm.starttime += metric.starttime
            metrics[metric.functionHash][metric.runtime].shortterm.scale_count += 1
            starttimeMetric.observe(metric.starttime)
        }
    }
}

/**
 * Run periodically to calculate average runtime metrics like coldstart and
 * warmstart latencies.
 * The module provides two granularities for metrics - shortterm and longterm
 * shortterm - realtime data at a granularity of 5s (set in dispatch_manager/lib.js)
 *             shortterm data is calculated using Simple Moving Average (SMA)
 * longterm - longterm data is held and averaged out over a period of time.
 *            longterm data is calculated using Expontential Moving Average (EMA)
 */
async function broadcastMetrics() {
    if (Object.keys(metrics).length !== 0) {
        for (let [functionHash, data] of Object.entries(metrics)) {
            for (let [runtime, metricData] of Object.entries(data)) {
                if (metricData.shortterm.coldstart != 0 || metricData.shortterm.longterm != 0) {
                    let { metric, dbData } = await fetchData(functionHash, metricData, runtime)
                    /**
                     * Shortterm moving average
                     */
                    metric.shortterm.coldstart /= (metric.shortterm.coldstart_total_request != 0) ?
                        metric.shortterm.coldstart_total_request : 1
                    metric.shortterm.starttime /= (metric.shortterm.scale_count != 0) ?
                        metric.shortterm.scale_count : 1
                    metric.shortterm.warmstart /= (metric.shortterm.warm_total_request != 0) ?
                        metric.shortterm.warm_total_request : 1
                    /**
                     * Longterm exponential moving average
                     */
                    if (metric.shortterm.coldstart != 0)
                        metric.longterm.coldstart = (metric.longterm.coldstart != 0) ?
                            metric.longterm.coldstart * alpha + metric.shortterm.coldstart * (1 - alpha)
                            : metric.shortterm.coldstart
                    if (metric.shortterm.starttime && metric.shortterm.starttime != 0)
                        metric.longterm.starttime = (metric.longterm.starttime != 0) ?
                            metric.longterm.starttime * alpha + metric.shortterm.starttime * (1 - alpha)
                            : metric.shortterm.starttime
                    if (metric.shortterm.warmstart != 0)
                        metric.longterm.warmstart = (metric.longterm.warmstart != 0) ?
                            metric.longterm.warmstart * alpha + metric.shortterm.warmstart * (1 - alpha)
                            : metric.shortterm.warmstart

                    dbData[runtime] = {
                        coldstart: metric.longterm.coldstart,
                        warmstart: metric.longterm.warmstart,
                        starttime: metric.longterm.starttime
                    }
                    let payload = {
                        method: 'put',
                        body: JSON.stringify(dbData),
                        headers: { 'Content-Type': 'application/json' }
                    }
                    await fetch(metricsDB + functionHash, payload)
                    metric.timestamp = Date.now()
                }
            }
        }

        let log = [{
            topic: log_channel,
            messages: JSON.stringify({ metrics }),
            partition: 0
        }]
        producer.send(log, () => { })

        for (let [functionHash, data] of Object.entries(metrics)) {
            for (let [runtime, metric] of Object.entries(data)) {
                metric.shortterm = {
                    coldstart: 0,
                    coldstart_total_request: 0,
                    warm_total_request: 0,
                    warmstart: 0,
                    worker_count: 0,
                    starttime: 0,
                    scale_count: 0
                }
            }
        }
    }
}

/**
 * Function to fetch the latest data from metric DB
 * @param {String} functionHash
 * @param {JSON} metric
 */
async function fetchData(functionHash, metric, runtime) {
    let res = await fetch(metricsDB + functionHash)
    let json = await res.json()
    if (json.error === "not_found" || json[runtime] === undefined) {
        metric.longterm = {
            coldstart: 0,
            warmstart: 0,
            starttime: 0
        }
    } else {
        metric.longterm = {
            coldstart: json[runtime].coldstart,
            warmstart: json[runtime].warmstart,
            starttime: (json[runtime].starttime) ? json[runtime].starttime : 0
        }
    }
    return { metric, dbData: (json.error === "not_found") ? {} : json }
}

module.exports = { collectMetrics, broadcastMetrics, register }
\ No newline at end of file
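The long-term figures above are the usual exponential moving average, longterm = longterm * alpha + shortterm * (1 - alpha) with alpha = 0.99, seeded from the short-term value when no history exists yet. As a worked example, if the stored long-term cold-start latency is 1000 ms and the latest 5-second window averages 500 ms, the update gives 1000 * 0.99 + 500 * 0.01 = 995 ms: a single window barely moves the long-term estimate, while the short-term SMA tracks it immediately and is reset after every broadcast.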
dispatch_system/dispatch_manager/operator.js (new file)

const op = {
    'lt': function (x, y) { return x < y },
    'gt': function (x, y) { return x > y },
    'lte': function (x, y) { return x <= y },
    'gte': function (x, y) { return x >= y },
    'eq': function (x, y) { return x === y },
    'neq': function (x, y) { return x !== y },