SYNERG / xanadu / Commits

Commit 0198dbc7, authored Apr 02, 2020 by Nilanjan Daw
Merge branch 'explicit_function_chaining'
parents 9d5f6ec9 db8d9fb6

Showing 16 changed files with 528 additions and 192 deletions (+528 / -192)
dispatch_system/constants.json  +7 -4
dispatch_system/dispatch_daemon/index.js  +1 -1
dispatch_system/dispatch_manager/explicit_chain_handler.js  +183 -50
dispatch_system/dispatch_manager/index.js  +71 -77
dispatch_system/dispatch_manager/lib.js  +37 -10
dispatch_system/dispatch_manager/metrics.js  +149 -48
dispatch_system/dispatch_manager/operator.js  +10 -0
dispatch_system/dispatch_manager/package.json  +1 -0
dispatch_system/dispatch_manager/repository/worker_env/env.js  +8 -2
dispatch_system/log_listener.js  +20 -0
dispatch_system/prometheus.yml  +38 -0
docker-compose.yml  +2 -0
hybrid_serverless-1.png  +0 -0
local_experiments/Coldstart latency for function chains.png  +0 -0
local_experiments/function_chain.drawio  +1 -0
local_experiments/function_chain.png  +0 -0
dispatch_system/constants.json
@@ -3,9 +3,10 @@
     "master_port": 8080,
     "master_address": "localhost",
     "grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt",
-    "log_channel": "LOG_COMMON",
     "couchdb_host": "localhost:5984",
-    "couchdb_db_name": "serverless",
+    "function_db_name": "serverless",
+    "metrics_db_name": "metrics",
+    "implicit_chain_db_name": "implicit_chain",
     "network": {
         "network_bridge": "hybrid_kafka-serverless",
         "internal": {
...
@@ -21,11 +22,13 @@
         "deployed": "deployed",
         "remove_worker": "removeWorker",
         "response_rm_2_dm": "RESPONSE_RM_2_DM_DUMMY",
-        "hscale": "hscale"
+        "hscale": "hscale",
+        "log_channel": "LOG_COMMON"
     },
     "autoscalar_metrics": {
         "open_request_threshold": 100
     },
-    "speculative_deployment": true,
+    "speculative_deployment": false,
+    "JIT_deployment": false,
     "id_size": 20
 }
dispatch_system/dispatch_daemon/index.js
@@ -11,7 +11,7 @@ const fs = require('fs')
 const fetch = require('node-fetch');

 let metadataDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
-metadataDB = metadataDB + "/" + constants.couchdb_db_name + "/"
+metadataDB = metadataDB + "/" + constants.function_db_name + "/"

 const kafka = require('kafka-node')
 const logger = libSupport.logger
...
dispatch_system/dispatch_manager/explicit_chain_handler.js
    (This diff is collapsed in the GitLab view and not shown here: +183 -50.)
dispatch_system/dispatch_manager/index.js
...
@@ -12,18 +12,22 @@ const heap = require('heap');
 const fetch = require('node-fetch');
 const swStats = require('swagger-stats');
 const apiSpec = require('./swagger.json');
 const util = require('util')

 /**
  * URL to the couchdb database server used to store function metadata
  */
 let metadataDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
-metadataDB = metadataDB + "/" + constants.couchdb_db_name + "/"
+metadataDB = metadataDB + "/" + constants.function_db_name + "/"

 let metricsDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
 metricsDB = metricsDB + "/" + constants.metrics_db_name + "/"

 const app = express()
 const libSupport = require('./lib')
 const logger = libSupport.logger
 let date = new Date();
-let log_channel = constants.log_channel
+let log_channel = constants.topics.log_channel

 let usedPort = new Map(), // TODO: remove after integration with RM
     db = new Map(), // queue holding request to be dispatched
...
@@ -71,6 +75,11 @@ const WINDOW_SIZE = 10
 const port = constants.master_port
 const registry_url = constants.registry_url

+app.get('/metrics', (req, res) => {
+    res.set('Content-Type', libSupport.metrics.register.contentType);
+    res.end(libSupport.metrics.register.metrics());
+});

 /**
  * REST API to receive deployment requests
  */
...
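The new /metrics route above serves whatever libSupport.metrics has registered with prom-client, which this commit adds to package.json. Since the metrics.js diff is collapsed further down, the following is only a rough, hypothetical sketch of the kind of registry module such a route expects; the metric name and labels are illustrative and not taken from the commit.

    // Hypothetical sketch of a prom-client registry module (not the actual metrics.js).
    const client = require('prom-client');   // "prom-client": "^12.0.0" added in this commit

    const register = new client.Registry();
    client.collectDefaultMetrics({ register });   // default process/Node.js metrics

    // Illustrative metric only; the real names live in the collapsed metrics.js diff.
    const coldstartLatency = new client.Histogram({
        name: 'coldstart_latency_ms',
        help: 'Cold-start latency observed per function chain',
        labelNames: ['functionHash', 'runtime'],
        registers: [register]
    });

    module.exports = { register, coldstartLatency };

With something of this shape exported through lib.js as metrics, the route returns the Prometheus text exposition format that the new localhost:8080 scrape job in prometheus.yml below can collect.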
@@ -256,68 +265,14 @@ function dispatch() {
     if (!db.has(functionHash + runtime)) {
         db.set(functionHash + runtime, [])
         db.get(functionHash + runtime).push({ req, res })
         let resource_id = libSupport.makeid(constants.id_size) // each function resource request is associated with an unique ID
         logger.info(`Generated new resource ID: ${resource_id} for runtime: ${runtime}`);
         /**
          * Request RM for resource
          */
         logger.info("Requesting RM " + JSON.stringify({
             resource_id,
             "memory": 332,
         }))
         resourceMap.set(resource_id, {
             runtime, functionHash,
             port: null, node_id: null,
             deployed: false, deploy_request_time: Date.now()
         })
         let payloadToRM = [{
             topic: constants.topics.request_dm_2_rm, // changing from REQUEST_DM_2_RM
             messages: JSON.stringify({
                 resource_id,
                 "memory": 332,
                 timestamp: Date.now()
             }),
             partition: 0
         }]
         producer.send(payloadToRM, () => {
             // db.set(functionHash + runtime, { req, res })
             console.log("sent rm");
         })
         /**
          * Speculative deployment:
          * If function MLE path is present then deploy those parts of the path which are
          * not already running
          */
         if (constants.speculative_deployment && req.headers['x-resource-id'] === undefined) {
             console.log(functionBranchTree, req.params.id);
             if (functionBranchTree.has(req.params.id)) {
                 let branchInfo = functionBranchTree.get(req.params.id)
                 console.log("mle_path", branchInfo.mle_path);
                 if (branchInfo.mle_path && branchInfo.mle_path.length > 1) {
                     for (let node of branchInfo.mle_path) {
                         // console.log(functionToResource);
                         if (!functionToResource.has(node.node + runtime) && !db.has(node.node + runtime)) {
                             console.log("Deploying according to MLE path: ", node.node);
                             let payload = [{
                                 topic: constants.topics.hscale,
-                                messages: JSON.stringify({ "runtime": "container", "functionHash": node.node })
+                                messages: JSON.stringify({ runtime, functionHash })
                             }]
                             producer.send(payload, function () { })
                             db.set(node.node + runtime, [])
                         }
                     }
                 }
             }
         }
+        speculative_deployment(req, runtime)
     } else {
         logger.info("deployment process already started waiting")
         db.get(functionHash + runtime).push({ req, res })
...
@@ -396,7 +351,8 @@ function postDeploy(message) {
         libSupport.metrics.collectMetrics({ type: "scale", value: functionToResource.get(id).length,
-            functionHash: id })
+            functionHash: message.functionHash, runtime: message.runtime,
+            starttime: (Date.now() - resource.deploy_request_time) })
     } catch (e) {
         logger.error(e.message)
     }
...
@@ -448,7 +404,7 @@ consumer.on('message', function (message) {
             heap.heapify(resourceArray, libSupport.compare)
             libSupport.metrics.collectMetrics({ type: "scale", value: resourceArray.length,
-                functionHash: id })
+                functionHash: message.functionHash, runtime: message.runtime })
             libSupport.logBroadcast({
                 entity_id: message.entity_id,
                 "reason": "terminate",
...
@@ -470,15 +426,17 @@ consumer.on('message', function (message) {
         let resource_id = libSupport.makeid(constants.id_size), // each function resource request is associated with an unique ID
             runtime = message.runtime,
             functionHash = message.functionHash
         logger.info(`Generated new resource ID: ${resource_id} for runtime: ${runtime}`);
+        console.log("Resource Status: ", functionToResource);
         /**
          * Request RM for resource
          */
         logger.info("Requesting RM " + JSON.stringify({
             resource_id,
             "memory": 332,
         }))
+        /** uncomment when RM is unavailable */
         resourceMap.set(resource_id, {
             runtime, functionHash,
             port: null, node_id: null,
             deployed: false, deploy_request_time: Date.now()
...
@@ -525,7 +483,7 @@ consumer.on('message', function (message) {
             }),
             partition: 0
         }]
-        logger.info(resourceMap);
+        // logger.info(resourceMap);
         producer.send(payload, () => {
             logger.info(`Resource Deployment request sent to Dispatch Agent`)
         })
...
@@ -553,18 +511,54 @@ function autoscalar() {
 }

 function periodicMetricBroadcast() {
     let message = {}, flag = false
     functionToResource.forEach((functionHeap, functionHash) => {
         if (functionHeap.length > 0) {
             message[functionHash] = functionHeap.length
             libSupport.metrics.collectMetrics({ type: "scale", value: functionHeap.length, functionHash: functionHash })
         }

+/**
+ * Speculative deployment:
+ * If function MLE path is present then deploy those parts of the path which are
+ * not already running
+ *
+ * FIXME: Currently supports homogenous runtime chain i.e takes runtime as a param.
+ *        Change it to also profile runtime
+ */
+async function speculative_deployment(req, runtime) {
+    if (constants.speculative_deployment && req.headers['x-resource-id'] === undefined) {
+        console.log(functionBranchTree, req.params.id);
+        if (functionBranchTree.has(req.params.id)) {
+            let branchInfo = functionBranchTree.get(req.params.id)
+            console.log("mle_path", branchInfo.mle_path);
+            if (branchInfo.mle_path && branchInfo.mle_path.length > 1) {
+                for (let node of branchInfo.mle_path)
+                    node.id = node.node
+                let metrics = await libSupport.fetchData(metricsDB + "_bulk_get", {
+                    method: 'post',
+                    body: JSON.stringify({ docs: branchInfo.mle_path }),
+                    headers: { 'Content-Type': 'application/json' },
+                })
 }
+                console.log(util.inspect(metrics, false, null, true /* enable colors */))
+                for (let node of branchInfo.mle_path) {
+                    // console.log(functionToResource);
+                    if (!functionToResource.has(node.node + runtime) && !db.has(node.node + runtime)) {
+                        console.log("Deploying according to MLE path: ", node.node);
+                        let payload = [{
+                            topic: constants.topics.hscale,
+                            messages: JSON.stringify({ "runtime": "container", "functionHash": node.node })
+                        }]
+                        producer.send(payload, function () { })
+                        db.set(node.node + runtime, [])
+                    }
+                }
+            }
+        }
+    }
+}

 setInterval(libSupport.metrics.broadcastMetrics, 5000)
+setInterval(libSupport.viterbi, 1000, functionBranchTree)
 setInterval(autoscalar, 1000);
 setInterval(dispatch, 1000);
 // setInterval(periodicMetricBroadcast, 5000)
 app.listen(port, () => logger.info(`Server listening on port ${port}!`))
\ No newline at end of file
dispatch_system/dispatch_manager/lib.js
...
@@ -4,10 +4,12 @@ const rp = require('request-promise');
 const fetch = require('node-fetch');
 const winston = require('winston')
 const constants = require('.././constants.json')
 const secrets = require('./secrets.json')
+const metrics = require('./metrics')
 const { createLogger, format, transports } = winston;
 const heap = require('heap')

 let kafka = require('kafka-node'),
     Producer = kafka.Producer,
     client = new kafka.KafkaClient({
...
@@ -16,6 +18,9 @@ let kafka = require('kafka-node'),
     }),
     producer = new Producer(client)

+let implicitChainDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
+implicitChainDB = implicitChainDB + "/" + constants.implicit_chain_db_names + "/"

 /**
  * Generates unique IDs of arbitrary length
  * @param {Length of the ID} length
...
@@ -53,6 +58,14 @@ function generateExecutor(functionPath, functionHash) {
     return hash
 }

+/**
+ * Reverse proxy to take user requests and forward them to appropriate workers using a loadbalancer
+ * @param {JSON} req the user request to be forwarded to the worker
+ * @param {JSON} res Object to use to return the response to the user
+ * @param {Map} functionToResource Function to resource Map
+ * @param {Map} resourceMap Map from resource ID to resource metadata
+ * @param {Map} functionBranchTree Holds the function path's and related probability distribution
+ */
 function reverseProxy(req, res, functionToResource, resourceMap, functionBranchTree) {
+    branchChainPredictor(req, resourceMap, functionToResource, functionBranchTree)
     return new Promise((resolve, reject) => {
...
@@ -62,6 +75,7 @@ function reverseProxy(req, res, functionToResource, resourceMap, functionBranchT
          * Bypass deployment pipeline if resource available
          */
         let functionHeap = functionToResource.get(id)
+        // loadbalancing by choosing worker with lowest load
         let forwardTo = functionHeap[0]
         let resource = resourceMap.get(forwardTo.resource_id)
         // logger.info(`Choosing resource ${JSON.stringify(forwardTo.resource_id)}` +
...
@@ -70,7 +84,7 @@ function reverseProxy(req, res, functionToResource, resourceMap, functionBranchT
         // logger.info("Request received at reverseproxy. Forwarding to: " + url);
         forwardTo.open_request_count += 1
-        heap.heapify(functionHeap, compare)
+        heap.heapify(functionHeap, compare) // maintain loadbalancer by heapifying the Map
         // logger.info(functionHeap);
         var options = {
...
@@ -80,8 +94,6 @@ function reverseProxy(req, res, functionToResource, resourceMap, functionBranchT
             json: true // Automatically stringifies the body to JSON
         };
-        // console.log(options);
         rp(options)
             .then(function (parsedBody) {
...
@@ -91,7 +103,7 @@ function reverseProxy(req, res, functionToResource, resourceMap, functionBranchT
                 forwardTo.open_request_count -= 1
                 heap.heapify(functionHeap, compare)
-                metrics.collectMetrics({ type: res.start, value: serviceTime, functionHash: id })
+                metrics.collectMetrics({ type: res.start, value: serviceTime, functionHash: req.params.id, runtime })
                 resolve()
             })
             .catch(function (err) {
...
@@ -120,7 +132,6 @@ function getPort(usedPort) {
     return port
 }

 const logger = winston.createLogger({
     level: 'info',
     format: winston.format.combine(
...
@@ -263,9 +274,17 @@ function viterbi(functionBranchTree) {
                 path.push({ node: maxSibling, probability: maxProb })
                 siblings = new Map()
             }
-            if (path.length > 0)
-                console.log("path", path);
+            // if (path.length > 0)
+            //     console.log("path", path);
             metadata.mle_path = path
+            if (path.length > 1) {
+                let payload = {
+                    method: 'put',
+                    body: JSON.stringify(path),
+                    headers: { 'Content-Type': 'application/json' }
+                }
+                fetch(implicitChainDB + functionHash, payload)
+            }
         }
     });
 }
...
@@ -283,7 +302,7 @@ function logBroadcast(message, resource_id, resourceMap) {
             message.function_id = resource.functionHash
         }
         let log = [{
-            topic: constants.log_channel,
+            topic: constants.topics.log_channel,
             messages: JSON.stringify(message),
             partition: 0
         }]
...
@@ -298,10 +317,18 @@ function logBroadcast(message, resource_id, resourceMap) {
 }

 setInterval(metrics.broadcastMetrics, 5000)

+async function fetchData(url, data = null) {
+    let res
+    if (data === undefined || data === null)
+        res = await fetch(url)
+    else
+        res = await fetch(url, data)
+    return await res.json()
+}

 module.exports = {
     makeid, generateExecutor, reverseProxy, getPort, logger, compare,
-    viterbi, logBroadcast, metrics
+    viterbi, logBroadcast, fetchData, metrics, producer
 }
\ No newline at end of file
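The new fetchData helper is a thin wrapper over node-fetch that always resolves to parsed JSON; speculative_deployment in index.js uses it against the metrics database's _bulk_get endpoint. A minimal usage sketch, assuming the default localhost:5984 CouchDB host and the "metrics" database from constants.json, with credentials omitted; the document IDs are made up:

    const libSupport = require('./lib')

    // Plain GET: fetch a single metrics document (the ID is hypothetical)
    libSupport.fetchData('http://localhost:5984/metrics/some_function_hash')
        .then(doc => console.log(doc))

    // POST: bulk-fetch several documents, mirroring the _bulk_get call in speculative_deployment
    libSupport.fetchData('http://localhost:5984/metrics/_bulk_get', {
        method: 'post',
        body: JSON.stringify({ docs: [{ id: 'hash_a' }, { id: 'hash_b' }] }),
        headers: { 'Content-Type': 'application/json' }
    }).then(result => console.log(result.results))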
dispatch_system/dispatch_manager/metrics.js
    (This diff is collapsed in the GitLab view and not shown here: +149 -48.)
dispatch_system/dispatch_manager/operator.js
0 → 100644
const op = {
    'lt': function (x, y) { return x < y },
    'gt': function (x, y) { return x > y },
    'lte': function (x, y) { return x <= y },
    'gte': function (x, y) { return x >= y },
    'eq': function (x, y) { return x === y },
    'neq': function (x, y) { return x !== y },
};
module.exports = op
\ No newline at end of file
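The new operator.js maps short operator names to comparison predicates, presumably so a branch condition in an explicit chain definition can be stored as data and evaluated at dispatch time; the wiring lives in explicit_chain_handler.js, whose diff is collapsed above. A small usage sketch; the condition object's shape is hypothetical:

    const op = require('./operator')

    // A condition as it might be encoded in a chain definition (shape assumed for illustration)
    const condition = { op: 'gte', key: 'status_code', value: 200 }
    const functionOutput = { status_code: 200 }

    if (op[condition.op](functionOutput[condition.key], condition.value)) {
        console.log('condition satisfied, follow this branch')
    }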
dispatch_system/dispatch_manager/package.json
...
@@ -20,6 +20,7 @@
     "morgan": "^1.9.1",
     "mqtt": "^3.0.0",
     "node-fetch": "^2.6.0",
+    "prom-client": "^12.0.0",
     "redis": "^2.8.0",
     "request": "^2.88.0",
     "request-promise": "^4.2.5",
...
dispatch_system/dispatch_manager/repository/worker_env/env.js
...
@@ -5,7 +5,8 @@ let request = require('request')
 const process = require('process')
 const app = express()
-let port = 5000, resource_id, functionHash, runtime, idleTime = 30
+let port = 5000, resource_id, functionHash, runtime, idleTime = 60, flagFirstRequest = true
+let waitTime

 resource_id = process.argv[2]
 functionHash = process.argv[3]
...
@@ -28,6 +29,10 @@ app.use(bodyParser.json())
 let lastRequest = Date.now(), totalRequest = 0
 app.post('/serverless/function/execute/', (req, res) => {
+    if (flagFirstRequest) {
+        waitTime = Date.now() - waitTime
+        flagFirstRequest = false
+    }
     let payload = req.body
     lastRequest = Date.now()
     totalRequest++
...
@@ -59,6 +64,7 @@ app.listen(port, () => {
         runtime, resource_id, entity_id: process.pid
     }),
         "status": true
     }], () => { })
+    waitTime = Date.now()
 })

 function shouldDie() {
...
@@ -67,7 +73,7 @@ function shouldDie() {
         let message = JSON.stringify({
             functionHash, portExternal: port,
             runtime, resource_id, entity_id: process.pid,
-            total_request: totalRequest
+            total_request: totalRequest, wait_time: waitTime
         })
         console.log("Idle for too long. Exiting");
...
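The worker now measures wait_time, the gap between app.listen() completing and the first request arriving, and reports it alongside total_request when it shuts itself down. For reference, a request that exercises the execute handler above could look like the following; the port assumes the default 5000 declared in env.js (in practice the dispatch system assigns it), and the payload shape depends entirely on the deployed function:

    const fetch = require('node-fetch')

    fetch('http://localhost:5000/serverless/function/execute/', {
        method: 'post',
        body: JSON.stringify({ input: 42 }),        // illustrative payload
        headers: { 'Content-Type': 'application/json' }
    }).then(res => res.text())                      // response format is up to the deployed function
      .then(reply => console.log(reply))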
dispatch_system/log_listener.js
0 → 100644
const constants = require('./constants.json')
const util = require('util')
let kafka = require('kafka-node'),
    Producer = kafka.Producer,
    client = new kafka.KafkaClient({
        kafkaHost: constants.network.external.kafka_host,
        autoConnect: true
    }),
    producer = new Producer(client),
    Consumer = kafka.Consumer,
    consumer = new Consumer(client,
        [
            { topic: constants.topics.log_channel }
        ])

consumer.on('message', function (message) {
    message = JSON.parse(message.value)
    console.log(util.inspect(message, false, null, true /* enable colors */))
})
\ No newline at end of file
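Anything published to the log channel topic as a JSON string is pretty-printed by this listener; logBroadcast() in lib.js and the worker in env.js are the producers touched in this commit. A minimal producer-side sketch using the same kafka-node setup; the message fields are illustrative:

    const constants = require('./constants.json')
    const kafka = require('kafka-node')

    const client = new kafka.KafkaClient({ kafkaHost: constants.network.external.kafka_host })
    const producer = new kafka.Producer(client)

    producer.on('ready', () => {
        producer.send([{
            topic: constants.topics.log_channel,
            messages: JSON.stringify({ entity_id: process.pid, reason: 'example', timestamp: Date.now() }),
            partition: 0
        }], () => client.close())
    })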
dispatch_system/prometheus.yml
0 → 100644
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first.rules"
  # - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']
  - job_name: 'docker'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9323']
  - job_name: 'xanadu'
    static_configs:
      - targets: ['localhost:8080']
docker-compose.yml
...
@@ -5,6 +5,7 @@ networks:
 services:
   zookeeper:
     image: 'bitnami/zookeeper:3'
+    restart: unless-stopped
     networks:
       - kafka-serverless
     ports:
...
@@ -15,6 +16,7 @@ services:
       - ALLOW_ANONYMOUS_LOGIN=yes
   kafka:
     image: 'bitnami/kafka:2'
+    restart: unless-stopped
     networks:
       - kafka-serverless
     ports:
...
hybrid_serverless-1.png
0 → 100644
129 KB
local_experiments/Coldstart latency for function chains.png
0 → 100644
16.2 KB
local_experiments/function_chain.drawio
0 → 100644
(draw.io diagram: compressed mxfile XML generated by draw.io 12.6.5 on 2020-03-09; content not reproduced here.)
local_experiments/function_chain.png
0 → 100644
24.9 KB