SYNERG / xanadu
Commit c1ccb55a, authored Apr 22, 2022 by kedar
Parent: 0212678f

resolved bug with multiple worker nodes
Showing 48 changed files with 3426 additions and 1140305 deletions.
Changed files:

dispatch_system/benchmark.sh (+23, -2)
dispatch_system/bm_static_1.csv (+7, -1140189)
dispatch_system/constants_local.json (+9, -3)
dispatch_system/dispatch_daemon/config.json (+1, -1)
dispatch_system/dispatch_daemon/index.js (+142, -2)
dispatch_system/dispatch_daemon/lib.js (+15, -0)
dispatch_system/dispatch_daemon/shared_meta.js (+21, -0)
dispatch_system/dispatch_manager/checkheap.js (+29, -0)
dispatch_system/dispatch_manager/index.js (+13, -10)
dispatch_system/dispatch_manager/lib.js (+39, -35)
dispatch_system/dispatch_manager/nic/Makefile-nfp4build (+9, -13)
dispatch_system/dispatch_manager/nic/assign_ip.sh (+9, -0)
dispatch_system/dispatch_manager/nic/build_offload.sh (+3, -3)
dispatch_system/dispatch_manager/nic/p4src/includes/headers.p4 (+1, -0)
dispatch_system/dispatch_manager/nic/p4src/orchestrator.bmv2.json (+2, -1)
dispatch_system/dispatch_manager/nic/p4src/orchestrator.yml (+2, -1)
dispatch_system/dispatch_manager/nic/p4src/orchestrator_coldstart.p4 (+255, -0)
dispatch_system/dispatch_manager/nic/smartnic_dispatch_monitor.py (+102, -15)
dispatch_system/dispatch_manager/node_modules_2/.bin/detect-libc (listed twice: symlink deleted, regular file added; +18, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/mime (listed twice; +8, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/mkdirp (listed twice; +33, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt (listed twice; +41, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt_pub (listed twice; +146, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt_sub (listed twice; +123, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/prebuild-install (listed twice; +85, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/rc (listed twice; +4, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/semver (listed twice; +160, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-conv (listed twice; +243, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-sign (listed twice; +191, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-verify (listed twice; +167, -1)
dispatch_system/dispatch_manager/node_modules_2/.bin/uuid (listed twice; +65, -1)
dispatch_system/dispatch_manager/repository/worker_env/node_modules/.bin/mime (listed twice; +8, -1)
dispatch_system/dispatch_manager/rm.js (+146, -2)
dispatch_system/speedo_data_static2_1f_host.csv (+14, -0)
dispatch_system/benchmark.sh

-echo $1
-# python2 send.py --client-port 8000 --closed 1 --offload 0 --req-count 50 --send-data 10 --fid $1
-# sudo ip netns exec ns_server python benchmark_dispatcher2.py --fid 369020 --c 1 --t 1 --n 2
-# sudo ip netns exec ns_server python benchmark_dispatcher2.py --fid $1 --c 1 --rps 2 --req_count 10
-sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $1 --c 20 --t 300 --rps $2
\ No newline at end of file
+#! /bin/bash -ex
+
+rps_flag=0
+n_flag=0
+while getopts 'rn' flag; do
+  case "${flag}" in
+    r) rps_flag=1 ;;
+    n) n_flag=1 ;;
+  esac
+done
+
+echo $1, $2, $3
+if [[ $rps_flag -eq 1 ]]
+then
+  sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $2 --c 50 --t 30 --rps $3
+fi
+if [[ $n_flag -eq 1 ]]
+then
+  sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $2 --c 50 --t 100 --n $3
+fi
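The rewrite selects the benchmark mode with a flag instead of editing the command: getopts only inspects the option letters, so the positionals keep their places ($1 is the flag itself, $2 the function id, $3 the rate or request count). Assuming benchmark_dispatcher.py keeps the options shown, an rps-driven run would look like `./benchmark.sh -r 369020 100` and a count-driven run like `./benchmark.sh -n 369020 1000` (the function id is illustrative, borrowed from the commented-out examples above).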
dispatch_system/bm_static_1.csv

(Diff collapsed in the original page: large benchmark CSV, +7, -1140189.)
dispatch_system/constants_local.json

@@ -2,6 +2,8 @@
   "registry_url": "10.129.2.201:5000/",
   "master_port": 8080,
   "master_address": "10.129.2.201",
+  "daemon_port": 9000,
+  "daemon_mac": "00:22:22:22:22:22",
   "grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt",
   "couchdb_host": "10.129.2.201:5984",
   "env": "env_udp2.js",
...
@@ -15,7 +17,7 @@
   "network_bridge": "xanadu_kafka-serverless",
   "use_bridge": false,
   "internal": {
-    "kafka_host": "kafka:9092"
+    "kafka_host": "10.129.2.201:9092"
   },
   "external": {
     "kafka_host": "10.129.2.201:9092"
...
@@ -29,10 +31,14 @@
     "response_rm_2_dm": "RESPONSE_RM_2_DM_DUMMY",
     "hscale": "hscale",
     "metrics_worker": "metrics_worker",
-    "log_channel": "LOG_COMMON"
+    "log_channel": "LOG_COMMON",
+    "coldstart_worker": "COLDSTART_WORKER",
+    "check_autoscale": "CHECK_AUTOSCALE",
+    "autoscale": "AUTOSCALE"
   },
   "autoscalar_metrics": {
-    "open_request_threshold": 100
+    "open_request_threshold": 100,
+    "function_load_threshold": 5
   },
   "metrics": {
     "alpha": 0.7
...
dispatch_system/dispatch_daemon/config.json

-{"id": "10.129.2.201", "master_node": "192.168.0.105"}
\ No newline at end of file
+{"id": "192.168.2.3", "master_node": "192.168.2.3"}
\ No newline at end of file
dispatch_system/dispatch_daemon/index.js

@@ -10,6 +10,11 @@ const execute = require('./execute')
 const fs = require('fs')
 const fetch = require('node-fetch');
+const os = require('os');
+const dgram = require('dgram');
+const server = dgram.createSocket('udp4');
+let struct = require('jspack')
+struct = struct.jspack

 let metadataDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
 metadataDB = metadataDB + "/" + constants.db.function_meta + "/"
...
@@ -69,7 +74,6 @@ libSupport.makeTopic(node_id).then(() => {
                 logger.error("something went wrong" + err.toString())
             });
         }
     })
...
@@ -170,13 +174,149 @@ function heartbeat() {
         topic: "heartbeat",
         messages: JSON.stringify({
             "address": node_id,
             "port": constants.daemon_port,
             "mac": constants.daemon_mac,
             "system_info": info,
             "timestamp": Date.now()
         })
     }]
-    console.log("daemon system info : ", info)
+    // console.log("daemon system info : ", info)
     producer.send(payload, function (cb) {})
 }

+// TODO 2: implement packer deparser for the udp packet
+// TODO 3: create UDP server to get the coldstart request
+server.on('error', (err) => {
+    console.log(`server error:\n${err.stack}`);
+    server.close();
+});
+
+server.on('message', (msg, rinfo) => {
+    console.log("message", msg)
+    let payload = unpackPacket(msg)
+    console.log(payload, typeof payload);
+    // get the coldstart request and start the function
+    // logger.info("Received Deployment UDP request for resource_id: " + resource_id);
+    let functionHash = "function_" + payload.function_id
+    let resource_id = 'aaa'
+    let runtime = 'process'
+    let port = 9920
+    let mac = constants.daemon_mac
+    logger.info("Received Deployment UDP request")
+    fetch(metadataDB + functionHash).then(res => res.json())
+        .then(json => {
+            console.log("metadata", json);
+            libSupport.download(host_url + "/repository/" + functionHash + ".js",
+                local_repository + functionHash + ".js").then(() => {
+                let metadata = {
+                    resource_id, functionHash,
+                    runtime, port, mac,
+                    resources: {
+                        memory: json.memory
+                    }
+                }
+                startWorker(local_repository, producer, metadata)
+            })
+        }).catch(err => {
+            logger.error("something went wrong" + err.toString())
+        });
+    // lastRequest = Date.now()
+    // console.log("network stack time", lastRequest - payload.t1)
+    // totalRequest++
+    // executor(msg).then(result => {
+    //     result = packPacket(msg)
+    //     let port = 10000 + getRandomInt(0, 10)
+    //     try {
+    //         udpProxy.send(msg, 0, msg.length, port, rinfo.address, function (err, bytes) {
+    //             if (err)
+    //                 console.log(err)
+    //             console.log("response via UDP")
+    //         })
+    //     } catch (e) {
+    //         console.log(e)
+    //     }
+    // })
+});
+
+function unpackPacket(packet) {
+    // let buffer = new Array(1024)
+    let chain_id = null, exec_id = null, function_count = null, function_id = null, data = null
+    let base = 0, f0, f1, f2, f3, f4, t1, t2, t3, t4
+    chain_id = struct.Unpack(">I", packet, base)
+    base += 4
+    exec_id = struct.Unpack(">I", packet, base)
+    base += 4
+    function_id = struct.Unpack(">I", packet, base)
+    base += 4
+    data = struct.Unpack(">I", packet, base)
+    base += 4
+    function_count = struct.Unpack("I", packet, base)
+    base += 4
+    f0 = struct.Unpack("B", packet, base)
+    base += 1
+    f1 = struct.Unpack("B", packet, base)
+    base += 1
+    f2 = struct.Unpack("B", packet, base)
+    base += 1
+    f3 = struct.Unpack("B", packet, base)
+    base += 1
+    f4 = struct.Unpack("B", packet, base)
+    base += 1
+    t1 = struct.Unpack("I", packet, base)
+    base += 8
+    t2 = struct.Unpack("I", packet, base)
+    base += 8
+    t3 = struct.Unpack("I", packet, base)
+    base += 8
+    t4 = struct.Unpack("I", packet, base)
+    console.log("chain_id", chain_id, "exec_id", exec_id, "data", data,
+        "function_count", function_count, "function_id", function_id)
+    return {
+        chain_id: chain_id[0],
+        exec_id: exec_id[0],
+        data: data[0],
+        function_count: function_count[0],
+        function_id: function_id[0],
+        f0, f1, f2, f3, f4,
+        t1, t2, t3, t4
+    }
+}
+
+function packPacket(dataPacket) {
+    let message = new Array(1024)
+    let base = 0, chain_id, exec_id, function_id, data, function_count
+    chain_id = struct.PackTo(">I", message, base, [dataPacket.chain_id])
+    base += 4
+    exec_id = struct.PackTo(">I", message, base, [dataPacket.exec_id])
+    base += 4
+    function_id = struct.PackTo(">I", message, base, [dataPacket.function_id])
+    base += 4
+    data = struct.PackTo(">I", message, base, [dataPacket.data])
+    base += 4
+    function_count = struct.PackTo("B", message, base, [dataPacket.function_count])
+    message = Buffer.from(message)
+    return message
+}
+
+server.on('listening', () => {
+    const address = server.address();
+    console.log(`server listening ${address.address}:${address.port}`);
+});
+
+// server.bind(port, "192.168.2.3");
+server.bind(constants.daemon_port);
 setInterval(heartbeat, 1000);
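The header layout implied by unpackPacket is four big-endian 32-bit words (chain_id, exec_id, function_id, data), then function_count, five one-byte flags, and four timestamps. Note two apparent asymmetries in the diff itself: packPacket writes function_count as a single byte ("B") while unpackPacket reads it as a 4-byte "I", and the timestamp reads advance base by 8 although "I" consumes only 4 bytes, so the two functions are not exact inverses. A minimal round-trip check of the 16-byte prefix both sides agree on, assuming the same jspack package used above, might look like:

let struct = require('jspack').jspack

// Pack only the big-endian 16-byte prefix (chain_id, exec_id, function_id, data).
function packPrefix(h) {
    let buf = new Array(16).fill(0)
    struct.PackTo(">I", buf, 0, [h.chain_id])
    struct.PackTo(">I", buf, 4, [h.exec_id])
    struct.PackTo(">I", buf, 8, [h.function_id])
    struct.PackTo(">I", buf, 12, [h.data])
    return Buffer.from(buf)
}

let packet = packPrefix({ chain_id: 1, exec_id: 42, function_id: 369020, data: 0 })
// Unpack returns an array; index 0 holds the decoded word.
console.log(struct.Unpack(">I", packet, 8)[0]) // 369020: function_id sits at byte offset 8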
dispatch_system/dispatch_daemon/lib.js

@@ -134,6 +134,21 @@ const logger = winston.createLogger({
 });

+function getPort(usedPort) {
+    let port = -1, ctr = 0
+    do {
+        let min = Math.ceil(30000);
+        let max = Math.floor(60000);
+        port = Math.floor(Math.random() * (max - min + 1)) + min;
+        ctr += 1;
+        if (ctr > 30000) {
+            port = -1
+            break
+        }
+    } while (usedPort.has(port))
+    return port
+}

 module.exports = { download, makeid, updateConfig, makeTopic, returnPort, logger }
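getPort draws uniformly from [30000, 60000] and retries while the candidate is already taken, giving up with -1 after 30000 attempts; callers are evidently expected to pass the set of ports already handed out, e.g. getPort(new Set([31000, 42000])).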
dispatch_system/dispatch_daemon/shared_meta.js (new file, 0 → 100644)

const secrets = require('./secrets.json')
const constants = require('.././constants_local.json')

let db = new Map(), // queue holding request to be dispatched
    resourceMap = new Map(), // map between resource_id and resource details like node_id, port, associated function etc
    functionToResource = new Map(), // a function to resource map. Each map contains a minheap of
                                    // resources associated with the function
    workerNodes = new Map(), // list of worker nodes currently known to the DM
    functionBranchTree = new Map(), // a tree to store function branch predictions
    conditionProbabilityExplicit = new Map(), // tree holding conditional probabilities for explicit chains
    requestFlightQueue = new Map() // map to store in flight requests

/**
 * URL to the couchdb database server used to store data
 */
module.exports = {
    db, functionBranchTree, functionToResource, workerNodes, resourceMap,
    conditionProbabilityExplicit, requestFlightQueue
}
dispatch_system/dispatch_manager/checkheap.js (new file, 0 → 100644)

const Heap = require('heap');

var heap = new Heap(function (a, b) {
    return a.foo - b.foo;
});
let map = new Map();
// a = {foo : 3};
// b = {foo : 4};
// c = {foo : 2};
arr = [{foo: 4}, {foo: 5}, {foo: 2}]
// map.set("foo1", a);
// map.set("foo2", b);
// map.set("foo3", c);
// heap.push({foo: 3});
// heap.push({foo: 1});
// heap.push({foo: 2});
heap.push(arr[0]);
console.log(heap)
heap.push(arr[1]);
console.log(heap)
heap.push(arr[2]);
console.log(heap)
arr[0].foo = 1;
// heap.pop(b);
console.log(heap)
heap.updateItem(arr[0])
console.log(heap)
heap.pop();
console.log(heap)
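This appears to be a scratch test of the heap package's updateItem: mutating arr[0].foo in place silently breaks the heap invariant, and updateItem(arr[0]) is what restores the item to its correct position before pop, which is the same pattern heapUpdate in dispatch_manager/index.js relies on when worker CPU utilizations change.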
dispatch_system/dispatch_manager/index.js

@@ -144,7 +144,8 @@ app.post('/serverless/deploy', (req, res) => {
         res.send("error").status(400)
     }
     else {
-        let func_id = parseInt(functionHash.slice(0,5),16)
+        let func_id = functionHash
+        // let func_id = parseInt(functionHash.slice(0,5),16)
         //console.log(func_id)
         console.log("Function id to be used is: ", func_id)
         idToFunchashMap.set(func_id, functionHash)
...
@@ -422,11 +423,11 @@ consumer.on('message', function (message) {
     // console.log(topic, message)
     if (topic === "response") {
         logger.info("response " + message);
     } else if (topic === constants.topics.heartbeat) {
         message = JSON.parse(message)
         // console.log(message)
+        console.log("node_to_resource_mapping : ", node_to_resource_mapping)
         if (Date.now() - message.timestamp < 1000)
             if (!workerNodes.has(message.address)) {
                 workerNodes.set(message.address, message.timestamp)
...
@@ -436,10 +437,10 @@ consumer.on('message', function (message) {
         else {
             if (node_to_resource_mapping.has(message.address)) {
+                console.log("")
                 let resource_id = node_to_resource_mapping.get(message.address)
                 resource_to_cpu_util.set(resource_id, message.system_info.loadavg)
             }
         }
     } else if (topic == constants.topics.deployed) {
         try {
...
@@ -487,12 +488,11 @@ consumer.on('message', function (message) {
         }
     } else if (topic == constants.topics.hscale) {
         message = JSON.parse(message)
         let resource_id = libSupport.makeid(constants.id_size), // each function resource request is associated with an unique ID
-            runtime = message.runtime,
-            functionHash = message.functionHash
+        runtime = message.runtime,
+            functionHash = message.functionHash
         logger.info(`Generated new resource ID: ${resource_id} for runtime: ${runtime}`);
         console.log("Resource Status: ", functionToResource);
         if (!functionToResource.has(functionHash + runtime) && !db.has(functionHash + runtime)) {
...
@@ -587,13 +587,16 @@ function autoscalar() {
 }

 function heapUpdate() {
+    console.log("functionToResource : ", functionToResource)
+    console.log("resource_to_cpu_util : ", resource_to_cpu_util)
     functionToResource.forEach((resourceArray, functionKey) => {
         //resourceArray = resourceList.toArray()
-        console.log("Function being updated: ", functionKey)
+        // console.log("Function being updated: ",functionKey)
         for (let i = 0; i < resourceArray.length; i++) {
             let res_i = resourceArray[i].resource_id;
             resourceArray[i].cpu_utilization = resource_to_cpu_util.get(res_i);
-            console.log("Avg load on resource-worker ", i, ": ", resourceArray[i].cpu_utilization)
+            console.log("Avg load on resource-worker ", i, ": ", resourceArray[i])
         }
         heap.heapify(resourceArray, libSupport.compare_uti)
...
@@ -713,8 +716,8 @@ async function speculative_deployment(req, runtime) {
         }
     }
 }
-setInterval(libSupport.metrics.broadcastMetrics, 5000)
+// setInterval(libSupport.metrics.broadcastMetrics, 5000)
 // setInterval(autoscalar, 1000);
 setInterval(dispatch, 1000);
-// setInterval(heapUpdate, 1000);
+// setInterval(heapUpdate, 5000);
 app.listen(port, () => logger.info(`Server listening on port ${port}!`))
dispatch_system/dispatch_manager/lib.js

@@ -75,10 +75,13 @@ function generateExecutor(functionPath, functionHash) {
     let output = input.slice(0, insertIndex) + functionFile + input.slice(insertIndex)

     let hash = crypto.createHash('md5').update(output).digest("hex");
     console.log(hash);
+    let func_id = parseInt(hash.slice(0,5),16)
+    console.log(func_id);

-    fs.writeFileSync(functionPath + hash + ".js", output)
-    return hash
+    // fs.writeFileSync(functionPath + hash + ".js", output)
+    fs.writeFileSync(functionPath + "function_" + func_id + ".js", output)
+    return "function_" + func_id
+    // return hash
 }

 /**
...
@@ -89,41 +92,42 @@ function generateExecutor(functionPath, functionHash) {
 */
 function generateMicrocExecutor(functionPath, functionName, jsfunctionhash) {
     //creating function.c
-    let function_temp = fs.readFileSync(`./repository/worker_env/function_temp.c`)
-    let function_def = fs.readFileSync(functionPath + functionName)
-    let searchSize = "//ADD_FUNCTION".length
-    let fid = parseInt(jsfunctionhash.slice(0,5), 16)
-    let insertIndex = function_temp.indexOf("//ADD_FUNCTION") + searchSize
-    let function_name = "void function_"+ fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
-    let full_function = function_temp.slice(0, insertIndex) +"\n"+ function_name + "{\n" +function_def +"\n}"+ function_temp.slice(insertIndex)
+    // let function_temp = fs.readFileSync(`./repository/worker_env/function_temp.c`)
+    // let function_def = fs.readFileSync(functionPath + functionName)
+    // let searchSize = "//ADD_FUNCTION".length
+    // let fid = parseInt(jsfunctionhash.slice(0,5), 16)
+    // let insertIndex = function_temp.indexOf("//ADD_FUNCTION") + searchSize
+    // let function_name = "void function_"+ fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
+    // let full_function = function_temp.slice(0, insertIndex) +"\n"+ function_name + "{\n" +function_def +"\n}"+ function_temp.slice(insertIndex)
     // let hash = crypto.createHash('md5').update(full_function).digest("hex");
     // console.log(hash);
-    console.log(full_function);
-    fs.writeFileSync(functionPath +"offload/"+ jsfunctionhash + ".c", full_function)
+    // console.log(full_function);
+    // fs.writeFileSync(functionPath +"offload/"+ jsfunctionhash + ".c", full_function)
     //adding call to function when match with fid
-    return new Promise((resolve) => {
-        let main_function_temp = fs.readFileSync(functionPath +"offload/"+ "static_dispatch_function.c")
-        // let client_function = fs.readFileSync(functionPath + "offload/"+jsfunctionhash+".c")
-        searchSize = "//ADD_FUNCTION_EXTERNS".length
-        insertIndex = main_function_temp.indexOf("//ADD_FUNCTION_EXTERNS") + searchSize
-        let extern_name = "extern void function_"+fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
-        let main_function = main_function_temp.slice(0, insertIndex) +"\n"+ extern_name+";\n"+ main_function_temp.slice(insertIndex)
-        console.log("MAIN FUNCTION : \n",main_function)
-        let hash = crypto.createHash('md5').update(full_function).digest("hex");
-        // console.log(hash);
-        searchSize = "//ADD_FUNCTION_CONDITION".length
-        insertIndex = main_function.indexOf("//ADD_FUNCTION_CONDITION") + searchSize
-        let inc_pkt_count = "function_packet_count["+fid+"-10000]++;"
-        let if_else_cond = "else if( fid == "+fid + " ) {\n "+inc_pkt_count +"\nfunction_"+fid+"(mapHdr);\n}"
-        let main_function_full = main_function.slice(0, insertIndex) +"\n"+ if_else_cond +"\n"+ main_function.slice(insertIndex)
-        console.log(main_function_full);
-        fs.writeFileSync(functionPath +"offload/"+ "static_dispatch_function.c", main_function_full)
-        return hash
-    });
+    // return new Promise((resolve) => {
+    //     let main_function_temp = fs.readFileSync(functionPath +"offload/"+ "static_dispatch_function.c")
+    //     // let client_function = fs.readFileSync(functionPath + "offload/"+jsfunctionhash+".c")
+    //     searchSize = "//ADD_FUNCTION_EXTERNS".length
+    //     insertIndex = main_function_temp.indexOf("//ADD_FUNCTION_EXTERNS") + searchSize
+    //     let extern_name = "extern void function_"+fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
+    //     let main_function = main_function_temp.slice(0, insertIndex) +"\n"+ extern_name+";\n"+ main_function_temp.slice(insertIndex)
+    //     console.log("MAIN FUNCTION : \n",main_function)
+    //     let hash = crypto.createHash('md5').update(full_function).digest("hex");
+    //     // console.log(hash);
+    //     searchSize = "//ADD_FUNCTION_CONDITION".length
+    //     insertIndex = main_function.indexOf("//ADD_FUNCTION_CONDITION") + searchSize
+    //     let inc_pkt_count = "function_packet_count["+fid+"-10000]++;"
+    //     let if_else_cond = "else if( fid == "+fid + " ) {\n "+inc_pkt_count +"\nfunction_"+fid+"(mapHdr);\n}"
+    //     let main_function_full = main_function.slice(0, insertIndex) +"\n"+ if_else_cond +"\n"+ main_function.slice(insertIndex)
+    //     console.log(main_function_full);
+    //     fs.writeFileSync(functionPath +"offload/"+ "static_dispatch_function.c", main_function_full)
+    //     return 'xyz';
+    //     return hash
+    // });
 }

 /**
...
dispatch_system/dispatch_manager/nic/Makefile-nfp4build

 #
-# Generated Makefile for orchestrator_speedo
+# Generated Makefile for orchestrator
 #
 ifndef SDKDIR
...
@@ -122,7 +122,7 @@ ifneq ($(NFAS_FOUND),found)
 $(warning warning: nfas not found or not executable, on windows please run nfp4term.bat)
 endif

-$(OUTDIR)/orchestrator_speedo.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \
+$(OUTDIR)/orchestrator.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \
	$(OUTDIR)/nfd_pcie0_pci_in_issue1.list/nfd_pcie0_pci_in_issue1.list \
	$(OUTDIR)/nfd_pcie0_pci_out_me0.list/nfd_pcie0_pci_out_me0.list \
	$(OUTDIR)/nbi_init_csr.list/nbi_init_csr.list \
...
@@ -186,17 +186,17 @@ $(PIFOUTDIR)/build_info.json: $(MAKEFILE_LIST)
 # Generate IR from P4
 #
-$(OUTDIR)/orchestrator_speedo.yml: p4src/orchestrator_speedo.p4 \
+$(OUTDIR)/orchestrator.yml: p4src/orchestrator.p4 \
	$(MAKEFILE_LIST)
	@echo ---------
	@echo compiling p4 $@
	@echo ---------
	@mkdir -p $(PIFOUTDIR)
-	$(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/orchestrator_speedo.yml \
+	$(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/orchestrator.yml \
		--p4-version 16 \
		--p4-compiler p4c-nfp \
		--source_info \
-		p4src/orchestrator_speedo.p4
+		p4src/orchestrator.p4
 #
...
@@ -229,16 +229,16 @@ $(PIFOUTDIR)/pif_pkt_clone%h \
 $(PIFOUTDIR)/pif_flcalc%c \
 $(PIFOUTDIR)/pif_flcalc%h \
 $(PIFOUTDIR)/pif_field_lists%h \
-$(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/orchestrator_speedo%yml $(MAKEFILE_LIST)
+$(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/orchestrator%yml $(MAKEFILE_LIST)
	@echo ---------
	@echo generating pif $@
	@echo ---------
	@mkdir -p $(PIFOUTDIR)
	$(SDKP4DIR)/bin/nfirc -o $(PIFOUTDIR)/ \
-		--p4info $(OUTDIR)/orchestrator_speedo.p4info.json \
+		--p4info $(OUTDIR)/orchestrator.p4info.json \
		--debugpoints \
		--mac_ingress_timestamp \
-		$(OUTDIR)/orchestrator_speedo.yml
+		$(OUTDIR)/orchestrator.yml
 #
...
@@ -707,8 +707,6 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
	$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \
	$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \
	$(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \
-	p4src/static_dispatch_function.c \
-	p4src/nic_function_test.c \
	$(PIFOUTDIR)/pif_design.h \
	$(MAKEFILE_LIST)
	@echo ---------
...
@@ -817,9 +815,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
	$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_init.c \
	$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \
	$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \
-	$(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \
-	p4src/static_dispatch_function.c \
-	p4src/nic_function_test.c
+	$(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c
 #
 # APP_MASTER
...
dispatch_system/dispatch_manager/nic/assign_ip.sh

@@ -51,3 +51,12 @@ sudo ip netns exec ns_server ifconfig vf0_2 mtu 9000
 # sudo ip addr add 10.129.6.5/24 dev bridgek0
 # sudo ip link set bridgek0 up

 # create veth cable for kafka
+sudo ip link add veth_nnic0 type veth peer name veth_nnic1
+sudo ip link set veth_nnic0 netns ns_server
+sudo ip netns exec ns_server ip addr add 10.128.2.201/24 dev veth_nnic0
+sudo ip netns exec ns_server ip link set dev veth_nnic0 up
+sudo ip addr add 10.128.2.200/24 dev veth_nnic1
+sudo ip link set dev veth_nnic1 up
dispatch_system/dispatch_manager/nic/build_offload.sh

@@ -30,10 +30,10 @@ fi
 if [[ $offload_flag -eq 1 ]]
 then
	# move to p4 bin
	cd /opt/netronome/p4/bin/
	# cd /home/ub-01/mahendra/nfp-sdk-6.1.0-preview/p4/bin/
	# offload
-	sudo ./rtecli design-load -f $location/p4src/orchestrator.nffw -c $location/p4src/echo.p4cfg -p $location/p4src/out/pif_design.json
+	sudo /opt/netronome/p4/bin/rtecli design-load -f $location/p4src/orchestrator.nffw -c $location/p4src/echo.p4cfg -p $location/p4src/out/pif_design.json
	# returning back to base
	cd $location
...
@@ -45,4 +45,4 @@ then
	docker stop $(docker ps -a -q) || true
	#assigning IPs to network interfaces
	sudo ./assign_ip.sh
-fi
\ No newline at end of file
+fi
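The design-load step now invokes rtecli by absolute path rather than relative to the current directory, so it no longer depends on the earlier cd into the SDK bin directory having succeeded.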
dispatch_system/dispatch_manager/nic/p4src/includes/headers.p4

@@ -67,6 +67,7 @@ header map_hdr_t {
     bit<8> f2;
     bit<8> f3;
     bit<8> f4;
+    bit<8> autoscaling;
     // bit<8> batch_count;
 }
...
dispatch_system/dispatch_manager/nic/p4src/orchestrator.bmv2.json

@@ -62,7 +62,8 @@
         ["f1", 8, false],
         ["f2", 8, false],
         ["f3", 8, false],
-        ["f4", 8, false]
+        ["f4", 8, false],
+        ["autoscaling", 8, false]
       ]
     },
     {
...
dispatch_system/dispatch_manager/nic/p4src/orchestrator.yml

@@ -75,6 +75,7 @@ map_hdr:
     - f2 : 8
     - f3 : 8
     - f4 : 8
+    - autoscaling : 8
   type : header

 resubmit_meta :
...
@@ -427,7 +428,7 @@ layout:
 ##########################################
 source_info :
-  date : 2021/12/15 05:37:07
+  date : 2022/02/04 11:27:23
   output_file : p4src/orchestrator.yml
   p4_version : '16'
   source_files :
...
dispatch_system/dispatch_manager/nic/p4src/orchestrator_coldstart.p4 (new file, 0 → 100644)
#include <core.p4>
#define V1MODEL_VERSION 20200408
#include <v1model.p4>
#include "includes/defines.p4"
#include "includes/headers.p4"
#include "includes/parsers.p4"
//extern void prime();
//extern void prime2();
extern void static_dispatch_function();
//extern void countpacket();
//struct digest_t {
// bit<32> index;
// bit<48> dstAddr;
// bit<48> srcAddr;
//bit<16> etherType;
//}
//struct digest_time_t {
// bit<64> igt;
// bit<64> cgt;
// bit<64> time_taken;
//}
//struct digest_t2 {
// bit<32> req_fid;
//}
struct digest_check_udp_port{
bit<16> udp_port;
bit<32> fid;
bit<4> packet_count;
bit<32> src_ip;
bit<32> dst_ip;
}
control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
//register<bit<8>>(10000) function_id_check;
register<bit<4>>(1) fwd_checks;
//bit<8> pc;
bit<4> pc2=0;
bit<1> static=1w1;
@name(".fwd_act") action fwd_act(bit<16> port) {
standard_metadata.egress_spec = port;
}
@name(".fwd") table fwd {
actions = {
fwd_act;
}
key = {
standard_metadata.ingress_port : exact;
}
}
@name(".dispatch_act") action dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr , bit<16> egress_port) {
hdr.ipv4.dstAddr = dstAddr;
hdr.udp.dstPort = dstPort;
hdr.ethernet.dstAddr = ethernetAddr;
//digest_t d0;
//d0.srcAddr = hdr.ethernet.srcAddr;
//d0.dstAddr = hdr.ethernet.dstAddr;
//d0.etherType = hdr.ethernet.etherType;
//digest<digest_t>(0, d0 );
//prime();
}
@name(".prime1_act") action prime1_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr , bit<16> egress_port) {
hdr.ipv4.dstAddr = dstAddr;
hdr.udp.dstPort = dstPort;
hdr.ethernet.dstAddr = ethernetAddr;
//prime();
}
@name(".prime2_act") action prime2_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr , bit<16> egress_port) {
hdr.ipv4.dstAddr = dstAddr;
hdr.udp.dstPort = dstPort;
hdr.ethernet.dstAddr = ethernetAddr;
//prime2();
}
@name(".swap_addr") action swap_addr() {
//swap ethernet address
bit<48> temp = hdr.ethernet.srcAddr;
hdr.ethernet.srcAddr = hdr.ethernet.dstAddr;
hdr.ethernet.dstAddr = temp;
// swap ipv4
bit<32> tmp = hdr.ipv4.srcAddr;
hdr.ipv4.srcAddr = hdr.ipv4.dstAddr;
hdr.ipv4.dstAddr = tmp;
// change udp
//bit<16> val = 10001;
hdr.udp.srcPort = hdr.udp.dstPort;
//hdr.udp.dstPort = val;
// change vf
standard_metadata.ingress_port = 769;
//prime2();
}
@name(".dispatch") table dispatch {
actions = {
dispatch_act;
prime1_act;
prime2_act;
}
key = {
hdr.map_hdr.function_id : exact;
}
}
apply {
if (hdr.ipv4.isValid() && hdr.udp.dstPort == DISPATCHER_PORT) {
//function_id_check.read(pc,0);
//pc = 8w2;
//pc = hdr.map_hdr.function_id;
//function_id_check.write(0,pc);
dispatch.apply();
//countpacket();
//digest_t2 d2;
//digest<digest_t2>(0, d2 );
//digest_time_t dtime;
//dtime.igt = meta.intrinsic_metadata.ingress_global_timestamp;
//dtime.cgt = meta.intrinsic_metadata.current_global_timestamp;
//dtime.time_taken = dtime.cgt - dtime.igt;
//digest<digest_time_t>(0, dtime );
//if(static == 1w1 && hdr.udp.dstPort != 8080)
fwd_checks.read(pc2,0);
pc2 = pc2 + 1;
if(hdr.udp.dstPort != 8080 )
{
static_dispatch_function();
//bit<32> var=32w21;
if(hdr.map_hdr.data == EXEC_ON_NIC)
{
swap_addr();
//hdr.map_hdr.data=var;
}
}
fwd_checks.write(0,pc2);
digest_check_udp_port dig;
dig.udp_port = hdr.udp.dstPort;
dig.fid = hdr.map_hdr.data;
dig.packet_count = pc2;
dig.src_ip = hdr.ipv4.srcAddr;
dig.dst_ip = hdr.ipv4.dstAddr;
digest<digest_check_udp_port>(0, dig );
fwd.apply();
} else {
fwd.apply();
}
//bit<16>mod = 16w10;
//hdr.udp.dstPort = 10000+(pc2 % mod);
}
}
control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
// @name(".ethernet_set_mac_act") action ethernet_set_mac_act(bit<48> smac, bit<48> dmac) {
// hdr.ethernet.srcAddr = smac;
// hdr.ethernet.dstAddr = dmac;
// }
// @name(".ethernet_set_mac") table ethernet_set_mac {
// actions = {
// ethernet_set_mac_act;
// }
// key = {
// standard_metadata.egress_port: exact;
// }
// }
@name("fix_checksum") action fix_checksum() {
hdr.udp.checksum = 16w0;
}
apply {
// if (hdr.udp.dstPort == MDS_PORT) {
// ethernet_set_mac.apply();
// }
fix_checksum();
}
}
control DeparserImpl(packet_out packet, in headers hdr) {
apply {
packet.emit<ethernet_t>(hdr.ethernet);
packet.emit<ipv4_t>(hdr.ipv4);
packet.emit<udp_t>(hdr.udp);
packet.emit<map_hdr_t>(hdr.map_hdr);
}
}
control verifyChecksum(inout headers hdr, inout metadata meta) {
apply {
verify_checksum(
hdr.ipv4.isValid(),
{ hdr.ipv4.version,
hdr.ipv4.ihl,
hdr.ipv4.diffserv,
hdr.ipv4.totalLen,
hdr.ipv4.identification,
hdr.ipv4.flags,
hdr.ipv4.fragOffset,
hdr.ipv4.ttl,
hdr.ipv4.protocol,
hdr.ipv4.srcAddr,
hdr.ipv4.dstAddr },
hdr.ipv4.hdrChecksum,
HashAlgorithm.csum16);
}
}
control computeChecksum(inout headers hdr, inout metadata meta) {
apply {
update_checksum(
hdr.ipv4.isValid(),
{ hdr.ipv4.version,
hdr.ipv4.ihl,
hdr.ipv4.diffserv,
hdr.ipv4.totalLen,
hdr.ipv4.identification,
hdr.ipv4.flags,
hdr.ipv4.fragOffset,
hdr.ipv4.ttl,
hdr.ipv4.protocol,
hdr.ipv4.srcAddr,
hdr.ipv4.dstAddr },
hdr.ipv4.hdrChecksum,
HashAlgorithm.csum16);
}
}
V1Switch<headers, metadata>(ParserImpl(), verifyChecksum(), ingress(), egress(), computeChecksum(), DeparserImpl()) main;
dispatch_system/dispatch_manager/nic/smartnic_dispatch_monitor.py

@@ -5,31 +5,47 @@ from kafka import KafkaConsumer
 #consumer = KafkaConsumer('deployed', 'removeWorker',
 #                         "request", bootstrap_servers='10.129.6.5:9092')
-consumer = KafkaConsumer('deployed', 'removeWorker', "request", bootstrap_servers='localhost:9092')
+consumer = KafkaConsumer('deployed', 'removeWorker', 'COLDSTART_WORKER', 'AUTOSCALE', "request",
+                         bootstrap_servers='10.129.2.201:9092')
 RTEInterface.Connect('thrift', "10.129.2.201", 20206)
 tableId = "ingress::dispatch"
+tableId2 = "ingress::fwd"
 ruleDictionary = {}

 def makeRule(ip, port, mac, functionHash, tableId, rule_name, default_rule):
-    actions = '{ "type" : "ingress::dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
-        "dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } } }' \
-        % (ip, int(port), mac)
-    match = '{ "map_hdr.function_id" : { "value" : %d} }' % (functionHash)
-    rule = { "tableId" : tableId, "rule_name" : rule_name, "default_rule" : default_rule, "match" : match, "actions" : actions }
+    rule = {}
+    if default_rule:
+        actions = '{ "type" : "ingress::dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
+            "dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } } }' \
+            % (ip, int(port), mac)
+        match = '{ }'
+        rule = { "tableId" : tableId, "rule_name" : rule_name, "default_rule" : default_rule, "match" : match, "actions" : actions }
+    else:
+        actions = '{ "type" : "ingress::dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
+            "dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } } }' \
+            % (ip, int(port), mac)
+        match = '{ "map_hdr.function_id" : { "value" : %d} }' % (functionHash)
+        rule = { "tableId" : tableId, "rule_name" : rule_name, "default_rule" : default_rule, "match" : match, "actions" : actions }
     return rule

 def addRule(worker):
     functionHash = worker[u'functionHash']
     rule_name = "dispatch_to_worker" + functionHash
-    functionHash = int(functionHash[0:5], 16)
+    # functionHash = int(functionHash[0:5], 16)
+    functionHash = int(functionHash[9:])
     ip = str(worker[u'node_id']).strip()
     port = int(worker[u'portExternal'])
     mac = str(worker[u'mac']).strip()
...
@@ -45,7 +61,8 @@ def addRule(worker):
 def deleteRule(worker):
     functionHash = worker[u'functionHash']
-    functionHash = int(functionHash[0:5], 16)
+    # functionHash = int(functionHash[0:5], 16)
+    functionHash = int(functionHash[9:])
     rule = ruleDictionary[functionHash]
     RTEInterface.Tables.DeleteRule(
         rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
...
@@ -56,13 +73,83 @@ def deleteRule(worker):
     return 0

+def updateDefaultRule(worker):
+    # TODO 1: implement function to update default rule when worker load heap changes
+    # def EditRule(self, tbl_id, rule_name, default_rule, match, actions, priority = None, timeout = None):
+    rule_name = "default"
+    # functionHash = int(functionHash[0:5], 16)
+    ip = str(worker[u'node_id']).strip()
+    port = int(worker[u'port'])
+    mac = str(worker[u'mac']).strip()
+    default_rule = True
+    rule = makeRule(ip, port, mac, None, tableId, rule_name, default_rule)
+    RTEInterface.Tables.EditRule(
+        rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
+    print("\nDefault rule updated\n\n")
+    return 0
+
+def makeFwdTableRule(rule_name, default_rule):
+    rule = {}
+    if default_rule:
+        actions = '{ "type" : "ingress::fwd_act", "data" : { "port" : { "value" : "p0" } } }'
+        match = '{ "standard_metadata.ingress_port" : { "value" : "v0.0" } }'
+        rule = { "tableId" : tableId2, "rule_name" : 'host_to_net', "default_rule" : default_rule, "match" : match, "actions" : actions }
+    else:
+        actions = '{ "type" : "ingress::fwd_act", "data" : { "port" : { "value" : "p0" } } }'
+        match = '{ "standard_metadata.ingress_port" : { "value" : "v0.0" } }'
+        rule = { "tableId" : tableId2, "rule_name" : 'host_to_net', "default_rule" : default_rule, "match" : match, "actions" : actions }
+    return rule
+
+def updateDefaultRule2():
+    # TODO 1: implement function to update default rule when worker load heap changes
+    # def EditRule(self, tbl_id, rule_name, default_rule, match, actions, priority = None, timeout = None):
+    default_rule = False
+    rule_name = 'host_to_net'
+    rule = makeRule2(rule_name, default_rule)
+    print(rule)
+    RTEInterface.Tables.EditRule(
+        rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])

 for msg in consumer:
     if msg.topic == "deployed":
         msg = msg.value.decode('utf-8')
         worker = json.loads(msg)
         print("received message on deployed : ", worker)
         addRule(worker)
     elif msg.topic == "removeWorker":
         msg = msg.value.decode('utf-8')
         worker = json.loads(msg)
         print("received message on removeWorker : ", worker)
         deleteRule(worker)
+    elif msg.topic == "COLDSTART_WORKER":
+        msg = msg.value.decode('utf-8')
+        worker = json.loads(msg)
+        print("received message on COLDSTART_WORKER : ", worker)
+        updateDefaultRule(worker[u'nodes'])
+    # elif msg.topic == "AUTOSCALE":
+    #     msg = msg.value.decode('utf-8')
+    #     updateDefaultRule2()
+
+# worker={
+#     'node_id': '192.168.2.3',
+#     'portExternal': '8081',
+#     'mac': '00:22:22:22:22:22'
+# }
+# updateDefaultRule(worker)
\ No newline at end of file
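The switch from parsing functionHash[0:5] as hex to int(functionHash[9:]) lines up with generateExecutor in dispatch_manager/lib.js above, which now names functions "function_<id>": stripping the 9-character "function_" prefix recovers the numeric id that the P4 dispatch table matches on. Note also that updateDefaultRule2 calls makeRule2, which is not defined in this file; makeFwdTableRule appears to be the intended helper (the call sits on the commented-out AUTOSCALE path).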
dispatch_system/dispatch_manager/node_modules_2/.bin/detect-libc (deleted symlink, 120000 → 0; was ../detect-libc/bin/detect-libc.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/detect-libc (new file, 0 → 100755)

#!/usr/bin/env node

'use strict';

var spawnSync = require('child_process').spawnSync;
var libc = require('../');

var spawnOptions = {
  env: process.env,
  shell: true,
  stdio: 'inherit'
};

if (libc.isNonGlibcLinux) {
  spawnOptions.env.LIBC = process.env.LIBC || libc.family;
}

process.exit(spawnSync(process.argv[2], process.argv.slice(3), spawnOptions).status);
dispatch_system/dispatch_manager/node_modules_2/.bin/mime (deleted symlink, 120000 → 0; was ../mime/cli.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/mime (new file, 0 → 100755)

#!/usr/bin/env node

var mime = require('./mime.js');
var file = process.argv[2];
var type = mime.lookup(file);

process.stdout.write(type + '\n');
dispatch_system/dispatch_manager/node_modules_2/.bin/mkdirp (deleted symlink, 120000 → 0; was ../mkdirp/bin/cmd.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/mkdirp (new file, 0 → 100755)

#!/usr/bin/env node

var mkdirp = require('../');
var minimist = require('minimist');
var fs = require('fs');

var argv = minimist(process.argv.slice(2), {
    alias: { m: 'mode', h: 'help' },
    string: [ 'mode' ]
});
if (argv.help) {
    fs.createReadStream(__dirname + '/usage.txt').pipe(process.stdout);
    return;
}

var paths = argv._.slice();
var mode = argv.mode ? parseInt(argv.mode, 8) : undefined;

(function next () {
    if (paths.length === 0) return;
    var p = paths.shift();

    if (mode === undefined) mkdirp(p, cb)
    else mkdirp(p, mode, cb)

    function cb (err) {
        if (err) {
            console.error(err.message);
            process.exit(1);
        }
        else next();
    }
})();
dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt (deleted symlink, 120000 → 0; was ../mqtt/mqtt.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt (new file, 0 → 100755)

#!/usr/bin/env node
'use strict'

/*
 * Copyright (c) 2015-2015 MQTT.js contributors.
 * Copyright (c) 2011-2014 Adam Rudd.
 *
 * See LICENSE for more information
 */

var MqttClient = require('./lib/client')
var connect = require('./lib/connect')
var Store = require('./lib/store')

module.exports.connect = connect

// Expose MqttClient
module.exports.MqttClient = MqttClient
module.exports.Client = MqttClient
module.exports.Store = Store

function cli () {
  var commist = require('commist')()
  var helpMe = require('help-me')()

  commist.register('publish', require('./bin/pub'))
  commist.register('subscribe', require('./bin/sub'))
  commist.register('version', function () {
    console.log('MQTT.js version:', require('./package.json').version)
  })
  commist.register('help', helpMe.toStdout)

  if (commist.parse(process.argv.slice(2)) !== null) {
    console.log('No such command:', process.argv[2], '\n')
    helpMe.toStdout()
  }
}

if (require.main === module) {
  cli()
}
dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt_pub (deleted symlink, 120000 → 0; was ../mqtt/bin/pub.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt_pub (new file, 0 → 100755)

#!/usr/bin/env node
'use strict'

var mqtt = require('../')
var pump = require('pump')
var path = require('path')
var fs = require('fs')
var concat = require('concat-stream')
var Writable = require('readable-stream').Writable
var helpMe = require('help-me')({
  dir: path.join(__dirname, '..', 'doc')
})
var minimist = require('minimist')
var split2 = require('split2')

function send (args) {
  var client = mqtt.connect(args)
  client.on('connect', function () {
    client.publish(args.topic, args.message, args, function (err) {
      if (err) {
        console.warn(err)
      }
      client.end()
    })
  })
  client.on('error', function (err) {
    console.warn(err)
    client.end()
  })
}

function multisend (args) {
  var client = mqtt.connect(args)
  var sender = new Writable({
    objectMode: true
  })
  sender._write = function (line, enc, cb) {
    client.publish(args.topic, line.trim(), args, cb)
  }

  client.on('connect', function () {
    pump(process.stdin, split2(), sender, function (err) {
      client.end()
      if (err) {
        throw err
      }
    })
  })
}

function start (args) {
  args = minimist(args, {
    string: ['hostname', 'username', 'password', 'key', 'cert', 'ca', 'message', 'clientId', 'i', 'id'],
    boolean: ['stdin', 'retain', 'help', 'insecure', 'multiline'],
    alias: {
      port: 'p',
      hostname: ['h', 'host'],
      topic: 't',
      message: 'm',
      qos: 'q',
      clientId: ['i', 'id'],
      retain: 'r',
      username: 'u',
      password: 'P',
      stdin: 's',
      multiline: 'M',
      protocol: ['C', 'l'],
      help: 'H',
      ca: 'cafile'
    },
    default: {
      host: 'localhost',
      qos: 0,
      retain: false,
      topic: '',
      message: ''
    }
  })

  if (args.help) {
    return helpMe.toStdout('publish')
  }

  if (args.key) {
    args.key = fs.readFileSync(args.key)
  }

  if (args.cert) {
    args.cert = fs.readFileSync(args.cert)
  }

  if (args.ca) {
    args.ca = fs.readFileSync(args.ca)
  }

  if (args.key && args.cert && !args.protocol) {
    args.protocol = 'mqtts'
  }

  if (args.port) {
    if (typeof args.port !== 'number') {
      console.warn('# Port: number expected, \'%s\' was given.', typeof args.port)
      return
    }
  }

  if (args['will-topic']) {
    args.will = {}
    args.will.topic = args['will-topic']
    args.will.payload = args['will-message']
    args.will.qos = args['will-qos']
    args.will.retain = args['will-retain']
  }

  if (args.insecure) {
    args.rejectUnauthorized = false
  }

  args.topic = (args.topic || args._.shift()).toString()
  args.message = (args.message || args._.shift()).toString()

  if (!args.topic) {
    console.error('missing topic\n')
    return helpMe.toStdout('publish')
  }

  if (args.stdin) {
    if (args.multiline) {
      multisend(args)
    } else {
      process.stdin.pipe(concat(function (data) {
        args.message = data
        send(args)
      }))
    }
  } else {
    send(args)
  }
}

module.exports = start

if (require.main === module) {
  start(process.argv.slice(2))
}
dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt_sub (deleted symlink, 120000 → 0; was ../mqtt/bin/sub.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/mqtt_sub (new file, 0 → 100755)

#!/usr/bin/env node

var mqtt = require('../')
var path = require('path')
var fs = require('fs')
var helpMe = require('help-me')({
  dir: path.join(__dirname, '..', 'doc')
})
var minimist = require('minimist')

function start (args) {
  args = minimist(args, {
    string: ['hostname', 'username', 'password', 'key', 'cert', 'ca', 'clientId', 'i', 'id'],
    boolean: ['stdin', 'help', 'clean', 'insecure'],
    alias: {
      port: 'p',
      hostname: ['h', 'host'],
      topic: 't',
      qos: 'q',
      clean: 'c',
      keepalive: 'k',
      clientId: ['i', 'id'],
      username: 'u',
      password: 'P',
      protocol: ['C', 'l'],
      verbose: 'v',
      help: '-H',
      ca: 'cafile'
    },
    default: {
      host: 'localhost',
      qos: 0,
      retain: false,
      clean: true,
      keepAlive: 30 // 30 sec
    }
  })

  if (args.help) {
    return helpMe.toStdout('subscribe')
  }

  args.topic = args.topic || args._.shift()

  if (!args.topic) {
    console.error('missing topic\n')
    return helpMe.toStdout('subscribe')
  }

  if (args.key) {
    args.key = fs.readFileSync(args.key)
  }

  if (args.cert) {
    args.cert = fs.readFileSync(args.cert)
  }

  if (args.ca) {
    args.ca = fs.readFileSync(args.ca)
  }

  if (args.key && args.cert && !args.protocol) {
    args.protocol = 'mqtts'
  }

  if (args.insecure) {
    args.rejectUnauthorized = false
  }

  if (args.port) {
    if (typeof args.port !== 'number') {
      console.warn('# Port: number expected, \'%s\' was given.', typeof args.port)
      return
    }
  }

  if (args['will-topic']) {
    args.will = {}
    args.will.topic = args['will-topic']
    args.will.payload = args['will-message']
    args.will.qos = args['will-qos']
    args.will.retain = args['will-retain']
  }

  args.keepAlive = args['keep-alive']

  var client = mqtt.connect(args)

  client.on('connect', function () {
    client.subscribe(args.topic, { qos: args.qos }, function (err, result) {
      if (err) {
        console.error(err)
        process.exit(1)
      }

      result.forEach(function (sub) {
        if (sub.qos > 2) {
          console.error('subscription negated to', sub.topic, 'with code', sub.qos)
          process.exit(1)
        }
      })
    })
  })

  client.on('message', function (topic, payload) {
    if (args.verbose) {
      console.log(topic, payload.toString())
    } else {
      console.log(payload.toString())
    }
  })

  client.on('error', function (err) {
    console.warn(err)
    client.end()
  })
}

module.exports = start

if (require.main === module) {
  start(process.argv.slice(2))
}
dispatch_system/dispatch_manager/node_modules_2/.bin/prebuild-install (deleted symlink, 120000 → 0; was ../prebuild-install/bin.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/prebuild-install (new file, 0 → 100755)

#!/usr/bin/env node

var path = require('path')
var fs = require('fs')
var whichPmRuns = require('which-pm-runs')
var napi = require('napi-build-utils')

var pkg = require(path.resolve('package.json'))
var rc = require('./rc')(pkg)
var log = require('./log')(rc, process.env)
var download = require('./download')
var asset = require('./asset')
var util = require('./util')

var prebuildClientVersion = require('./package.json').version
if (rc.version) {
  console.log(prebuildClientVersion)
  process.exit(0)
}

if (rc.path) process.chdir(rc.path)

if (rc.runtime === 'electron' && rc.target[0] === '4' && rc.abi === '64') {
  log.error(`Electron version ${rc.target} found - skipping prebuild-install work due to known ABI issue`)
  log.error('More information about this issue can be found at https://github.com/lgeiger/node-abi/issues/54')
  process.exit(1)
}

if (!fs.existsSync('package.json')) {
  log.error('setup', 'No package.json found. Aborting...')
  process.exit(1)
}

if (rc.help) {
  console.error(fs.readFileSync(path.join(__dirname, 'help.txt'), 'utf-8'))
  process.exit(0)
}

log.info('begin', 'Prebuild-install version', prebuildClientVersion)

var opts = Object.assign({}, rc, { pkg: pkg, log: log })

if (napi.isNapiRuntime(rc.runtime)) napi.logUnsupportedVersion(rc.target, log)

var pm = whichPmRuns()
var isNpm = !pm || pm.name === 'npm'

if (!isNpm && /node_modules/.test(process.cwd())) {
  // From yarn repository
} else if (opts.force) {
  log.warn('install', 'prebuilt binaries enforced with --force!')
  log.warn('install', 'prebuilt binaries may be out of date!')
} else if (!(typeof pkg._from === 'string')) {
  log.info('install', 'installing standalone, skipping download.')
  process.exit(1)
} else if (pkg._from.length > 4 && pkg._from.substr(0, 4) === 'git+') {
  log.info('install', 'installing from git repository, skipping download.')
  process.exit(1)
} else if (opts.compile === true || opts.prebuild === false) {
  log.info('install', '--build-from-source specified, not attempting download.')
  process.exit(1)
}

var startDownload = function (downloadUrl) {
  download(downloadUrl, opts, function (err) {
    if (err) {
      log.warn('install', err.message)
      return process.exit(1)
    }
    log.info('install', 'Successfully installed prebuilt binary!')
  })
}

if (opts.token) {
  asset(opts, function (err, assetId) {
    if (err) {
      log.warn('install', err.message)
      return process.exit(1)
    }
    startDownload(util.getAssetUrl(opts, assetId))
  })
} else {
  startDownload(util.getDownloadUrl(opts))
}
dispatch_system/dispatch_manager/node_modules_2/.bin/rc (deleted symlink, 120000 → 0; was ../rc/cli.js)

dispatch_system/dispatch_manager/node_modules_2/.bin/rc (new file, 0 → 100755)

#! /usr/bin/env node
var rc = require('./index')

console.log(JSON.stringify(rc(process.argv[2]), false, 2))
dispatch_system/dispatch_manager/node_modules_2/.bin/semver
deleted
120000 → 0
View file @
0212678f
../semver/bin/semver
\ No newline at end of file
dispatch_system/dispatch_manager/node_modules_2/.bin/semver
0 → 100755
View file @
c1ccb55a
#!/usr/bin/env node
// Standalone semver comparison program.
// Exits successfully and prints matching version(s) if
// any supplied version is valid and passes all tests.

var argv = process.argv.slice(2)

var versions = []
var range = []
var inc = null

var version = require('../package.json').version

var loose = false
var includePrerelease = false
var coerce = false

var identifier

var semver = require('../semver')

var reverse = false
var options = {}

main()

function main () {
  if (!argv.length) return help()
  while (argv.length) {
    var a = argv.shift()
    var indexOfEqualSign = a.indexOf('=')
    if (indexOfEqualSign !== -1) {
      a = a.slice(0, indexOfEqualSign)
      argv.unshift(a.slice(indexOfEqualSign + 1))
    }
    switch (a) {
      case '-rv': case '-rev': case '--rev': case '--reverse':
        reverse = true
        break
      case '-l': case '--loose':
        loose = true
        break
      case '-p': case '--include-prerelease':
        includePrerelease = true
        break
      case '-v': case '--version':
        versions.push(argv.shift())
        break
      case '-i': case '--inc': case '--increment':
        switch (argv[0]) {
          case 'major': case 'minor': case 'patch': case 'prerelease':
          case 'premajor': case 'preminor': case 'prepatch':
            inc = argv.shift()
            break
          default:
            inc = 'patch'
            break
        }
        break
      case '--preid':
        identifier = argv.shift()
        break
      case '-r': case '--range':
        range.push(argv.shift())
        break
      case '-c': case '--coerce':
        coerce = true
        break
      case '-h': case '--help': case '-?':
        return help()
      default:
        versions.push(a)
        break
    }
  }

  var options = { loose: loose, includePrerelease: includePrerelease }

  versions = versions.map(function (v) {
    return coerce ? (semver.coerce(v) || { version: v }).version : v
  }).filter(function (v) {
    return semver.valid(v)
  })
  if (!versions.length) return fail()
  if (inc && (versions.length !== 1 || range.length)) {
    return failInc()
  }

  for (var i = 0, l = range.length; i < l; i++) {
    versions = versions.filter(function (v) {
      return semver.satisfies(v, range[i], options)
    })
    if (!versions.length) return fail()
  }
  return success(versions)
}

function failInc () {
  console.error('--inc can only be used on a single version with no range')
  fail()
}

function fail () {
  return process.exit(1)
}

function success () {
  var compare = reverse ? 'rcompare' : 'compare'
  versions.sort(function (a, b) {
    return semver[compare](a, b, options)
  }).map(function (v) {
    return semver.clean(v, options)
  }).map(function (v) {
    return inc ? semver.inc(v, inc, options, identifier) : v
  }).forEach(function (v, i, _) {
    console.log(v)
  })
}

function help () {
  console.log(['SemVer ' + version,
    '',
    'A JavaScript implementation of the https://semver.org/ specification',
    'Copyright Isaac Z. Schlueter',
    '',
    'Usage: semver [options] <version> [<version> [...]]',
    'Prints valid versions sorted by SemVer precedence',
    '',
    'Options:',
    '-r --range <range>',
    '        Print versions that match the specified range.',
    '',
    '-i --increment [<level>]',
    '        Increment a version by the specified level.  Level can',
    '        be one of: major, minor, patch, premajor, preminor,',
    "        prepatch, or prerelease.  Default level is 'patch'.",
    '        Only one version may be specified.',
    '',
    '--preid <identifier>',
    '        Identifier to be used to prefix premajor, preminor,',
    '        prepatch or prerelease version increments.',
    '',
    '-l --loose',
    '        Interpret versions and ranges loosely',
    '',
    '-p --include-prerelease',
    '        Always include prerelease versions in range matching',
    '',
    '-c --coerce',
    '        Coerce a string into SemVer if possible',
    '        (does not imply --loose)',
    '',
    'Program exits successfully if any valid version satisfies',
    'all supplied ranges, and prints all satisfying versions.',
    '',
    'If no satisfying versions are found, then exits failure.',
    '',
    'Versions are printed in ascending order, so supplying',
    'multiple versions to the utility will just sort them.'
  ].join('\n'))
}
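The flags handled above map directly onto the semver library API. A short sketch of the corresponding calls (the version strings are examples only; outside this vendored tree the package is required as 'semver'):

var semver = require('semver');

console.log(semver.valid('1.2.3'));                  // '1.2.3'
console.log(semver.satisfies('1.2.3', '>=1.0.0'));   // true  (what -r/--range checks)
console.log(semver.inc('1.2.3', 'patch'));           // '1.2.4' (what -i/--increment does)
console.log(semver.coerce('v2.1').version);          // '2.1.0' (what -c/--coerce does)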
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-conv
deleted
120000 → 0
View file @
0212678f
../sshpk/bin/sshpk-conv
\ No newline at end of file
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-conv
0 → 100755
View file @
c1ccb55a
#!/usr/bin/env node
// -*- mode: js -*-
// vim: set filetype=javascript :
// Copyright 2018 Joyent, Inc.  All rights reserved.

var dashdash = require('dashdash');
var sshpk = require('../lib/index');
var fs = require('fs');
var path = require('path');
var tty = require('tty');
var readline = require('readline');
var getPassword = require('getpass').getPass;

var options = [
    { names: ['outformat', 't'], type: 'string', help: 'Output format' },
    { names: ['informat', 'T'], type: 'string', help: 'Input format' },
    { names: ['file', 'f'], type: 'string', help: 'Input file name (default stdin)' },
    { names: ['out', 'o'], type: 'string', help: 'Output file name (default stdout)' },
    { names: ['private', 'p'], type: 'bool', help: 'Produce a private key as output' },
    { names: ['derive', 'd'], type: 'string', help: 'Output a new key derived from this one, with given algo' },
    { names: ['identify', 'i'], type: 'bool', help: 'Print key metadata instead of converting' },
    { names: ['fingerprint', 'F'], type: 'bool', help: 'Output key fingerprint' },
    { names: ['hash', 'H'], type: 'string', help: 'Hash function to use for key fingerprint with -F' },
    { names: ['spki', 's'], type: 'bool', help: 'With -F, generates an SPKI fingerprint instead of SSH' },
    { names: ['comment', 'c'], type: 'string', help: 'Set key comment, if output format supports' },
    { names: ['help', 'h'], type: 'bool', help: 'Shows this help text' }
];

if (require.main === module) {
    var parser = dashdash.createParser({ options: options });

    try {
        var opts = parser.parse(process.argv);
    } catch (e) {
        console.error('sshpk-conv: error: %s', e.message);
        process.exit(1);
    }

    if (opts.help || opts._args.length > 1) {
        var help = parser.help({}).trimRight();
        console.error('sshpk-conv: converts between SSH key formats\n');
        console.error(help);
        console.error('\navailable key formats:');
        console.error('  - pem, pkcs1     eg id_rsa');
        console.error('  - ssh            eg id_rsa.pub');
        console.error('  - pkcs8          format you want for openssl');
        console.error('  - openssh        like output of ssh-keygen -o');
        console.error('  - rfc4253        raw OpenSSH wire format');
        console.error('  - dnssec         dnssec-keygen format');
        console.error('  - putty          PuTTY ppk format');
        console.error('\navailable fingerprint formats:');
        console.error('  - hex            colon-separated hex for SSH');
        console.error('                   straight hex for SPKI');
        console.error('  - base64         SHA256:* format from OpenSSH');
        process.exit(1);
    }

    /*
     * Key derivation can only be done on private keys, so use of the -d
     * option necessarily implies -p.
     */
    if (opts.derive)
        opts.private = true;

    var inFile = process.stdin;
    var inFileName = 'stdin';
    var inFilePath;

    if (opts.file) {
        inFilePath = opts.file;
    } else if (opts._args.length === 1) {
        inFilePath = opts._args[0];
    }

    if (inFilePath)
        inFileName = path.basename(inFilePath);

    try {
        if (inFilePath) {
            fs.accessSync(inFilePath, fs.R_OK);
            inFile = fs.createReadStream(inFilePath);
        }
    } catch (e) {
        ifError(e, 'error opening input file');
    }

    var outFile = process.stdout;

    try {
        if (opts.out && !opts.identify) {
            fs.accessSync(path.dirname(opts.out), fs.W_OK);
            outFile = fs.createWriteStream(opts.out);
        }
    } catch (e) {
        ifError(e, 'error opening output file');
    }

    var bufs = [];
    inFile.on('readable', function () {
        var data;
        while ((data = inFile.read()))
            bufs.push(data);
    });
    var parseOpts = {};
    parseOpts.filename = inFileName;

    inFile.on('end', function processKey() {
        var buf = Buffer.concat(bufs);
        var fmt = 'auto';
        if (opts.informat)
            fmt = opts.informat;
        var f = sshpk.parseKey;
        if (opts.private)
            f = sshpk.parsePrivateKey;
        try {
            var key = f(buf, fmt, parseOpts);
        } catch (e) {
            if (e.name === 'KeyEncryptedError') {
                getPassword(function (err, pw) {
                    if (err)
                        ifError(err);
                    parseOpts.passphrase = pw;
                    processKey();
                });
                return;
            }
            ifError(e);
        }

        if (opts.derive)
            key = key.derive(opts.derive);

        if (opts.comment)
            key.comment = opts.comment;

        if (opts.identify) {
            var kind = 'public';
            if (sshpk.PrivateKey.isPrivateKey(key))
                kind = 'private';
            console.log('%s: a %d bit %s %s key', inFileName,
                key.size, key.type.toUpperCase(), kind);
            if (key.type === 'ecdsa')
                console.log('ECDSA curve: %s', key.curve);
            if (key.comment)
                console.log('Comment: %s', key.comment);
            console.log('SHA256 fingerprint: ' +
                key.fingerprint('sha256').toString());
            console.log('MD5 fingerprint: ' +
                key.fingerprint('md5').toString());
            console.log('SPKI-SHA256 fingerprint: ' +
                key.fingerprint('sha256', 'spki').toString());
            process.exit(0);
            return;
        }

        if (opts.fingerprint) {
            var hash = opts.hash;
            var type = opts.spki ? 'spki' : 'ssh';
            var format = opts.outformat;
            var fp = key.fingerprint(hash, type).toString(format);
            outFile.write(fp);
            outFile.write('\n');
            outFile.once('drain', function () {
                process.exit(0);
            });
            return;
        }

        fmt = undefined;
        if (opts.outformat)
            fmt = opts.outformat;
        outFile.write(key.toBuffer(fmt));
        if (fmt === 'ssh' ||
            (!opts.private && fmt === undefined))
            outFile.write('\n');
        outFile.once('drain', function () {
            process.exit(0);
        });
    });
}

function ifError(e, txt) {
    if (txt)
        txt = txt + ': ';
    else
        txt = '';
    console.error('sshpk-conv: ' + txt + e.name + ': ' + e.message);
    if (process.env['DEBUG'] || process.env['V']) {
        console.error(e.stack);
        if (e.innerErr)
            console.error(e.innerErr.stack);
    }
    process.exit(1);
}
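A programmatic sketch of what a plain "sshpk-conv -t pkcs8 key.pem" run amounts to; the input file name is hypothetical, and the published package is required as 'sshpk' outside this vendored tree:

var fs = require('fs');
var sshpk = require('sshpk');

// Parse the key in whatever supported format it is in, then
// re-serialize it as PKCS#8.
var key = sshpk.parseKey(fs.readFileSync('key.pem'), 'auto');
process.stdout.write(key.toBuffer('pkcs8'));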
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-sign
deleted
120000 → 0
View file @
0212678f
../sshpk/bin/sshpk-sign
\ No newline at end of file
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-sign
0 → 100755
View file @
c1ccb55a
#!/usr/bin/env node
// -*- mode: js -*-
// vim: set filetype=javascript :
// Copyright 2015 Joyent, Inc.  All rights reserved.

var dashdash = require('dashdash');
var sshpk = require('../lib/index');
var fs = require('fs');
var path = require('path');
var getPassword = require('getpass').getPass;

var options = [
    { names: ['hash', 'H'], type: 'string', help: 'Hash algorithm (sha1, sha256, sha384, sha512)' },
    { names: ['verbose', 'v'], type: 'bool', help: 'Display verbose info about key and hash used' },
    { names: ['identity', 'i'], type: 'string', help: 'Path to key to use' },
    { names: ['file', 'f'], type: 'string', help: 'Input filename' },
    { names: ['out', 'o'], type: 'string', help: 'Output filename' },
    { names: ['format', 't'], type: 'string', help: 'Signature format (asn1, ssh, raw)' },
    { names: ['binary', 'b'], type: 'bool', help: 'Output raw binary instead of base64' },
    { names: ['help', 'h'], type: 'bool', help: 'Shows this help text' }
];

var parseOpts = {};

if (require.main === module) {
    var parser = dashdash.createParser({ options: options });

    try {
        var opts = parser.parse(process.argv);
    } catch (e) {
        console.error('sshpk-sign: error: %s', e.message);
        process.exit(1);
    }

    if (opts.help || opts._args.length > 1) {
        var help = parser.help({}).trimRight();
        console.error('sshpk-sign: sign data using an SSH key\n');
        console.error(help);
        process.exit(1);
    }

    if (!opts.identity) {
        var help = parser.help({}).trimRight();
        console.error('sshpk-sign: the -i or --identity option ' +
            'is required\n');
        console.error(help);
        process.exit(1);
    }

    var keyData = fs.readFileSync(opts.identity);
    parseOpts.filename = opts.identity;

    run();
}

function run() {
    var key;
    try {
        key = sshpk.parsePrivateKey(keyData, 'auto', parseOpts);
    } catch (e) {
        if (e.name === 'KeyEncryptedError') {
            getPassword(function (err, pw) {
                parseOpts.passphrase = pw;
                run();
            });
            return;
        }
        console.error('sshpk-sign: error loading private key "' +
            opts.identity + '": ' + e.name + ': ' + e.message);
        process.exit(1);
    }

    var hash = opts.hash || key.defaultHashAlgorithm();

    var signer;
    try {
        signer = key.createSign(hash);
    } catch (e) {
        console.error('sshpk-sign: error creating signer: ' +
            e.name + ': ' + e.message);
        process.exit(1);
    }

    if (opts.verbose) {
        console.error('sshpk-sign: using %s-%s with a %d bit key',
            key.type, hash, key.size);
    }

    var inFile = process.stdin;
    var inFileName = 'stdin';
    var inFilePath;

    if (opts.file) {
        inFilePath = opts.file;
    } else if (opts._args.length === 1) {
        inFilePath = opts._args[0];
    }

    if (inFilePath)
        inFileName = path.basename(inFilePath);

    try {
        if (inFilePath) {
            fs.accessSync(inFilePath, fs.R_OK);
            inFile = fs.createReadStream(inFilePath);
        }
    } catch (e) {
        console.error('sshpk-sign: error opening input file' +
            ': ' + e.name + ': ' + e.message);
        process.exit(1);
    }

    var outFile = process.stdout;

    try {
        if (opts.out && !opts.identify) {
            fs.accessSync(path.dirname(opts.out), fs.W_OK);
            outFile = fs.createWriteStream(opts.out);
        }
    } catch (e) {
        console.error('sshpk-sign: error opening output file' +
            ': ' + e.name + ': ' + e.message);
        process.exit(1);
    }

    inFile.pipe(signer);
    inFile.on('end', function () {
        var sig;
        try {
            sig = signer.sign();
        } catch (e) {
            console.error('sshpk-sign: error signing data: ' +
                e.name + ': ' + e.message);
            process.exit(1);
        }

        var fmt = opts.format || 'asn1';
        var output;
        try {
            output = sig.toBuffer(fmt);
            if (!opts.binary)
                output = output.toString('base64');
        } catch (e) {
            console.error('sshpk-sign: error converting signature' +
                ' to ' + fmt + ' format: ' + e.name + ': ' +
                e.message);
            process.exit(1);
        }

        outFile.write(output);
        if (!opts.binary)
            outFile.write('\n');
        outFile.once('drain', function () {
            process.exit(0);
        });
    });
}
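The same signing flow driven directly from the sshpk API; the file names are hypothetical:

var fs = require('fs');
var sshpk = require('sshpk');

// Load the private key, sign the file contents, print base64 ASN.1,
// mirroring "sshpk-sign -i id_rsa -f data.txt".
var key = sshpk.parsePrivateKey(fs.readFileSync('id_rsa'), 'auto');
var signer = key.createSign('sha256');   // or key.defaultHashAlgorithm()
signer.update(fs.readFileSync('data.txt'));
var sig = signer.sign();
console.log(sig.toBuffer('asn1').toString('base64'));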
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-verify
deleted
120000 → 0
View file @
0212678f
../sshpk/bin/sshpk-verify
\ No newline at end of file
dispatch_system/dispatch_manager/node_modules_2/.bin/sshpk-verify
0 → 100755
View file @
c1ccb55a
#!/usr/bin/env node
// -*- mode: js -*-
// vim: set filetype=javascript :
// Copyright 2015 Joyent, Inc.  All rights reserved.

var dashdash = require('dashdash');
var sshpk = require('../lib/index');
var fs = require('fs');
var path = require('path');
var Buffer = require('safer-buffer').Buffer;

var options = [
    { names: ['hash', 'H'], type: 'string', help: 'Hash algorithm (sha1, sha256, sha384, sha512)' },
    { names: ['verbose', 'v'], type: 'bool', help: 'Display verbose info about key and hash used' },
    { names: ['identity', 'i'], type: 'string', help: 'Path to (public) key to use' },
    { names: ['file', 'f'], type: 'string', help: 'Input filename' },
    { names: ['format', 't'], type: 'string', help: 'Signature format (asn1, ssh, raw)' },
    { names: ['signature', 's'], type: 'string', help: 'base64-encoded signature data' },
    { names: ['help', 'h'], type: 'bool', help: 'Shows this help text' }
];

if (require.main === module) {
    var parser = dashdash.createParser({ options: options });

    try {
        var opts = parser.parse(process.argv);
    } catch (e) {
        console.error('sshpk-verify: error: %s', e.message);
        process.exit(3);
    }

    if (opts.help || opts._args.length > 1) {
        var help = parser.help({}).trimRight();
        console.error('sshpk-verify: verify data signed with an SSH key\n');
        console.error(help);
        process.exit(3);
    }

    if (!opts.identity) {
        var help = parser.help({}).trimRight();
        console.error('sshpk-verify: the -i or --identity option ' +
            'is required\n');
        console.error(help);
        process.exit(3);
    }

    if (!opts.signature) {
        var help = parser.help({}).trimRight();
        console.error('sshpk-verify: the -s or --signature option ' +
            'is required\n');
        console.error(help);
        process.exit(3);
    }

    var keyData = fs.readFileSync(opts.identity);

    var key;
    try {
        key = sshpk.parseKey(keyData);
    } catch (e) {
        console.error('sshpk-verify: error loading key "' +
            opts.identity + '": ' + e.name + ': ' + e.message);
        process.exit(2);
    }

    var fmt = opts.format || 'asn1';
    var sigData = Buffer.from(opts.signature, 'base64');

    var sig;
    try {
        sig = sshpk.parseSignature(sigData, key.type, fmt);
    } catch (e) {
        console.error('sshpk-verify: error parsing signature: ' +
            e.name + ': ' + e.message);
        process.exit(2);
    }

    var hash = opts.hash || key.defaultHashAlgorithm();

    var verifier;
    try {
        verifier = key.createVerify(hash);
    } catch (e) {
        console.error('sshpk-verify: error creating verifier: ' +
            e.name + ': ' + e.message);
        process.exit(2);
    }

    if (opts.verbose) {
        console.error('sshpk-verify: using %s-%s with a %d bit key',
            key.type, hash, key.size);
    }

    var inFile = process.stdin;
    var inFileName = 'stdin';
    var inFilePath;

    if (opts.file) {
        inFilePath = opts.file;
    } else if (opts._args.length === 1) {
        inFilePath = opts._args[0];
    }

    if (inFilePath)
        inFileName = path.basename(inFilePath);

    try {
        if (inFilePath) {
            fs.accessSync(inFilePath, fs.R_OK);
            inFile = fs.createReadStream(inFilePath);
        }
    } catch (e) {
        console.error('sshpk-verify: error opening input file' +
            ': ' + e.name + ': ' + e.message);
        process.exit(2);
    }

    inFile.pipe(verifier);
    inFile.on('end', function () {
        var ret;
        try {
            ret = verifier.verify(sig);
        } catch (e) {
            console.error('sshpk-verify: error verifying data: ' +
                e.name + ': ' + e.message);
            process.exit(1);
        }
        if (ret) {
            console.error('OK');
            process.exit(0);
        }
        console.error('NOT OK');
        process.exit(1);
    });
}
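And the matching verification as a library sketch; the file names are hypothetical, with data.sig assumed to hold the base64 signature:

var fs = require('fs');
var sshpk = require('sshpk');

// Mirrors "sshpk-verify -i id_rsa.pub -s <sig> -f data.txt".
var key = sshpk.parseKey(fs.readFileSync('id_rsa.pub'), 'auto');
var sigData = Buffer.from(fs.readFileSync('data.sig', 'ascii'), 'base64');
var sig = sshpk.parseSignature(sigData, key.type, 'asn1');
var verifier = key.createVerify('sha256');
verifier.update(fs.readFileSync('data.txt'));
console.log(verifier.verify(sig) ? 'OK' : 'NOT OK');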
dispatch_system/dispatch_manager/node_modules_2/.bin/uuid
deleted
120000 → 0
View file @
0212678f
../uuid/bin/uuid
\ No newline at end of file
dispatch_system/dispatch_manager/node_modules_2/.bin/uuid
0 → 100755
View file @
c1ccb55a
#!/usr/bin/env node
var assert = require('assert');

function usage() {
  console.log('Usage:');
  console.log('  uuid');
  console.log('  uuid v1');
  console.log('  uuid v3 <name> <namespace uuid>');
  console.log('  uuid v4');
  console.log('  uuid v5 <name> <namespace uuid>');
  console.log('  uuid --help');
  console.log('\nNote: <namespace uuid> may be "URL" or "DNS" to use the corresponding UUIDs defined by RFC4122');
}

var args = process.argv.slice(2);

if (args.indexOf('--help') >= 0) {
  usage();
  process.exit(0);
}
var version = args.shift() || 'v4';

switch (version) {
  case 'v1':
    var uuidV1 = require('../v1');
    console.log(uuidV1());
    break;
  case 'v3':
    var uuidV3 = require('../v3');
    var name = args.shift();
    var namespace = args.shift();
    assert(name != null, 'v3 name not specified');
    assert(namespace != null, 'v3 namespace not specified');
    if (namespace == 'URL') namespace = uuidV3.URL;
    if (namespace == 'DNS') namespace = uuidV3.DNS;
    console.log(uuidV3(name, namespace));
    break;
  case 'v4':
    var uuidV4 = require('../v4');
    console.log(uuidV4());
    break;
  case 'v5':
    var uuidV5 = require('../v5');
    var name = args.shift();
    var namespace = args.shift();
    assert(name != null, 'v5 name not specified');
    assert(namespace != null, 'v5 namespace not specified');
    if (namespace == 'URL') namespace = uuidV5.URL;
    if (namespace == 'DNS') namespace = uuidV5.DNS;
    console.log(uuidV5(name, namespace));
    break;
  default:
    usage();
    process.exit(1);
}
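A sketch of the generator entry points the CLI dispatches to, using the uuid 3.x-style require paths this vendored copy ships (arguments are examples only):

var uuidV4 = require('uuid/v4');
var uuidV5 = require('uuid/v5');

console.log(uuidV4());                            // random UUID
console.log(uuidV5('example.com', uuidV5.DNS));   // name-based UUID in the DNS namespace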
dispatch_system/dispatch_manager/repository/worker_env/node_modules/.bin/mime
deleted
120000 → 0
View file @
0212678f
../mime/cli.js
\ No newline at end of file
dispatch_system/dispatch_manager/repository/worker_env/node_modules/.bin/mime
0 → 100755
View file @
c1ccb55a
#!/usr/bin/env node
var mime = require('./mime.js');
var file = process.argv[2];
var type = mime.lookup(file);
process.stdout.write(type + '\n');
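Equivalent library usage, assuming the mime 1.x API this vendored copy exposes; the file name is illustrative:

var mime = require('mime');
console.log(mime.lookup('photo.jpg'));   // 'image/jpeg'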
dispatch_system/dispatch_manager/rm.js
View file @
c1ccb55a
let workerNodes = {}, timeline = {}
const constants = require('../constants_local.json')
const dgram = require('dgram');    // was missing: needed by the UDP proxy socket below
const Heap = require('heap');
const libSupport = require('./lib');
const udpProxy = dgram.createSocket('udp4');
const loadThreashold = 1

// min-heap of workers, ordered by 1-minute average load
var workerHeap = new Heap(function (worker1, worker2) {
    return (worker1.system_info.avg_load[0] - worker2.system_info.avg_load[0]);
});
let coldstart_worker = {};
let resourceMap = new Map(), functionToResourceMap = new Map();

let kafka = require('kafka-node'),
    Producer = kafka.Producer,
    client = new kafka.KafkaClient({
...
@@ -11,14 +26,18 @@ let kafka = require('kafka-node'),
    consumer = new Consumer(client, [
        { topic: 'heartbeat' },                        // receives heartbeat messages from workers, also acts as worker join message
        { topic: "request" },                          // receives deployment details from RM
        { topic: constants.topics.check_autoscale },
        { topic: constants.topics.autoscale },
        { topic: constants.topics.coldstart_worker }   // gives the information about the worker having low load
    ], [{ autoCommit: true }])

function getAddress() {
    return workerHeap.peek().address;
    // return Object.keys(workerNodes)[0];
}

// payloads = [
...
@@ -30,6 +49,34 @@ function getAddress() {
//     });
// });

function updateColdstartOnNIC() {
    let lowloadworker = workerHeap.peek();
    if (lowloadworker.address != coldstart_worker.address) {
        console.log("min load worker changed !!!");
        coldstart_worker = lowloadworker;
        let payload = [{
            topic: constants.topics.coldstart_worker,
            messages: JSON.stringify({
                "resource_id": coldstart_worker.resource_id,
                "timestamp": Date.now(),
                "nodes": {
                    node_id: coldstart_worker.address,
                    port: coldstart_worker.port,
                    mac: coldstart_worker.mac
                }
            }),
            partition: 0
        }]
        producer.send(payload, () => {
            console.log(`Replied`)
        })
    }
}

consumer.on('message', function (message) {
    let topic = message.topic
...
@@ -41,11 +88,35 @@ consumer.on('message', function (message) {
    if (topic === "heartbeat") {
        message = JSON.parse(message)
        if (Date.now() - message.timestamp < 1000) {   // ignore stale heartbeats
            if (!workerNodes[message.address]) {
                workerNodes[message.address] = message
                console.log("New worker discovered. Worker List:")
                console.log(workerNodes);
                workerHeap.push(workerNodes[message.address])
                if (Object.keys(workerNodes).length === 1) {
                    updateColdstartOnNIC();
                }
            } else {
                // console.log("Got heartbeat, updating load of worker ", message.address)
                if (Math.abs(workerNodes[message.address].system_info.avg_load[0]
                        - message.system_info.avg_load[0]) > loadThreashold) {
                    workerNodes[message.address].system_info = message.system_info
                    workerNodes[message.address].timestamp = message.timestamp
                    console.log("updated worker load : ", workerNodes[message.address])
                    workerHeap.updateItem(workerNodes[message.address])
                    updateColdstartOnNIC();
                } else {
                    console.log("Change in worker load is less than threshold")
                }
            }
            // console.log("\nheap : ", workerHeap)
        }
    }
    else if (topic === "request") {
        message = JSON.parse(message)
        console.log(message);
...
@@ -63,5 +134,78 @@ consumer.on('message', function (message) {
        producer.send(payload, () => {
            console.log(`Replied`)
        })
    }
    else if (topic === constants.topics.check_autoscale) {
        message = JSON.parse(message)
        console.log("request to autoscale : ", message);
    }
    else if (topic == constants.topics.autoscale) {
        message = JSON.parse(message)
        console.log("request to scale : ", message);
        let request_id = Math.floor(Math.random() * 1000)
        // req.body.request_id = request_id
        // res.request_id = request_id
        // requestFlightQueue.set(request_id, res)
        // let payload = "HELLO"
        // payload.request_id = request_id
        // let data = payload.data
        let fid = message.functionHash.slice(9)   // drops the fixed prefix of the hash to get the function id
        // res.data_set_time = Date.now()
        let packet = libSupport.packPacket({
            chain_id: 0,
            exec_id: request_id,
            function_id: fid,
            data,
            function_count: 1,
            autostart: 1
        })
        // res.pack_time = Date.now()
        udpProxy.send(packet, 0, packet.length, "8000", "192.168.2.2", function (err, bytes) {
            logger.info(`forwarded request via UDP, IP 192.168.2.2 Port 8000`)
            res.send_time = Date.now()
        })
        // udpProxy.send(packet, 0, packet.length, resource.port, resource.node_id, function (err, bytes) {
        //     logger.info(`forwarded request via UDP, IP 192.168.2.5 Port ${resource.port}`)
        //     res.send_time = Date.now()
        // })
    }
    else if (topic == constants.topics.deployed) {
        message = JSON.parse(message)
        // function was deployed successfully:
        // if a resource heap exists for this function, add the resource to it;
        // if not, create a new heap for the function
        // let id = message.functionHash + message.runtime
        // if (resourceMap.has(message.resource_id)) {
        //     let resource = resourceMap.get(message.resource_id)
        //     resource.node_id = message.node_id.trim()
        // }
        // if (functionToResourceMap.has(id)) {
        //     let resourceHeap = functionToResourceMap.get(id)
        //     heap.push(resourceHeap, {
        //         resource_id: message.resource_id,
        //         open_request_count: 0,
        //         cpu_utilization: 0
        //     }, libSupport.compare_uti)
        //     logger.warn("Horizontally scaling up: " +
        //         JSON.stringify(functionToResource.get(id)));
        // } else {
        //     /**
        //      * function to resource map - holds a min heap of resources associated with a function
        //      * the min heap is sorted based on a metric [TBD] like CPU usage, request count, mem usage etc
        //      * TODO: decide on metric to use for sorting.
        //      */
        //     let resourceHeap = []
        //     heap.push(resourceHeap, {
        //         resource_id: message.resource_id,
        //         open_request_count: 0,
        //         cpu_utilization: 0
        //     }, libSupport.compare_uti)
        //     functionToResourceMap.set(id, resourceHeap)
        //     logger.warn("Creating new resource pool"
        //         + JSON.stringify(functionToResourceMap.get(id)));
        // }
    }
})
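The substance of this commit is the worker min-heap in rm.js above: workers are ordered by avg_load[0], and whenever the least-loaded worker changes, updateColdstartOnNIC() republishes the coldstart worker on the coldstart_worker topic. A self-contained sketch of that selection logic using the same heap package (the worker objects and addresses are illustrative):

const Heap = require('heap');

// Same comparator as rm.js: order workers by 1-minute average load.
const workerHeap = new Heap((w1, w2) =>
    w1.system_info.avg_load[0] - w2.system_info.avg_load[0]);

const w1 = { address: '192.168.2.3', system_info: { avg_load: [0.4] } };
const w2 = { address: '192.168.2.4', system_info: { avg_load: [0.1] } };
workerHeap.push(w1);
workerHeap.push(w2);
console.log(workerHeap.peek().address);   // 192.168.2.4, the least-loaded worker

// A heartbeat reports a load change larger than the threshold, so the
// entry is re-positioned and the coldstart worker would be republished.
w2.system_info.avg_load[0] = 1.5;
workerHeap.updateItem(w2);
console.log(workerHeap.peek().address);   // 192.168.2.3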
dispatch_system/speedo_data_static2_1f_host.csv
View file @
c1ccb55a
...
...
@@ -17,3 +17,17 @@
35000,25.138213139734326,822,5969237,24.1701602935791,30.621051788330078,39.08381462097173
40000,29.38630774106816,678,6683877,24.225950241088867,58.105635643005286,104.85795974731427
4000,0.17381369331601093,3800,1140189,0.17499923706054688,0.32401084899902344,0.6968975067138672
,-1642101543770234.5,2,10,-1642101543770234.5,-1642101543770189.0,-1642101543770185.0
,-1642102887380229.5,8,10,-1642102887380225.8,-1642102887379994.8,-1642102887379967.2
,-1642103799768331.5,10,100,-1642103799768290.0,-1642103799767948.5,-1642103799767912.8
,-1642104072570253.8,9,100,-1642104072569945.0,-1642104072569566.0,-1642104072569533.5
,-1642135707705920.2,1,100,-1642135707705881.0,-1642135707705736.5,-1642135707705716.2
,-1642136335077307.8,0,10,-1642136335077362.2,-1642136335077075.8,-1642136335077043.2
,-1642136552107645.2,0,10,-1642136552107616.5,-1642136552107434.8,-1642136552107416.8
,-1642138981970252.2,0,10,-1642138981970292.2,-1642138981970014.2,-1642138981969992.2
,-1642139251495695.5,0,10,-1642139251495702.2,-1642139251495614.5,-1642139251495604.2
,-1642139930932117.8,0,10,-1642139930939564.2,-1642139930868838.0,-1642139930865180.0
,-1642142803621418.5,0,100,-1642142803621372.8,-1642142803621007.5,-1642142803620958.0
,-1642144416729338.2,0,10,-1642144416729343.5,-1642144416729111.0,-1642144416729088.8
,-1642163040078553.5,0,10,-1642163040078622.5,-1642163040078189.8,-1642163040078167.0
,-1642163273809842.8,0,10,-1642163273809840.5,-1642163273809598.0,-1642163273809575.0