Commit 6e2e656a authored by Mahendra Patel's avatar Mahendra Patel

Added cold start on the NIC, autoscaling, and load balancing

parent 0212678f
echo $1
# python2 send.py --client-port 8000 --closed 1 --offload 0 --req-count 50 --send-data 10 --fid $1
# sudo ip netns exec ns_server python benchmark_dispatcher2.py --fid 369020 --c 1 --t 1 --n 2
# sudo ip netns exec ns_server python benchmark_dispatcher2.py --fid $1 --c 1 --rps 2 --req_count 10
sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $1 --c 20 --t 300 --rps $2
\ No newline at end of file
#! /bin/bash -ex
rps_flag=0
n_flag=0
while getopts 'rn' flag; do
case "${flag}" in
r) rps_flag=1 ;;
n) n_flag=1 ;;
esac
done
echo $1, $2, $3
if [[ $rps_flag -eq 1 ]]
then
sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $2 --c 50 --t 30 --rps $3
fi
if [[ $n_flag -eq 1 ]]
then
sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $2 --c 50 --t 100 --n $3
fi
@@ -22,7 +22,6 @@ group.add_argument('--rps', help='Requests per second',
group.add_argument('--n', help='Number of requests to send',
type=int, action="store")
args = parser.parse_args()
PORT = 8000
@@ -37,7 +36,6 @@ packet_holder = [[] for i in range(12)]
ingress_time = {}
stop_thread = False
def receive(i):
global stop_thread, packet_holder
CLIENT_IP = "0.0.0.0"
@@ -59,7 +57,6 @@ def receive(i):
packet_holder[i].append((packet, time.time() ))
# print "r", "{0:f}".format((time.time() * 1000)), "{0:f}".format(ingress_time[exec_id])
def genPacket():
global fid
packet = None
@@ -70,11 +67,14 @@ def genPacket():
f0 = 0; f1 = 1; f2 = 2; f3 = 0; f4 = 0
# print chain_id, exec_id, "function_id", function_id, function_count, \
# f0, f1, f2, f3, f4,
+dataInt =1
+autoscaling = 1; fno = 255
+print(chain_id , exec_id , function_id , dataInt , function_count , autoscaling , fno)
chain_id = struct.pack(">I", chain_id) # chain id
exec_id_packed = struct.pack(">I", exec_id) # execution id
dataInt =1
# print " dataInt", dataInt
data = struct.pack(">I", dataInt) # data
@@ -85,12 +85,16 @@ def genPacket():
f2 = struct.pack("B", f2) # f2 -> f0
f3 = struct.pack("B", f3) # f3 -> f1 f2
f4 = struct.pack("B", f4) # f4 -> f3
+autoscaling = struct.pack("B", autoscaling) # autoscaling flag
+fno = struct.pack("B", fno) # function number
+# packet = chain_id + exec_id_packed + function_id + data + function_count + autoscaling + fno + f0 + f1 + f2 + f3 + f4
packet = chain_id + exec_id_packed + function_id + data + function_count + f0 + f1 + f2 + f3 + f4
# print dataInt, offload_status
return packet, exec_id
def sendThread(start_time, runtime, sleep_time):
global ingress_time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
@@ -104,7 +108,6 @@ def sendThread(start_time, runtime, sleep_time):
ingress_time[exec_id] = time.time()
time.sleep(sleep_time)
def send():
global egress_time, ingress_time, concurrency, runtime, stop_thread
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
@@ -113,13 +116,15 @@ def send():
print("chain id, exec id, data, function count, functions dependencies...")
# op = struct.unpack("B", packet[0])
+cnt = 0
if args.n is not None:
for i in range(args.n):
packet, exec_id = genPacket()
s.sendto(packet, (SERVER_IP, PORT))
ingress_time[exec_id] = time.time() * 1000
print("send", "{0:f}".format(ingress_time[exec_id]))
+cnt +=1
+print("cnt request send : ", cnt)
elif args.rps is not None:
@@ -127,8 +132,7 @@ def send():
sleep_time = concurrency / float(args.rps)
print("calculated inter-arrival time, offload mode", sleep_time)
for i in range(concurrency):
-t = threading.Thread(target=sendThread, args=[
-start_time, runtime, sleep_time])
+t = threading.Thread(target=sendThread, args=[start_time, runtime, sleep_time])
t.daemon = True
t.start()
time.sleep(runtime)
...
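The header this benchmark emits (and which the NIC dispatcher parses) is four big-endian 32-bit fields (chain_id, exec_id, function_id, data) followed by one-byte fields: function_count and the chain slots f0..f4; the NIC build adds autoscaling and fno bytes, still commented out above. A minimal Node sketch of the same layout, using the repo's jspack dependency; the helper name and field values are illustrative, not part of the commit:

const { jspack } = require('jspack')

// pack the 22-byte request header the Python benchmark sends
function buildRequest(chainId, execId, functionId, data) {
    let buf = new Array(22)
    let base = 0
    for (const v of [chainId, execId, functionId, data]) {
        jspack.PackTo('>I', buf, base, [v])   // big-endian uint32
        base += 4
    }
    // function_count = 1, then f0..f4 = 0,1,2,0,0 (the benchmark's defaults)
    for (const b of [1, 0, 1, 2, 0, 0]) {
        jspack.PackTo('B', buf, base, [b])
        base += 1
    }
    return Buffer.from(buf)
}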
This source diff could not be displayed because it is too large. You can view the blob instead.
@@ -2,9 +2,12 @@
"registry_url": "10.129.2.201:5000/",
"master_port": 8080,
"master_address": "10.129.2.201",
+"daemon_port": 9000,
+"daemon_mac": "00:22:22:22:22:22",
"grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt",
"couchdb_host": "10.129.2.201:5984",
"env": "env_udp2.js",
+"runtime": "process",
"db": {
"function_meta": "serverless",
"metrics": "metrics",
@@ -12,31 +15,42 @@
"explicit_chain_meta": "explicit_chain"
},
"network": {
-"network_bridge": "xanadu_kafka-serverless",
+"network_bridge": "xanadu_kafka_serverless",
"use_bridge": false,
"internal": {
-"kafka_host": "kafka:9092"
+"kafka_host": "10.129.2.201:9092"
},
"external": {
"kafka_host": "10.129.2.201:9092"
}
},
"topics": {
-"request_dm_2_rm": "request",
+"request_dm_2_rm": "request2",
-"heartbeat": "heartbeat",
+"heartbeat": "heartbeat2",
-"deployed": "deployed",
+"deployed": "deployed3",
-"remove_worker": "removeWorker",
+"remove_worker": "removeWorker2",
-"response_rm_2_dm": "RESPONSE_RM_2_DM_DUMMY",
+"response_rm_2_dm": "RESPONSE_RM_2_DM_DUMMY2",
-"hscale": "hscale",
+"hscale": "hscale2",
-"metrics_worker": "metrics_worker",
+"metrics_worker": "metrics_worker2",
-"log_channel": "LOG_COMMON"
+"log_channel": "LOG_COMMON2",
+"coldstart_worker": "COLDSTART_WORKER2",
+"check_autoscale": "CHECK_AUTOSCALE2",
+"autoscale": "AUTOSCALE2",
+"function_load": "FUNCTION_LOAD3",
+"update_function_instance_nic": "UPDATE_FUNCTION_INSTANCE_NIC",
+"remove_function_instance": "REMOVE_FUNCTION_INSTANCE2"
},
"autoscalar_metrics": {
-"open_request_threshold": 100
+"high_open_request_threshold": 10,
+"low_open_request_threshold": 1,
+"function_load_threshold": 5,
+"low_load_count": 5,
+"high_load_count": 5
},
"metrics": {
"alpha": 0.7
},
+"heartbeat_threshold": 5000,
"speculative_deployment": true,
"JIT_deployment": true,
"id_size": 20
...
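The single open_request_threshold is replaced above by a high/low pair plus low_load_count and high_load_count, which suggests hysteresis: act only after several consecutive readings cross a threshold. A sketch of that logic under those assumptions; the streak counters and the scaleUp/scaleDown callbacks are illustrative, not part of this commit:

const constants = require('./constants_local.json')  // path assumed
const m = constants.autoscalar_metrics
let highStreak = 0, lowStreak = 0

// call once per metrics tick with the current open-request count
function checkAutoscale(openRequests, scaleUp, scaleDown) {
    highStreak = openRequests > m.high_open_request_threshold ? highStreak + 1 : 0
    lowStreak = openRequests < m.low_open_request_threshold ? lowStreak + 1 : 0
    if (highStreak >= m.high_load_count) { highStreak = 0; scaleUp() }
    else if (lowStreak >= m.low_load_count) { lowStreak = 0; scaleDown() }
}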
#! /bin/bash -x
echo "before deletetion list of kafka topic"
echo "--------------------------------------"
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --list --zookeeper 10.129.2.201:2181
echo "\n========================================================================"
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic AUTOSCALE
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic CHECK_AUTOSCALE
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic COLDSTART_WORKER
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic FUNCTION_LOAD
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic LOG_COMMON
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic 10.129.2.201
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic 192.168.2.3
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic RESPONSE_RM_2_DM_DUMMY
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic deployed
# /home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic heartbeat
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic hscale
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic metrics_worker
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic removeWorker
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --delete --zookeeper 10.129.2.201:2181 --topic request
echo "after deletetion list of kafka topic"
echo "-------------------------"
/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh --list --zookeeper 10.129.2.201:2181
\ No newline at end of file
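The script above only deletes the old topics; the renamed ones (request2, heartbeat2, and so on) must be created lazily or by hand. A sketch of recreating them with kafka-node, which is already a dependency; the partition and replication-factor values are assumptions:

const kafka = require('kafka-node')
const constants = require('./constants_local.json')  // path assumed

const client = new kafka.KafkaClient({ kafkaHost: constants.network.external.kafka_host })
const admin = new kafka.Admin(client)
const topics = Object.values(constants.topics).map(topic =>
    ({ topic, partitions: 1, replicationFactor: 1 }))
admin.createTopics(topics, (err, res) => console.log(err || res))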
{"id":"10.129.2.201","master_node":"192.168.0.105"} {"id":"192.168.2.3","master_node":"192.168.2.3"}
\ No newline at end of file \ No newline at end of file
@@ -85,6 +85,7 @@ function runContainer(metadata) {
const process_checkImage = spawn('docker', ["inspect", registry_url + imageName])
process_checkImage.on('close', (code) => {
+console.log("\ncode : ", code)
if (code != 0) {
const process_pullImage = spawn('docker', ["pull", registry_url + imageName]);
@@ -100,11 +101,11 @@ function runContainer(metadata) {
let process = null;
if (constants.network.use_bridge)
process = spawn('docker', ["create", "--rm", `--network=${constants.network.network_bridge}`, "-p", `${port}:${port}`,
-"-p", `${port}:${port}/udp`, "--name", resource_id, registry_url + imageName,
+"-p", `${port}:${port}/udp`, "--mac-address","00:22:22:22:22:22","--name", resource_id, registry_url + imageName,
resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
else
process = spawn('docker', ["create", "--rm", "-p", `${port}:${port}`,
-"-p", `${port}:${port}/udp`, "--name", resource_id, registry_url + imageName,
+"-p", `${port}:${port}/udp`, "--mac-address","00:22:22:22:22:22", "--name", resource_id, registry_url + imageName,
resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
let result = "";
@@ -150,14 +151,19 @@ function runContainer(metadata) {
/**
* create docker on the default bridge
*/
+let docker_args = null;
if (constants.network.use_bridge)
-process = spawn('docker', ["create", "--rm", `--network=${constants.network.network_bridge}`,
+docker_args = ["create", "--rm", `--network=${constants.network.network_bridge}`,
-"-p", `${port}:${port}`, "-p", `${port}:${port}/udp`, "--name", resource_id,
+"-p", `${port}:${port}/tcp`, "-p", `${port}:${port}/udp`, "--name", resource_id,
-registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
+registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host];
else
-process = spawn('docker', ["create",
+docker_args = ["create",
-"-p", `${port}:${port}`, "-p", `${port}:${port}/udp`, "--name", resource_id,
+"-p", `${port}:${port}/tcp`, "-p", `${port}:${port}/udp`, "--name", resource_id,
-registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
+registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host];
+console.log("docker args :: ", docker_args)
+process = spawn('docker',docker_args);
let result = ""; let result = "";
// timeStart = Date.now() // timeStart = Date.now()
console.log("resource id is: ",resource_id) console.log("resource id is: ",resource_id)
@@ -173,8 +179,9 @@ function runContainer(metadata) {
*/
// let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
+// let add_network = spawn('docker', ['network', 'connect', constants.network.network_bridge, resource_id])
-let _ = spawn('docker', ['start', resource_id])
+let _ = spawn('docker', ['start', resource_id,'-i'])
_.stdout.on('data', (data) => {
logger.info(data.toString())
...
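The refactor above collects the docker arguments into docker_args before spawning, and pins every worker container to the fixed MAC from constants.daemon_mac so the NIC dispatcher can address it directly. A condensed sketch of that pattern; the parameter names are illustrative:

const { spawn } = require('child_process')

function createContainer(resource_id, imageName, port, registry_url, kafkaHost, mac) {
    const docker_args = ["create", "--rm",
        "-p", `${port}:${port}/tcp`, "-p", `${port}:${port}/udp`,
        "--mac-address", mac, "--name", resource_id,
        registry_url + imageName,
        resource_id, imageName, port, "container", kafkaHost]
    console.log("docker args :: ", docker_args)   // logged before spawning, as above
    return spawn('docker', docker_args)
}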
@@ -10,10 +10,23 @@ const execute = require('./execute')
const fs = require('fs')
const fetch = require('node-fetch');
const os = require('os');
+const dgram = require('dgram');
+const server = dgram.createSocket('udp4');
+const udpProxy = dgram.createSocket('udp4');
+const WINDOW_SIZE = 10
+let struct = require('jspack');
+struct = struct.jspack
let metadataDB = `http://${secrets.couchdb_username}:${secrets.couchdb_password}@${constants.couchdb_host}`
metadataDB = metadataDB + "/" + constants.db.function_meta + "/"
+let usedPort = new Map(); //map of used ports by function
+let functionToResourceMap={} // f
+let new_starting_function={}
+let coldstart_request_queue = []
const kafka = require('kafka-node')
const logger = libSupport.logger
@@ -28,11 +41,85 @@ let Producer = kafka.Producer,
producer = new Producer(client),
Consumer = kafka.Consumer
function getRandomInt(min, max) {
min = Math.ceil(min);
max = Math.floor(max);
return Math.floor(Math.random() * (max - min + 1)) + min;
}
function dispatch_coldstart_request()
{
//dispatch the coldstart request
// console.log("dispatch the coldstart request !!!")
// let lookbackWindow = Math.min(WINDOW_SIZE, coldstart_request_queue.length)
let lookbackWindow = coldstart_request_queue.length
for (let i = 0; i < lookbackWindow; i++) {
let req = coldstart_request_queue.shift()
console.log("coldstart req in queue : ", req)
// dispatch the request
let id = req.request.functionHash + req.request.runtime
let data = 101
if(functionToResourceMap[id])
{
console.log("function is running so dispatching request : " , functionToResourceMap[id])
let packet = packPacket({
chain_id: 0,
exec_id: 0,
function_id: functionToResourceMap[id].functionHash.slice(9),
data,
function_count: 1,
// autostart: 1,
// fno : 255
})
let proxy_port = functionToResourceMap[id].portExternal
console.log("proxing packet to function : ", packet)
try {
udpProxy.send(packet, 0, packet.length, proxy_port, functionToResourceMap[id].node_id, function (err, bytes) {
if (err)
console.log(err)
console.log("UDP coldstart request sent to function")
})
} catch (e) {
console.log("error proxying coldstart request",e)
}
}
else{
coldstart_request_queue.push(req)
}
// logger.info(req)
// console.log("dispatch : ",req.body, "params : ",req.params, "uri : ",req.uri)
// let runtime = req.body.runtime
// let functionHash = req.params.id
// if (!db.has(functionHash + runtime)) {
// db.set(functionHash + runtime, [])
// db.get(functionHash + runtime).push({ req, res })
// let payload = [{
// topic: constants.topics.hscale,
// messages: JSON.stringify({ runtime, functionHash })
// }]
// producer.send(payload, function () { })
// speculative_deployment(req, runtime)
// } else {
// logger.info("deployment process already started waiting")
// db.get(functionHash + runtime).push({ req, res })
// }
}
}
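The commented-out lookbackWindow above hints at a bounded scan: inspect at most WINDOW_SIZE queued cold-start requests per timer tick instead of the whole queue. A sketch of that variant; a dispatchOne helper that returns false for a not-yet-deployed function is an assumption:

function boundedDispatch(dispatchOne) {
    const lookbackWindow = Math.min(WINDOW_SIZE, coldstart_request_queue.length)
    for (let i = 0; i < lookbackWindow; i++) {
        const req = coldstart_request_queue.shift()
        if (!dispatchOne(req))
            coldstart_request_queue.push(req)   // requeue for the next timer tick
    }
}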
libSupport.makeTopic(node_id).then(() => {
logger.info("node topic created")
let consumer = new Consumer(client,
[
-{ topic: node_id, partition: 0, offset: 0 }
+{ topic: node_id, partition: 0, offset: 0 },
+{ topic: constants.topics.remove_function_instance},
+{ topic: constants.topics.deployed }
],
[
{ autoCommit: true }
@@ -42,10 +129,17 @@ libSupport.makeTopic(node_id).then(() => {
let topic = message.topic
message = message.value
message = JSON.parse(message)
console.log("message received on kafka : ",message)
let runtime = message.runtime let runtime = message.runtime
let functionHash = message.functionHash let functionHash = message.functionHash
let resource_id = message.resource_id // let resource_id = message.resource_id
let port = message.port let resource_id = libSupport.makeid(constants.id_size)
// let port = message.port
let port = libSupport.getPort(usedPort)
let mac = constants.daemon_mac
let firststart = false
let executordata = {}
/**
* Download necessary files (function file) and Start resource deployment
*/
@@ -58,19 +152,57 @@ libSupport.makeTopic(node_id).then(() => {
libSupport.download(host_url + "/repository/" + functionHash + ".js", local_repository + functionHash + ".js").then(() => {
let metadata = {
resource_id, functionHash,
-runtime, port,
+runtime, port, mac,
resources: {
memory: json.memory
-}
+},
+firststart,
+executordata
}
startWorker(local_repository, producer, metadata)
})
}).catch(err => {
logger.error("something went wrong" + err.toString())
});
}
else if (topic==constants.topics.remove_function_instance)
{
logger.info("function closed !!! Freeing resources");
console.log("fc message : ", message)
// if(usedPort.has(port))
// usedPort.delete(port)
let id = message.functionHash + message.runtime
if(functionToResourceMap[id])
delete functionToResourceMap[id]
}
else if (topic == constants.topics.deployed ) {
// function deployed store metadata in functiontoResourceMap
// message = JSON.parse(message)
// function is deployed successfully
console.log("message deployed : ", message)
let id = message.functionHash + message.runtime
if(new_starting_function[id])
delete new_starting_function[id]
console.log("\nthere is no function for function ID so creating one\n")
functionToResourceMap[id] = {}
functionToResourceMap[id] = {
"functionHash": message.functionHash,
"resource_id": resource_id,
"function_runtime": message.runtime,
"node_id": message.node_id,
"portExternal": message.portExternal,
"mac": message.mac,
"open_request_count": 0,
"load": 0
}
console.log("function to resource MAP : ", functionToResourceMap)
}
})
})
@@ -122,7 +254,7 @@ function startWorker(local_repository, producer, metadata) {
logger.error("=====================deployment failed=========================");
logger.error(err)
producer.send([{
-topic: "deployed",
+topic: constants.topics.deployed,
messages: JSON.stringify({
"status": false,
resource_id: metadata.resource_id,
@@ -135,7 +267,7 @@ function startWorker(local_repository, producer, metadata) {
execute.runProcess(local_repository, metadata)
.catch(err => {
logger.error("=====================deployment failed=========================");
-producer.send([{ topic: "deployed",
+producer.send([{ topic: constants.topics.deployed,
messages: JSON.stringify({
"status": false,
resource_id: metadata.resource_id,
@@ -150,7 +282,7 @@ function startWorker(local_repository, producer, metadata) {
else {
producer.send(
[{
-topic: "response",
+topic: "response2",
messages: JSON.stringify({ status: "unknown runtime" })
}], () => { })
@@ -167,16 +299,169 @@ function heartbeat() {
avg_load: os.loadavg()
}
let payload = [{
-topic: "heartbeat",
+topic: constants.topics.heartbeat,
messages: JSON.stringify({
"address": node_id,
+"portExternal": constants.daemon_port,
+"mac": constants.daemon_mac,
"system_info": info,
"timestamp": Date.now()
})
}]
-console.log("daemon system info : ", info)
+// console.log("daemon system info : ", payload)
producer.send(payload, function(cb) {})
}
// TODO 2: implement packer/deparser for the UDP packet
// TODO 3: create UDP server to get the coldstart request
server.on('error', (err) => {
console.log(`server error:\n${err.stack}`);
server.close();
});
server.on('message', (msg, rinfo) => {
console.log("message", msg)
let req = unpackPacket(msg)
console.log(req, typeof req);
// get the coldstart request and start the function
// logger.info("Received Deployment UDP request for resource_id: " + resource_id);
let functionHash = "function_" + req.function_id
// let resource_id = 'aaa'
let resource_id = libSupport.makeid(constants.id_size)
// let runtime = 'process'
let runtime = constants.runtime
// let port = // create random unused port and assign to function
let port = libSupport.getPort(usedPort)
let mac = constants.daemon_mac
let firststart=true
let executordata = req.data
let id = functionHash + runtime
if(new_starting_function[id])
{
// function is already running so no need to start new function instance
// add the request to the request queue and proxy the request
console.log("function is already starting in some time. Queueing the request")
let request = {functionHash, runtime }
console.log("request : ", request)
coldstart_request_queue.push({request});
}
else{
// there is no function instance running for fid
// start the new function
new_starting_function[id] = 1
console.log("Starting new function for fid : ", functionHash)
logger.info("Received Deployment UPD request")
fetch(metadataDB + functionHash).then(res => res.json())
.then(json => {
console.log("metadata", json);
libSupport.download(host_url + "/repository/" + functionHash + ".js", local_repository + functionHash + ".js").then(() => {
let metadata = {
resource_id, functionHash,
runtime, port, mac,
resources: {
memory: json.memory
},
firststart,
executordata
}
startWorker(local_repository, producer, metadata)
})
}).catch(err => {
logger.error("something went wrong" + err.toString())
});
}
// lastRequest = Date.now()
// console.log("network stack time", lastRequest - payload.t1)
// totalRequest++
// executor(msg).then(result => {
// result = packPacket(msg)
// let port = 10000 + getRandomInt(0, 10)
// try {
// udpProxy.send(msg, 0, msg.length, port, rinfo.address, function (err, bytes) {
// if (err)
// console.log(err)
// console.log("response via UDP")
// })
// } catch (e) {
// console.log(e)
// }
// })
});
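A hedged test client for the cold-start path above: it packs the same leading header fields the daemon's unpackPacket reads and fires them at daemon_port. The function_id 12345 and the localhost target are placeholders:

const dgram = require('dgram')
const { jspack } = require('jspack')
const constants = require('.././constants_local.json')

const sock = dgram.createSocket('udp4')
let msg = new Array(64), base = 0
// chain_id, exec_id, function_id, data as big-endian uint32
for (const v of [0, 1, 12345, 101]) {
    jspack.PackTo('>I', msg, base, [v])
    base += 4
}
jspack.PackTo('B', msg, base, [1])   // function_count; the unread tail stays zero
msg = Buffer.from(msg)
sock.send(msg, 0, msg.length, constants.daemon_port, '127.0.0.1',
    (err) => { if (err) console.log(err); sock.close() })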
function unpackPacket(packet) {
// let buffer = new Array(1024)
let chain_id = null, exec_id = null, function_count = null, function_id = null, data = null
let base = 0, f0, f1, f2, f3, f4, t1, t2, t3, t4
chain_id = struct.Unpack(">I", packet, base)
base += 4
exec_id = struct.Unpack(">I", packet, base)
base += 4
function_id = struct.Unpack(">I", packet, base)
base += 4
data = struct.Unpack(">I", packet, base)
base += 4
function_count = struct.Unpack("I", packet, base)
base += 4
f0 = struct.Unpack("B", packet, base)
base += 1
f1 = struct.Unpack("B", packet, base)
base += 1
f2 = struct.Unpack("B", packet, base)
base += 1
f3 = struct.Unpack("B", packet, base)
base += 1
f4 = struct.Unpack("B", packet, base)
base += 1
t1 = struct.Unpack("I", packet, base)
base += 8
t2 = struct.Unpack("I", packet, base)
base += 8
t3 = struct.Unpack("I", packet, base)
base += 8
t4 = struct.Unpack("I", packet, base)
console.log("chain_id", chain_id, "exec_id", exec_id, "data", data, "function_count", function_count, "function_id", function_id)
return {
chain_id: chain_id[0],
exec_id: exec_id[0],
data: data[0],
function_count: function_count[0],
function_id: function_id[0],
f0, f1, f2, f3, f4, t1, t2, t3, t4
}
}
function packPacket(dataPacket) {
let message = new Array(1024)
let base = 0, chain_id, exec_id, function_id, data, function_count
chain_id = struct.PackTo(">I", message, base, [dataPacket.chain_id])
base += 4
exec_id = struct.PackTo(">I", message, base, [dataPacket.exec_id])
base += 4
function_id = struct.PackTo(">I", message, base, [dataPacket.function_id])
base += 4
data = struct.PackTo(">I", message, base, [dataPacket.data])
base += 4
function_count = struct.PackTo("B", message, base, [dataPacket.function_count])
message = Buffer.from(message)
return message
}
server.on('listening', () => {
const address = server.address();
console.log(`server listening ${address.address}:${address.port}`);
});
// server.bind(port, "192.168.2.3");
server.bind(constants.daemon_port);
setInterval(heartbeat, 1000);
setInterval(dispatch_coldstart_request, 1000)
\ No newline at end of file
@@ -134,6 +134,21 @@ const logger = winston.createLogger({
});
function getPort(usedPort) {
let port = -1, ctr = 0
do {
let min = Math.ceil(30000);
let max = Math.floor(60000);
port = Math.floor(Math.random() * (max - min + 1)) + min;
ctr += 1;
if (ctr > 30000) {
port = -1
break
}
} while (usedPort.has(port))
return port
}
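This copy of getPort only probes usedPort; the dispatcher-side copy later in this commit also records the allocation. A sketch of the reserve/release pattern that keeps the map honest; the helper names are illustrative:

function reservePort(usedPort) {
    const port = getPort(usedPort)
    if (port !== -1)
        usedPort.set(port, true)   // record it so the next draw cannot collide
    return port
}

function releasePort(usedPort, port) {
    usedPort.delete(port)
}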
module.exports = {
-download, makeid, updateConfig, makeTopic, returnPort, logger
+download, makeid, updateConfig, makeTopic, returnPort, logger, getPort
}
@@ -17,7 +17,8 @@
"kafka-node": "^5.0.0",
"morgan": "^1.9.1",
"mqtt": "^4.2.8",
-"node-fetch": "^2.6.0",
+"node-fetch": "^2.6.7",
+"os-utils": "0.0.14",
"redis": "^3.1.2",
"request": "^2.88.2",
"usage": "^0.7.1",
...
const secrets = require('./secrets.json')
const constants = require('.././constants_local.json')
let db = new Map(), // queue holding request to be dispatched
resourceMap = new Map(), // map between resource_id and resource details like node_id, port, associated function etc
functionToResource = new Map(), // a function to resource map. Each map contains a minheap of
// resources associated with the function
workerNodes = new Map(), // list of worker nodes currently known to the DM
functionBranchTree = new Map(), // a tree to store function branch predictions
conditionProbabilityExplicit = new Map(), // tree holding conditional probabilities for explicit chains
requestFlightQueue = new Map()// map to store in flight requests
/**
* URL to the couchdb database server used to store data
*/
module.exports = {
db, functionBranchTree, functionToResource, workerNodes, resourceMap,
conditionProbabilityExplicit, requestFlightQueue
}
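A sketch of how another dispatch-manager module would consume this shared state; the file name shared_meta.js is an assumption, and the heap-tail read mirrors what autoscalar() does elsewhere in this commit:

const { functionToResource } = require('./shared_meta')

// open requests on the busiest resource of a function (heap tail, as in autoscalar)
function openRequests(functionKey) {
    const heap = functionToResource.get(functionKey)
    return heap && heap.length ? heap[heap.length - 1].open_request_count : 0
}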
let a = 10, c = "abc"
let data = {
a: a,
c: c,
timestamp: Date.now(),
}
console.log(data)
// const sleep = (waitTimeInMs) => new Promise(resolve => setTimeout(resolve, waitTimeInMs));
// async function work1()
// {
// for(let i=0; i<4; i++)
// {
// console.log(1,1)
// await sleep(1000);
// console.log(1,2)
// await sleep(1000);
// }
// }
// async function work2()
// {
// for(let i=0; i<4; i++)
// {
// console.log(2,1)
// await sleep(1000);
// console.log(2,2)
// await sleep(1000);
// }
// }
// work1()
// work2()
// // function sleep(ms) {
// // return new Promise((resolve) => {
// // setTimeout(resolve, ms);
// // });
// // }
// // await sleep(1000)
// // function sleep(ms) {
// // return new Promise((resolve) => {
// // setTimeout(resolve, ms);
// // });
// // }
// // async function sleep(ms)
// // {
// // new Promise(resolve => setTimeout(resolve, ms));
// // }
// // let ms = 10000
// // for(let i=0; i<2; i++)
// // {
// // console.log(1)
// // await sleep(100000)
// // // await new Promise(resolve => setTimeout(resolve, ms));
// // console.log(2)
// // await sleep(100000)
// // // await new Promise(resolve => setTimeout(resolve, ms));
// // }
// // var pidusage = require('pidusage')
// // let cpuper = 0
// // const compute = async () => {
// // const stats = await pidusage(process.pid)
// // // do something
// // // console.log(stats.cpu)
// // cpuper = stats.cpu
// // }
// // // Compute statistics every second:
// // const interval = async (time) => {
// // setTimeout(async () => {
// // await compute()
// // console.log(cpuper)
// // interval(time)
// // }, time)
// // }
// // interval(1000)
// // // function compute(cb) {
// // // pidusage(process.pid, function (err, stats) {
// // // console.log(stats)
// // // => {
// // // cpu: 10.0, // percentage (from 0 to 100*vcore)
// // // memory: 357306368, // bytes
// // // ppid: 312, // PPID
// // // pid: 727, // PID
// // // ctime: 867000, // ms user + system time
// // // elapsed: 6650000, // ms since the start of the process
// // // timestamp: 864000000 // ms since epoch
// // // }
// // // cb()
// // // })
// // // }
// // // function interval(time) {
// // // setTimeout(function() {
// // // compute(function() {
// // // interval(time)
// // // })
// // // }, time)
// // // }
// // // Compute statistics every second:
// // // interval(1000)
// // // pidusage(process.pid, function (err, stats) {
// // // console.log(stats)
// // // // => {
// // // // cpu: 10.0, // percentage (from 0 to 100*vcore)
// // // // memory: 357306368, // bytes
// // // // ppid: 312, // PPID
// // // // pid: 727, // PID
// // // // ctime: 867000, // ms user + system time
// // // // elapsed: 6650000, // ms since the start of the process
// // // // timestamp: 864000000 // ms since epoch
// // // // }
// // // // cb()
// // // })
// // // var usage = require('usage');
// // // var pid = process.pid // you can use any valid PID instead
// // // usage.lookup(pid, function(err, result) {
// // // });
// // // // const Heap = require('heap');
// // // const osutils = require('os-utils')
// // function createLoad()
// // {
// // // while(1)
// // // {
// // arr = []
// // for(let i=2; i<1000; i++)
// // {
// // for(let j=2; j<i; j++)
// // {
// // for(let k=0; k<i; k++){
// // }
// // }
// // }
// // // console.log("\n\n")
// // // }
// // }
// // // function getCPUUsage () {
// // // return new Promise(resolve => {
// // // osutils.cpuUsage(value => resolve(value))
// // // })
// // // }
// // // async function testCPUUsage() {
// // // const cpuUsage = await getCPUUsage();
// // // console.log(`test CPU usage: ${(cpuUsage * 100)}%`);
// // // }
// // // // function getCpuUsage(){
// // // // os_utils.cpuUsage(function(v){
// // // // console.log( 'CPU (%):' + v );
// // // // });
// // // // os_utils.cpuFree(function(v){
// // // // console.log( 'CPU Free:' + v );
// // // // });
// // // // console.log("\n")
// // // // }
// // // setInterval(testCPUUsage,1000);
// // setInterval(createLoad,1000);
// // // // var heap = new Heap(function(a, b) {
// // // // return a.foo - b.foo;
// // // // });
// // // // let map = new Map();
// // // // // a = {foo : 3};
// // // // // b = {foo : 4};
// // // // // c = {foo : 2};
// // // // arr = [{foo : 4},{foo : 5},{foo : 2}]
// // // // // map.set("foo1", a);
// // // // // map.set("foo2", b);
// // // // // map.set("foo3", c);
// // // // // heap.push({foo: 3});
// // // // // heap.push({foo: 1});
// // // // // heap.push({foo: 2});
// // // // heap.push(arr[0]);
// // // // console.log(heap)
// // // // heap.push(arr[1]);
// // // // console.log(heap)
// // // // heap.push(arr[2]);
// // // // console.log(heap)
// // // // arr[0].foo = 1;
// // // // // heap.pop(b);
// // // // console.log(heap)
// // // // heap.updateItem(arr[0])
// // // // console.log(heap)
// // // // heap.pop();
// // // // console.log(heap)
@@ -87,7 +87,8 @@ app.get('/metrics', (req, res) => {
/**
* REST API to receive deployment requests
-*/
+**/
app.post('/serverless/deploy', (req, res) => {
console.log("req = "+req+" ** "+req.body.runtime+" ** "+req.body.serverless,req.files,req.files.serverless, req.files.nicfunction)//newcode
console.log("res = "+res)//newcode
@@ -144,7 +145,8 @@ app.post('/serverless/deploy', (req, res) => {
res.send("error").status(400)
}
else {
-let func_id = parseInt(functionHash.slice(0,5),16)
+let func_id = functionHash
+// let func_id = parseInt(functionHash.slice(0,5),16)
//console.log(func_id)
console.log("Function id to be used is: ", func_id)
idToFunchashMap.set(func_id, functionHash)
@@ -202,9 +204,7 @@ function deployContainer(path, imageName) {
ENTRYPOINT ["node", "${imageName}.js"]`
, function (err) {
if (err) {
logger.error("failed", err);
reject(err);
}
else {
@@ -345,7 +345,6 @@ function postDeploy(message) {
res.status(400).json({ reason: message.reason })
}
db.delete(id)
return;
}
@@ -382,7 +381,6 @@ function postDeploy(message) {
functionToResource.set(id, resourceHeap)
logger.warn("Creating new resource pool"
+ JSON.stringify(functionToResource.get(id)));
}
try {
@@ -412,7 +410,6 @@ function postDeploy(message) {
} catch (e) {
logger.error(e.message)
}
}
consumer.on('message', function (message) {
@@ -420,13 +417,13 @@ consumer.on('message', function (message) {
let topic = message.topic
message = message.value
// console.log(topic, message)
-if (topic === "response") {
+if (topic === "response2") {
logger.info("response " + message);
} else if (topic === constants.topics.heartbeat) {
message = JSON.parse(message)
// console.log(message)
+console.log("node_to_resource_mapping : ", node_to_resource_mapping)
if (Date.now() - message.timestamp < 1000)
if (!workerNodes.has(message.address)) {
workerNodes.set(message.address, message.timestamp)
@@ -436,10 +433,10 @@ consumer.on('message', function (message) {
else
{
if(node_to_resource_mapping.has(message.address)) {
console.log("")
let resource_id = node_to_resource_mapping.get(message.address) let resource_id = node_to_resource_mapping.get(message.address)
resource_to_cpu_util.set(resource_id,message.system_info.loadavg) resource_to_cpu_util.set(resource_id,message.system_info.loadavg)
} }
} }
} else if (topic == constants.topics.deployed) { } else if (topic == constants.topics.deployed) {
try { try {
...@@ -487,12 +484,11 @@ consumer.on('message', function (message) { ...@@ -487,12 +484,11 @@ consumer.on('message', function (message) {
} }
} else if (topic == constants.topics.hscale) { } else if (topic == constants.topics.hscale) {
message = JSON.parse(message) message = JSON.parse(message)
let resource_id = libSupport.makeid(constants.id_size), // each function resource request is associated with an unique ID let resource_id = libSupport.makeid(constants.id_size), // each function resource request is associated with an unique ID
runtime = message.runtime, runtime = message.runtime,
functionHash = message.functionHash functionHash = message.functionHash
logger.info(`Generated new resource ID: ${resource_id} for runtime: ${runtime}`); logger.info(`Generated new resource ID: ${resource_id} for runtime: ${runtime}`);
console.log("Resource Status: ", functionToResource); console.log("Resource Status: ", functionToResource);
if (!functionToResource.has(functionHash + runtime) && !db.has(functionHash + runtime)) { if (!functionToResource.has(functionHash + runtime) && !db.has(functionHash + runtime)) {
@@ -573,7 +569,7 @@ function autoscalar() {
functionToResource.forEach((resourceList, functionKey, map) => {
if (resourceList.length > 0 &&
-resourceList[resourceList.length - 1].open_request_count > constants.autoscalar_metrics.open_request_threshold) {
+resourceList[resourceList.length - 1].open_request_count > constants.autoscalar_metrics.high_open_request_threshold) {
let resource = resourceMap.get(resourceList[resourceList.length - 1].resource_id)
logger.warn(`resource ${resourceList[resourceList.length - 1]} exceeded autoscalar threshold. Scaling up!`)
let payload = [{
@@ -587,13 +583,16 @@ function autoscalar() {
}
function heapUpdate() {
+console.log("functionToResource : ", functionToResource)
+console.log("resource_to_cpu_util : ", resource_to_cpu_util)
functionToResource.forEach((resourceArray, functionKey) => {
//resourceArray = resourceList.toArray()
-console.log("Function being updated: ",functionKey)
+// console.log("Function being updated: ",functionKey)
for (let i = 0; i < resourceArray.length; i++) {
let res_i = resourceArray[i].resource_id;
resourceArray[i].cpu_utilization = resource_to_cpu_util.get(res_i);
console.log("Avg load on resource-worker ",i, ": ", resourceArray[i].cpu_utilization)
+console.log("Avg load on resource-worker ",i, ": ", resourceArray[i])
}
heap.heapify(resourceArray, libSupport.compare_uti)
@@ -672,7 +671,7 @@ async function speculative_deployment(req, runtime) {
if (chainData[id])
chainData[id].branches = new Map(chainData[id].branches)
}
currentDelay = metrics[branchInfo.mle_path[0].id].container.starttime
for (let i = 1; i < deployDepth; i++) {
let parent = chainData[branchInfo.mle_path[i - 1].id]
@@ -683,10 +682,8 @@ async function speculative_deployment(req, runtime) {
invokeTime = (invokeTime < 0)? 0: invokeTime
console.log(self, "current delay", currentDelay, "invoke time:", currentDelay - metrics[self].container.starttime);
setTimeout(chainHandler.notify, invokeTime, "container", self)
}
})
} else {
/**
* Perform Speculation without JIT
@@ -713,8 +710,8 @@ async function speculative_deployment(req, runtime) {
}
}
}
-setInterval(libSupport.metrics.broadcastMetrics, 5000)
+// setInterval(libSupport.metrics.broadcastMetrics, 5000)
// setInterval(autoscalar, 1000);
setInterval(dispatch, 1000);
-// setInterval(heapUpdate, 1000);
+// setInterval(heapUpdate, 5000);
app.listen(port, () => logger.info(`Server listening on port ${port}!`))
\ No newline at end of file
@@ -75,10 +75,13 @@ function generateExecutor(functionPath, functionHash) {
let output = input.slice(0, insertIndex) + functionFile + input.slice(insertIndex)
let hash = crypto.createHash('md5').update(output).digest("hex");
-console.log(hash);
+let func_id = parseInt(hash.slice(0,5),16)
+console.log(func_id);
-fs.writeFileSync(functionPath + hash + ".js", output)
+// fs.writeFileSync(functionPath + hash + ".js", output)
+fs.writeFileSync(functionPath + "function_" + func_id + ".js", output )
-return hash
+return "function_"+func_id
+// return hash
}
/**
@@ -89,41 +92,42 @@ function generateExecutor(functionPath, functionHash) {
*/
function generateMicrocExecutor(functionPath, functionName, jsfunctionhash) {
//creating function.c
-let function_temp = fs.readFileSync(`./repository/worker_env/function_temp.c`)
+// let function_temp = fs.readFileSync(`./repository/worker_env/function_temp.c`)
-let function_def = fs.readFileSync(functionPath + functionName)
+// let function_def = fs.readFileSync(functionPath + functionName)
-let searchSize = "//ADD_FUNCTION".length
+// let searchSize = "//ADD_FUNCTION".length
-let fid = parseInt(jsfunctionhash.slice(0,5), 16)
+// let fid = parseInt(jsfunctionhash.slice(0,5), 16)
-let insertIndex = function_temp.indexOf("//ADD_FUNCTION") + searchSize
+// let insertIndex = function_temp.indexOf("//ADD_FUNCTION") + searchSize
-let function_name = "void function_"+ fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
+// let function_name = "void function_"+ fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
-let full_function = function_temp.slice(0, insertIndex) +"\n"+ function_name + "{\n" +function_def +"\n}"+ function_temp.slice(insertIndex)
+// let full_function = function_temp.slice(0, insertIndex) +"\n"+ function_name + "{\n" +function_def +"\n}"+ function_temp.slice(insertIndex)
-// let hash = crypto.createHash('md5').update(full_function).digest("hex");
+// // let hash = crypto.createHash('md5').update(full_function).digest("hex");
-// console.log(hash);
+// // console.log(hash);
-console.log(full_function);
+// console.log(full_function);
-fs.writeFileSync(functionPath +"offload/"+ jsfunctionhash + ".c", full_function)
+// fs.writeFileSync(functionPath +"offload/"+ jsfunctionhash + ".c", full_function)
-//adding call to function when match with fid
+// //adding call to function when match with fid
-return new Promise((resolve) => {
+// return new Promise((resolve) => {
-let main_function_temp = fs.readFileSync(functionPath +"offload/"+ "static_dispatch_function.c")
+// let main_function_temp = fs.readFileSync(functionPath +"offload/"+ "static_dispatch_function.c")
-// let client_function = fs.readFileSync(functionPath + "offload/"+jsfunctionhash+".c")
+// // let client_function = fs.readFileSync(functionPath + "offload/"+jsfunctionhash+".c")
-searchSize = "//ADD_FUNCTION_EXTERNS".length
+// searchSize = "//ADD_FUNCTION_EXTERNS".length
-insertIndex = main_function_temp.indexOf("//ADD_FUNCTION_EXTERNS") + searchSize
+// insertIndex = main_function_temp.indexOf("//ADD_FUNCTION_EXTERNS") + searchSize
-let extern_name = "extern void function_"+fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
+// let extern_name = "extern void function_"+fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
-let main_function = main_function_temp.slice(0, insertIndex) +"\n"+ extern_name+";\n"+ main_function_temp.slice(insertIndex)
+// let main_function = main_function_temp.slice(0, insertIndex) +"\n"+ extern_name+";\n"+ main_function_temp.slice(insertIndex)
-console.log("MAIN FUNCTION : \n",main_function)
+// console.log("MAIN FUNCTION : \n",main_function)
-let hash = crypto.createHash('md5').update(full_function).digest("hex");
+// let hash = crypto.createHash('md5').update(full_function).digest("hex");
-// console.log(hash);
+// // console.log(hash);
-searchSize = "//ADD_FUNCTION_CONDITION".length
+// searchSize = "//ADD_FUNCTION_CONDITION".length
-insertIndex = main_function.indexOf("//ADD_FUNCTION_CONDITION") + searchSize
+// insertIndex = main_function.indexOf("//ADD_FUNCTION_CONDITION") + searchSize
-let inc_pkt_count = "function_packet_count["+fid+"-10000]++;"
+// let inc_pkt_count = "function_packet_count["+fid+"-10000]++;"
-let if_else_cond = "else if( fid == "+fid + " ) {\n "+inc_pkt_count +"\nfunction_"+fid+"(mapHdr);\n}"
+// let if_else_cond = "else if( fid == "+fid + " ) {\n "+inc_pkt_count +"\nfunction_"+fid+"(mapHdr);\n}"
-let main_function_full = main_function.slice(0, insertIndex) +"\n"+ if_else_cond +"\n"+ main_function.slice(insertIndex)
+// let main_function_full = main_function.slice(0, insertIndex) +"\n"+ if_else_cond +"\n"+ main_function.slice(insertIndex)
-console.log(main_function_full);
+// console.log(main_function_full);
-fs.writeFileSync(functionPath +"offload/"+ "static_dispatch_function.c", main_function_full)
+// fs.writeFileSync(functionPath +"offload/"+ "static_dispatch_function.c", main_function_full)
+// return 'xyz';
-return hash
+// return hash
-});
+// });
}
/**
@@ -251,6 +255,7 @@ function getPort(usedPort) {
break
}
} while (usedPort.has(port))
+usedPort.set(port, true)
return port
}
@@ -560,13 +565,33 @@ function unpackPacket(packet) {
base += 4
function_count = struct.Unpack("B", packet, base)
+base += 1
+autoscale = struct.Unpack("B", packet, base)
+base += 1
+fno = struct.Unpack("B", packet, base)
+base += 1
+f0 = struct.Unpack("B", packet, base)
+base += 1
+f1 = struct.Unpack("B", packet, base)
+base += 1
+f2 = struct.Unpack("B", packet, base)
+base += 1
+f3 = struct.Unpack("B", packet, base)
+base += 1
+f4 = struct.Unpack("B", packet, base)
return {
chain_id: chain_id[0],
exec_id: exec_id[0],
data: data[0],
function_count: function_count[0],
-function_id: function_id[0]
+function_id: function_id[0],
+echain: [f1[0], f2[0], f3[0], f4[0]],
+autoscale: autoscale[0],
+fno: fno[0]
}
}
@@ -609,6 +634,41 @@ function packPacket(dataPacket) {
return message
}
function packPacketFromDictionary(dataPacket) {
let message = new Array(1024)
let base = 0, chain_id, exec_id, function_id, data, function_count
chain_id = struct.PackTo(">I", message, base, [dataPacket.chain_id])
base += 4
exec_id = struct.PackTo(">I", message, base, [dataPacket.exec_id])
base += 4
function_id = struct.PackTo(">I", message, base, [dataPacket.function_id])
base += 4
data = struct.PackTo(">I", message, base, [dataPacket.data])
base += 4
function_count = struct.PackTo("B", message, base, [dataPacket.function_count])
base += 1
// autoscale = struct.PackTo("B", message, base, [dataPacket.autostart])
// base += 1
// fno = struct.PackTo("B", message, base, [dataPacket.fno])
f0 = struct.PackTo("B", message, base, [0])
base += 1
f1 = struct.PackTo("B", message, base, [12])
base += 1
f2 = struct.PackTo("B", message, base, [0])
base += 1
f3 = struct.PackTo("B", message, base, [34])
base += 1
f4 = struct.PackTo("B", message, base, [0])
base += 1
message = Buffer.from(message)
return message
}
udpProxy.bind(constants.master_port); // starting UDP server for offloaded endpoints
@@ -616,5 +676,5 @@ udpProxy.bind(constants.master_port); // starting UDP server for offloaded endpo
makeid, generateExecutor, generateMicrocExecutor, reverseProxy,
getPort, logger, compare, compare_uti,
logBroadcast, fetchData, metrics,
-producer
+producer, packPacket, packPacketFromDictionary, unpackPacket
}
#
-# Generated Makefile for orchestrator_speedo
+# Generated Makefile for orchestrator_autoscaling_design1
#
ifndef SDKDIR
@@ -122,7 +122,7 @@ ifneq ($(NFAS_FOUND),found)
$(warning warning: nfas not found or not executable, on windows please run nfp4term.bat)
endif
-$(OUTDIR)/orchestrator_speedo.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \
+$(OUTDIR)/orchestrator_autoscaling_design1.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \
$(OUTDIR)/nfd_pcie0_pci_in_issue1.list/nfd_pcie0_pci_in_issue1.list \
$(OUTDIR)/nfd_pcie0_pci_out_me0.list/nfd_pcie0_pci_out_me0.list \
$(OUTDIR)/nbi_init_csr.list/nbi_init_csr.list \
@@ -152,14 +152,7 @@ $(OUTDIR)/orchestrator_speedo.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.l
-u i36.me7 $(OUTDIR)/nfd_pcie0_pd1.list/nfd_pcie0_pd1.list \
-u pcie0.me2 $(OUTDIR)/nfd_pcie0_pci_in_issue0.list/nfd_pcie0_pci_in_issue0.list \
-u i48.me1 $(OUTDIR)/gro1.list/gro1.list \
--u i32.me0 i33.me0 i34.me0 i35.me0 i36.me0 i32.me1 i33.me1 \
-i34.me1 i35.me1 i36.me1 i32.me2 i33.me2 i34.me2 i35.me2 \
-i36.me2 i32.me3 i33.me3 i34.me3 i35.me3 i36.me3 i32.me4 \
-i33.me4 i34.me4 i35.me4 i36.me4 i32.me5 i33.me5 i34.me5 \
-i35.me5 i36.me5 i32.me6 i33.me6 i34.me6 i35.me6 i32.me7 \
-i33.me7 i34.me7 i35.me7 i32.me8 i33.me8 i34.me8 i35.me8 \
-i32.me9 i33.me9 i34.me9 i35.me9 i32.me10 i33.me10 i34.me10 \
-i35.me10 i32.me11 i33.me11 i34.me11 i35.me11 $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list \
+-u i32.me0 i33.me0 i34.me0 i35.me0 i36.me0 $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list \
-u i36.me11 $(OUTDIR)/app_master.list/app_master.list \
-u i48.me2 $(OUTDIR)/nfd_svc.list/nfd_svc.list \
-u i48.me3 $(OUTDIR)/blm0.list/blm0.list \
@@ -179,24 +172,24 @@ $(PIFOUTDIR)/build_info.json: $(MAKEFILE_LIST)
@echo generating $@
@echo ---------
@mkdir -p $(PIFOUTDIR)
-@echo -n {\"sku\": \"nfp-4xxxc-b0\", \"worker_mes\": [\"i32.me0\", \"i33.me0\", \"i34.me0\", \"i35.me0\", \"i36.me0\", \"i32.me1\", \"i33.me1\", \"i34.me1\", \"i35.me1\", \"i36.me1\", \"i32.me2\", \"i33.me2\", \"i34.me2\", \"i35.me2\", \"i36.me2\", \"i32.me3\", \"i33.me3\", \"i34.me3\", \"i35.me3\", \"i36.me3\", \"i32.me4\", \"i33.me4\", \"i34.me4\", \"i35.me4\", \"i36.me4\", \"i32.me5\", \"i33.me5\", \"i34.me5\", \"i35.me5\", \"i36.me5\", \"i32.me6\", \"i33.me6\", \"i34.me6\", \"i35.me6\", \"i32.me7\", \"i33.me7\", \"i34.me7\", \"i35.me7\", \"i32.me8\", \"i33.me8\", \"i34.me8\", \"i35.me8\", \"i32.me9\", \"i33.me9\", \"i34.me9\", \"i35.me9\", \"i32.me10\", \"i33.me10\", \"i34.me10\", \"i35.me10\", \"i32.me11\", \"i33.me11\", \"i34.me11\", \"i35.me11\"], \"reduced_thread_usage\": true, \"debug_info\": true, \"simulation\": false} >$@
+@echo -n {\"sku\": \"nfp-4xxxc-b0\", \"worker_mes\": [\"i32.me0\", \"i33.me0\", \"i34.me0\", \"i35.me0\", \"i36.me0\"], \"reduced_thread_usage\": true, \"debug_info\": true, \"simulation\": false} >$@
# #
# Generate IR from P4 # Generate IR from P4
# #
$(OUTDIR)/orchestrator_speedo.yml: p4src/orchestrator_speedo.p4 \ $(OUTDIR)/orchestrator_autoscaling_design1.yml: p4src/orchestrator_autoscaling_design1.p4 \
$(MAKEFILE_LIST) $(MAKEFILE_LIST)
@echo --------- @echo ---------
@echo compiling p4 $@ @echo compiling p4 $@
@echo --------- @echo ---------
@mkdir -p $(PIFOUTDIR) @mkdir -p $(PIFOUTDIR)
$(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/orchestrator_speedo.yml \ $(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/orchestrator_autoscaling_design1.yml \
--p4-version 16 \ --p4-version 16 \
--p4-compiler p4c-nfp \ --p4-compiler p4c-nfp \
--source_info \ --source_info \
p4src/orchestrator_speedo.p4 p4src/orchestrator_autoscaling_design1.p4
# #
...@@ -229,16 +222,16 @@ $(PIFOUTDIR)/pif_pkt_clone%h \ ...@@ -229,16 +222,16 @@ $(PIFOUTDIR)/pif_pkt_clone%h \
$(PIFOUTDIR)/pif_flcalc%c \ $(PIFOUTDIR)/pif_flcalc%c \
$(PIFOUTDIR)/pif_flcalc%h \ $(PIFOUTDIR)/pif_flcalc%h \
$(PIFOUTDIR)/pif_field_lists%h \ $(PIFOUTDIR)/pif_field_lists%h \
$(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/orchestrator_speedo%yml $(MAKEFILE_LIST) $(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/orchestrator_autoscaling_design1%yml $(MAKEFILE_LIST)
@echo --------- @echo ---------
@echo generating pif $@ @echo generating pif $@
@echo --------- @echo ---------
@mkdir -p $(PIFOUTDIR) @mkdir -p $(PIFOUTDIR)
$(SDKP4DIR)/bin/nfirc -o $(PIFOUTDIR)/ \ $(SDKP4DIR)/bin/nfirc -o $(PIFOUTDIR)/ \
--p4info $(OUTDIR)/orchestrator_speedo.p4info.json \ --p4info $(OUTDIR)/orchestrator_autoscaling_design1.p4info.json \
--debugpoints \ --debugpoints \
--mac_ingress_timestamp \ --mac_ingress_timestamp \
$(OUTDIR)/orchestrator_speedo.yml $(OUTDIR)/orchestrator_autoscaling_design1.yml
# #
...@@ -707,8 +700,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a ...@@ -707,8 +700,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \
$(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \ $(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \
p4src/static_dispatch_function.c \ ../../../../../../../opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h \
p4src/nic_function_test.c \
$(PIFOUTDIR)/pif_design.h \ $(PIFOUTDIR)/pif_design.h \
$(MAKEFILE_LIST) $(MAKEFILE_LIST)
@echo --------- @echo ---------
...@@ -762,6 +754,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a ...@@ -762,6 +754,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
-I$(SDKP4DIR)/components/dcfl/me/lib/dcfl \ -I$(SDKP4DIR)/components/dcfl/me/lib/dcfl \
-I$(SDKP4DIR)/components/dcfl/shared/include/dcfl \ -I$(SDKP4DIR)/components/dcfl/shared/include/dcfl \
-I$(SDKDIR)/components/standardlibrary/include \ -I$(SDKDIR)/components/standardlibrary/include \
-I../../../../../../../opt/netronome/p4/components/flowcache/me/lib/flowcache \
-FI$(SDKP4DIR)/components/nfp_pif/me/apps/pif_app_nfd/include/config.h \ -FI$(SDKP4DIR)/components/nfp_pif/me/apps/pif_app_nfd/include/config.h \
-Fo$(OUTDIR)/pif_app_nfd.list/ \ -Fo$(OUTDIR)/pif_app_nfd.list/ \
-Fe$(OUTDIR)/pif_app_nfd.list/pif_app_nfd $(NFCC_FLAGS) \ -Fe$(OUTDIR)/pif_app_nfd.list/pif_app_nfd $(NFCC_FLAGS) \
...@@ -818,8 +811,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a ...@@ -818,8 +811,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \
$(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \ $(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \
p4src/static_dispatch_function.c \ ../../../../../../../opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h
p4src/nic_function_test.c
# #
# APP_MASTER # APP_MASTER
......
...@@ -14,6 +14,8 @@ sudo ifconfig vf0_1 192.168.2.3/24 up ...@@ -14,6 +14,8 @@ sudo ifconfig vf0_1 192.168.2.3/24 up
echo "y" | docker system prune echo "y" | docker system prune
docker network create -d macvlan --subnet=192.168.2.0/24 --aux-address="vf0_0=192.168.2.2" --aux-address="vf0_1=192.168.2.3" --aux-address="vf0_2=192.168.2.4" -o parent=vf0_1 pub_net docker network create -d macvlan --subnet=192.168.2.0/24 --aux-address="vf0_0=192.168.2.2" --aux-address="vf0_1=192.168.2.3" --aux-address="vf0_2=192.168.2.4" -o parent=vf0_1 pub_net
# docker network create -d bridge --subnet=192.168.2.0/24 --aux-address="vf0_0=192.168.2.2" --aux-address="vf0_1=192.168.2.3" --aux-address="vf0_2=192.168.2.4" xanadu_kafka-serverless
# move vf0_0 into its own namespace # move vf0_0 into its own namespace
# sudo ip netns exec ns_server ip link set vf0_0 netns 1 # sudo ip netns exec ns_server ip link set vf0_0 netns 1
sudo ip netns delete ns_server sudo ip netns delete ns_server
...@@ -51,3 +53,12 @@ sudo ip netns exec ns_server ifconfig vf0_2 mtu 9000 ...@@ -51,3 +53,12 @@ sudo ip netns exec ns_server ifconfig vf0_2 mtu 9000
# sudo ip addr add 10.129.6.5/24 dev bridgek0 # sudo ip addr add 10.129.6.5/24 dev bridgek0
# sudo ip link set bridgek0 up # sudo ip link set bridgek0 up
# create veth cable for kafka
# sudo ip link add veth_nnic0 type veth peer name veth_nnic1
# sudo ip link set veth_nnic0 netns ns_server
# sudo ip netns exec ns_server ip addr add 10.128.2.201/24 dev veth_nnic0
# sudo ip netns exec ns_server ip link set dev veth_nnic0 up
# sudo ip addr add 10.128.2.200/24 dev veth_nnic1
# sudo ip link set dev veth_nnic1 up
...@@ -17,7 +17,9 @@ done ...@@ -17,7 +17,9 @@ done
if [[ $compile_flag -eq 1 ]] if [[ $compile_flag -eq 1 ]]
then then
# compile the nfp code # compile the nfp code
sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator.nffw -p ./p4src/out -4 ./p4src/orchestrator.p4 -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp # sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator_autoscaling_design1.nffw -p ./p4src/out -4 ./p4src/orchestrator_autoscaling_design1.p4 -c ./p4src/packet_counter.c /opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp -A 1 -I /opt/netronome/p4/components/flowcache/me/lib/flowcache/
sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator_autoscaling_design1.nffw -p ./p4src/out -4 ./p4src/orchestrator_autoscaling_design1.p4 -c /opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp -A 1 -I /opt/netronome/p4/components/flowcache/me/lib/flowcache/
#sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator.nffw -p ./p4src/out -4 ./p4src/orchestrator.p4 -c ./p4src/memory.c ./p4src/memory2.c -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp #sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator.nffw -p ./p4src/out -4 ./p4src/orchestrator.p4 -c ./p4src/memory.c ./p4src/memory2.c -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp
#cd $nic_function_loc #cd $nic_function_loc
#files=$(./generate_names.sh) #files=$(./generate_names.sh)
...@@ -33,7 +35,7 @@ then ...@@ -33,7 +35,7 @@ then
cd /opt/netronome/p4/bin/ cd /opt/netronome/p4/bin/
# offload # offload
sudo ./rtecli design-load -f $location/p4src/orchestrator.nffw -c $location/p4src/echo.p4cfg -p $location/p4src/out/pif_design.json sudo ./rtecli design-load -f $location/p4src/orchestrator_autoscaling_design1.nffw -c $location/p4src/echo_autoscaling1.p4cfg -p $location/p4src/out/pif_design.json
# returning back to base # returning back to base
cd $location cd $location
......
{
"registers": {
"configs": []
},
"tables": {
"ingress::fwd": {
"rules": [
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p0"
}
}
},
"name": "host_to_net",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p1"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.0"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.1"
}
}
}
]
},
"ingress::dispatch": {
"rules": [
{
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" }
}
},
"name": "dispatch_to_worker5a1",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
}
}
}
],
"default_rule": {
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "default"
}
}
},
"multicast": {},
"meters": {
"configs": []
}
}
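
A minimal rule-generator sketch (not part of the commit): it rebuilds one ingress::dispatch entry in the layout of the config above, with the worker address, MAC, UDP port and egress value taken from the sample entry as assumptions.

import json

def dispatch_rule(function_id, dst_ip="192.168.2.3", dst_port="8080",
                  mac="00:22:22:22:22:22", egress="v0.1"):
    # Field names mirror the p4cfg entry above; all defaults are assumptions.
    return {
        "action": {
            "type": "ingress::dispatch_act",
            "data": {
                "dstAddr": {"value": dst_ip},
                "ethernetAddr": {"value": mac},
                "dstPort": {"value": dst_port},
                "egress_port": {"value": egress},
            },
        },
        "name": "dispatch_to_worker_%s" % function_id,
        "match": {"map_hdr.function_id": {"value": str(function_id)}},
    }

print(json.dumps(dispatch_rule(38813), indent=4))
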
{
"registers": {
"configs": []
},
"tables": {
"ingress::fwd": {
"rules": [
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p0"
}
}
},
"name": "host_to_net",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p1"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.0"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.1"
}
}
}
]
},
"ingress::dispatch": {
"rules": [
{
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" },
"autoscaling" : {"value" : "3"}
}
},
"name": "dispatch_to_worker5a1",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
}
}
}
],
"default_rule": {
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" },
"autoscaling" : {"value" : "1"}
}
},
"name": "default"
}
},
"ingress::warmstart_dispatch": {
"rules": [
{
"action": {
"type": "ingress::warmstart_dispatch_act",
"data": {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8081" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "autodsfsff1",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
},
"map_hdr.fno" : {
"value" : "0"
}
}
},
{
"action": {
"type": "ingress::warmstart_dispatch_act",
"data": {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8082" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "autosjgjtyntyntcalef2",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
},
"map_hdr.fno" : {
"value" : "1"
}
}
},
{
"action": {
"type": "ingress::warmstart_dispatch_act",
"data": {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8083" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "autosjgjtyntyntcalef2",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
},
"map_hdr.fno" : {
"value" : "2"
}
}
}
]
}
},
"multicast": {},
"meters": {
"configs": []
}
}
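
The warmstart_dispatch rules above fan a single function id out across consecutive worker ports (8081-8083), keyed on map_hdr.fno. A hedged generator sketch for those rules; the replica count, base port and addressing are assumptions read off the sample entries:

def warmstart_rules(function_id, replicas=3, base_port=8081,
                    dst_ip="192.168.2.3", mac="00:22:22:22:22:22",
                    egress="v0.1"):
    # One rule per replica: (function_id, fno) -> base_port + fno.
    rules = []
    for fno in range(replicas):
        rules.append({
            "action": {
                "type": "ingress::warmstart_dispatch_act",
                "data": {
                    "dstAddr": {"value": dst_ip},
                    "dstPort": {"value": str(base_port + fno)},
                    "egress_port": {"value": egress},
                    "ethernetAddr": {"value": mac},
                },
            },
            "name": "warmstart_fno%d" % fno,
            "match": {
                "map_hdr.function_id": {"value": str(function_id)},
                "map_hdr.fno": {"value": str(fno)},
            },
        })
    return rules
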
...@@ -62,11 +62,14 @@ header map_hdr_t { ...@@ -62,11 +62,14 @@ header map_hdr_t {
bit<32> function_id; bit<32> function_id;
bit<32> data; bit<32> data;
bit<8> function_count; bit<8> function_count;
// bit<8> autoscaling;
// bit<8> fno;
bit<8> f0; bit<8> f0;
bit<8> f1; bit<8> f1;
bit<8> f2; bit<8> f2;
bit<8> f3; bit<8> f3;
bit<8> f4; bit<8> f4;
// bit<8> batch_count; // bit<8> batch_count;
} }
......
...@@ -4,7 +4,7 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout ...@@ -4,7 +4,7 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout
packet.extract<ipv4_t>(hdr.ipv4); packet.extract<ipv4_t>(hdr.ipv4);
transition select(hdr.ipv4.fragOffset, hdr.ipv4.ihl, hdr.ipv4.protocol) { transition select(hdr.ipv4.fragOffset, hdr.ipv4.ihl, hdr.ipv4.protocol) {
(13w0x0 &&& 13w0x0, 4w0x5 &&& 4w0xf, 8w0x11 &&& 8w0xff): parse_udp; (13w0x0 &&& 13w0x0, 4w0x5 &&& 4w0xf, 8w0x11 &&& 8w0xff): parse_udp;
// (13w0x0 &&& 13w0x0, 4w0x5, 8w0x6): parse_tcp; //(13w0x0 &&& 13w0x0, 4w0x5, 8w0x6): parse_tcp;
default: accept; default: accept;
} }
} }
...@@ -26,10 +26,10 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout ...@@ -26,10 +26,10 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout
} }
} }
// state parse_tcp { //state parse_tcp {
// packet.extract(hdr.tcp); // packet.extract(hdr.tcp);
// transition accept; // transition accept;
// } //}
@name(".parse_map_hdr") state parse_map_hdr { @name(".parse_map_hdr") state parse_map_hdr {
packet.extract(hdr.map_hdr); packet.extract(hdr.map_hdr);
......
...@@ -427,7 +427,7 @@ layout: ...@@ -427,7 +427,7 @@ layout:
########################################## ##########################################
source_info: source_info:
date: 2021/12/15 05:37:07 date: 2022/01/20 19:51:09
output_file: p4src/orchestrator.yml output_file: p4src/orchestrator.yml
p4_version: '16' p4_version: '16'
source_files: source_files:
......
{
"program" : "p4src/orchestrator_autoscaling_design1.p4",
"__meta__" : {
"version" : [2, 7],
"compiler" : "https://github.com/p4lang/p4c"
},
"header_types" : [
{
"name" : "scalars_0",
"id" : 0,
"fields" : []
},
{
"name" : "digest_check_udp_port",
"id" : 1,
"fields" : [
["udp_port", 16, false],
["fid", 32, false],
["packet_count", 4, false],
["src_ip", 32, false],
["dst_ip", 32, false],
["_padding", 4, false]
]
},
{
"name" : "ethernet_t",
"id" : 2,
"fields" : [
["dstAddr", 48, false],
["srcAddr", 48, false],
["etherType", 16, false]
]
},
{
"name" : "ipv4_t",
"id" : 3,
"fields" : [
["version", 4, false],
["ihl", 4, false],
["diffserv", 8, false],
["totalLen", 16, false],
["identification", 16, false],
["flags", 3, false],
["fragOffset", 13, false],
["ttl", 8, false],
["protocol", 8, false],
["hdrChecksum", 16, false],
["srcAddr", 32, false],
["dstAddr", 32, false]
]
},
{
"name" : "udp_t",
"id" : 4,
"fields" : [
["srcPort", 16, false],
["dstPort", 16, false],
["length_", 16, false],
["checksum", 16, false]
]
},
{
"name" : "map_hdr_t",
"id" : 5,
"fields" : [
["chain_id", 32, false],
["exec_id", 32, false],
["function_id", 32, false],
["data", 32, false],
["function_count", 8, false],
["f0", 8, false],
["f1", 8, false],
["f2", 8, false],
["f3", 8, false],
["f4", 8, false]
]
},
{
"name" : "ingress_metadata_t",
"id" : 6,
"fields" : [
["drop", 1, false],
["egress_port", 9, false],
["packet_type", 4, false],
["_padding_1", 2, false]
]
},
{
"name" : "resubmit_meta_t",
"id" : 7,
"fields" : [
["current_state", 8, false],
["data", 32, false]
]
},
{
"name" : "exec_hdr_t",
"id" : 8,
"fields" : [
["function_count", 8, false],
["function", 8, false]
]
},
{
"name" : "standard_metadata",
"id" : 9,
"fields" : [
["ingress_port", 16, false],
["packet_length", 14, false],
["egress_spec", 16, false],
["egress_port", 16, false],
["egress_instance", 10, false],
["instance_type", 4, false],
["clone_spec", 32, false],
["parser_error_location", 8, false],
["parser_status", 3, false],
["checksum_error", 1, false]
]
}
],
"headers" : [
{
"name" : "tmp",
"id" : 0,
"header_type" : "digest_check_udp_port",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "dig",
"id" : 1,
"header_type" : "digest_check_udp_port",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "scalars",
"id" : 2,
"header_type" : "scalars_0",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "standard_metadata",
"id" : 3,
"header_type" : "standard_metadata",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "ethernet",
"id" : 4,
"header_type" : "ethernet_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "ipv4",
"id" : 5,
"header_type" : "ipv4_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "udp",
"id" : 6,
"header_type" : "udp_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "map_hdr",
"id" : 7,
"header_type" : "map_hdr_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "ing_metadata",
"id" : 8,
"header_type" : "ingress_metadata_t",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "resubmit_meta",
"id" : 9,
"header_type" : "resubmit_meta_t",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "exec_hdr",
"id" : 10,
"header_type" : "exec_hdr_t",
"metadata" : true,
"pi_omit" : true
}
],
"header_stacks" : [],
"header_union_types" : [],
"header_unions" : [],
"header_union_stacks" : [],
"field_lists" : [],
"errors" : [
["NoError", 1],
["PacketTooShort", 2],
["NoMatch", 3],
["StackOutOfBounds", 4],
["HeaderTooShort", 5],
["ParserTimeout", 6]
],
"enums" : [],
"parsers" : [
{
"name" : "parser",
"id" : 0,
"init_state" : "start",
"parse_states" : [
{
"name" : "parse_ipv4",
"id" : 0,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "ipv4"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "0x00000511",
"mask" : "0x00000fff",
"next_state" : "parse_udp"
},
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [
{
"type" : "field",
"value" : ["ipv4", "fragOffset"]
},
{
"type" : "field",
"value" : ["ipv4", "ihl"]
},
{
"type" : "field",
"value" : ["ipv4", "protocol"]
}
],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 3,
"column" : 31,
"source_fragment" : "parse_ipv4"
}
},
{
"name" : "parse_udp",
"id" : 1,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "udp"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "0x1f40",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x2329",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232a",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232b",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232c",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232d",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232e",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232f",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x2382",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x2328",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [
{
"type" : "field",
"value" : ["udp", "dstPort"]
}
],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 12,
"column" : 30,
"source_fragment" : "parse_udp"
}
},
{
"name" : "parse_map_hdr",
"id" : 2,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "map_hdr"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 34,
"column" : 34,
"source_fragment" : "parse_map_hdr"
}
},
{
"name" : "start",
"id" : 3,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "ethernet"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "0x0800",
"mask" : null,
"next_state" : "parse_ipv4"
},
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [
{
"type" : "field",
"value" : ["ethernet", "etherType"]
}
],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 39,
"column" : 26,
"source_fragment" : "start"
}
}
]
}
],
"deparsers" : [
{
"name" : "deparser",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 122,
"column" : 8,
"source_fragment" : "DeparserImpl"
},
"order" : ["ethernet", "ipv4", "udp", "map_hdr"]
}
],
"meter_arrays" : [],
"counter_arrays" : [],
"register_arrays" : [
{
"name" : "current_state",
"id" : 0,
"source_info" : {
"filename" : "p4src/includes/headers.p4",
"line" : 12,
"column" : 25,
"source_fragment" : "current_state"
},
"size" : 16384,
"bitwidth" : 8
},
{
"name" : "dispatch_state",
"id" : 1,
"source_info" : {
"filename" : "p4src/includes/headers.p4",
"line" : 13,
"column" : 25,
"source_fragment" : "dispatch_state"
},
"size" : 16384,
"bitwidth" : 8
},
{
"name" : "batch",
"id" : 2,
"source_info" : {
"filename" : "p4src/includes/headers.p4",
"line" : 14,
"column" : 25,
"source_fragment" : "batch"
},
"size" : 16384,
"bitwidth" : 8
},
{
"name" : "fwd_checks",
"id" : 3,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 32,
"column" : 24,
"source_fragment" : "fwd_checks"
},
"size" : 1,
"bitwidth" : 4
}
],
"calculations" : [
{
"name" : "calc",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 133,
"column" : 8,
"source_fragment" : "verify_checksum( ..."
},
"algo" : "csum16",
"input" : [
{
"type" : "field",
"value" : ["ipv4", "version"]
},
{
"type" : "field",
"value" : ["ipv4", "ihl"]
},
{
"type" : "field",
"value" : ["ipv4", "diffserv"]
},
{
"type" : "field",
"value" : ["ipv4", "totalLen"]
},
{
"type" : "field",
"value" : ["ipv4", "identification"]
},
{
"type" : "field",
"value" : ["ipv4", "flags"]
},
{
"type" : "field",
"value" : ["ipv4", "fragOffset"]
},
{
"type" : "field",
"value" : ["ipv4", "ttl"]
},
{
"type" : "field",
"value" : ["ipv4", "protocol"]
},
{
"type" : "field",
"value" : ["ipv4", "srcAddr"]
},
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
}
],
"output_width" : 16
},
{
"name" : "calc_0",
"id" : 1,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 153,
"column" : 8,
"source_fragment" : "update_checksum( ..."
},
"algo" : "csum16",
"input" : [
{
"type" : "field",
"value" : ["ipv4", "version"]
},
{
"type" : "field",
"value" : ["ipv4", "ihl"]
},
{
"type" : "field",
"value" : ["ipv4", "diffserv"]
},
{
"type" : "field",
"value" : ["ipv4", "totalLen"]
},
{
"type" : "field",
"value" : ["ipv4", "identification"]
},
{
"type" : "field",
"value" : ["ipv4", "flags"]
},
{
"type" : "field",
"value" : ["ipv4", "fragOffset"]
},
{
"type" : "field",
"value" : ["ipv4", "ttl"]
},
{
"type" : "field",
"value" : ["ipv4", "protocol"]
},
{
"type" : "field",
"value" : ["ipv4", "srcAddr"]
},
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
}
],
"output_width" : 16
}
],
"learn_lists" : [
{
"id" : 1,
"name" : "digest_check_udp_port",
"elements" : [
{
"type" : "field",
"value" : ["tmp", "udp_port"]
},
{
"type" : "field",
"value" : ["tmp", "fid"]
},
{
"type" : "field",
"value" : ["tmp", "packet_count"]
},
{
"type" : "field",
"value" : ["tmp", "src_ip"]
},
{
"type" : "field",
"value" : ["tmp", "dst_ip"]
}
]
}
],
"actions" : [
{
"name" : "NoAction",
"id" : 0,
"runtime_data" : [],
"primitives" : [],
"source_info" : {
"filename" : "/opt/netronome/p4/include/16/p4include/core.p4",
"line" : 68,
"column" : 7,
"source_fragment" : "NoAction"
}
},
{
"name" : "NoAction",
"id" : 1,
"runtime_data" : [],
"primitives" : [],
"source_info" : {
"filename" : "/opt/netronome/p4/include/16/p4include/core.p4",
"line" : 68,
"column" : 7,
"source_fragment" : "NoAction"
}
},
{
"name" : "fwd_act",
"id" : 2,
"runtime_data" : [
{
"name" : "port",
"bitwidth" : 16
}
],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["standard_metadata", "egress_spec"]
},
{
"type" : "runtime_data",
"value" : 0
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 38,
"column" : 8,
"source_fragment" : "standard_metadata.egress_spec = port"
}
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 37,
"column" : 29,
"source_fragment" : "fwd_act"
}
},
{
"name" : "dispatch_act",
"id" : 3,
"runtime_data" : [
{
"name" : "dstAddr",
"bitwidth" : 32
},
{
"name" : "dstPort",
"bitwidth" : 16
},
{
"name" : "ethernetAddr",
"bitwidth" : 48
},
{
"name" : "egress_port",
"bitwidth" : 16
}
],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
},
{
"type" : "runtime_data",
"value" : 0
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 51,
"column" : 8,
"source_fragment" : "hdr.ipv4.dstAddr = dstAddr"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["udp", "dstPort"]
},
{
"type" : "runtime_data",
"value" : 1
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 52,
"column" : 8,
"source_fragment" : "hdr.udp.dstPort = dstPort"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["ethernet", "dstAddr"]
},
{
"type" : "runtime_data",
"value" : 2
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 53,
"column" : 8,
"source_fragment" : "hdr.ethernet.dstAddr = ethernetAddr"
}
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 50,
"column" : 34,
"source_fragment" : "dispatch_act"
}
},
{
"name" : "act",
"id" : 4,
"runtime_data" : [],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "udp_port"]
},
{
"type" : "field",
"value" : ["udp", "dstPort"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 72,
"column" : 12,
"source_fragment" : "dig.udp_port = hdr.udp.dstPort"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "fid"]
},
{
"type" : "field",
"value" : ["map_hdr", "function_id"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 73,
"column" : 12,
"source_fragment" : "dig.fid = hdr.map_hdr.function_id"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "packet_count"]
},
{
"type" : "hexstr",
"value" : "0x00"
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 34,
"column" : 15,
"source_fragment" : "0; ..."
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "src_ip"]
},
{
"type" : "field",
"value" : ["ipv4", "srcAddr"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 75,
"column" : 12,
"source_fragment" : "dig.src_ip = hdr.ipv4.srcAddr"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "dst_ip"]
},
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 76,
"column" : 12,
"source_fragment" : "dig.dst_ip = hdr.ipv4.dstAddr"
}
},
{
"op" : "assign_header",
"parameters" : [
{
"type" : "header",
"value" : "tmp"
},
{
"type" : "header",
"value" : "dig"
}
]
},
{
"op" : "generate_digest",
"parameters" : [
{
"type" : "hexstr",
"value" : "0x00000000"
},
{
"type" : "hexstr",
"value" : "0x1"
}
]
}
]
},
{
"name" : "fix_checksum",
"id" : 5,
"runtime_data" : [],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["udp", "checksum"]
},
{
"type" : "hexstr",
"value" : "0x0000"
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 110,
"column" : 8,
"source_fragment" : "hdr.udp.checksum = 16w0"
}
}
],
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 109,
"column" : 33,
"source_fragment" : "fix_checksum"
}
}
],
"pipelines" : [
{
"name" : "ingress",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 29,
"column" : 8,
"source_fragment" : "ingress"
},
"init_table" : "node_2",
"tables" : [
{
"name" : "dispatch",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 57,
"column" : 29,
"source_fragment" : "dispatch"
},
"key" : [
{
"match_type" : "exact",
"target" : ["map_hdr", "function_id"],
"mask" : null
}
],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [3, 1],
"actions" : ["dispatch_act", "NoAction"],
"base_default_next" : "tbl_act",
"next_tables" : {
"dispatch_act" : "tbl_act",
"NoAction" : "tbl_act"
},
"default_entry" : {
"action_id" : 1,
"action_const" : false,
"action_data" : [],
"action_entry_const" : false
}
},
{
"name" : "tbl_act",
"id" : 1,
"key" : [],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [4],
"actions" : ["act"],
"base_default_next" : "fwd",
"next_tables" : {
"act" : "fwd"
},
"default_entry" : {
"action_id" : 4,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
}
},
{
"name" : "fwd",
"id" : 2,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 41,
"column" : 24,
"source_fragment" : "fwd"
},
"key" : [
{
"match_type" : "exact",
"target" : ["standard_metadata", "ingress_port"],
"mask" : null
}
],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [2, 0],
"actions" : ["fwd_act", "NoAction"],
"base_default_next" : null,
"next_tables" : {
"fwd_act" : null,
"NoAction" : null
},
"default_entry" : {
"action_id" : 0,
"action_const" : false,
"action_data" : [],
"action_entry_const" : false
}
},
{
"name" : "fwd",
"id" : 3,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 41,
"column" : 24,
"source_fragment" : "fwd"
},
"key" : [
{
"match_type" : "exact",
"target" : ["standard_metadata", "ingress_port"],
"mask" : null
}
],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [2, 0],
"actions" : ["fwd_act", "NoAction"],
"base_default_next" : null,
"next_tables" : {
"fwd_act" : null,
"NoAction" : null
},
"default_entry" : {
"action_id" : 0,
"action_const" : false,
"action_data" : [],
"action_entry_const" : false
}
}
],
"action_profiles" : [],
"conditionals" : [
{
"name" : "node_2",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 67,
"column" : 12,
"source_fragment" : "hdr.ipv4.isValid() && hdr.udp.dstPort == 8000"
},
"expression" : {
"type" : "expression",
"value" : {
"op" : "and",
"left" : {
"type" : "expression",
"value" : {
"op" : "d2b",
"left" : null,
"right" : {
"type" : "field",
"value" : ["ipv4", "$valid$"]
}
}
},
"right" : {
"type" : "expression",
"value" : {
"op" : "==",
"left" : {
"type" : "field",
"value" : ["udp", "dstPort"]
},
"right" : {
"type" : "hexstr",
"value" : "0x1f40"
}
}
}
}
},
"true_next" : "dispatch",
"false_next" : "fwd"
}
]
},
{
"name" : "egress",
"id" : 1,
"source_info" : {
"filename" : "p4src/orchestrator_autoscaling_design1.p4",
"line" : 94,
"column" : 8,
"source_fragment" : "egress"
},
"init_table" : "tbl_fix_checksum",
"tables" : [
{
"name" : "tbl_fix_checksum",
"id" : 4,
"key" : [],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [5],
"actions" : ["fix_checksum"],
"base_default_next" : null,
"next_tables" : {
"fix_checksum" : null
},
"default_entry" : {
"action_id" : 5,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
}
}
],
"action_profiles" : [],
"conditionals" : []
}
],
"checksums" : [
{
"name" : "cksum",
"id" : 0,
"target" : ["ipv4", "hdrChecksum"],
"calculation" : "calc",
"verify" : true,
"if_cond" : {
"type" : "expression",
"value" : {
"op" : "d2b",
"left" : null,
"right" : {
"type" : "field",
"value" : ["ipv4", "$valid$"]
}
}
},
"type" : "generic"
},
{
"name" : "cksum_0",
"id" : 1,
"target" : ["ipv4", "hdrChecksum"],
"calculation" : "calc_0",
"verify" : false,
"if_cond" : {
"type" : "expression",
"value" : {
"op" : "d2b",
"left" : null,
"right" : {
"type" : "field",
"value" : ["ipv4", "$valid$"]
}
}
},
"type" : "generic"
}
],
"force_arith" : [],
"extern_instances" : [],
"extern_function_instances" : [],
"field_aliases" : [],
"flow_variables" : [
{
"flow" : "ingress",
"header" : "tmp"
},
{
"flow" : "ingress",
"header" : "dig"
}
]
}
\ No newline at end of file
#include <core.p4>
#define V1MODEL_VERSION 20200408
#include <v1model.p4>
#include "includes/defines.p4"
#include "includes/headers.p4"
#include "includes/parsers.p4"
//extern void prime();
//extern void prime2();
//extern void packet_counter();
//extern void static_dispatch_function();
struct small_dig{
bit<16> port;
}
struct digest_check_udp_port{
bit<16> udp_port;
bit<32> fid;
bit<4> packet_count;
bit<32> src_ip;
bit<32> dst_ip;
// bit<8> autoscaling;
// bit<8> fno;
}
control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
//register<bit<8>>(10000) function_id_check;
register<bit<4>>(1) fwd_checks;
//bit<8> pc;
bit<4> pc2=0;
bit<1> static=1w1;
@name(".fwd_act") action fwd_act(bit<16> port) {
standard_metadata.egress_spec = port;
}
@name(".fwd") table fwd {
actions = {
fwd_act;
}
key = {
standard_metadata.ingress_port : exact;
}
}
@name(".dispatch_act") action dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr, bit<16>egress_port) {
hdr.ipv4.dstAddr = dstAddr;
hdr.udp.dstPort = dstPort;
hdr.ethernet.dstAddr = ethernetAddr;
}
// #pragma netro no_lookup_caching dispatch_act
@name(".dispatch") table dispatch {
actions = {
dispatch_act;
}
key = {
hdr.map_hdr.function_id : exact;
}
}
apply {
if (hdr.ipv4.isValid() && hdr.udp.dstPort == DISPATCHER_PORT) {
dispatch.apply();
digest_check_udp_port dig;
dig.udp_port = hdr.udp.dstPort;
dig.fid = hdr.map_hdr.function_id;
dig.packet_count = pc2;
dig.src_ip = hdr.ipv4.srcAddr;
dig.dst_ip = hdr.ipv4.dstAddr;
//dig.autoscaling = hdr.map_hdr.autoscaling;
//dig.fno = hdr.map_hdr.fno;
digest<digest_check_udp_port>(0, dig );
fwd.apply();
} else {
fwd.apply();
}
//bit<16>mod = 16w10;
//hdr.udp.dstPort = 10000+(pc2 % mod);
}
}
control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
// @name(".ethernet_set_mac_act") action ethernet_set_mac_act(bit<48> smac, bit<48> dmac) {
// hdr.ethernet.srcAddr = smac;
// hdr.ethernet.dstAddr = dmac;
// }
// @name(".ethernet_set_mac") table ethernet_set_mac {
// actions = {
// ethernet_set_mac_act;
// }
// key = {
// standard_metadata.egress_port: exact;
// }
// }
@name("fix_checksum") action fix_checksum() {
hdr.udp.checksum = 16w0;
}
apply {
// if (hdr.udp.dstPort == MDS_PORT) {
// ethernet_set_mac.apply();
// }
fix_checksum();
}
}
control DeparserImpl(packet_out packet, in headers hdr) {
apply {
packet.emit<ethernet_t>(hdr.ethernet);
packet.emit<ipv4_t>(hdr.ipv4);
packet.emit<udp_t>(hdr.udp);
packet.emit<map_hdr_t>(hdr.map_hdr);
}
}
control verifyChecksum(inout headers hdr, inout metadata meta) {
apply {
verify_checksum(
hdr.ipv4.isValid(),
{ hdr.ipv4.version,
hdr.ipv4.ihl,
hdr.ipv4.diffserv,
hdr.ipv4.totalLen,
hdr.ipv4.identification,
hdr.ipv4.flags,
hdr.ipv4.fragOffset,
hdr.ipv4.ttl,
hdr.ipv4.protocol,
hdr.ipv4.srcAddr,
hdr.ipv4.dstAddr },
hdr.ipv4.hdrChecksum,
HashAlgorithm.csum16);
}
}
control computeChecksum(inout headers hdr, inout metadata meta) {
apply {
update_checksum(
hdr.ipv4.isValid(),
{ hdr.ipv4.version,
hdr.ipv4.ihl,
hdr.ipv4.diffserv,
hdr.ipv4.totalLen,
hdr.ipv4.identification,
hdr.ipv4.flags,
hdr.ipv4.fragOffset,
hdr.ipv4.ttl,
hdr.ipv4.protocol,
hdr.ipv4.srcAddr,
hdr.ipv4.dstAddr },
hdr.ipv4.hdrChecksum,
HashAlgorithm.csum16);
}
}
V1Switch<headers, metadata>(ParserImpl(), verifyChecksum(), ingress(), egress(), computeChecksum(), DeparserImpl()) main;
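
Each packet that takes the dispatch path in the program above also raises a digest_check_udp_port digest toward the host. A rough host-side decoder sketch, assuming the fields arrive packed MSB-first in learn-list order (16+32+4+32+32+4 = 120 bits, i.e. 15 bytes); the actual RTE digest framing may differ:

import socket, struct

def parse_digest(buf):
    # Assumed layout: udp_port(16) fid(32) packet_count(4)
    # src_ip(32) dst_ip(32) _padding(4), most significant bits first.
    v = int.from_bytes(buf[:15], "big")
    v >>= 4                              # drop _padding
    dst_ip = v & 0xFFFFFFFF; v >>= 32
    src_ip = v & 0xFFFFFFFF; v >>= 32
    packet_count = v & 0xF; v >>= 4
    fid = v & 0xFFFFFFFF; v >>= 32
    udp_port = v & 0xFFFF
    return (udp_port, fid, packet_count,
            socket.inet_ntoa(struct.pack(">I", src_ip)),
            socket.inet_ntoa(struct.pack(">I", dst_ip)))
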
{
"tables": [
{
"preamble": {
"id": 33595533,
"name": "fwd",
"alias": "fwd"
},
"matchFields": [
{
"id": 1,
"name": "standard_metadata.ingress_port",
"bitwidth": 16,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16805069
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
},
{
"preamble": {
"id": 33612818,
"name": "dispatch",
"alias": "dispatch"
},
"matchFields": [
{
"id": 1,
"name": "map_hdr.function_id",
"bitwidth": 32,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16786857
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
}
],
"actions": [
{
"preamble": {
"id": 16800567,
"name": "NoAction",
"alias": "NoAction"
}
},
{
"preamble": {
"id": 16805069,
"name": "fwd_act",
"alias": "fwd_act"
},
"params": [
{
"id": 1,
"name": "port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16786857,
"name": "dispatch_act",
"alias": "dispatch_act"
},
"params": [
{
"id": 1,
"name": "dstAddr",
"bitwidth": 32
},
{
"id": 2,
"name": "dstPort",
"bitwidth": 16
},
{
"id": 3,
"name": "ethernetAddr",
"bitwidth": 48
},
{
"id": 4,
"name": "egress_port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16841338,
"name": "fix_checksum",
"alias": "fix_checksum"
}
}
]
}
##########################################
# Header instance definitions #
##########################################
errors:
type: enum
values:
- NoError: 1
- PacketTooShort: 2
- NoMatch: 3
- StackOutOfBounds: 4
- HeaderTooShort: 5
- ParserTimeout: 6
ethernet:
fields:
- dstAddr: 48
- srcAddr: 48
- etherType: 16
type: header
exec_hdr:
fields:
- function_count: 8
- function: 8
type: metadata
ing_metadata:
fields:
- drop: 1
- egress_port: 9
- packet_type: 4
- _padding_1: 2
type: metadata
ingress::act::dig:
fields:
- udp_port: 16
- fid: 32
- packet_count: 4
- src_ip: 32
- dst_ip: 32
- _padding: 4
type: metadata
ingress::act::tmp:
fields:
- udp_port: 16
- fid: 32
- packet_count: 4
- src_ip: 32
- dst_ip: 32
- _padding: 4
type: metadata
ipv4:
calculated_fields:
- condition: valid(ipv4)
field: hdrChecksum
func: calc
type: verify
- condition: valid(ipv4)
field: hdrChecksum
func: calc_0
type: update
fields:
- version: 4
- ihl: 4
- diffserv: 8
- totalLen: 16
- identification: 16
- flags: 3
- fragOffset: 13
- ttl: 8
- protocol: 8
- hdrChecksum: 16
- srcAddr: 32
- dstAddr: 32
type: header
map_hdr:
fields:
- chain_id: 32
- exec_id: 32
- function_id: 32
- data: 32
- function_count: 8
- f0: 8
- f1: 8
- f2: 8
- f3: 8
- f4: 8
type: header
resubmit_meta:
fields:
- current_state: 8
- data: 32
type: metadata
standard_metadata:
fields:
- ingress_port: 16
- packet_length: 14
- egress_spec: 16
- egress_port: 16
- egress_instance: 10
- instance_type: 4
- clone_spec: 32
- parser_error_location: 8
- parser_status: 3
- checksum_error: 1
type: metadata
udp:
fields:
- srcPort: 16
- dstPort: 16
- length_: 16
- checksum: 16
type: header
##########################################
# Register definitions #
##########################################
batch:
class: global
fields:
- value: 8
instance_count: 16384
type: register
current_state:
class: global
fields:
- value: 8
instance_count: 16384
type: register
dispatch_state:
class: global
fields:
- value: 8
instance_count: 16384
type: register
fwd_checks:
class: global
fields:
- value: 4
instance_count: 1
type: register
##########################################
# Field list definitions #
##########################################
field_list_1:
fields:
- ipv4.version
- ipv4.ihl
- ipv4.diffserv
- ipv4.totalLen
- ipv4.identification
- ipv4.flags
- ipv4.fragOffset
- ipv4.ttl
- ipv4.protocol
- ipv4.srcAddr
- ipv4.dstAddr
type: field_list
field_list_2:
fields:
- ingress::act::tmp.udp_port
- ingress::act::tmp.fid
- ingress::act::tmp.packet_count
- ingress::act::tmp.src_ip
- ingress::act::tmp.dst_ip
type: field_list
##########################################
# Field list calculations #
##########################################
calc:
algorithm: csum16
inputs:
- field_list_1
output_width: 16
type: field_list_calculation
calc_0:
algorithm: csum16
inputs:
- field_list_1
output_width: 16
type: field_list_calculation
##########################################
# Parse states #
##########################################
parse_ipv4:
implementation: extract(ipv4);
select_value:
- ipv4.fragOffset
- ipv4.ihl
- ipv4.protocol
src_filename: p4src/includes/parsers.p4
src_lineno: 3
type: parse_state
parse_map_hdr:
implementation: extract(map_hdr);
src_filename: p4src/includes/parsers.p4
src_lineno: 34
type: parse_state
parse_udp:
implementation: extract(udp);
select_value:
- udp.dstPort
src_filename: p4src/includes/parsers.p4
src_lineno: 12
type: parse_state
start:
implementation: extract(ethernet);
select_value:
- ethernet.etherType
src_filename: p4src/includes/parsers.p4
src_lineno: 39
type: parse_state
##########################################
# Parser #
##########################################
parser:
format: dot
implementation: |-
digraph {
start -> parse_ipv4 [value="0x0800", mask="none", order="0"]
start -> exit [value="default", mask="none", order="1"]
parse_ipv4 -> parse_udp [value="0x00000511", mask="0x00000fff", order="0"]
parse_ipv4 -> exit [value="default", mask="none", order="1"]
parse_udp -> parse_map_hdr [value="0x1f40", mask="none", order="0"]
parse_udp -> parse_map_hdr [value="0x2329", mask="none", order="1"]
parse_udp -> parse_map_hdr [value="0x232a", mask="none", order="2"]
parse_udp -> parse_map_hdr [value="0x232b", mask="none", order="3"]
parse_udp -> parse_map_hdr [value="0x232c", mask="none", order="4"]
parse_udp -> parse_map_hdr [value="0x232d", mask="none", order="5"]
parse_udp -> parse_map_hdr [value="0x232e", mask="none", order="6"]
parse_udp -> parse_map_hdr [value="0x232f", mask="none", order="7"]
parse_udp -> parse_map_hdr [value="0x2382", mask="none", order="8"]
parse_udp -> parse_map_hdr [value="0x2328", mask="none", order="9"]
parse_udp -> exit [value="default", mask="none", order="10"]
parse_map_hdr -> exit [value="default", mask="none", order="0"]
}
start_state: start
type: parser
##########################################
# Digests #
##########################################
_digest_digest_check_udp_port_0:
field_list: field_list_2
identifier: 0
type: digest
##########################################
# Action sets #
##########################################
egress::fix_checksum:
implementation: modify_field(udp.checksum, 0x0000);
src_filename: p4src/orchestrator_autoscaling_design1.p4
src_lineno: 109
type: action
ingress::act:
implementation: |-
modify_field(ingress::act::dig.udp_port, udp.dstPort);
modify_field(ingress::act::dig.fid, map_hdr.function_id);
modify_field(ingress::act::dig.packet_count, 0x00);
modify_field(ingress::act::dig.src_ip, ipv4.srcAddr);
modify_field(ingress::act::dig.dst_ip, ipv4.dstAddr);
copy_header(ingress::act::tmp, ingress::act::dig);
generate_digest(_digest_digest_check_udp_port_0);
src_filename: ''
src_lineno: 1
type: action
ingress::dispatch_act:
implementation: |-
modify_field(ipv4.dstAddr, dstAddr);
modify_field(udp.dstPort, dstPort);
modify_field(ethernet.dstAddr, ethernetAddr);
parameter_list:
- dstAddr: 32
- dstPort: 16
- ethernetAddr: 48
- egress_port: 16
src_filename: p4src/orchestrator_autoscaling_design1.p4
src_lineno: 50
type: action
ingress::fwd_act:
implementation: modify_field(standard_metadata.egress_spec, port);
parameter_list:
- port: 16
src_filename: p4src/orchestrator_autoscaling_design1.p4
src_lineno: 37
type: action
##########################################
# Ingress and Egress tables #
##########################################
egress::tbl_fix_checksum:
allowed_actions:
- egress::fix_checksum
default_entry:
action: egress::fix_checksum
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
ingress::dispatch:
allowed_actions:
- ingress::dispatch_act
match_on:
map_hdr.function_id: exact
max_entries: 1025
src_filename: p4src/orchestrator_autoscaling_design1.p4
src_lineno: 57
type: table
ingress::fwd:
allowed_actions:
- ingress::fwd_act
match_on:
standard_metadata.ingress_port: exact
max_entries: 1025
src_filename: p4src/orchestrator_autoscaling_design1.p4
src_lineno: 41
type: table
ingress::tbl_act:
allowed_actions:
- ingress::act
default_entry:
action: ingress::act
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
##########################################
# Ingress conditionals sets #
##########################################
_condition_0:
condition: (((valid(ipv4))) and (((udp.dstPort) == (8000))))
format: bracketed_expr
src_filename: p4src/orchestrator_autoscaling_design1.p4
src_lineno: 67
type: conditional
##########################################
# Ingress control flow #
##########################################
ingress_flow:
doc: control flow for ingress
format: dot
implementation: |-
digraph {
"_condition_0" -> "ingress::fwd" [condition = false]
"_condition_0" -> "ingress::dispatch" [condition = true]
"ingress::fwd" -> "exit_control_flow" [action = always]
"ingress::dispatch" -> "ingress::tbl_act" [action = always]
"ingress::tbl_act" -> "ingress::fwd" [action = always]
}
start_state: _condition_0
type: control_flow
##########################################
# Egress control flow #
##########################################
egress_flow:
doc: control flow for egress
format: dot
implementation: |-
digraph {
"egress::tbl_fix_checksum" -> "exit_control_flow" [action = always]
}
start_state: egress::tbl_fix_checksum
type: control_flow
##########################################
# Deparsers #
##########################################
deparser:
order:
- ethernet
- ipv4
- udp
- map_hdr
type: deparser
##########################################
# Processor layout #
##########################################
layout:
format: list
implementation:
- parser
- ingress
- egress
type: processor_layout
##########################################
# Source info #
##########################################
source_info:
date: 2022/02/20 20:21:57
output_file: p4src/orchestrator_autoscaling_design1.yml
p4_version: '16'
source_files:
- ''
- p4src/orchestrator_autoscaling_design1.p4
- /opt/netronome/p4/include/16/p4include/core.p4
type: source_info
#include <core.p4>
#define V1MODEL_VERSION 20200408
#include <v1model.p4>
#include "includes/defines.p4"
#include "includes/headers.p4"
#include "includes/parsers.p4"
//extern void prime();
//extern void prime2();
extern void packet_counter();
//extern void static_dispatch_function();
struct small_dig{
bit<16> port;
}
struct digest_check_udp_port{
bit<16> udp_port;
bit<32> fid;
bit<4> packet_count;
bit<32> src_ip;
bit<32> dst_ip;
bit<8> autoscaling;
bit<8> fno;
}
control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
//register<bit<8>>(10000) function_id_check;
register<bit<4>>(1) fwd_checks;
//bit<8> pc;
bit<4> pc2=0;
bit<1> static=1w1;
@name(".fwd_act") action fwd_act(bit<16> port) {
standard_metadata.egress_spec = port;
}
@name(".fwd") table fwd {
actions = {
fwd_act;
}
key = {
standard_metadata.ingress_port : exact;
}
}
@name(".dispatch_act") action dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr, bit<16>egress_port, bit<8>autoscaling) {
hdr.ipv4.dstAddr = dstAddr;
hdr.udp.dstPort = dstPort;
hdr.ethernet.dstAddr = ethernetAddr;
hdr.map_hdr.autoscaling = autoscaling;
//if(autoscaling>1)
//{
@atomic {
packet_counter();
}
//}
}
#pragma netro no_lookup_caching dispatch_act
@name(".dispatch") table dispatch {
actions = {
dispatch_act;
}
key = {
hdr.map_hdr.function_id : exact;
}
}
#pragma netro no_lookup_caching dispatch
@name(".warmstart_dispatch_act") action warmstart_dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr , bit<16> egress_port) {
hdr.ipv4.dstAddr = dstAddr;
hdr.udp.dstPort = dstPort;
hdr.ethernet.dstAddr = ethernetAddr;
}
#pragma netro no_lookup_caching warmstart_dispatch_act
@name(".warmstart_dispatch") table warmstart_dispatch {
actions = {
warmstart_dispatch_act;
}
key = {
hdr.map_hdr.function_id : exact;
hdr.map_hdr.fno : exact;
}
}
#pragma netro no_lookup_caching warmstart_dispatch
apply {
if (hdr.ipv4.isValid() && hdr.udp.dstPort == DISPATCHER_PORT) {
//function_id_check.read(pc,0);
//pc = 8w2;
//pc = hdr.map_hdr.function_id;
//function_id_check.write(0,pc);
@atomic {
if(hdr.map_hdr.fno==255)
{
hdr.map_hdr.function_id = 1;
}
dispatch.apply();
fwd_checks.read(pc2,0);
pc2 = pc2 + 1;
fwd_checks.write(0,pc2);
if(hdr.map_hdr.autoscaling > 1)
{
warmstart_dispatch.apply();
}
}
digest_check_udp_port dig;
dig.udp_port = hdr.udp.dstPort;
dig.fid = hdr.map_hdr.function_id;
dig.packet_count = pc2;
dig.src_ip = hdr.ipv4.srcAddr;
dig.dst_ip = hdr.ipv4.dstAddr;
dig.autoscaling = hdr.map_hdr.autoscaling;
dig.fno = hdr.map_hdr.fno;
digest<digest_check_udp_port>(0, dig );
fwd.apply();
} else {
fwd.apply();
}
//bit<16>mod = 16w10;
//hdr.udp.dstPort = 10000+(pc2 % mod);
}
}
control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
// @name(".ethernet_set_mac_act") action ethernet_set_mac_act(bit<48> smac, bit<48> dmac) {
// hdr.ethernet.srcAddr = smac;
// hdr.ethernet.dstAddr = dmac;
// }
// @name(".ethernet_set_mac") table ethernet_set_mac {
// actions = {
// ethernet_set_mac_act;
// }
// key = {
// standard_metadata.egress_port: exact;
// }
// }
@name("fix_checksum") action fix_checksum() {
hdr.udp.checksum = 16w0;
}
apply {
// if (hdr.udp.dstPort == MDS_PORT) {
// ethernet_set_mac.apply();
// }
fix_checksum();
}
}
control DeparserImpl(packet_out packet, in headers hdr) {
apply {
packet.emit<ethernet_t>(hdr.ethernet);
packet.emit<ipv4_t>(hdr.ipv4);
packet.emit<udp_t>(hdr.udp);
packet.emit<map_hdr_t>(hdr.map_hdr);
}
}
control verifyChecksum(inout headers hdr, inout metadata meta) {
apply {
verify_checksum(
hdr.ipv4.isValid(),
{ hdr.ipv4.version,
hdr.ipv4.ihl,
hdr.ipv4.diffserv,
hdr.ipv4.totalLen,
hdr.ipv4.identification,
hdr.ipv4.flags,
hdr.ipv4.fragOffset,
hdr.ipv4.ttl,
hdr.ipv4.protocol,
hdr.ipv4.srcAddr,
hdr.ipv4.dstAddr },
hdr.ipv4.hdrChecksum,
HashAlgorithm.csum16);
}
}
control computeChecksum(inout headers hdr, inout metadata meta) {
apply {
update_checksum(
hdr.ipv4.isValid(),
{ hdr.ipv4.version,
hdr.ipv4.ihl,
hdr.ipv4.diffserv,
hdr.ipv4.totalLen,
hdr.ipv4.identification,
hdr.ipv4.flags,
hdr.ipv4.fragOffset,
hdr.ipv4.ttl,
hdr.ipv4.protocol,
hdr.ipv4.srcAddr,
hdr.ipv4.dstAddr },
hdr.ipv4.hdrChecksum,
HashAlgorithm.csum16);
}
}
V1Switch<headers, metadata>(ParserImpl(), verifyChecksum(), ingress(), egress(), computeChecksum(), DeparserImpl()) main;
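
A host-side model (assumptions only, mirroring echo_autoscaling1.p4cfg) of the coldstart/autoscaling path in the apply block above: fno == 255 forces function_id to 1 before the dispatch lookup, and when the matched entry carries autoscaling > 1 the warmstart_dispatch table picks the replica port by fno. The table contents below are illustrative; the real lookups happen on the NIC.

DISPATCH = {38813: {"dst_port": 8080, "autoscaling": 3}}
WARMSTART = {(38813, 0): 8081, (38813, 1): 8082, (38813, 2): 8083}

def dispatch_model(function_id, fno):
    if fno == 255:                # coldstart marker: remap before the lookup
        function_id = 1
    entry = DISPATCH.get(function_id)
    if entry is None:
        return None               # the default rule would apply on the NIC
    dst_port = entry["dst_port"]
    if entry["autoscaling"] > 1:
        # scaled-out function: warmstart_dispatch picks the replica by fno
        dst_port = WARMSTART.get((function_id, fno), dst_port)
    return dst_port

assert dispatch_model(38813, 1) == 8082
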
{
"program" : "p4src/orchestrator_coldstart.p4",
"__meta__" : {
"version" : [2, 7],
"compiler" : "https://github.com/p4lang/p4c"
},
"header_types" : [
{
"name" : "scalars_0",
"id" : 0,
"fields" : [
["pc2", 4, false],
["_padding_2", 4, false]
]
},
{
"name" : "digest_check_udp_port",
"id" : 1,
"fields" : [
["udp_port", 16, false],
["fid", 32, false],
["packet_count", 4, false],
["src_ip", 32, false],
["dst_ip", 32, false],
["autoscaling", 8, false],
["fno", 8, false],
["_padding", 4, false]
]
},
{
"name" : "ethernet_t",
"id" : 2,
"fields" : [
["dstAddr", 48, false],
["srcAddr", 48, false],
["etherType", 16, false]
]
},
{
"name" : "ipv4_t",
"id" : 3,
"fields" : [
["version", 4, false],
["ihl", 4, false],
["diffserv", 8, false],
["totalLen", 16, false],
["identification", 16, false],
["flags", 3, false],
["fragOffset", 13, false],
["ttl", 8, false],
["protocol", 8, false],
["hdrChecksum", 16, false],
["srcAddr", 32, false],
["dstAddr", 32, false]
]
},
{
"name" : "udp_t",
"id" : 4,
"fields" : [
["srcPort", 16, false],
["dstPort", 16, false],
["length_", 16, false],
["checksum", 16, false]
]
},
{
"name" : "map_hdr_t",
"id" : 5,
"fields" : [
["chain_id", 32, false],
["exec_id", 32, false],
["function_id", 32, false],
["data", 32, false],
["function_count", 8, false],
["autoscaling", 8, false],
["fno", 8, false],
["f0", 8, false],
["f1", 8, false],
["f2", 8, false],
["f3", 8, false],
["f4", 8, false]
]
},
{
"name" : "ingress_metadata_t",
"id" : 6,
"fields" : [
["drop", 1, false],
["egress_port", 9, false],
["packet_type", 4, false],
["_padding_1", 2, false]
]
},
{
"name" : "resubmit_meta_t",
"id" : 7,
"fields" : [
["current_state", 8, false],
["data", 32, false]
]
},
{
"name" : "exec_hdr_t",
"id" : 8,
"fields" : [
["function_count", 8, false],
["function", 8, false]
]
},
{
"name" : "standard_metadata",
"id" : 9,
"fields" : [
["ingress_port", 16, false],
["packet_length", 14, false],
["egress_spec", 16, false],
["egress_port", 16, false],
["egress_instance", 10, false],
["instance_type", 4, false],
["clone_spec", 32, false],
["parser_error_location", 8, false],
["parser_status", 3, false],
["checksum_error", 1, false]
]
}
],
"headers" : [
{
"name" : "tmp",
"id" : 0,
"header_type" : "digest_check_udp_port",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "dig",
"id" : 1,
"header_type" : "digest_check_udp_port",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "scalars",
"id" : 2,
"header_type" : "scalars_0",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "standard_metadata",
"id" : 3,
"header_type" : "standard_metadata",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "ethernet",
"id" : 4,
"header_type" : "ethernet_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "ipv4",
"id" : 5,
"header_type" : "ipv4_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "udp",
"id" : 6,
"header_type" : "udp_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "map_hdr",
"id" : 7,
"header_type" : "map_hdr_t",
"metadata" : false,
"pi_omit" : true
},
{
"name" : "ing_metadata",
"id" : 8,
"header_type" : "ingress_metadata_t",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "resubmit_meta",
"id" : 9,
"header_type" : "resubmit_meta_t",
"metadata" : true,
"pi_omit" : true
},
{
"name" : "exec_hdr",
"id" : 10,
"header_type" : "exec_hdr_t",
"metadata" : true,
"pi_omit" : true
}
],
"header_stacks" : [],
"header_union_types" : [],
"header_unions" : [],
"header_union_stacks" : [],
"field_lists" : [],
"errors" : [
["NoError", 1],
["PacketTooShort", 2],
["NoMatch", 3],
["StackOutOfBounds", 4],
["HeaderTooShort", 5],
["ParserTimeout", 6]
],
"enums" : [],
"parsers" : [
{
"name" : "parser",
"id" : 0,
"init_state" : "start",
"parse_states" : [
{
"name" : "parse_ipv4",
"id" : 0,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "ipv4"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "0x00000511",
"mask" : "0x00000fff",
"next_state" : "parse_udp"
},
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [
{
"type" : "field",
"value" : ["ipv4", "fragOffset"]
},
{
"type" : "field",
"value" : ["ipv4", "ihl"]
},
{
"type" : "field",
"value" : ["ipv4", "protocol"]
}
],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 3,
"column" : 31,
"source_fragment" : "parse_ipv4"
}
},
{
"name" : "parse_udp",
"id" : 1,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "udp"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "0x1f40",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x2329",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232a",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232b",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232c",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232d",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232e",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x232f",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x2382",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "0x2328",
"mask" : null,
"next_state" : "parse_map_hdr"
},
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [
{
"type" : "field",
"value" : ["udp", "dstPort"]
}
],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 12,
"column" : 30,
"source_fragment" : "parse_udp"
}
},
{
"name" : "parse_map_hdr",
"id" : 2,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "map_hdr"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 34,
"column" : 34,
"source_fragment" : "parse_map_hdr"
}
},
{
"name" : "start",
"id" : 3,
"parser_ops" : [
{
"parameters" : [
{
"type" : "regular",
"value" : "ethernet"
}
],
"op" : "extract"
}
],
"transitions" : [
{
"value" : "0x0800",
"mask" : null,
"next_state" : "parse_ipv4"
},
{
"value" : "default",
"mask" : null,
"next_state" : null
}
],
"transition_key" : [
{
"type" : "field",
"value" : ["ethernet", "etherType"]
}
],
"source_info" : {
"filename" : "p4src/includes/parsers.p4",
"line" : 39,
"column" : 26,
"source_fragment" : "start"
}
}
]
}
],
"deparsers" : [
{
"name" : "deparser",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 170,
"column" : 8,
"source_fragment" : "DeparserImpl"
},
"order" : ["ethernet", "ipv4", "udp", "map_hdr"]
}
],
"meter_arrays" : [],
"counter_arrays" : [],
"register_arrays" : [
{
"name" : "current_state",
"id" : 0,
"source_info" : {
"filename" : "p4src/includes/headers.p4",
"line" : 12,
"column" : 25,
"source_fragment" : "current_state"
},
"size" : 16384,
"bitwidth" : 8
},
{
"name" : "dispatch_state",
"id" : 1,
"source_info" : {
"filename" : "p4src/includes/headers.p4",
"line" : 13,
"column" : 25,
"source_fragment" : "dispatch_state"
},
"size" : 16384,
"bitwidth" : 8
},
{
"name" : "batch",
"id" : 2,
"source_info" : {
"filename" : "p4src/includes/headers.p4",
"line" : 14,
"column" : 25,
"source_fragment" : "batch"
},
"size" : 16384,
"bitwidth" : 8
},
{
"name" : "fwd_checks",
"id" : 3,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 32,
"column" : 24,
"source_fragment" : "fwd_checks"
},
"size" : 1,
"bitwidth" : 4
}
],
"calculations" : [
{
"name" : "calc",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 181,
"column" : 8,
"source_fragment" : "verify_checksum( ..."
},
"algo" : "csum16",
"input" : [
{
"type" : "field",
"value" : ["ipv4", "version"]
},
{
"type" : "field",
"value" : ["ipv4", "ihl"]
},
{
"type" : "field",
"value" : ["ipv4", "diffserv"]
},
{
"type" : "field",
"value" : ["ipv4", "totalLen"]
},
{
"type" : "field",
"value" : ["ipv4", "identification"]
},
{
"type" : "field",
"value" : ["ipv4", "flags"]
},
{
"type" : "field",
"value" : ["ipv4", "fragOffset"]
},
{
"type" : "field",
"value" : ["ipv4", "ttl"]
},
{
"type" : "field",
"value" : ["ipv4", "protocol"]
},
{
"type" : "field",
"value" : ["ipv4", "srcAddr"]
},
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
}
],
"output_width" : 16
},
{
"name" : "calc_0",
"id" : 1,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 201,
"column" : 8,
"source_fragment" : "update_checksum( ..."
},
"algo" : "csum16",
"input" : [
{
"type" : "field",
"value" : ["ipv4", "version"]
},
{
"type" : "field",
"value" : ["ipv4", "ihl"]
},
{
"type" : "field",
"value" : ["ipv4", "diffserv"]
},
{
"type" : "field",
"value" : ["ipv4", "totalLen"]
},
{
"type" : "field",
"value" : ["ipv4", "identification"]
},
{
"type" : "field",
"value" : ["ipv4", "flags"]
},
{
"type" : "field",
"value" : ["ipv4", "fragOffset"]
},
{
"type" : "field",
"value" : ["ipv4", "ttl"]
},
{
"type" : "field",
"value" : ["ipv4", "protocol"]
},
{
"type" : "field",
"value" : ["ipv4", "srcAddr"]
},
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
}
],
"output_width" : 16
}
],
"learn_lists" : [
{
"id" : 1,
"name" : "digest_check_udp_port",
"elements" : [
{
"type" : "field",
"value" : ["tmp", "udp_port"]
},
{
"type" : "field",
"value" : ["tmp", "fid"]
},
{
"type" : "field",
"value" : ["tmp", "packet_count"]
},
{
"type" : "field",
"value" : ["tmp", "src_ip"]
},
{
"type" : "field",
"value" : ["tmp", "dst_ip"]
},
{
"type" : "field",
"value" : ["tmp", "autoscaling"]
},
{
"type" : "field",
"value" : ["tmp", "fno"]
}
]
}
],
"actions" : [
{
"name" : "NoAction",
"id" : 0,
"runtime_data" : [],
"primitives" : [],
"source_info" : {
"filename" : "/opt/netronome/p4/include/16/p4include/core.p4",
"line" : 68,
"column" : 7,
"source_fragment" : "NoAction"
}
},
{
"name" : "NoAction",
"id" : 1,
"runtime_data" : [],
"primitives" : [],
"source_info" : {
"filename" : "/opt/netronome/p4/include/16/p4include/core.p4",
"line" : 68,
"column" : 7,
"source_fragment" : "NoAction"
}
},
{
"name" : "NoAction",
"id" : 2,
"runtime_data" : [],
"primitives" : [],
"source_info" : {
"filename" : "/opt/netronome/p4/include/16/p4include/core.p4",
"line" : 68,
"column" : 7,
"source_fragment" : "NoAction"
}
},
{
"name" : "fwd_act",
"id" : 3,
"runtime_data" : [
{
"name" : "port",
"bitwidth" : 16
}
],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["standard_metadata", "egress_spec"]
},
{
"type" : "runtime_data",
"value" : 0
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 38,
"column" : 8,
"source_fragment" : "standard_metadata.egress_spec = port"
}
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 37,
"column" : 29,
"source_fragment" : "fwd_act"
}
},
{
"name" : "dispatch_act",
"id" : 4,
"runtime_data" : [
{
"name" : "dstAddr",
"bitwidth" : 32
},
{
"name" : "dstPort",
"bitwidth" : 16
},
{
"name" : "ethernetAddr",
"bitwidth" : 48
},
{
"name" : "egress_port",
"bitwidth" : 16
},
{
"name" : "autoscaling",
"bitwidth" : 8
}
],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
},
{
"type" : "runtime_data",
"value" : 0
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 51,
"column" : 8,
"source_fragment" : "hdr.ipv4.dstAddr = dstAddr"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["udp", "dstPort"]
},
{
"type" : "runtime_data",
"value" : 1
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 52,
"column" : 8,
"source_fragment" : "hdr.udp.dstPort = dstPort"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["ethernet", "dstAddr"]
},
{
"type" : "runtime_data",
"value" : 2
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 53,
"column" : 8,
"source_fragment" : "hdr.ethernet.dstAddr = ethernetAddr"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["map_hdr", "autoscaling"]
},
{
"type" : "runtime_data",
"value" : 4
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 54,
"column" : 8,
"source_fragment" : "hdr.map_hdr.autoscaling = autoscaling"
}
},
{
"op" : "packet_counter",
"parameters" : []
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 50,
"column" : 34,
"source_fragment" : "dispatch_act"
}
},
{
"name" : "warmstart_dispatch_act",
"id" : 5,
"runtime_data" : [
{
"name" : "dstAddr",
"bitwidth" : 32
},
{
"name" : "dstPort",
"bitwidth" : 16
},
{
"name" : "ethernetAddr",
"bitwidth" : 48
},
{
"name" : "egress_port",
"bitwidth" : 16
}
],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
},
{
"type" : "runtime_data",
"value" : 0
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 75,
"column" : 8,
"source_fragment" : "hdr.ipv4.dstAddr = dstAddr"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["udp", "dstPort"]
},
{
"type" : "runtime_data",
"value" : 1
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 76,
"column" : 8,
"source_fragment" : "hdr.udp.dstPort = dstPort"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["ethernet", "dstAddr"]
},
{
"type" : "runtime_data",
"value" : 2
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 77,
"column" : 8,
"source_fragment" : "hdr.ethernet.dstAddr = ethernetAddr"
}
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 74,
"column" : 44,
"source_fragment" : "warmstart_dispatch_act"
}
},
{
"name" : "act",
"id" : 6,
"runtime_data" : [],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["map_hdr", "function_id"]
},
{
"type" : "hexstr",
"value" : "0x00000001"
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 104,
"column" : 20,
"source_fragment" : "hdr.map_hdr.function_id = 1"
}
}
]
},
{
"name" : "act_0",
"id" : 7,
"runtime_data" : [],
"primitives" : [
{
"op" : "register_read",
"parameters" : [
{
"type" : "field",
"value" : ["scalars", "pc2"]
},
{
"type" : "register_array",
"value" : "fwd_checks"
},
{
"type" : "hexstr",
"value" : "0x00000000"
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 107,
"column" : 16,
"source_fragment" : "fwd_checks.read(pc2,0)"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["scalars", "pc2"]
},
{
"type" : "expression",
"value" : {
"type" : "expression",
"value" : {
"op" : "&",
"left" : {
"type" : "expression",
"value" : {
"op" : "+",
"left" : {
"type" : "field",
"value" : ["scalars", "pc2"]
},
"right" : {
"type" : "hexstr",
"value" : "0x01"
}
}
},
"right" : {
"type" : "hexstr",
"value" : "0x0f"
}
}
}
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 108,
"column" : 16,
"source_fragment" : "pc2 = pc2 + 1"
}
},
{
"op" : "register_write",
"parameters" : [
{
"type" : "register_array",
"value" : "fwd_checks"
},
{
"type" : "hexstr",
"value" : "0x00000000"
},
{
"type" : "field",
"value" : ["scalars", "pc2"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 109,
"column" : 16,
"source_fragment" : "fwd_checks.write(0,pc2)"
}
}
]
},
{
"name" : "act_1",
"id" : 8,
"runtime_data" : [],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "udp_port"]
},
{
"type" : "field",
"value" : ["udp", "dstPort"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 119,
"column" : 12,
"source_fragment" : "dig.udp_port = hdr.udp.dstPort"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "fid"]
},
{
"type" : "field",
"value" : ["map_hdr", "function_id"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 120,
"column" : 12,
"source_fragment" : "dig.fid = hdr.map_hdr.function_id"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "packet_count"]
},
{
"type" : "field",
"value" : ["scalars", "pc2"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 121,
"column" : 12,
"source_fragment" : "dig.packet_count = pc2"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "src_ip"]
},
{
"type" : "field",
"value" : ["ipv4", "srcAddr"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 122,
"column" : 12,
"source_fragment" : "dig.src_ip = hdr.ipv4.srcAddr"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "dst_ip"]
},
{
"type" : "field",
"value" : ["ipv4", "dstAddr"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 123,
"column" : 12,
"source_fragment" : "dig.dst_ip = hdr.ipv4.dstAddr"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "autoscaling"]
},
{
"type" : "field",
"value" : ["map_hdr", "autoscaling"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 124,
"column" : 12,
"source_fragment" : "dig.autoscaling = hdr.map_hdr.autoscaling"
}
},
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["dig", "fno"]
},
{
"type" : "field",
"value" : ["map_hdr", "fno"]
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 125,
"column" : 12,
"source_fragment" : "dig.fno = hdr.map_hdr.fno"
}
},
{
"op" : "assign_header",
"parameters" : [
{
"type" : "header",
"value" : "tmp"
},
{
"type" : "header",
"value" : "dig"
}
]
},
{
"op" : "generate_digest",
"parameters" : [
{
"type" : "hexstr",
"value" : "0x00000000"
},
{
"type" : "hexstr",
"value" : "0x1"
}
]
}
]
},
{
"name" : "fix_checksum",
"id" : 9,
"runtime_data" : [],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["udp", "checksum"]
},
{
"type" : "hexstr",
"value" : "0x0000"
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 158,
"column" : 8,
"source_fragment" : "hdr.udp.checksum = 16w0"
}
}
],
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 157,
"column" : 33,
"source_fragment" : "fix_checksum"
}
}
],
"pipelines" : [
{
"name" : "ingress",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 29,
"column" : 8,
"source_fragment" : "ingress"
},
"init_table" : "node_2",
"tables" : [
{
"name" : "tbl_act",
"id" : 0,
"key" : [],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [6],
"actions" : ["act"],
"base_default_next" : "dispatch",
"next_tables" : {
"act" : "dispatch"
},
"default_entry" : {
"action_id" : 6,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
}
},
{
"name" : "dispatch",
"id" : 1,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 64,
"column" : 29,
"source_fragment" : "dispatch"
},
"key" : [
{
"match_type" : "exact",
"target" : ["map_hdr", "function_id"],
"mask" : null
}
],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [4, 1],
"actions" : ["dispatch_act", "NoAction"],
"base_default_next" : "tbl_act_0",
"next_tables" : {
"dispatch_act" : "tbl_act_0",
"NoAction" : "tbl_act_0"
},
"default_entry" : {
"action_id" : 1,
"action_const" : false,
"action_data" : [],
"action_entry_const" : false
}
},
{
"name" : "tbl_act_0",
"id" : 2,
"key" : [],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [7],
"actions" : ["act_0"],
"base_default_next" : "node_7",
"next_tables" : {
"act_0" : "node_7"
},
"default_entry" : {
"action_id" : 7,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
}
},
{
"name" : "warmstart_dispatch",
"id" : 3,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 81,
"column" : 40,
"source_fragment" : "warmstart_dispatch"
},
"key" : [
{
"match_type" : "exact",
"target" : ["map_hdr", "function_id"],
"mask" : null
},
{
"match_type" : "exact",
"target" : ["map_hdr", "fno"],
"mask" : null
}
],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [5, 2],
"actions" : ["warmstart_dispatch_act", "NoAction"],
"base_default_next" : "tbl_act_1",
"next_tables" : {
"warmstart_dispatch_act" : "tbl_act_1",
"NoAction" : "tbl_act_1"
},
"default_entry" : {
"action_id" : 2,
"action_const" : false,
"action_data" : [],
"action_entry_const" : false
}
},
{
"name" : "tbl_act_1",
"id" : 4,
"key" : [],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [8],
"actions" : ["act_1"],
"base_default_next" : "fwd",
"next_tables" : {
"act_1" : "fwd"
},
"default_entry" : {
"action_id" : 8,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
}
},
{
"name" : "fwd",
"id" : 5,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 41,
"column" : 24,
"source_fragment" : "fwd"
},
"key" : [
{
"match_type" : "exact",
"target" : ["standard_metadata", "ingress_port"],
"mask" : null
}
],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [3, 0],
"actions" : ["fwd_act", "NoAction"],
"base_default_next" : null,
"next_tables" : {
"fwd_act" : null,
"NoAction" : null
},
"default_entry" : {
"action_id" : 0,
"action_const" : false,
"action_data" : [],
"action_entry_const" : false
}
},
{
"name" : "fwd",
"id" : 6,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 41,
"column" : 24,
"source_fragment" : "fwd"
},
"key" : [
{
"match_type" : "exact",
"target" : ["standard_metadata", "ingress_port"],
"mask" : null
}
],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [3, 0],
"actions" : ["fwd_act", "NoAction"],
"base_default_next" : null,
"next_tables" : {
"fwd_act" : null,
"NoAction" : null
},
"default_entry" : {
"action_id" : 0,
"action_const" : false,
"action_data" : [],
"action_entry_const" : false
}
}
],
"action_profiles" : [],
"conditionals" : [
{
"name" : "node_2",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 93,
"column" : 12,
"source_fragment" : "hdr.ipv4.isValid() && hdr.udp.dstPort == 8000"
},
"expression" : {
"type" : "expression",
"value" : {
"op" : "and",
"left" : {
"type" : "expression",
"value" : {
"op" : "d2b",
"left" : null,
"right" : {
"type" : "field",
"value" : ["ipv4", "$valid$"]
}
}
},
"right" : {
"type" : "expression",
"value" : {
"op" : "==",
"left" : {
"type" : "field",
"value" : ["udp", "dstPort"]
},
"right" : {
"type" : "hexstr",
"value" : "0x1f40"
}
}
}
}
},
"true_next" : "node_3",
"false_next" : "fwd"
},
{
"name" : "node_3",
"id" : 1,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 102,
"column" : 19,
"source_fragment" : "hdr.map_hdr.fno==255"
},
"expression" : {
"type" : "expression",
"value" : {
"op" : "==",
"left" : {
"type" : "field",
"value" : ["map_hdr", "fno"]
},
"right" : {
"type" : "hexstr",
"value" : "0xff"
}
}
},
"true_next" : "tbl_act",
"false_next" : "dispatch"
},
{
"name" : "node_7",
"id" : 2,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 112,
"column" : 19,
"source_fragment" : "hdr.map_hdr.autoscaling > 1"
},
"expression" : {
"type" : "expression",
"value" : {
"op" : ">",
"left" : {
"type" : "field",
"value" : ["map_hdr", "autoscaling"]
},
"right" : {
"type" : "hexstr",
"value" : "0x01"
}
}
},
"true_next" : "warmstart_dispatch",
"false_next" : "tbl_act_1"
}
]
},
{
"name" : "egress",
"id" : 1,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 142,
"column" : 8,
"source_fragment" : "egress"
},
"init_table" : "tbl_fix_checksum",
"tables" : [
{
"name" : "tbl_fix_checksum",
"id" : 7,
"key" : [],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [9],
"actions" : ["fix_checksum"],
"base_default_next" : null,
"next_tables" : {
"fix_checksum" : null
},
"default_entry" : {
"action_id" : 9,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
}
}
],
"action_profiles" : [],
"conditionals" : []
}
],
"checksums" : [
{
"name" : "cksum",
"id" : 0,
"target" : ["ipv4", "hdrChecksum"],
"calculation" : "calc",
"verify" : true,
"if_cond" : {
"type" : "expression",
"value" : {
"op" : "d2b",
"left" : null,
"right" : {
"type" : "field",
"value" : ["ipv4", "$valid$"]
}
}
},
"type" : "generic"
},
{
"name" : "cksum_0",
"id" : 1,
"target" : ["ipv4", "hdrChecksum"],
"calculation" : "calc_0",
"verify" : false,
"if_cond" : {
"type" : "expression",
"value" : {
"op" : "d2b",
"left" : null,
"right" : {
"type" : "field",
"value" : ["ipv4", "$valid$"]
}
}
},
"type" : "generic"
}
],
"force_arith" : [],
"extern_instances" : [],
"extern_function_instances" : [
{
"name" : "packet_counter",
"id" : 0,
"source_info" : {
"filename" : "p4src/orchestrator_coldstart.p4",
"line" : 11,
"column" : 12,
"source_fragment" : "packet_counter"
}
}
],
"field_aliases" : [],
"flow_variables" : [
{
"flow" : "ingress",
"header" : "tmp"
},
{
"flow" : "ingress",
"header" : "scalars",
"field" : "pc2"
},
{
"flow" : "ingress",
"header" : "dig"
}
]
}
\ No newline at end of file
{
"tables": [
{
"preamble": {
"id": 33595533,
"name": "fwd",
"alias": "fwd"
},
"matchFields": [
{
"id": 1,
"name": "standard_metadata.ingress_port",
"bitwidth": 16,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16805069
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
},
{
"preamble": {
"id": 33612818,
"name": "dispatch",
"alias": "dispatch"
},
"matchFields": [
{
"id": 1,
"name": "map_hdr.function_id",
"bitwidth": 32,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16786857
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
},
{
"preamble": {
"id": 33605697,
"name": "warmstart_dispatch",
"alias": "warmstart_dispatch"
},
"matchFields": [
{
"id": 1,
"name": "map_hdr.function_id",
"bitwidth": 32,
"matchType": "EXACT"
},
{
"id": 2,
"name": "map_hdr.fno",
"bitwidth": 8,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16819926
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
}
],
"actions": [
{
"preamble": {
"id": 16800567,
"name": "NoAction",
"alias": "NoAction"
}
},
{
"preamble": {
"id": 16805069,
"name": "fwd_act",
"alias": "fwd_act"
},
"params": [
{
"id": 1,
"name": "port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16786857,
"name": "dispatch_act",
"alias": "dispatch_act"
},
"params": [
{
"id": 1,
"name": "dstAddr",
"bitwidth": 32
},
{
"id": 2,
"name": "dstPort",
"bitwidth": 16
},
{
"id": 3,
"name": "ethernetAddr",
"bitwidth": 48
},
{
"id": 4,
"name": "egress_port",
"bitwidth": 16
},
{
"id": 5,
"name": "autoscaling",
"bitwidth": 8
}
]
},
{
"preamble": {
"id": 16819926,
"name": "warmstart_dispatch_act",
"alias": "warmstart_dispatch_act"
},
"params": [
{
"id": 1,
"name": "dstAddr",
"bitwidth": 32
},
{
"id": 2,
"name": "dstPort",
"bitwidth": 16
},
{
"id": 3,
"name": "ethernetAddr",
"bitwidth": 48
},
{
"id": 4,
"name": "egress_port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16841338,
"name": "fix_checksum",
"alias": "fix_checksum"
}
}
]
}
##########################################
# Header instance definitions #
##########################################
errors:
type: enum
values:
- NoError: 1
- PacketTooShort: 2
- NoMatch: 3
- StackOutOfBounds: 4
- HeaderTooShort: 5
- ParserTimeout: 6
ethernet:
fields:
- dstAddr: 48
- srcAddr: 48
- etherType: 16
type: header
exec_hdr:
fields:
- function_count: 8
- function: 8
type: metadata
ing_metadata:
fields:
- drop: 1
- egress_port: 9
- packet_type: 4
- _padding_1: 2
type: metadata
ingress::act_1::dig:
fields:
- udp_port: 16
- fid: 32
- packet_count: 4
- src_ip: 32
- dst_ip: 32
- autoscaling: 8
- fno: 8
- _padding: 4
type: metadata
ingress::act_1::tmp:
fields:
- udp_port: 16
- fid: 32
- packet_count: 4
- src_ip: 32
- dst_ip: 32
- autoscaling: 8
- fno: 8
- _padding: 4
type: metadata
ingress::scalars:
fields:
- pc2: 4
- _padding_2: 4
type: metadata
ipv4:
calculated_fields:
- condition: valid(ipv4)
field: hdrChecksum
func: calc
type: verify
- condition: valid(ipv4)
field: hdrChecksum
func: calc_0
type: update
fields:
- version: 4
- ihl: 4
- diffserv: 8
- totalLen: 16
- identification: 16
- flags: 3
- fragOffset: 13
- ttl: 8
- protocol: 8
- hdrChecksum: 16
- srcAddr: 32
- dstAddr: 32
type: header
map_hdr:
fields:
- chain_id: 32
- exec_id: 32
- function_id: 32
- data: 32
- function_count: 8
- autoscaling: 8
- fno: 8
- f0: 8
- f1: 8
- f2: 8
- f3: 8
- f4: 8
type: header
resubmit_meta:
fields:
- current_state: 8
- data: 32
type: metadata
standard_metadata:
fields:
- ingress_port: 16
- packet_length: 14
- egress_spec: 16
- egress_port: 16
- egress_instance: 10
- instance_type: 4
- clone_spec: 32
- parser_error_location: 8
- parser_status: 3
- checksum_error: 1
type: metadata
udp:
fields:
- srcPort: 16
- dstPort: 16
- length_: 16
- checksum: 16
type: header
##########################################
# Register definitions #
##########################################
batch:
class: global
fields:
- value: 8
instance_count: 16384
type: register
current_state:
class: global
fields:
- value: 8
instance_count: 16384
type: register
dispatch_state:
class: global
fields:
- value: 8
instance_count: 16384
type: register
fwd_checks:
class: global
fields:
- value: 4
instance_count: 1
type: register
##########################################
# Field list definitions #
##########################################
field_list_1:
fields:
- ipv4.version
- ipv4.ihl
- ipv4.diffserv
- ipv4.totalLen
- ipv4.identification
- ipv4.flags
- ipv4.fragOffset
- ipv4.ttl
- ipv4.protocol
- ipv4.srcAddr
- ipv4.dstAddr
type: field_list
field_list_2:
fields:
- ingress::act_1::tmp.udp_port
- ingress::act_1::tmp.fid
- ingress::act_1::tmp.packet_count
- ingress::act_1::tmp.src_ip
- ingress::act_1::tmp.dst_ip
- ingress::act_1::tmp.autoscaling
- ingress::act_1::tmp.fno
type: field_list
##########################################
# Field list calculations #
##########################################
calc:
algorithm: csum16
inputs:
- field_list_1
output_width: 16
type: field_list_calculation
calc_0:
algorithm: csum16
inputs:
- field_list_1
output_width: 16
type: field_list_calculation
##########################################
# Parse states #
##########################################
parse_ipv4:
implementation: extract(ipv4);
select_value:
- ipv4.fragOffset
- ipv4.ihl
- ipv4.protocol
src_filename: p4src/includes/parsers.p4
src_lineno: 3
type: parse_state
parse_map_hdr:
implementation: extract(map_hdr);
src_filename: p4src/includes/parsers.p4
src_lineno: 34
type: parse_state
parse_udp:
implementation: extract(udp);
select_value:
- udp.dstPort
src_filename: p4src/includes/parsers.p4
src_lineno: 12
type: parse_state
start:
implementation: extract(ethernet);
select_value:
- ethernet.etherType
src_filename: p4src/includes/parsers.p4
src_lineno: 39
type: parse_state
##########################################
# Parser #
##########################################
parser:
format: dot
implementation: |-
digraph {
start -> parse_ipv4 [value="0x0800", mask="none", order="0"]
start -> exit [value="default", mask="none", order="1"]
parse_ipv4 -> parse_udp [value="0x00000511", mask="0x00000fff", order="0"]
parse_ipv4 -> exit [value="default", mask="none", order="1"]
parse_udp -> parse_map_hdr [value="0x1f40", mask="none", order="0"]
parse_udp -> parse_map_hdr [value="0x2329", mask="none", order="1"]
parse_udp -> parse_map_hdr [value="0x232a", mask="none", order="2"]
parse_udp -> parse_map_hdr [value="0x232b", mask="none", order="3"]
parse_udp -> parse_map_hdr [value="0x232c", mask="none", order="4"]
parse_udp -> parse_map_hdr [value="0x232d", mask="none", order="5"]
parse_udp -> parse_map_hdr [value="0x232e", mask="none", order="6"]
parse_udp -> parse_map_hdr [value="0x232f", mask="none", order="7"]
parse_udp -> parse_map_hdr [value="0x2382", mask="none", order="8"]
parse_udp -> parse_map_hdr [value="0x2328", mask="none", order="9"]
parse_udp -> exit [value="default", mask="none", order="10"]
parse_map_hdr -> exit [value="default", mask="none", order="0"]
}
start_state: start
type: parser
##########################################
# External functions #
##########################################
packet_counter__0:
name: packet_counter
type: external_action
##########################################
# Digests #
##########################################
_digest_digest_check_udp_port_0:
field_list: field_list_2
identifier: 0
type: digest
##########################################
# Action Expressions #
##########################################
_expression_act_0_0:
expression: ((((ingress::scalars.pc2) + (0x01))) & (0x0f))
format: bracketed_expr
type: expression
##########################################
# Action sets #
##########################################
egress::fix_checksum:
implementation: modify_field(udp.checksum, 0x0000);
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 157
type: action
ingress::act:
implementation: modify_field(map_hdr.function_id, 0x00000001);
src_filename: ''
src_lineno: 1
type: action
ingress::act_0:
implementation: |-
register_read(ingress::scalars.pc2, fwd_checks.value, 0x00000000);
modify_field(ingress::scalars.pc2, _expression_act_0_0);
register_write(fwd_checks.value, 0x00000000, ingress::scalars.pc2);
src_filename: ''
src_lineno: 1
type: action
ingress::act_1:
implementation: |-
modify_field(ingress::act_1::dig.udp_port, udp.dstPort);
modify_field(ingress::act_1::dig.fid, map_hdr.function_id);
modify_field(ingress::act_1::dig.packet_count, ingress::scalars.pc2);
modify_field(ingress::act_1::dig.src_ip, ipv4.srcAddr);
modify_field(ingress::act_1::dig.dst_ip, ipv4.dstAddr);
modify_field(ingress::act_1::dig.autoscaling, map_hdr.autoscaling);
modify_field(ingress::act_1::dig.fno, map_hdr.fno);
copy_header(ingress::act_1::tmp, ingress::act_1::dig);
generate_digest(_digest_digest_check_udp_port_0);
src_filename: ''
src_lineno: 1
type: action
ingress::dispatch_act:
implementation: |-
modify_field(ipv4.dstAddr, dstAddr);
modify_field(udp.dstPort, dstPort);
modify_field(ethernet.dstAddr, ethernetAddr);
modify_field(map_hdr.autoscaling, autoscaling);
packet_counter();
parameter_list:
- dstAddr: 32
- dstPort: 16
- ethernetAddr: 48
- egress_port: 16
- autoscaling: 8
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 50
type: action
ingress::fwd_act:
implementation: modify_field(standard_metadata.egress_spec, port);
parameter_list:
- port: 16
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 37
type: action
ingress::warmstart_dispatch_act:
implementation: |-
modify_field(ipv4.dstAddr, dstAddr);
modify_field(udp.dstPort, dstPort);
modify_field(ethernet.dstAddr, ethernetAddr);
parameter_list:
- dstAddr: 32
- dstPort: 16
- ethernetAddr: 48
- egress_port: 16
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 74
type: action
##########################################
# Ingress and Egress tables #
##########################################
egress::tbl_fix_checksum:
allowed_actions:
- egress::fix_checksum
default_entry:
action: egress::fix_checksum
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
ingress::dispatch:
allowed_actions:
- ingress::dispatch_act
match_on:
map_hdr.function_id: exact
max_entries: 1025
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 64
type: table
ingress::fwd:
allowed_actions:
- ingress::fwd_act
match_on:
standard_metadata.ingress_port: exact
max_entries: 1025
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 41
type: table
ingress::tbl_act:
allowed_actions:
- ingress::act
default_entry:
action: ingress::act
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
ingress::tbl_act_0:
allowed_actions:
- ingress::act_0
default_entry:
action: ingress::act_0
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
ingress::tbl_act_1:
allowed_actions:
- ingress::act_1
default_entry:
action: ingress::act_1
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
ingress::warmstart_dispatch:
allowed_actions:
- ingress::warmstart_dispatch_act
match_on:
map_hdr.fno: exact
map_hdr.function_id: exact
max_entries: 1025
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 81
type: table
##########################################
# Ingress conditionals sets #
##########################################
_condition_0:
condition: (((valid(ipv4))) and (((udp.dstPort) == (8000))))
format: bracketed_expr
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 93
type: conditional
_condition_1:
condition: ((map_hdr.fno) == (255))
format: bracketed_expr
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 102
type: conditional
_condition_2:
condition: ((map_hdr.autoscaling) > (1))
format: bracketed_expr
src_filename: p4src/orchestrator_coldstart.p4
src_lineno: 112
type: conditional
##########################################
# Ingress control flow #
##########################################
ingress_flow:
doc: control flow for ingress
format: dot
implementation: |-
digraph {
"_condition_0" -> "ingress::fwd" [condition = false]
"_condition_0" -> "_condition_1" [condition = true]
"ingress::fwd" -> "exit_control_flow" [action = always]
"_condition_1" -> "ingress::dispatch" [condition = false]
"_condition_1" -> "ingress::tbl_act" [condition = true]
"ingress::dispatch" -> "ingress::tbl_act_0" [action = always]
"ingress::tbl_act_0" -> "_condition_2" [action = always]
"_condition_2" -> "ingress::tbl_act_1" [condition = false]
"_condition_2" -> "ingress::warmstart_dispatch" [condition = true]
"ingress::tbl_act_1" -> "ingress::fwd" [action = always]
"ingress::warmstart_dispatch" -> "ingress::tbl_act_1" [action = always]
"ingress::tbl_act" -> "ingress::dispatch" [action = always]
}
start_state: _condition_0
type: control_flow
##########################################
# Egress control flow #
##########################################
egress_flow:
doc: control flow for egress
format: dot
implementation: |-
digraph {
"egress::tbl_fix_checksum" -> "exit_control_flow" [action = always]
}
start_state: egress::tbl_fix_checksum
type: control_flow
##########################################
# Deparsers #
##########################################
deparser:
order:
- ethernet
- ipv4
- udp
- map_hdr
type: deparser
##########################################
# Processor layout #
##########################################
layout:
format: list
implementation:
- parser
- ingress
- egress
type: processor_layout
##########################################
# Source info #
##########################################
source_info:
date: 2022/02/15 20:54:39
output_file: p4src/orchestrator_coldstart.yml
p4_version: '16'
source_files:
- ''
- /opt/netronome/p4/include/16/p4include/core.p4
- p4src/orchestrator_coldstart.p4
type: source_info
//=============================================================================================================
#include <stdint.h>
// #include <stdlib.h>
#include <nfp/me.h>
#include <nfp/mem_atomic.h>
#include <pif_common.h>
#include "pif_plugin.h"
//=============================================================================================================
// __export __emem uint8_t pkt_cntrs[20];
int pif_plugin_packet_counter(EXTRACTED_HEADERS_T *headers, MATCH_DATA_T *match_data) {
PIF_PLUGIN_map_hdr_T *mapHdr = pif_plugin_hdr_get_map_hdr(headers);
uint32_t fid = mapHdr->function_id;
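    // Load-balancing hook reached from dispatch_act via the packet_counter
    // extern: when a function has more than one warm instance, derive the
    // instance number (fno) from the execution id so the warmstart_dispatch
    // table can steer the packet to that instance.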
if(mapHdr->autoscaling > 1)
{
uint32_t exec_id = mapHdr->exec_id;
uint8_t maxf = mapHdr->autoscaling;
mapHdr->fno = exec_id % maxf;
// pkt_cntrs++;
// mapHdr->function_id = mapHdr->function_id + mapHdr->fno;
}
mapHdr->data = mapHdr->fno;
// mapHdr->function_id = ma
return PIF_PLUGIN_RETURN_FORWARD;
}
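The plugin above is the NIC-side load-balancing decision: with autoscaling > 1 warm instances, the execution id is folded onto an instance index fno, which the warmstart_dispatch table then matches to rewrite the packet's destination. A minimal Python sketch of that selection rule (the standalone function and its name are illustrative, not part of the firmware):

# Illustrative sketch of pif_plugin_packet_counter's instance selection;
# exec_id / autoscaling / fno mirror the map_hdr fields.
def select_instance(exec_id, autoscaling, fno):
    if autoscaling > 1:
        # spread requests deterministically across the warm instances
        return exec_id % autoscaling
    # otherwise the sender-chosen fno (255 by default) is left untouched
    return fno

assert select_instance(369020, 4, 255) == 0    # 369020 % 4
assert select_instance(369020, 1, 255) == 255  # single instance: unchanged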
...@@ -5,18 +5,172 @@ from kafka import KafkaConsumer
#consumer = KafkaConsumer('deployed', 'removeWorker',
#                         "request", bootstrap_servers='10.129.6.5:9092')

consumer = KafkaConsumer('deployed', 'removeWorker', 'COLDSTART_WORKER', 'AUTOSCALE',
                         "request", bootstrap_servers='10.129.2.201:9092')
RTEInterface.Connect('thrift', "10.129.2.201", 20206)

tableIdDispatch = "ingress::dispatch"
tableId2 = "ingress::fwd"
tableIdWarmstartDispatch = "ingress::warmstart_dispatch"

dispatchRuleDictionary = {}
warmstartDispatchRuleDictionary = {}
resourceMap = {}
functionToResourceMap = {}
def makeRuleForDispatchTable(ip, port, mac, functionHash, tableId, rule_name, default_rule, autoscaling):
rule={}
if default_rule:
actions = '{ "type" : "ingress::dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
"dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" }, "autoscaling": {"value": %d} } }' \
% (ip, int(port), mac, autoscaling)
match = '{ }'
rule = {
"tableId": tableId,
"rule_name": rule_name,
"default_rule": default_rule,
"match": match,
"actions": actions
}
else:
actions = '{ "type" : "ingress::dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
"dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } , "autoscaling": {"value": %d} } }' \
% (ip, int(port), mac, autoscaling)
match = '{ "map_hdr.function_id" : { "value" : %d} }' % (functionHash)
rule = {
"tableId": tableId,
"rule_name": rule_name,
"default_rule": default_rule,
"match": match,
"actions": actions
}
return rule
def addRuleForDispatchTable(worker):
functionHash = worker[u'functionHash']
rule_name = "dispatch_to_worker" + functionHash
# functionHash = int(functionHash[0:5], 16)
functionHash = int(functionHash[9:])
ip = str(worker[u'node_id']).strip()
port = int(worker[u'portExternal'])
mac = str(worker[u'mac']).strip()
resource_id = worker[u'resource_id']
# resourceMap[resource_id] = {
# "ip": ip,
# "port": port,
# "mac": mac
# }
if(functionHash not in functionToResourceMap):
functionToResourceMap[functionHash]= []
# functionToResourceMap[functionHash].append({"resource_id": resource_id})
print("\n\nresource map : ", resourceMap)
print("\n\nfunction to resource map : ", functionToResourceMap)
default_rule = False
# autoscaling = len(functionToResourceMap[functionHash])
autoscaling = 1
rule = makeRuleForDispatchTable(ip, port, mac, functionHash, tableIdDispatch, rule_name, default_rule, autoscaling)
dispatchRuleDictionary[str(functionHash)+resource_id] = rule
print "\n\nrule added: ", dispatchRuleDictionary
RTEInterface.Tables.AddRule(rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
ruleList = RTEInterface.Tables.ListRules(tableIdDispatch)
print "\n\nnew rule list: ", ruleList, "\n\n"
return 0
def deleteRuleFromDispatchTable(worker):
functionHash = worker[u'functionHash']
# functionHash = int(functionHash[0:5], 16)
functionHash = int(functionHash[9:])
resource_id = worker[u'resource_id']
rule = dispatchRuleDictionary[str(functionHash)+resource_id]
RTEInterface.Tables.DeleteRule(
rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
del dispatchRuleDictionary[str(functionHash)+resource_id]
    resourceMap.pop(resource_id, None)  # the map population above is commented out, so guard the delete
    for i in functionToResourceMap[functionHash]:
        if resource_id == i["resource_id"]:
            functionToResourceMap[functionHash].remove(i)  # "del i" would only unbind the loop variable
            break
print("\n\nresource map : ", resourceMap)
print("\n\nfunction to resource map : ", functionToResourceMap)
ruleList = RTEInterface.Tables.ListRules(tableIdDispatch)
print "\n\ndeleted rule"
print "\n\nnew rule list: ", ruleList, "\n\n"
return 0
def updateDefaultRuleOfDispatchTable(worker):
# TODO 1: implement function to update the default rule when the worker load heap changes
# def EditRule(self, tbl_id, rule_name, default_rule, match, actions, priority = None, timeout = None):
rule_name = "default"
# functionHash = int(functionHash[0:5], 16)
ip = str(worker[u'node_id']).strip()
port = int(worker[u'portExternal'])
mac = str(worker[u'mac']).strip()
default_rule = True
autoscaling = 0
rule = makeRuleForDispatchTable(ip, port, mac, None, tableIdDispatch, rule_name, default_rule, autoscaling)
RTEInterface.Tables.EditRule(rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
print("\n\nDefault rule updated\n\n")
return 0
def updateRuleOfDispatchTable(worker):
# TODO 1: implement function to update the default rule when the worker load heap changes
# def EditRule(self, tbl_id, rule_name, default_rule, match, actions, priority = None, timeout = None):
functionHash = worker[u'functionHash']
rule_name = "dispatch_to_worker" + functionHash
functionHash = int(functionHash[9:])
ip = str(worker[u'node_id']).strip()
port = int(worker[u'portExternal'])
mac = str(worker[u'mac']).strip()
autoscaling = len(functionToResourceMap[functionHash])
default_rule = False
rule = makeRuleForDispatchTable(ip, port, mac, functionHash, tableIdDispatch, rule_name, default_rule, autoscaling)
print("\n\nupdate rule : ", rule)
RTEInterface.Tables.EditRule(rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
print("\n\nDefault rule updated\n\n")
return 0
def makeFwdTableRule(rule_name, default_rule):
rule={}
if default_rule:
actions = '{ "type" : "ingress::fwd_act", "data" : { "port" : { "value" : "p0" } } }'
match = '{ "standard_metadata.ingress_port" : { "value" : "v0.0" } }'
rule = {
"tableId": tableId2,
"rule_name": 'host_to_net',
"default_rule": default_rule,
"match": match,
"actions": actions
}
else:
actions = '{ "type" : "ingress::fwd_act", "data" : { "port" : { "value" : "p0" } } }'
match = '{ "standard_metadata.ingress_port" : { "value" : "v0.0" } }'
rule = {
"tableId": tableId2,
"rule_name": 'host_to_net',
"default_rule": default_rule,
"match": match ,
"actions": actions
}
return rule
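# The warmstart_dispatch table is keyed on (function_id, fno). The NIC plugin
# computes fno = exec_id % autoscaling, so each warm instance of a function
# needs its own rule here, with fno values 0 .. autoscaling-1 (a newly added
# instance is registered below under index autoscaling-1).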
def makeRuleForWarmstartDispatchTable(ip, port, mac, functionHash, tableId, rule_name, default_rule, fno):
rule={}
actions = '{ "type" : "ingress::warmstart_dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
"dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } } }' \ "dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } } }' \
% (ip, int(port), mac) % (ip, int(port), mac)
match = '{ "map_hdr.function_id" : { "value" : %d} }' % (functionHash) match = '{ "map_hdr.function_id" : { "value" : %d} , "map_hdr.fno": {"value": %d} }' % (functionHash, fno)
rule = { rule = {
"tableId": tableId, "tableId": tableId,
"rule_name": rule_name, "rule_name": rule_name,
...@@ -26,43 +180,122 @@ def makeRule(ip, port, mac, functionHash, tableId, rule_name, default_rule): ...@@ -26,43 +180,122 @@ def makeRule(ip, port, mac, functionHash, tableId, rule_name, default_rule):
} }
return rule return rule
def addRuleForWarmstartDispatchTable(worker):
    functionHash = worker[u'functionHash']
    # functionHash = int(functionHash[0:5], 16)
    functionHash = int(functionHash[9:])
    ip = str(worker[u'node_id']).strip()
    port = int(worker[u'portExternal'])
    mac = str(worker[u'mac']).strip()
    resource_id = worker[u'resource_id']
    resourceMap[resource_id] = {
        "ip": ip,
        "port": port,
        "mac": mac
    }
    if(functionHash not in functionToResourceMap):
        functionToResourceMap[functionHash] = []
    functionToResourceMap[functionHash].append({"resource_id": resource_id})
    print("\n\nAutoscaling resource map : ", resourceMap)
    print("\n\nAutoscaling function to resource map : ", functionToResourceMap)
    default_rule = False
    autoscaling = len(functionToResourceMap[functionHash])
    rule_name = "loadbalance_dispatch_" + str(functionHash) + "_" + resource_id
    rule = makeRuleForWarmstartDispatchTable(ip, port, mac, functionHash, tableIdWarmstartDispatch, rule_name, default_rule, autoscaling - 1)
    warmstartDispatchRuleDictionary[str(functionHash) + resource_id] = rule
    print "\n\nrule added: ", warmstartDispatchRuleDictionary
    RTEInterface.Tables.AddRule(rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
    ruleList = RTEInterface.Tables.ListRules(tableIdWarmstartDispatch)
    print "\n\nnew rule list: ", ruleList, "\n\n"
    return 0

def deleteRuleWarmstartFromDispatchTable(worker):
    functionHash = worker[u'functionHash']
    # functionHash = int(functionHash[0:5], 16)
    functionHash = int(functionHash[9:])
    resource_id = worker[u'resource_id']
    rule = warmstartDispatchRuleDictionary[str(functionHash) + resource_id]
    RTEInterface.Tables.DeleteRule(
        rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
    del warmstartDispatchRuleDictionary[str(functionHash) + resource_id]
    del resourceMap[resource_id]
    for i in functionToResourceMap[functionHash]:
        if resource_id == i["resource_id"]:
            functionToResourceMap[functionHash].remove(i)  # "del i" would only unbind the loop variable
            break
    print("\n\nresource map : ", resourceMap)
    print("\n\nfunction to resource map : ", functionToResourceMap)
    ruleList = RTEInterface.Tables.ListRules(tableIdWarmstartDispatch)
    print "\n\ndeleted rule"
    print "\n\nnew rule list: ", ruleList, "\n\n"
    return 0

for msg in consumer:
    if msg.topic == "deployed":
        msg = msg.value.decode('utf-8')
        worker = json.loads(msg)
        print("received message on deployed : ", worker)
        functionHash = worker[u'functionHash']
        functionHash = int(functionHash[9:])
        # if(functionHash not in functionToResourceMap):
        addRuleForDispatchTable(worker)
        # addRuleForWarmstartDispatchTable(worker)
        # else:
        #     addRuleForWarmstartDispatchTable(worker)
        #     updateRuleOfDispatchTable(worker)
    elif msg.topic == "removeWorker":
        msg = msg.value.decode('utf-8')
        worker = json.loads(msg)
        print("received message on removeWorker : ", worker)
deleteRuleFromDispatchTable(worker)
elif msg.topic == "COLDSTART_WORKER":
msg = msg.value.decode('utf-8')
worker = json.loads(msg)
print("received message on COLDSTART_WORKER : ", worker)
updateDefaultRuleOfDispatchTable(worker[u'nodes'])
# elif msg.topic == "AUTOSCALE":
# msg = msg.value.decode('utf-8')
# updateDefaultRule2()
# worker1={
# 'node_id': '192.168.2.3',
# 'portExternal': '8081',
# 'mac': '00:22:22:22:22:22',
# 'resource_id': 'resource_id1',
# 'functionHash': 'function_1234'
# }
# addRuleForDispatchTable(worker1)
# addRuleForWarmstartDispatchTable(worker1)
# worker2={
# 'node_id': '192.168.2.3',
# 'portExternal': '8082',
# 'mac': '00:22:22:22:22:22',
# 'resource_id': 'resource_id2',
# 'functionHash': 'function_1234'
# }
# # addRuleForDispatchTable(worker2)
# addRuleForWarmstartDispatchTable(worker2)
# updateRuleOfDispatchTable(worker2)
import sys
import json, ast
from RTEInterface import RTEInterface
from kafka import KafkaConsumer
import time
f = open('../../constants_local.json')
constants = json.load(f)
consumer = KafkaConsumer('deployed3', 'removeWorker2', 'COLDSTART_WORKER2', 'AUTOSCALE2', constants['topics']['update_function_instance_nic'],
"request2", bootstrap_servers='10.129.2.201:9092')
RTEInterface.Connect('thrift', "10.129.2.201", 20206)
tableIdDispatch = "ingress::dispatch"
tableId2 = "ingress::fwd"
tableIdWarmstartDispatch = "ingress::warmstart_dispatch"
dispatchRuleDictionary = {}
warmstartDispatchRuleDictionary = {}
resourceMap = {}
functionToResourceMap = {}
def makeRuleForDispatchTable(ip, port, mac, functionHash, tableId, rule_name, default_rule):
rule={}
if default_rule:
actions = '{ "type" : "ingress::dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
"dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } } }' \
% (ip, int(port), mac)
match = '{ }'
rule = {
"tableId": tableId,
"rule_name": rule_name,
"default_rule": default_rule,
"match": match,
"actions": actions
}
else:
actions = '{ "type" : "ingress::dispatch_act", "data" : { "dstAddr" : { "value" : "%s" }, \
"dstPort" : { "value" : "%d" } , "egress_port": { "value": "v0.1" }, "ethernetAddr": { "value": "%s" } } }' \
% (ip, int(port), mac)
match = '{ "map_hdr.function_id" : { "value" : %d} }' % (functionHash)
rule = {
"tableId": tableId,
"rule_name": rule_name,
"default_rule": default_rule,
"match": match,
"actions": actions
}
return rule
def addRuleForDispatchTable(worker):
functionHash = worker[u'functionHash']
rule_name = "dispatch_to_worker" + functionHash
# functionHash = int(functionHash[0:5], 16)
functionHash = int(functionHash[9:])
ip = str(worker[u'node_id']).strip()
port = int(worker[u'portExternal'])
mac = str(worker[u'mac']).strip()
resource_id = worker[u'resource_id']
resourceMap[resource_id] = {
"ip": ip,
"port": port,
"mac": mac
}
if(functionHash not in functionToResourceMap):
functionToResourceMap[functionHash]= []
functionToResourceMap[functionHash].append({"resource_id": resource_id})
print("\n\nresource map : ", resourceMap)
print("\n\nfunction to resource map : ", functionToResourceMap)
default_rule = False
rule = makeRuleForDispatchTable(ip, port, mac, functionHash, tableIdDispatch, rule_name, default_rule)
dispatchRuleDictionary[str(functionHash)+resource_id] = rule
print "\n\nrule added: ", dispatchRuleDictionary
RTEInterface.Tables.AddRule(rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
ruleList = RTEInterface.Tables.ListRules(tableIdDispatch)
print "\n\nnew rule list: ", ruleList, "\n\n"
return 0
def deleteRuleFromDispatchTable(worker):
functionHash = worker[u'functionHash']
# functionHash = int(functionHash[0:5], 16)
functionHash = int(functionHash[9:])
resource_id = worker[u'resource_id']
rule = dispatchRuleDictionary[str(functionHash)+resource_id]
RTEInterface.Tables.DeleteRule(
rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
del dispatchRuleDictionary[str(functionHash)+resource_id]
del resourceMap[resource_id]
    for i in functionToResourceMap[functionHash]:
        if resource_id == i["resource_id"]:
            functionToResourceMap[functionHash].remove(i)  # "del i" would only unbind the loop variable
            break
print("\n\nresource map : ", resourceMap)
print("\n\nfunction to resource map : ", functionToResourceMap)
ruleList = RTEInterface.Tables.ListRules(tableIdDispatch)
print "\n\ndeleted rule"
print "\n\nnew rule list: ", ruleList, "\n\n"
return 0
def updateDefaultRuleOfDispatchTable(worker):
# TODO 1: implement function to update the default rule when the worker load heap changes
# def EditRule(self, tbl_id, rule_name, default_rule, match, actions, priority = None, timeout = None):
rule_name = "default"
# functionHash = int(functionHash[0:5], 16)
ip = str(worker[u'node_id']).strip()
port = int(worker[u'portExternal'])
mac = str(worker[u'mac']).strip()
default_rule = True
# autoscaling = 0
rule = makeRuleForDispatchTable(ip, port, mac, None, tableIdDispatch, rule_name, default_rule)
RTEInterface.Tables.EditRule(rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
print("\n\ndispatch table Default rule updated\n\n")
return 0
def updateRuleOfDispatchTable(worker):
# TODO 1: implement function to update the default rule when the worker load heap changes
# def EditRule(self, tbl_id, rule_name, default_rule, match, actions, priority = None, timeout = None):
functionHash = worker[u'functionHash']
rule_name = "dispatch_to_worker" + functionHash
# rule_name = 'dispatch_to_worker5a1'
functionHash = int(functionHash[9:])
ip = str(worker[u'node_id']).strip()
port = int(worker[u'portExternal'])
mac = str(worker[u'mac']).strip()
# autoscaling = len(functionToResourceMap[functionHash])
default_rule = False
rule = makeRuleForDispatchTable(ip, port, mac, functionHash, tableIdDispatch, rule_name, default_rule)
print("\n\nupdate rule : ", rule)
RTEInterface.Tables.EditRule(rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
print("\n\dispatch table rule updated\n\n")
return 0
def makeFwdTableRule(rule_name, default_rule):
rule={}
if default_rule:
actions = '{ "type" : "ingress::fwd_act", "data" : { "port" : { "value" : "p0" } } }'
match = '{ "standard_metadata.ingress_port" : { "value" : "v0.0" } }'
rule = {
"tableId": tableId2,
"rule_name": 'host_to_net',
"default_rule": default_rule,
"match": match,
"actions": actions
}
else:
actions = '{ "type" : "ingress::fwd_act", "data" : { "port" : { "value" : "p0" } } }'
match = '{ "standard_metadata.ingress_port" : { "value" : "v0.0" } }'
rule = {
"tableId": tableId2,
"rule_name": 'host_to_net',
"default_rule": default_rule,
"match": match ,
"actions": actions
}
return rule
for msg in consumer:
if msg.topic == "deployed3":
msg = msg.value.decode('utf-8')
worker = json.loads(msg)
print("received message on deployed : ", worker)
# functionHash = worker[u'functionHash']
# functionHash = int(functionHash[9:])
# if(functionHash not in functionToResourceMap):
addRuleForDispatchTable(worker)
# addRuleForWarmstartDispatchTable(worker)
# else:
# addRuleForWarmstartDispatchTable(worker)
# updateRuleOfDispatchTable(worker)
elif msg.topic == "removeWorker2":
msg = msg.value.decode('utf-8')
worker = json.loads(msg)
print("received message on removeWorker : ", worker)
deleteRuleFromDispatchTable(worker)
elif msg.topic == "COLDSTART_WORKER2":
msg = msg.value.decode('utf-8')
worker = json.loads(msg)
print("received message on COLDSTART_WORKER : ", worker)
updateDefaultRuleOfDispatchTable(worker[u'nodes'])
elif msg.topic == constants['topics']['update_function_instance_nic']:
msg = msg.value.decode('utf-8')
worker = json.loads(msg)
print("\nreceived message on UPDATE_FUNCTION_INSTANCE_NIC", worker)
updateRuleOfDispatchTable(worker)
# elif msg.topic == "AUTOSCALE":
# msg = msg.value.decode('utf-8')
# updateDefaultRule2()
# worker1={
# 'node_id': '192.168.2.3',
# 'portExternal': '8081',
# 'mac': '00:22:22:22:22:22',
# 'resource_id': 'resource_id1',
# 'functionHash': 'function_1234'
# }
# worker2={
# 'node_id': '192.168.2.3',
# 'portExternal': '8082',
# 'mac': '00:22:22:22:22:22',
# 'resource_id': 'resource_id2',
# 'functionHash': 'function_1234'
# }
# while(1):
# updateRuleOfDispatchTable(worker1)
# time.sleep(2)
# updateRuleOfDispatchTable(worker2)
# time.sleep(2)
# addRuleForDispatchTable(worker1)
# addRuleForWarmstartDispatchTable(worker1)
# worker2={
# 'node_id': '192.168.2.3',
# 'portExternal': '8082',
# 'mac': '00:22:22:22:22:22',
# 'resource_id': 'resource_id2',
# 'functionHash': 'function_1234'
# }
# # addRuleForDispatchTable(worker2)
# addRuleForWarmstartDispatchTable(worker2)
# updateRuleOfDispatchTable(worker2)
f.close()
\ No newline at end of file
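For reference, a message that would exercise the deployed3 handler above can be produced with kafka-python; the broker address comes from the code above, while the field values are placeholders mirroring the commented-out worker samples:

import json
from kafka import KafkaProducer

# Placeholder worker record; field names match what addRuleForDispatchTable reads.
producer = KafkaProducer(bootstrap_servers='10.129.2.201:9092')
worker = {
    'node_id': '192.168.2.3',         # becomes ipv4.dstAddr in dispatch_act
    'portExternal': '8081',           # becomes udp.dstPort
    'mac': '00:22:22:22:22:22',       # becomes ethernet.dstAddr
    'resource_id': 'resource_id1',
    'functionHash': 'function_1234'   # suffix after index 9 -> map_hdr.function_id (1234)
}
producer.send('deployed3', json.dumps(worker).encode('utf-8'))
producer.flush()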
...@@ -14,16 +14,20 @@
    "body-parser": "^1.19.0",
    "express": "^4.17.1",
    "express-fileupload": "^1.1.6",
    "graceful-fs": "^4.2.9",
    "heap": "^0.2.6",
    "isolated-vm": "^4.3.6",
    "jspack": "^0.0.4",
    "kafka-logger": "^7.1.0",
    "kafka-node": "^5.0.0",
    "morgan": "^1.9.1",
    "mqtt": "^4.3.6",
    "node-fetch": "^2.6.7",
    "node-usage": "0.0.3",
    "os-utils": "0.0.14",
    "pidusage": "^3.0.0",
    "prom-client": "^12.0.0",
    "redis": "^4.0.4",
    "request": "^2.88.0",
    "request-promise": "^4.2.5",
    "save": "^2.4.0",
...
let workerNodes = {}, timeline = {}
const constants = require('../constants_local.json')
const Heap = require('heap');
// const libSupport = require('./lib');
const dgram = require('dgram');
const udpProxy = dgram.createSocket('udp4');
// const logger = libSupport.logger;
const loadThreashold = 1
var workerHeap = new Heap(function(worker1, worker2) {
return (worker1.system_info.avg_load[0] - worker2.system_info.avg_load[0]);
});
let coldstart_worker = {};
let resourceMap = {} // resource_id -> resource information map
let functionToResourceMap = {} // functionID -> resource_id map
let functionToResourceHeap = {} // functionID -> function instances heap map
let functionloadMap = {} // stores the previous low-load function instance for each functionID
let kafka = require('kafka-node'),
    Producer = kafka.Producer,
    client = new kafka.KafkaClient({
...@@ -10,15 +28,25 @@ let kafka = require('kafka-node'),
    Consumer = kafka.Consumer,
    consumer = new Consumer(client,
        [
            { topic: 'heartbeat2' }, // receives heartbeat messages from workers, also acts as worker join message
            { topic: "request2" }, // receives deployment details from RM
{ topic: constants.topics.check_autoscale },
{ topic: constants.topics.autoscale },
{ topic: constants.topics.deployed },
            { topic: constants.topics.coldstart_worker }, // carries the worker that currently has the lowest load
{ topic: constants.topics.function_load},
{ topic: constants.topics.remove_worker},
{ topic: constants.topics.remove_function_intstance } // (sic: key is spelled this way in constants_local.json)
], ],
[ [
{ autoCommit: true } { autoCommit: true }
]) ])
// console.log("consumer : ", consumer)
function getAddress() { function getAddress() {
return Object.keys(workerNodes)[0]; return workerHeap.peek().address;
// return Object.keys(workerNodes)[0];
} }
// payloads = [ // payloads = [
...@@ -30,28 +58,256 @@ function getAddress() { ...@@ -30,28 +58,256 @@ function getAddress() {
// }); // });
// }); // });
function updateColdstartOnNIC() {
let lowloadworker = workerHeap.peek();
if(lowloadworker.address != coldstart_worker.address)
{
console.log("min load worker changed !!!");
coldstart_worker = lowloadworker;
let payload = [{
topic: constants.topics.coldstart_worker ,
messages: JSON.stringify({
"resource_id": coldstart_worker.resource_id,
"timestamp": Date.now(),
"nodes":
{
node_id: coldstart_worker.address,
portExternal: coldstart_worker.portExternal,
mac: coldstart_worker.mac
}
}),
partition: 0
}]
producer.send(payload, () => {
console.log(`\ncoldstart worker update published for the NIC monitor`)
})
}
}
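// The NIC monitor uses this worker as the default target for requests whose
// function has no running instance yet (coldstart), so the dispatcher keeps
// it pointed at the current least-loaded worker. (Inferred from the topic
// name and the message fields above.)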
function packPacketFromDictionary(dataPacket) {
let message = new Array(1024)
let base = 0, chain_id, exec_id, function_id, data, function_count, f0, f1, f2, f3, f4
chain_id = struct.PackTo(">I", message, base, [dataPacket.chain_id])
base += 4
exec_id = struct.PackTo(">I", message, base, [dataPacket.exec_id])
base += 4
function_id = struct.PackTo(">I", message, base, [dataPacket.function_id])
base += 4
data = struct.PackTo(">I", message, base, [dataPacket.data])
base += 4
function_count = struct.PackTo("B", message, base, [dataPacket.function_count])
base += 1
// autoscale = struct.PackTo("B", message, base, [dataPacket.autostart])
// base += 1
// fno = struct.PackTo("B", message, base, [dataPacket.fno])
f0 = struct.PackTo("B", message, base, [0])
base += 1
f1 = struct.PackTo("B", message, base, [12])
base += 1
f2 = struct.PackTo("B", message, base, [0])
base += 1
f3 = struct.PackTo("B", message, base, [34])
base += 1
f4 = struct.PackTo("B", message, base, [0])
base += 1
message = Buffer.from(message)
return message
}
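// A minimal decode sketch mirroring the layout packed above (a hypothetical
// helper, assuming jspack is exposed as `struct` via the require near the
// top; handy when debugging the UDP path):
function unpackPacketToDictionary(buf) {
    let base = 0
    const [chain_id] = struct.Unpack(">I", buf, base); base += 4
    const [exec_id] = struct.Unpack(">I", buf, base); base += 4
    const [function_id] = struct.Unpack(">I", buf, base); base += 4
    const [data] = struct.Unpack(">I", buf, base); base += 4
    const [function_count] = struct.Unpack("B", buf, base); base += 1
    // f0..f4 follow as five single bytes, matching the chain layout above
    const echain = struct.Unpack("BBBBB", buf, base)
    return { chain_id, exec_id, function_id, data, function_count, echain }
}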
function compare(a, b) {
return a.open_request_count - b.open_request_count
}
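// compare() orders instances by open_request_count, so
// functionToResourceHeap[id].peek() always yields the least-loaded instance.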
function autoscale_function(function_instance_info)
{
//TODO : Autoscale function from host DM
// i.e., send request DD from kafka to start new function
// update entry of new instance in the f2r map and heap
console.log("function instance info from autoscaling : ", function_instance_info)
let node_id = getAddress() // place the new instance on the least-loaded worker
let payload = [{
topic: node_id,
messages: JSON.stringify({
"type": "execute", // Request sent to Dispatch Daemon via Kafka for actual deployment at the Worker
resource_id: function_instance_info['resource_id'],
runtime: function_instance_info['function_runtime'],
functionHash: function_instance_info['functionHash'],
port: null,
resources: {
memory: 0
}
}),
partition: 0
}]
// logger.info(resourceMap);
producer.send(payload, () => {
console.log(`Autoscaling: resource deployment request sent to Dispatch Agent`)
})
}
function downscaling_function(function_instance_info, resource_id)
{
console.log("downscaling function called !!!\n", function_instance_info)
let id = function_instance_info.functionHash + function_instance_info.function_runtime
console.log("function closed, removing entry. ", function_instance_info.functionHash, resource_id)
console.log("inside downscaling function, f2rmap entry for id :", id, functionToResourceMap[id])
functionToResourceMap[id][resource_id]['open_request_count'] = Number.MAX_SAFE_INTEGER
functionToResourceHeap[id].updateItem(functionToResourceMap[id][resource_id])
functionToResourceMap[id]['function_instance_count']--
console.log("function map : ", functionToResourceMap[id])
// functionToResourceHeap[id]
}
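// Note: downscaling does not pop the instance from the heap directly; it
// marks it with Number.MAX_SAFE_INTEGER and lets updateItem() sink it to the
// bottom (lazy deletion). The standalone heap experiment at the end of this
// commit walks through exactly this pattern.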
function updateNicRules()
{
// traverse the function maps and, if the least-loaded instance of any
// function has changed, push the new target to the SmartNIC monitor
let ids = Object.keys(functionToResourceMap)
console.log("\nupdating function rules on NIC \n\n")
// console.log("\nfunction to REsource MAP : ",functionToResourceMap)
// console.log("\nfunction to REsource HEAP : ",JSON.stringify(functionToResourceHeap))
ids.forEach(id => {
let fmin = functionToResourceHeap[id].peek()
// console.log("min finst : ", fmin)
// console.log("loadmap : ", functionloadMap)
if(fmin['resource_id'] != functionloadMap[id]['resource_id'])
{
let payload = [{
topic: constants.topics.update_function_instance_nic,
messages: JSON.stringify({
"resource_id": fmin.resource_id,
"functionHash": functionToResourceMap[id][fmin.resource_id]['functionHash'],
"portExternal": functionToResourceMap[id][fmin.resource_id]['portExternal'],
"mac": functionToResourceMap[id][fmin.resource_id]['mac'],
// "timestamp": Date.now(),
"node_id": fmin.node_id
}),
partition: 0
}]
producer.send(payload, () => {
console.log(`\nsent update to smartNIC monitor\n`)
})
functionloadMap[id]= {
"node_id": fmin.node_id,
"resource_id": fmin.resource_id
// "load": 0,
// "open_request_count": 0
}
}
else{
console.log("\nfunction instance with low load is not changed\n")
}
})
}
/**
 * Checks whether any function instance has died, i.e. stopped sending
 * heartbeats within constants.heartbeat_threshold.
 **/
function check_function_heartbeat()
{
console.log(" checking function heartbeat !!! function to resource map : ", functionToResourceMap)
let ctime = Number(Date.now())
Object.keys(functionToResourceMap).forEach( (fid) => {
Object.keys(functionToResourceMap[fid]).forEach((rid) => {
if(rid != "function_instance_count")
{
let data = functionToResourceMap[fid][rid]
// console.log("INFORMATION : ", functionToResourceMap,functionToResourceMap[fid], data , data.timestamp)
console.log("\n interval between heartbeat for function ", fid, " with resource id : ",rid, " is : ", ctime - data.timestamp)
if(ctime - data.timestamp > constants.heartbeat_threshold)
{
// console.log("function ", fid," instance with rid ", rid, "is down", " data : ", data)
//function didn't send a heartbeat in time, so broadcast removal of its entry
let message = JSON.stringify({
functionHash: data.functionHash, portExternal: data.portExternal,
runtime: data.function_runtime, resource_id: rid, entity_id: 0,
total_request: 0, wait_time: 0
})
// console.log("\nmessage to remove function entry from all components : ", message)
console.log("\nmessage to remove function entry from all components ")
// console.log("Not received heartbeat for too long. Exiting");
if (producer)
producer.send(
[
{
topic: constants.topics.remove_function_intstance,
messages: message
}
], (err, res) => { // use `res` here so the outer `data` (function info) isn't shadowed
if (err)
console.log(err);
console.log("Removing entry for function", data.functionHash, "resource_id", data.resource_id);
})
}
}
})
})
}
consumer.on('message', function (message) { consumer.on('message', function (message) {
let topic = message.topic let topic = message.topic
message = message.value message = message.value
// console.log("message ",message) // console.log("message ",message)
// console.log("function map : ", functionToResourceMap)
// console.log("function heap : ", functionToResourceHeap)
if (topic !== "heartbeat") // if (topic !== "heartbeat2")
console.log(message); // console.log("not a heartbeat message : ", message);
if (topic === "heartbeat") { if (topic === "heartbeat2") {
message = JSON.parse(message) message = JSON.parse(message)
if (Date.now() - message.timestamp < 1000) if (Date.now() - message.timestamp < 1000)
{
if (!workerNodes[message.address]) { if (!workerNodes[message.address]) {
workerNodes[message.address] = message workerNodes[message.address] = message
console.log("New worker discovered. Worker List: ") console.log("New worker discovered. Worker List: ")
console.log(workerNodes); console.log(workerNodes);
workerHeap.push(workerNodes[message.address])
if(Object.keys(workerNodes).length === 1)
{
updateColdstartOnNIC();
}
}
else{
// console.log("Got heartbeat updating load of wroker ", message.address)
if(Math.abs(workerNodes[message.address].system_info.avg_load[0] - message.system_info.avg_load[0]) > loadThreashold )
{
workerNodes[message.address].system_info = message.system_info
workerNodes[message.address].timestamp = message.timestamp
console.log("updated wroker load : ", workerNodes[message.address])
workerHeap.updateItem(workerNodes[message.address])
updateColdstartOnNIC();
}
else
{
// console.log("Change in worker load is less than threshold")
}
} }
} else if (topic === "request") { // console.log("\nheap : ",workerHeap)
}
} else if (topic === "request2") {
message = JSON.parse(message) message = JSON.parse(message)
console.log(message); console.log(message);
let payload = [{ let payload = [{
topic: "RESPONSE_RM_2_DM_DUMMY", topic: "RESPONSE_RM_2_DM_DUMMY2",
messages: JSON.stringify({ messages: JSON.stringify({
"resource_id": message.resource_id, "resource_id": message.resource_id,
"timestamp": Date.now(), "timestamp": Date.now(),
...@@ -63,5 +319,235 @@ consumer.on('message', function (message) { ...@@ -63,5 +319,235 @@ consumer.on('message', function (message) {
producer.send(payload, () => { producer.send(payload, () => {
console.log(`Replied`) console.log(`Replied`)
}) })
} else if (topic === constants.topics.check_autoscale) {
message = JSON.parse(message)
console.log("request to check autoscale : ", message);
} else if ( topic == constants.topics.autoscale) {
message = JSON.parse(message)
console.log("request to scale : ", message);
let request_id = Math.floor(Math.random() * 1000)
// req.body.request_id = request_id
// res.request_id = request_id
// requestFlightQueue.set(request_id, res)
// let payload = "HELLO"
// payload.request_id = request_id
// let data = payload.data
let fid = message.functionHash.slice(9)
let data = 0 // hypothetical placeholder payload; the commented flow below originally carried the request's data
// res.data_set_time = Date.now()
let packet = packPacketFromDictionary({
chain_id: 0,
exec_id: request_id,
function_id: fid,
data,
function_count: 1,
// autostart: 1,
// fno : 255
})
// res.pack_time = Date.now()
// udpProxy.send(packet, 0, packet.length, "8000", "192.168.2.2", function (err, bytes) {
// logger.info(`forwarded request via UDP, IP 192.168.2.2 Port 8000`)
// res.send_time = Date.now()
// })
// udpProxy.send(packet, 0, packet.length, resource.port, resource.node_id, function (err, bytes) {
// logger.info(`forwarded request via UDP, IP 192.168.2.5 Port ${resource.port}`)
// res.send_time = Date.now()
// })
} else if (topic == constants.topics.deployed) {
// a new function instance was deployed successfully; register it here
message = JSON.parse(message)
// if a map already exists for this function ID, insert the instance into
// its heap; otherwise create a fresh map, load entry and heap
console.log("message deployed : ", message)
let id = message.functionHash + message.runtime
// if (resourceMap.has(message.resource_id)) {
// let resource = resourceMap.get(message.resource_id)
// resource.node_id = message.node_id.trim()
// }
let resource_id = message.resource_id
resourceMap[message.functionHash + resource_id]= message
if (functionToResourceMap[id]) {
console.log("\nfunction ID already has a function heap so inserting into heap!!!\n")
// let functionResources = functionToResourceMap[id]
// let functionResourcesHeap = functionToResourceHeap[id]
// functionResources.push({
// resource_id: message.resource_id
// })
// functionToResourceMap[id]={}
let ctime = Date.now()
console.log("\n\nctime : ", ctime)
functionToResourceMap[id][resource_id] = {
"functionHash": message.functionHash,
"resource_id": resource_id,
"function_runtime": message.runtime,
"node_id": message.node_id,
"portExternal": message.portExternal,
"mac": message.mac,
"open_request_count": 0,
"load": 0 ,
"timestamp" : Number(ctime)
}
functionToResourceMap[id].function_instance_count++
functionToResourceHeap[id].push(functionToResourceMap[id][resource_id])
// heap.push(functionResources, {
// resource_id: message.resource_id,
// open_request_count: 0,
// cpu_utilization: 0
// }, libSupport.compare_uti)
console.log("Horizontally scaling up: " +JSON.stringify(functionToResourceHeap[id]));
console.log("function to resource MAP : ", functionToResourceMap)
} else {
console.log("\nthere is no function heap for function ID so creating one\n")
// resourceMap[message.functionHash + message.resource_id] = {
// "resource_id" : message.resource_id,
// "function_runtime": message.runtime,
// "node_id": message.node_id
// }
functionToResourceMap[id] = {
function_instance_count: 1
}
// functionToResourceMap[id][function_instance_count]=1
let ctime = Date.now()
console.log("\n\nctime : ", ctime)
functionToResourceMap[id][resource_id] = {
"functionHash": message.functionHash,
"resource_id": resource_id,
"function_runtime": message.runtime,
"node_id": message.node_id,
"portExternal": message.portExternal,
"mac": message.mac,
"open_request_count": 0,
"load": 0 ,
"timestamp": Number(ctime)
}
functionloadMap[id]= {
"functionHash": message.functionHash,
"node_id": message.node_id,
"resource_id": resource_id,
"load": 0,
"open_request_count": 0,
"high_load_count": 0,
"low_load_count": 0
}
functionToResourceHeap[id] = new Heap(compare)
functionToResourceHeap[id].push(functionToResourceMap[id][resource_id])
// functionToResourceMap.set(id, resourceHeap)
// functionResources = heap(compare)
// functionToResourceMap[id]= functionResources
// console.log("Creating new resource pool"+ JSON.stringify(functionToResourceHeap[id]));
console.log("Creating new resource pool"+ functionToResourceHeap[id]);
console.log("function to resource MAP : ", functionToResourceMap)
console.log('function instance count : ',functionToResourceMap[id]['function_instance_count'])
}
}
else if(topic == constants.topics.function_load)
{
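// Hysteresis: scale up only after `high_load_count` consecutive heartbeats
// above high_open_request_threshold; scale down only after `low_load_count`
// consecutive heartbeats below low_open_request_threshold while more than
// one instance is running (thresholds from constants.autoscalar_metrics).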
message = JSON.parse(message)
let resource_id = message.resource_id
let id = message.functionHash + message.runtime
if(functionToResourceHeap[id])
{
// console.log("functionToResourceHeap id is present updating heap")
functionToResourceMap[id][resource_id]["timestamp"] = Number(Date.now())
functionToResourceMap[id][resource_id]['open_request_count'] = message.open_request_count
functionToResourceHeap[id].updateItem(functionToResourceMap[id][resource_id])
if(functionToResourceMap[id][resource_id]['open_request_count'] >= constants.autoscalar_metrics.high_open_request_threshold){
// autoscale_function(functionToResourceMap[id][message.resource_id])
// console.log
functionloadMap[id]['high_load_count']++
if(functionloadMap[id]['high_load_count'] >= constants.autoscalar_metrics.high_load_count)
{
console.log("\n\nfunction load increases. UP scale the function\n\n")
//autoscale function : deploy new function instance for this function ID
functionloadMap[id]['high_load_count']=0
autoscale_function(functionToResourceMap[id][resource_id])
}
}
else{
functionloadMap[id]['high_load_count']=0
}
if(functionToResourceMap[id][resource_id]['open_request_count'] <= constants.autoscalar_metrics.low_open_request_threshold
&& functionToResourceMap[id].function_instance_count>1){
functionloadMap[id]['low_load_count']++
if(functionloadMap[id]['low_load_count'] >= constants.autoscalar_metrics.low_load_count)
{
//downscale function: delete any function instance for this function ID if number of function instance > 1
console.log("\n\nfunction load decreases. DOWN scale the function\n\n")
functionloadMap[id]['low_load_count']=0
downscaling_function(functionToResourceMap[id][resource_id], resource_id)
}
}else{
functionloadMap[id]['low_load_count']=0
}
}
else{
console.log("functionToResourceHeap id is NOT present NOT updating heap")
}
// console.log("\n\nupdated function heap ", functionToResourceHeap)
}
else if(topic == constants.topics.remove_function_intstance)
{
message = JSON.parse(message)
console.log("remove function instance called !!!\n", message)
let id = message.functionHash + message.runtime
console.log("function closed, removing entry. ", message.functionHash, message.resource_id)
if(functionToResourceMap[id])
{
functionToResourceMap[id][message.resource_id]['open_request_count'] = Number.MAX_SAFE_INTEGER
functionToResourceHeap[id].updateItem(functionToResourceMap[id][message.resource_id])
functionToResourceMap[id]['function_instance_count']--
if(functionToResourceHeap[id].peek()['open_request_count']==Number.MAX_SAFE_INTEGER )
{
console.log("all function instance of this functionid are stopped. so removing entry from map and heap")
delete functionToResourceHeap[id]
delete functionToResourceMap[id]
let rmv_worker_message = JSON.stringify(message)
producer.send(
[
{topic: "removeWorker2", messages: rmv_worker_message }
], (err, data) => {
if (err)
console.log(err);
console.log("Ending worker for function", message.functionHash, "resource_id", message.resource_id);
})
}
}else{
console.log("function map for that id is not present!!")
}
console.log("rfi function map : ", functionToResourceMap)
// functionToResourceHeap[id]
} }
// else if(topic == constants.topics.remove_worker)
// {
// message = JSON.parse(message)
// console.log("remove function called !!!\n", message)
// id = message.functionHash + message.runtime
// console.log("function closed, removeing entry. ", message.functionHash, message.resource_id)
// functionToResourceMap[id][message.resource_id]['open_request_count'] = Number.MAX_SAFE_INTEGER
// functionToResourceHeap[id].updateItem(functionToResourceMap[id][message.resource_id])
// functionToResourceMap[id]['function_instance_count']--
// if(functionToResourceHeap[id].peek()['open_request_count']==Number.MAX_SAFE_INTEGER )
// {
// console.log("all function instance of this functionid are stopped. so removing entry from map and heap")
// delete functionToResourceHeap[id]
// delete functionToResourceMap[id]
// }
// console.log("function map : ", functionToResourceMap[id])
// // functionToResourceHeap[id]
// }
}) })
setInterval(updateNicRules, 5000)
setInterval(check_function_heartbeat, 5000)
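// A synthetic heartbeat sketch for local testing (assumptions: the field set
// below matches what the heartbeat2 handler above reads; the address, port
// and mac are hypothetical values echoing the commented worker dicts in this
// commit; enable the call by hand only):
function sendSyntheticHeartbeat() {
    let payload = [{
        topic: 'heartbeat2',
        partition: 0,
        messages: JSON.stringify({
            address: '192.168.2.3',          // hypothetical worker address
            timestamp: Date.now(),
            resource_id: 'resource_id1',
            portExternal: '8081',
            mac: '00:22:22:22:22:22',
            system_info: { avg_load: [0.1, 0.1, 0.1] } // index 0 is what the heap compares
        })
    }]
    producer.send(payload, () => console.log('synthetic heartbeat sent'))
}
// sendSyntheticHeartbeat()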
let workerNodes = {}, timeline = {}
const constants = require('../constants_local.json')
const Heap = require('heap');
let kafka = require('kafka-node'),
Producer = kafka.Producer,
client = new kafka.KafkaClient({
kafkaHost: constants.network.external.kafka_host,
autoConnect: true
}),
producer = new Producer(client),
Consumer = kafka.Consumer,
consumer = new Consumer(client,
[
{ topic: 'heartbeat2' } // receives heartbeat messages from workers, also acts as worker join message
],
[
{ autoCommit: true }
])
client.on('error', function(error) {
console.error(error);
});
consumer.on('message', function (message) {
let topic = message.topic
message = message.value
console.log("message ",message)
})
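// (Presumably a stripped-down debugging consumer: it subscribes only to
// heartbeat2 and logs raw messages, with no dispatch or autoscaling logic.)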
import socket
import struct
import time
import threading
import random
import sys
import argparse
packet_cnt=0
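# Usage sketch: `python2 <this file> <port>` -- the only CLI argument read
# below is sys.argv[1], the UDP port to bind on.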
def receive():
    global packet_cnt
CLIENT_IP = "0.0.0.0"
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = int(sys.argv[1])
s.bind((CLIENT_IP, port))
print "listening to {} at port {}".format(CLIENT_IP, port)
run_status = {}
while True:
packet, addr = s.recvfrom(1024)
# print packet
packet_cnt+=1
# base = 0
# chain_id = struct.unpack(">I", packet[base:base + 4])[0]
# base += 4
# exec_id = struct.unpack(">I", packet[base:base + 4])[0]
# base += 4
# function_id = struct.unpack(">I", packet[base:base + 4])[0]
# base += 4
# data = struct.unpack(">I", packet[base:base + 4])[0]
# base += 4
# function_count = struct.unpack("B", packet[base])[0]
# # base += 1
# # autoscaling = struct.unpack("B", packet[base])[0]
# # base += 1
# # fno = struct.unpack("B", packet[base])[0]
# base += 1
# f0 = struct.unpack("B", packet[base])[0]
# base += 1
# f1 = struct.unpack("B", packet[base])[0]
# base += 1
# f2 = struct.unpack("B", packet[base])[0]
# base += 1
# f3 = struct.unpack("B", packet[base])[0]
# base += 1
# f4 = struct.unpack("B", packet[base])[0]
# rdata = {"chain_id" :chain_id, "exec_id":exec_id, "data":data, "function_id":function_id, "function_count":function_count, "ecahin":[f0,f1, f2, f3, f4]}
# rdata = {"chain_id" :chain_id, "exec_id":exec_id, "data":data, "function_id":function_id, "function_count":function_count, "ecahin":[f0,f1, f2, f3, f4], "autoscaling":autoscaling, "fno":fno}
print("packet count : ", packet_cnt)
# print rdata
# print(packet_cnt)
# print "rec", chain_id, exec_id, data, function_id, function_count, f0,f1, f2, f3, f4
receive()
\ No newline at end of file
...@@ -17,3 +17,73 @@ ...@@ -17,3 +17,73 @@
35000,25.138213139734326,822,5969237,24.1701602935791,30.621051788330078,39.08381462097173 35000,25.138213139734326,822,5969237,24.1701602935791,30.621051788330078,39.08381462097173
40000,29.38630774106816,678,6683877,24.225950241088867,58.105635643005286,104.85795974731427 40000,29.38630774106816,678,6683877,24.225950241088867,58.105635643005286,104.85795974731427
4000,0.17381369331601093,3800,1140189,0.17499923706054688,0.32401084899902344,0.6968975067138672 4000,0.17381369331601093,3800,1140189,0.17499923706054688,0.32401084899902344,0.6968975067138672
,-1642101543770234.5,2,10,-1642101543770234.5,-1642101543770189.0,-1642101543770185.0
,-1642102887380229.5,8,10,-1642102887380225.8,-1642102887379994.8,-1642102887379967.2
,-1642103799768331.5,10,100,-1642103799768290.0,-1642103799767948.5,-1642103799767912.8
,-1642104072570253.8,9,100,-1642104072569945.0,-1642104072569566.0,-1642104072569533.5
,-1642135707705920.2,1,100,-1642135707705881.0,-1642135707705736.5,-1642135707705716.2
,-1642136335077307.8,0,10,-1642136335077362.2,-1642136335077075.8,-1642136335077043.2
,-1642136552107645.2,0,10,-1642136552107616.5,-1642136552107434.8,-1642136552107416.8
,-1642138981970252.2,0,10,-1642138981970292.2,-1642138981970014.2,-1642138981969992.2
,-1642139251495695.5,0,10,-1642139251495702.2,-1642139251495614.5,-1642139251495604.2
,-1642139930932117.8,0,10,-1642139930939564.2,-1642139930868838.0,-1642139930865180.0
,-1642142803621418.5,0,100,-1642142803621372.8,-1642142803621007.5,-1642142803620958.0
,-1642144416729338.2,0,10,-1642144416729343.5,-1642144416729111.0,-1642144416729088.8
,-1642163040078553.5,0,10,-1642163040078622.5,-1642163040078189.8,-1642163040078167.0
,-1642163273809842.8,0,10,-1642163273809840.5,-1642163273809598.0,-1642163273809575.0
,-1643980780608282.0,0,10,-1643980780608322.8,-1643980780608012.5,-1643980780607980.5
,-1644154491732700.5,0,10,-1644154491732672.0,-1644154491732459.2,-1644154491732435.5
,-1644430555024730.2,0,1,-1644430555024730.2,-1644430555024730.2,-1644430555024730.2
,-1644559608047252.2,0,5,-1644559608047257.0,-1644559608047113.0,-1644559608047098.2
,-1644561424025254.8,0,10,-1644561424025232.2,-1644561424025011.8,-1644561424024987.5
,-1644898498720284.2,0,100,-1644898498720300.0,-1644898498720129.0,-1644898498720114.5
,-1644899102961584.2,0,10,-1644899102961567.0,-1644899102961358.5,-1644899102961334.8
,-1644899350723622.2,0,10,-1644899350723562.8,-1644899350723390.0,-1644899350723372.2
,-1644899812188437.8,0,10,-1644899812188417.8,-1644899812188180.8,-1644899812188157.8
,-1644903879231480.0,0,10,-1644903879231481.8,-1644903879231186.2,-1644903879231163.5
,-1644904662119533.2,0,10,-1644904662119466.5,-1644904662119282.0,-1644904662119261.0
,-1644905250419292.0,0,10,-1644905250419236.8,-1644905250419020.5,-1644905250418995.8
,-1644906004052345.8,0,10,-1644906004052319.2,-1644906004052221.0,-1644906004052211.0
,-1644987458120370.8,0,3,-1644987458120377.5,-1644987458120292.2,-1644987458120284.5
,-1644987937521555.0,0,3,-1644987937521564.8,-1644987937521475.0,-1644987937521466.8
,-1644988137466419.5,0,1,-1644988137466419.5,-1644988137466419.5,-1644988137466419.5
,-1644988804294549.5,0,1,-1644988804294549.5,-1644988804294549.5,-1644988804294549.5
,-1644989786114412.2,0,20,-1644989786114380.2,-1644989786114091.0,-1644989786114064.0
,-1644992046742451.8,0,10,-1644992046742450.0,-1644992046742136.2,-1644992046742109.0
,-1645073657244395.0,19,10000,-1645073657402362.0,-1645073656651514.5,-1645073656626720.0
,-1645073969506916.0,0,1,-1645073969506916.0,-1645073969506916.0,-1645073969506916.0
,-1645099795556374.0,0,10,-1645099795556380.0,-1645099795556349.5,-1645099795556343.0
,-1645106804008719.2,0,100,-1645106804008794.8,-1645106804008454.5,-1645106804008442.5
10,11445.918100110946,5,300,11252.794981002808,13202.354073524475,13414.231414794922
,-1645156965640993.0,0,10,-1645156965641000.0,-1645156965640963.5,-1645156965640957.8
,-1645501733453631.5,0,10,-1645501733453637.5,-1645501733453603.2,-1645501733453594.0
,-1645502549035909.2,0,10,-1645502549035909.5,-1645502549035852.2,-1645502549035843.2
,-1645684068466992.5,0,1,-1645684068466992.5,-1645684068466992.5,-1645684068466992.5
,-1645696655353877.0,0,10,-1645696655353880.0,-1645696655353863.2,-1645696655353860.0
,-1645698444206839.2,0,10,-1645698444206850.8,-1645698444206781.2,-1645698444206764.5
,-1645724775477832.5,0,10,-1645724775477838.0,-1645724775477803.2,-1645724775477793.0
,-1645726560905916.2,0,10,-1645726560905927.8,-1645726560905869.8,-1645726560905861.5
,-1645728683841076.2,0,10,-1645728683841079.5,-1645728683841040.5,-1645728683841028.8
,-1645729119773315.0,0,10,-1645729119773321.0,-1645729119773286.8,-1645729119773279.8
,-1645732740016038.2,0,10,-1645732740016038.8,-1645732740016000.5,-1645732740015995.5
,-1645734864964121.0,0,10,-1645734864964126.5,-1645734864964087.8,-1645734864964077.5
,-1645763693654507.8,0,10,-1645763693654514.8,-1645763693654477.5,-1645763693654469.0
,-1645769455194671.8,0,10,-1645769455194673.2,-1645769455194646.0,-1645769455194637.0
,-1645778312738368.2,0,10,-1645778312738378.5,-1645778312738328.2,-1645778312738315.5
,-1645778631271788.0,0,10,-1645778631271799.0,-1645778631271753.5,-1645778631271746.8
,-1645779006597948.0,0,10,-1645779006597953.5,-1645779006597917.0,-1645779006597910.0
,-1645779968981130.2,0,10,-1645779968981134.2,-1645779968981089.5,-1645779968981080.8
,-1645794108211458.8,0,1,-1645794108211458.8,-1645794108211458.8,-1645794108211458.8
,-1645794870034092.8,0,1,-1645794870034092.8,-1645794870034092.8,-1645794870034092.8
,-1645795696235758.0,0,10,-1645795696235760.0,-1645795696235712.0,-1645795696235701.8
,-1645797319166556.8,0,10,-1645797319166552.5,-1645797319166528.0,-1645797319166526.2
30,13284.716145589637,17,900,13141.422986984253,16356.134271621704,16847.612257003784
,-1646316511405169.0,5,10000,-1646316511445304.5,-1646316510826335.2,-1646316510802970.5
,-1646557758059418.2,0,3,-1646557758059430.8,-1646557758059316.2,-1646557758059306.2
,-1646558064938276.0,0,1,-1646558064938276.0,-1646558064938276.0,-1646558064938276.0
,-1646585748089669.8,0,2,-1646585748089669.8,-1646585748089647.5,-1646585748089645.5
,-1646655908233831.0,0,10,-1646655908233894.0,-1646655908233604.5,-1646655908233588.8
,-1646657186206309.2,0,10,-1646657186206309.2,-1646657186206309.2,-1646657186206309.2
,-1646727394628985.2,0,3,-1646727394628985.2,-1646727394628985.2,-1646727394628985.2
,-1646727853616584.5,0,3,-1646727853616584.5,-1646727853616526.5,-1646727853616521.2
,-1646746577144508.5,0,3,-1646746577144508.5,-1646746577144462.5,-1646746577144458.5
const Heap = require('heap');
function compare(obj1, obj2)
{
return obj1["value"] - obj2["value"]
}
let f2rmap = {}
f2rmap["fid1"]= {
"rid1":{
"id": 1,
"value": 1
},
"rid2": {
"id": 2,
"value": 0
},
"rid3": {
"id": 3,
"value": 3
},
"rid4": {
"id": 4,
"value": 4
}
}
console.log("function map : ",f2rmap)
let f2rheap = {}
// f2rheap["fid1"]={}
f2rheap['fid1'] = new Heap(compare)
f2rheap['fid1'].push(f2rmap['fid1']['rid1'])
f2rheap['fid1'].push(f2rmap['fid1']['rid2'])
f2rheap['fid1'].push(f2rmap['fid1']['rid3'])
f2rheap['fid1'].push(f2rmap['fid1']['rid4'])
console.log("function heap : ", f2rheap)
let f2rharr = f2rheap['fid1'].toArray()
console.log(f2rharr)
// for(i=0; i<f2rheap["fid1"].size(); i++)
// {
// console.log("i",i,": ",f2rheap['fid1'].peek())
// f2rheap["fid1"].pop()
// }
f2rmap['fid1']['rid1']['value']=6
f2rheap['fid1'].updateItem(f2rmap['fid1']['rid1'])
f2rharr=f2rheap['fid1'].toArray()
console.log(f2rharr)
f2rmap['fid1']['rid2']['value']=5
f2rheap['fid1'].updateItem(f2rmap['fid1']['rid2'])
f2rharr=f2rheap['fid1'].toArray()
console.log(f2rharr)
console.log("MAP : ", f2rmap)
// delete f2rmap["fid1"]['rid1']
f2rmap['fid1']['rid1']['value'] = Number.MAX_SAFE_INTEGER;
f2rheap['fid1'].updateItem(f2rmap['fid1']['rid1'])
console.log("MAP : ", f2rmap)
f2rharr=f2rheap['fid1'].toArray()
console.log(f2rharr)
f2rmap['fid1']['rid2']['value'] = Number.MAX_SAFE_INTEGER;
f2rheap['fid1'].updateItem(f2rmap['fid1']['rid2'])
console.log("MAP : ", f2rmap)
f2rharr=f2rheap['fid1'].toArray()
console.log(f2rharr)
f2rmap['fid1']['rid3']['value'] = Number.MAX_SAFE_INTEGER;
f2rheap['fid1'].updateItem(f2rmap['fid1']['rid3'])
console.log("MAP : ", f2rmap)
f2rharr=f2rheap['fid1'].toArray()
console.log(f2rharr)
f2rmap['fid1']['rid4']['value'] = Number.MAX_SAFE_INTEGER;
f2rheap['fid1'].updateItem(f2rmap['fid1']['rid4'])
console.log("MAP : ", f2rmap)
f2rharr=f2rheap['fid1'].toArray()
console.log(f2rharr)
if(f2rheap['fid1'].peek()['value'] === Number.MAX_SAFE_INTEGER)
{
console.log("function map is empty, deleting element")
delete f2rheap['fid1']
delete f2rmap['fid1']
}
console.log("MAP : ", f2rmap)
console.log("HEAP : ", f2rheap)
\ No newline at end of file
for(let i=0; i<100; i++) // for(let i=0; i<100; i++)
{ // {
console.log("running test1 js") console.log("running test1 js")
} // }
\ No newline at end of file \ No newline at end of file