Commit 6e2e656a authored by Mahendra Patel's avatar Mahendra Patel

added coldstart on NIC, autoscaling and loadbalancing

parent 0212678f
echo $1 echo $1
# python2 send.py --client-port 8000 --closed 1 --offload 0 --req-count 50 --send-data 10 --fid $1 # python2 send.py --client-port 8000 --closed 1 --offload 0 --req-count 50 --send-data 10 --fid $1
# sudo ip netns exec ns_server python benchmark_dispatcher2.py --fid 369020 --c 1 --t 1 --n 2 # sudo ip netns exec ns_server python benchmark_dispatcher2.py --fid 369020 --c 1 --t 1 --n 2
# sudo ip netns exec ns_server python benchmark_dispatcher2.py --fid $1 --c 1 --rps 2 --req_count 10
sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $1 --c 20 --t 300 --rps $2
\ No newline at end of file #! /bin/bash -ex
# Flags selecting which benchmark mode to run:
#   -r  fixed requests-per-second mode (usage: script -r <fid> <rps>)
#   -n  fixed request-count mode       (usage: script -n <fid> <count>)
rps_flag=0
n_flag=0
while getopts 'rn' flag; do
case "${flag}" in
r) rps_flag=1 ;;
n) n_flag=1 ;;
esac
done
# $1 is the mode flag, $2 the function id, $3 the rps or request count
echo $1, $2, $3
if [[ $rps_flag -eq 1 ]]
then
# rps mode: 50 concurrent senders for 30 s at the requested rate,
# run inside the ns_server network namespace
sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $2 --c 50 --t 30 --rps $3
fi
if [[ $n_flag -eq 1 ]]
then
# count mode: 50 concurrent senders, fixed number of requests ($3)
sudo ip netns exec ns_server python benchmark_dispatcher.py --fid $2 --c 50 --t 100 --n $3
fi
...@@ -22,7 +22,6 @@ group.add_argument('--rps', help='Requests per second', ...@@ -22,7 +22,6 @@ group.add_argument('--rps', help='Requests per second',
group.add_argument('--n', help='Number of requests to send', group.add_argument('--n', help='Number of requests to send',
type=int, action="store") type=int, action="store")
args = parser.parse_args() args = parser.parse_args()
PORT = 8000 PORT = 8000
...@@ -37,7 +36,6 @@ packet_holder = [[] for i in range(12)] ...@@ -37,7 +36,6 @@ packet_holder = [[] for i in range(12)]
ingress_time = {} ingress_time = {}
stop_thread = False stop_thread = False
def receive(i): def receive(i):
global stop_thread, packet_holder global stop_thread, packet_holder
CLIENT_IP = "0.0.0.0" CLIENT_IP = "0.0.0.0"
...@@ -59,7 +57,6 @@ def receive(i): ...@@ -59,7 +57,6 @@ def receive(i):
packet_holder[i].append((packet, time.time() )) packet_holder[i].append((packet, time.time() ))
# print "r", "{0:f}".format((time.time() * 1000)), "{0:f}".format(ingress_time[exec_id]) # print "r", "{0:f}".format((time.time() * 1000)), "{0:f}".format(ingress_time[exec_id])
def genPacket(): def genPacket():
global fid global fid
packet = None packet = None
...@@ -70,11 +67,14 @@ def genPacket(): ...@@ -70,11 +67,14 @@ def genPacket():
f0 = 0; f1 = 1; f2 = 2; f3 = 0; f4 = 0 f0 = 0; f1 = 1; f2 = 2; f3 = 0; f4 = 0
# print chain_id, exec_id, "function_id", function_id, function_count, \ # print chain_id, exec_id, "function_id", function_id, function_count, \
# f0, f1, f2, f3, f4, # f0, f1, f2, f3, f4,
dataInt =1
autoscaling = 1; fno = 255
print(chain_id , exec_id , function_id , dataInt , function_count , autoscaling , fno)
chain_id = struct.pack(">I", chain_id) # chain id chain_id = struct.pack(">I", chain_id) # chain id
exec_id_packed = struct.pack(">I", exec_id) # execution id exec_id_packed = struct.pack(">I", exec_id) # execution id
dataInt =1
# print " dataInt", dataInt # print " dataInt", dataInt
data = struct.pack(">I", dataInt) # data data = struct.pack(">I", dataInt) # data
...@@ -86,11 +86,15 @@ def genPacket(): ...@@ -86,11 +86,15 @@ def genPacket():
f3 = struct.pack("B", f3) # f3 -> f1 f2 f3 = struct.pack("B", f3) # f3 -> f1 f2
f4 = struct.pack("B", f4) # f4 -> f3 f4 = struct.pack("B", f4) # f4 -> f3
autoscaling = struct.pack("B", autoscaling) # f2 -> f0
fno = struct.pack("B", fno) # f3 -> f1 f2
# packet = chain_id + exec_id_packed + function_id + data + function_count + autoscaling + fno + f0 + f1 + f2 + f3 + f4
packet = chain_id + exec_id_packed + function_id + data + function_count + f0 + f1 + f2 + f3 + f4 packet = chain_id + exec_id_packed + function_id + data + function_count + f0 + f1 + f2 + f3 + f4
# print dataInt, offload_status # print dataInt, offload_status
return packet, exec_id return packet, exec_id
def sendThread(start_time, runtime, sleep_time): def sendThread(start_time, runtime, sleep_time):
global ingress_time global ingress_time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
...@@ -104,7 +108,6 @@ def sendThread(start_time, runtime, sleep_time): ...@@ -104,7 +108,6 @@ def sendThread(start_time, runtime, sleep_time):
ingress_time[exec_id] = time.time() ingress_time[exec_id] = time.time()
time.sleep(sleep_time) time.sleep(sleep_time)
def send(): def send():
global egress_time, ingress_time, concurrency, runtime, stop_thread global egress_time, ingress_time, concurrency, runtime, stop_thread
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
...@@ -113,13 +116,15 @@ def send(): ...@@ -113,13 +116,15 @@ def send():
print("chain id, exec id, data, function count, functions dependencies...") print("chain id, exec id, data, function count, functions dependencies...")
# op = struct.unpack("B", packet[0]) # op = struct.unpack("B", packet[0])
cnt = 0
if args.n is not None: if args.n is not None:
for i in range(args.n): for i in range(args.n):
packet, exec_id = genPacket() packet, exec_id = genPacket()
s.sendto(packet, (SERVER_IP, PORT)) s.sendto(packet, (SERVER_IP, PORT))
ingress_time[exec_id] = time.time() * 1000 ingress_time[exec_id] = time.time() * 1000
print("send", "{0:f}".format(ingress_time[exec_id])) print("send", "{0:f}".format(ingress_time[exec_id]))
cnt +=1
print("cnt request send : ", cnt)
elif args.rps is not None: elif args.rps is not None:
...@@ -127,8 +132,7 @@ def send(): ...@@ -127,8 +132,7 @@ def send():
sleep_time = concurrency / float(args.rps) sleep_time = concurrency / float(args.rps)
print("calculated inter-arrival time, offload mode", sleep_time) print("calculated inter-arrival time, offload mode", sleep_time)
for i in range(concurrency): for i in range(concurrency):
t = threading.Thread(target=sendThread, args=[ t = threading.Thread(target=sendThread, args=[start_time, runtime, sleep_time])
start_time, runtime, sleep_time])
t.daemon = True t.daemon = True
t.start() t.start()
time.sleep(runtime) time.sleep(runtime)
......
This diff is collapsed.
...@@ -2,9 +2,12 @@ ...@@ -2,9 +2,12 @@
"registry_url": "10.129.2.201:5000/", "registry_url": "10.129.2.201:5000/",
"master_port": 8080, "master_port": 8080,
"master_address": "10.129.2.201", "master_address": "10.129.2.201",
"daemon_port": 9000,
"daemon_mac": "00:22:22:22:22:22",
"grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt", "grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt",
"couchdb_host": "10.129.2.201:5984", "couchdb_host": "10.129.2.201:5984",
"env": "env_udp2.js", "env": "env_udp2.js",
"runtime": "process",
"db": { "db": {
"function_meta": "serverless", "function_meta": "serverless",
"metrics": "metrics", "metrics": "metrics",
...@@ -12,31 +15,42 @@ ...@@ -12,31 +15,42 @@
"explicit_chain_meta": "explicit_chain" "explicit_chain_meta": "explicit_chain"
}, },
"network": { "network": {
"network_bridge": "xanadu_kafka-serverless", "network_bridge": "xanadu_kafka_serverless",
"use_bridge": false, "use_bridge": false,
"internal": { "internal": {
"kafka_host": "kafka:9092" "kafka_host": "10.129.2.201:9092"
}, },
"external": { "external": {
"kafka_host": "10.129.2.201:9092" "kafka_host": "10.129.2.201:9092"
} }
}, },
"topics": { "topics": {
"request_dm_2_rm": "request", "request_dm_2_rm": "request2",
"heartbeat": "heartbeat", "heartbeat": "heartbeat2",
"deployed": "deployed", "deployed": "deployed3",
"remove_worker": "removeWorker", "remove_worker": "removeWorker2",
"response_rm_2_dm": "RESPONSE_RM_2_DM_DUMMY", "response_rm_2_dm": "RESPONSE_RM_2_DM_DUMMY2",
"hscale": "hscale", "hscale": "hscale2",
"metrics_worker": "metrics_worker", "metrics_worker": "metrics_worker2",
"log_channel": "LOG_COMMON" "log_channel": "LOG_COMMON2",
"coldstart_worker": "COLDSTART_WORKER2",
"check_autoscale": "CHECK_AUTOSCALE2",
"autoscale": "AUTOSCALE2",
"function_load": "FUNCTION_LOAD3",
"update_function_instance_nic": "UPDATE_FUNCTION_INSTANCE_NIC",
"remove_function_intstance": "REMOVE_FUNCTION_INSTANCE2"
}, },
"autoscalar_metrics": { "autoscalar_metrics": {
"open_request_threshold": 100 "high_open_request_threshold": 10,
"low_open_request_threshold": 1,
"function_load_threshold": 5,
"low_load_count":5,
"high_load_count":5
}, },
"metrics": { "metrics": {
"alpha": 0.7 "alpha": 0.7
}, },
"heartbeat_threshold": 5000,
"speculative_deployment": true, "speculative_deployment": true,
"JIT_deployment": true, "JIT_deployment": true,
"id_size": 20 "id_size": 20
......
#! /bin/bash -x
# Delete the serverless framework's Kafka topics via ZooKeeper so they can be
# recreated cleanly, listing all topics before and after for verification.

# Single source of truth for the kafka-topics.sh path and the ZooKeeper address
# (the original repeated the full path on every line).
KAFKA_TOPICS=/home/pcube/mahendra/downloads/kafka/bin/kafka-topics.sh
ZOOKEEPER=10.129.2.201:2181

echo "before deletetion list of kafka topic"
echo "--------------------------------------"
"$KAFKA_TOPICS" --list --zookeeper "$ZOOKEEPER"
# BUG FIX: plain `echo "\n..."` printed a literal backslash-n; -e interprets it.
echo -e "\n========================================================================"

# Topics to drop, in the original order. "heartbeat" is intentionally
# excluded (it was commented out in the original script as well).
topics=(AUTOSCALE CHECK_AUTOSCALE COLDSTART_WORKER FUNCTION_LOAD LOG_COMMON
        10.129.2.201 192.168.2.3 RESPONSE_RM_2_DM_DUMMY deployed
        hscale metrics_worker removeWorker request)
for topic in "${topics[@]}"; do
    "$KAFKA_TOPICS" --delete --zookeeper "$ZOOKEEPER" --topic "$topic"
done

echo "after deletetion list of kafka topic"
echo "-------------------------"
"$KAFKA_TOPICS" --list --zookeeper "$ZOOKEEPER"
\ No newline at end of file
{"id":"10.129.2.201","master_node":"192.168.0.105"} {"id":"192.168.2.3","master_node":"192.168.2.3"}
\ No newline at end of file \ No newline at end of file
...@@ -85,6 +85,7 @@ function runContainer(metadata) { ...@@ -85,6 +85,7 @@ function runContainer(metadata) {
const process_checkImage = spawn('docker', ["inspect", registry_url + imageName]) const process_checkImage = spawn('docker', ["inspect", registry_url + imageName])
process_checkImage.on('close', (code) => { process_checkImage.on('close', (code) => {
console.log("\ncode : ", code)
if (code != 0) { if (code != 0) {
const process_pullImage = spawn('docker', ["pull", registry_url + imageName]); const process_pullImage = spawn('docker', ["pull", registry_url + imageName]);
...@@ -100,11 +101,11 @@ function runContainer(metadata) { ...@@ -100,11 +101,11 @@ function runContainer(metadata) {
let process = null; let process = null;
if (constants.network.use_bridge) if (constants.network.use_bridge)
process = spawn('docker', ["create", "--rm", `--network=${constants.network.network_bridge}`, "-p", `${port}:${port}`, process = spawn('docker', ["create", "--rm", `--network=${constants.network.network_bridge}`, "-p", `${port}:${port}`,
"-p", `${port}:${port}/udp`, "--name", resource_id, registry_url + imageName, "-p", `${port}:${port}/udp`, "--mac-address","00:22:22:22:22:22","--name", resource_id, registry_url + imageName,
resource_id, imageName, port, "container", constants.network.internal.kafka_host]); resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
else else
process = spawn('docker', ["create", "--rm", "-p", `${port}:${port}`, process = spawn('docker', ["create", "--rm", "-p", `${port}:${port}`,
"-p", `${port}:${port}/udp`, "--name", resource_id, registry_url + imageName, "-p", `${port}:${port}/udp`, "--mac-address","00:22:22:22:22:22", "--name", resource_id, registry_url + imageName,
resource_id, imageName, port, "container", constants.network.internal.kafka_host]); resource_id, imageName, port, "container", constants.network.internal.kafka_host]);
let result = ""; let result = "";
...@@ -150,14 +151,19 @@ function runContainer(metadata) { ...@@ -150,14 +151,19 @@ function runContainer(metadata) {
/** /**
* create docker on the default bridge * create docker on the default bridge
*/ */
let docker_args = null;
if (constants.network.use_bridge) if (constants.network.use_bridge)
process = spawn('docker', ["create", "--rm", `--network=${constants.network.network_bridge}`, docker_args = ["create", "--rm", `--network=${constants.network.network_bridge}`,
"-p", `${port}:${port}`, "-p", `${port}:${port}/udp`, "--name", resource_id, "-p", `${port}:${port}/tcp`, "-p", `${port}:${port}/udp`, "--name", resource_id,
registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host]); registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host];
else else
process = spawn('docker', ["create", docker_args = ["create",
"-p", `${port}:${port}`, "-p", `${port}:${port}/udp`, "--name", resource_id, "-p", `${port}:${port}/tcp`, "-p", `${port}:${port}/udp`, "--name", resource_id,
registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host]); registry_url + imageName, resource_id, imageName, port, "container", constants.network.internal.kafka_host];
console.log("docker args :: ", docker_args)
process = spawn('docker',docker_args);
let result = ""; let result = "";
// timeStart = Date.now() // timeStart = Date.now()
console.log("resource id is: ",resource_id) console.log("resource id is: ",resource_id)
...@@ -173,8 +179,9 @@ function runContainer(metadata) { ...@@ -173,8 +179,9 @@ function runContainer(metadata) {
*/ */
// let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id]) // let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id]) let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
// let add_network = spawn('docker', ['network', 'connect', constants.network.network_bridge, resource_id])
let _ = spawn('docker', ['start', resource_id]) let _ = spawn('docker', ['start', resource_id,'-i'])
_.stdout.on('data', (data) => { _.stdout.on('data', (data) => {
logger.info(data.toString()) logger.info(data.toString())
......
This diff is collapsed.
...@@ -134,6 +134,21 @@ const logger = winston.createLogger({ ...@@ -134,6 +134,21 @@ const logger = winston.createLogger({
}); });
/**
 * Pick a random free port in [PORT_MIN, PORT_MAX] that is not already
 * present in usedPort.
 *
 * @param {Set<number>} usedPort ports already allocated to other resources
 * @returns {number} an unused port, or -1 when no free port was found
 *                   within the retry budget (e.g. the range is exhausted)
 */
function getPort(usedPort) {
    // Named constants replace the original's `Math.ceil(30000)` /
    // `Math.floor(60000)` — ceil/floor of an integer literal is a no-op.
    const PORT_MIN = 30000, PORT_MAX = 60000
    const MAX_TRIES = 30000 // retry budget; guards against an (almost) full set
    let port = -1, tries = 0
    do {
        port = Math.floor(Math.random() * (PORT_MAX - PORT_MIN + 1)) + PORT_MIN
        tries += 1
        if (tries > MAX_TRIES) {
            port = -1
            break
        }
    } while (usedPort.has(port))
    return port
}
module.exports = { module.exports = {
download, makeid, updateConfig, makeTopic, returnPort, logger download, makeid, updateConfig, makeTopic, returnPort, logger, getPort
} }
...@@ -17,7 +17,8 @@ ...@@ -17,7 +17,8 @@
"kafka-node": "^5.0.0", "kafka-node": "^5.0.0",
"morgan": "^1.9.1", "morgan": "^1.9.1",
"mqtt": "^4.2.8", "mqtt": "^4.2.8",
"node-fetch": "^2.6.0", "node-fetch": "^2.6.7",
"os-utils": "0.0.14",
"redis": "^3.1.2", "redis": "^3.1.2",
"request": "^2.88.2", "request": "^2.88.2",
"usage": "^0.7.1", "usage": "^0.7.1",
......
// Shared in-memory state for the dispatch manager, kept in one module so
// every importer sees (and mutates) the same Map instances.
const secrets = require('./secrets.json') // NOTE(review): required but unused below — confirm it is still needed
const constants = require('.././constants_local.json') // NOTE(review): also unused here — confirm
let db = new Map(), // queue holding request to be dispatched
resourceMap = new Map(), // map between resource_id and resource details like node_id, port, associated function etc
functionToResource = new Map(), // a function to resource map. Each map contains a minheap of
// resources associated with the function
workerNodes = new Map(), // list of worker nodes currently known to the DM
functionBranchTree = new Map(), // a tree to store function branch predictions
conditionProbabilityExplicit = new Map(), // tree holding conditional probabilities for explicit chains
requestFlightQueue = new Map()// map to store in flight requests
/**
 * Shared state exported by reference: callers mutate these Maps directly
 * and the changes are visible to every other importer of this module.
 */
module.exports = {
db, functionBranchTree, functionToResource, workerNodes, resourceMap,
conditionProbabilityExplicit, requestFlightQueue
}
// Scratch snippet: build a small payload with a creation timestamp and
// print it to the console.
let a = 10, c = "abc"
// ES2015 shorthand properties replace the repetitive `a: a, c: c` form.
let data = { a, c, timestamp: Date.now() }
console.log(data)
// const sleep = (waitTimeInMs) => new Promise(resolve => setTimeout(resolve, waitTimeInMs));
// async function work1()
// {
// for(let i=0; i<4; i++)
// {
// console.log(1,1)
// await sleep(1000);
// console.log(1,2)
// await sleep(1000);
// }
// }
// async function work2()
// {
// for(let i=0; i<4; i++)
// {
// console.log(2,1)
// await sleep(1000);
// console.log(2,2)
// await sleep(1000);
// }
// }
// work1()
// work2()
// // function sleep(ms) {
// // return new Promise((resolve) => {
// // setTimeout(resolve, ms);
// // });
// // }
// // await sleep(1000)
// // function sleep(ms) {
// // return new Promise((resolve) => {
// // setTimeout(resolve, ms);
// // });
// // }
// // async function sleep(ms)
// // {
// // new Promise(resolve => setTimeout(resolve, ms));
// // }
// // let ms = 10000
// // for(let i=0; i<2; i++)
// // {
// // console.log(1)
// // await sleep(100000)
// // // await new Promise(resolve => setTimeout(resolve, ms));
// // console.log(2)
// // await sleep(100000)
// // // await new Promise(resolve => setTimeout(resolve, ms));
// // }
// // var pidusage = require('pidusage')
// // let cpuper = 0
// // const compute = async () => {
// // const stats = await pidusage(process.pid)
// // // do something
// // // console.log(stats.cpu)
// // cpuper = stats.cpu
// // }
// // // Compute statistics every second:
// // const interval = async (time) => {
// // setTimeout(async () => {
// // await compute()
// // console.log(cpuper)
// // interval(time)
// // }, time)
// // }
// // interval(1000)
// // // function compute(cb) {
// // // pidusage(process.pid, function (err, stats) {
// // // console.log(stats)
// // // => {
// // // cpu: 10.0, // percentage (from 0 to 100*vcore)
// // // memory: 357306368, // bytes
// // // ppid: 312, // PPID
// // // pid: 727, // PID
// // // ctime: 867000, // ms user + system time
// // // elapsed: 6650000, // ms since the start of the process
// // // timestamp: 864000000 // ms since epoch
// // // }
// // // cb()
// // // })
// // // }
// // // function interval(time) {
// // // setTimeout(function() {
// // // compute(function() {
// // // interval(time)
// // // })
// // // }, time)
// // // }
// // // Compute statistics every second:
// // // interval(1000)
// // // pidusage(process.pid, function (err, stats) {
// // // console.log(stats)
// // // // => {
// // // // cpu: 10.0, // percentage (from 0 to 100*vcore)
// // // // memory: 357306368, // bytes
// // // // ppid: 312, // PPID
// // // // pid: 727, // PID
// // // // ctime: 867000, // ms user + system time
// // // // elapsed: 6650000, // ms since the start of the process
// // // // timestamp: 864000000 // ms since epoch
// // // // }
// // // // cb()
// // // })
// // // var usage = require('usage');
// // // var pid = process.pid // you can use any valid PID instead
// // // usage.lookup(pid, function(err, result) {
// // // });
// // // // const Heap = require('heap');
// // // const osutils = require('os-utils')
// // function createLoad()
// // {
// // // while(1)
// // // {
// // arr = []
// // for(let i=2; i<1000; i++)
// // {
// // for(let j=2; j<i; j++)
// // {
// // for(let k=0; k<i; k++){
// // }
// // }
// // }
// // // console.log("\n\n")
// // // }
// // }
// // // function getCPUUsage () {
// // // return new Promise(resolve => {
// // // osutils.cpuUsage(value => resolve(value))
// // // })
// // // }
// // // async function testCPUUsage() {
// // // const cpuUsage = await getCPUUsage();
// // // console.log(`test CPU usage: ${(cpuUsage * 100)}%`);
// // // }
// // // // function getCpuUsage(){
// // // // os_utils.cpuUsage(function(v){
// // // // console.log( 'CPU (%):' + v );
// // // // });
// // // // os_utils.cpuFree(function(v){
// // // // console.log( 'CPU Free:' + v );
// // // // });
// // // // console.log("\n")
// // // // }
// // // setInterval(testCPUUsage,1000);
// // setInterval(createLoad,1000);
// // // // var heap = new Heap(function(a, b) {
// // // // return a.foo - b.foo;
// // // // });
// // // // let map = new Map();
// // // // // a = {foo : 3};
// // // // // b = {foo : 4};
// // // // // c = {foo : 2};
// // // // arr = [{foo : 4},{foo : 5},{foo : 2}]
// // // // // map.set("foo1", a);
// // // // // map.set("foo2", b);
// // // // // map.set("foo3", c);
// // // // // heap.push({foo: 3});
// // // // // heap.push({foo: 1});
// // // // // heap.push({foo: 2});
// // // // heap.push(arr[0]);
// // // // console.log(heap)
// // // // heap.push(arr[1]);
// // // // console.log(heap)
// // // // heap.push(arr[2]);
// // // // console.log(heap)
// // // // arr[0].foo = 1;
// // // // // heap.pop(b);
// // // // console.log(heap)
// // // // heap.updateItem(arr[0])
// // // // console.log(heap)
// // // // heap.pop();
// // // // console.log(heap)
...@@ -87,7 +87,8 @@ app.get('/metrics', (req, res) => { ...@@ -87,7 +87,8 @@ app.get('/metrics', (req, res) => {
/** /**
* REST API to receive deployment requests * REST API to receive deployment requests
*/ **/
app.post('/serverless/deploy', (req, res) => { app.post('/serverless/deploy', (req, res) => {
console.log("req = "+req+" ** "+req.body.runtime+" ** "+req.body.serverless,req.files,req.files.serverless, req.files.nicfunction)//newcode console.log("req = "+req+" ** "+req.body.runtime+" ** "+req.body.serverless,req.files,req.files.serverless, req.files.nicfunction)//newcode
console.log("res = "+res)//newcode console.log("res = "+res)//newcode
...@@ -144,7 +145,8 @@ app.post('/serverless/deploy', (req, res) => { ...@@ -144,7 +145,8 @@ app.post('/serverless/deploy', (req, res) => {
res.send("error").status(400) res.send("error").status(400)
} }
else { else {
let func_id = parseInt(functionHash.slice(0,5),16) let func_id = functionHash
// let func_id = parseInt(functionHash.slice(0,5),16)
//console.log(func_id) //console.log(func_id)
console.log("Function id to be used is: ", func_id) console.log("Function id to be used is: ", func_id)
idToFunchashMap.set(func_id, functionHash) idToFunchashMap.set(func_id, functionHash)
...@@ -203,8 +205,6 @@ function deployContainer(path, imageName) { ...@@ -203,8 +205,6 @@ function deployContainer(path, imageName) {
, function (err) { , function (err) {
if (err) { if (err) {
logger.error("failed", err); logger.error("failed", err);
reject(err); reject(err);
} }
else { else {
...@@ -345,7 +345,6 @@ function postDeploy(message) { ...@@ -345,7 +345,6 @@ function postDeploy(message) {
res.status(400).json({ reason: message.reason }) res.status(400).json({ reason: message.reason })
} }
db.delete(id) db.delete(id)
return; return;
} }
...@@ -382,7 +381,6 @@ function postDeploy(message) { ...@@ -382,7 +381,6 @@ function postDeploy(message) {
functionToResource.set(id, resourceHeap) functionToResource.set(id, resourceHeap)
logger.warn("Creating new resource pool" logger.warn("Creating new resource pool"
+ JSON.stringify(functionToResource.get(id))); + JSON.stringify(functionToResource.get(id)));
} }
try { try {
...@@ -412,7 +410,6 @@ function postDeploy(message) { ...@@ -412,7 +410,6 @@ function postDeploy(message) {
} catch (e) { } catch (e) {
logger.error(e.message) logger.error(e.message)
} }
} }
consumer.on('message', function (message) { consumer.on('message', function (message) {
...@@ -420,13 +417,13 @@ consumer.on('message', function (message) { ...@@ -420,13 +417,13 @@ consumer.on('message', function (message) {
let topic = message.topic let topic = message.topic
message = message.value message = message.value
// console.log(topic, message) // console.log(topic, message)
if (topic === "response") { if (topic === "response2") {
logger.info("response " + message); logger.info("response " + message);
} else if (topic === constants.topics.heartbeat) { } else if (topic === constants.topics.heartbeat) {
message = JSON.parse(message) message = JSON.parse(message)
// console.log(message) // console.log(message)
console.log("node_to_resource_mapping : ", node_to_resource_mapping)
if (Date.now() - message.timestamp < 1000) if (Date.now() - message.timestamp < 1000)
if (!workerNodes.has(message.address)) { if (!workerNodes.has(message.address)) {
workerNodes.set(message.address, message.timestamp) workerNodes.set(message.address, message.timestamp)
...@@ -436,10 +433,10 @@ consumer.on('message', function (message) { ...@@ -436,10 +433,10 @@ consumer.on('message', function (message) {
else else
{ {
if(node_to_resource_mapping.has(message.address)) { if(node_to_resource_mapping.has(message.address)) {
console.log("")
let resource_id = node_to_resource_mapping.get(message.address) let resource_id = node_to_resource_mapping.get(message.address)
resource_to_cpu_util.set(resource_id,message.system_info.loadavg) resource_to_cpu_util.set(resource_id,message.system_info.loadavg)
} }
} }
} else if (topic == constants.topics.deployed) { } else if (topic == constants.topics.deployed) {
try { try {
...@@ -487,7 +484,6 @@ consumer.on('message', function (message) { ...@@ -487,7 +484,6 @@ consumer.on('message', function (message) {
} }
} else if (topic == constants.topics.hscale) { } else if (topic == constants.topics.hscale) {
message = JSON.parse(message) message = JSON.parse(message)
let resource_id = libSupport.makeid(constants.id_size), // each function resource request is associated with an unique ID let resource_id = libSupport.makeid(constants.id_size), // each function resource request is associated with an unique ID
...@@ -573,7 +569,7 @@ function autoscalar() { ...@@ -573,7 +569,7 @@ function autoscalar() {
functionToResource.forEach((resourceList, functionKey, map) => { functionToResource.forEach((resourceList, functionKey, map) => {
if (resourceList.length > 0 && if (resourceList.length > 0 &&
resourceList[resourceList.length - 1].open_request_count > constants.autoscalar_metrics.open_request_threshold) { resourceList[resourceList.length - 1].open_request_count > constants.autoscalar_metrics.high_open_request_threshold) {
let resource = resourceMap.get(resourceList[resourceList.length - 1].resource_id) let resource = resourceMap.get(resourceList[resourceList.length - 1].resource_id)
logger.warn(`resource ${resourceList[resourceList.length - 1]} exceeded autoscalar threshold. Scaling up!`) logger.warn(`resource ${resourceList[resourceList.length - 1]} exceeded autoscalar threshold. Scaling up!`)
let payload = [{ let payload = [{
...@@ -587,13 +583,16 @@ function autoscalar() { ...@@ -587,13 +583,16 @@ function autoscalar() {
} }
function heapUpdate() { function heapUpdate() {
console.log("functionToResource : ", functionToResource)
console.log("resource_to_cpu_util : ", resource_to_cpu_util)
functionToResource.forEach((resourceArray, functionKey) => { functionToResource.forEach((resourceArray, functionKey) => {
//resourceArray = resourceList.toArray() //resourceArray = resourceList.toArray()
console.log("Function being updated: ",functionKey) // console.log("Function being updated: ",functionKey)
for (let i = 0; i < resourceArray.length; i++) { for (let i = 0; i < resourceArray.length; i++) {
let res_i = resourceArray[i].resource_id; let res_i = resourceArray[i].resource_id;
resourceArray[i].cpu_utilization = resource_to_cpu_util.get(res_i); resourceArray[i].cpu_utilization = resource_to_cpu_util.get(res_i);
console.log("Avg load on resource-worker ",i, ": ", resourceArray[i].cpu_utilization) console.log("Avg load on resource-worker ",i, ": ", resourceArray[i].cpu_utilization)
console.log("Avg load on resource-worker ",i, ": ", resourceArray[i])
} }
heap.heapify(resourceArray, libSupport.compare_uti) heap.heapify(resourceArray, libSupport.compare_uti)
...@@ -684,9 +683,7 @@ async function speculative_deployment(req, runtime) { ...@@ -684,9 +683,7 @@ async function speculative_deployment(req, runtime) {
console.log(self, "current delay", currentDelay, "invoke time:", currentDelay - metrics[self].container.starttime); console.log(self, "current delay", currentDelay, "invoke time:", currentDelay - metrics[self].container.starttime);
setTimeout(chainHandler.notify, invokeTime, "container", self) setTimeout(chainHandler.notify, invokeTime, "container", self)
} }
}) })
} else { } else {
/** /**
* Perform Speculation without JIT * Perform Speculation without JIT
...@@ -713,8 +710,8 @@ async function speculative_deployment(req, runtime) { ...@@ -713,8 +710,8 @@ async function speculative_deployment(req, runtime) {
} }
} }
} }
setInterval(libSupport.metrics.broadcastMetrics, 5000) // setInterval(libSupport.metrics.broadcastMetrics, 5000)
// setInterval(autoscalar, 1000); // setInterval(autoscalar, 1000);
setInterval(dispatch, 1000); setInterval(dispatch, 1000);
// setInterval(heapUpdate, 1000); // setInterval(heapUpdate, 5000);
app.listen(port, () => logger.info(`Server listening on port ${port}!`)) app.listen(port, () => logger.info(`Server listening on port ${port}!`))
\ No newline at end of file
...@@ -75,10 +75,13 @@ function generateExecutor(functionPath, functionHash) { ...@@ -75,10 +75,13 @@ function generateExecutor(functionPath, functionHash) {
let output = input.slice(0, insertIndex) + functionFile + input.slice(insertIndex) let output = input.slice(0, insertIndex) + functionFile + input.slice(insertIndex)
let hash = crypto.createHash('md5').update(output).digest("hex"); let hash = crypto.createHash('md5').update(output).digest("hex");
console.log(hash); let func_id = parseInt(hash.slice(0,5),16)
console.log(func_id);
fs.writeFileSync(functionPath + hash + ".js", output) // fs.writeFileSync(functionPath + hash + ".js", output)
return hash fs.writeFileSync(functionPath + "function_" + func_id + ".js", output )
return "function_"+func_id
// return hash
} }
/** /**
...@@ -89,41 +92,42 @@ function generateExecutor(functionPath, functionHash) { ...@@ -89,41 +92,42 @@ function generateExecutor(functionPath, functionHash) {
*/ */
function generateMicrocExecutor(functionPath, functionName, jsfunctionhash) { function generateMicrocExecutor(functionPath, functionName, jsfunctionhash) {
//creating function.c //creating function.c
let function_temp = fs.readFileSync(`./repository/worker_env/function_temp.c`) // let function_temp = fs.readFileSync(`./repository/worker_env/function_temp.c`)
let function_def = fs.readFileSync(functionPath + functionName) // let function_def = fs.readFileSync(functionPath + functionName)
let searchSize = "//ADD_FUNCTION".length // let searchSize = "//ADD_FUNCTION".length
let fid = parseInt(jsfunctionhash.slice(0,5), 16) // let fid = parseInt(jsfunctionhash.slice(0,5), 16)
let insertIndex = function_temp.indexOf("//ADD_FUNCTION") + searchSize // let insertIndex = function_temp.indexOf("//ADD_FUNCTION") + searchSize
let function_name = "void function_"+ fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)" // let function_name = "void function_"+ fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
let full_function = function_temp.slice(0, insertIndex) +"\n"+ function_name + "{\n" +function_def +"\n}"+ function_temp.slice(insertIndex) // let full_function = function_temp.slice(0, insertIndex) +"\n"+ function_name + "{\n" +function_def +"\n}"+ function_temp.slice(insertIndex)
// let hash = crypto.createHash('md5').update(full_function).digest("hex"); // // let hash = crypto.createHash('md5').update(full_function).digest("hex");
// console.log(hash); // // console.log(hash);
console.log(full_function); // console.log(full_function);
fs.writeFileSync(functionPath +"offload/"+ jsfunctionhash + ".c", full_function) // fs.writeFileSync(functionPath +"offload/"+ jsfunctionhash + ".c", full_function)
//adding call to function when match with fid // //adding call to function when match with fid
return new Promise((resolve) => { // return new Promise((resolve) => {
let main_function_temp = fs.readFileSync(functionPath +"offload/"+ "static_dispatch_function.c") // let main_function_temp = fs.readFileSync(functionPath +"offload/"+ "static_dispatch_function.c")
// let client_function = fs.readFileSync(functionPath + "offload/"+jsfunctionhash+".c") // // let client_function = fs.readFileSync(functionPath + "offload/"+jsfunctionhash+".c")
searchSize = "//ADD_FUNCTION_EXTERNS".length // searchSize = "//ADD_FUNCTION_EXTERNS".length
insertIndex = main_function_temp.indexOf("//ADD_FUNCTION_EXTERNS") + searchSize // insertIndex = main_function_temp.indexOf("//ADD_FUNCTION_EXTERNS") + searchSize
let extern_name = "extern void function_"+fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)" // let extern_name = "extern void function_"+fid +"(PIF_PLUGIN_map_hdr_T *mapHdr)"
let main_function = main_function_temp.slice(0, insertIndex) +"\n"+ extern_name+";\n"+ main_function_temp.slice(insertIndex) // let main_function = main_function_temp.slice(0, insertIndex) +"\n"+ extern_name+";\n"+ main_function_temp.slice(insertIndex)
console.log("MAIN FUNCTION : \n",main_function) // console.log("MAIN FUNCTION : \n",main_function)
let hash = crypto.createHash('md5').update(full_function).digest("hex"); // let hash = crypto.createHash('md5').update(full_function).digest("hex");
// console.log(hash); // // console.log(hash);
searchSize = "//ADD_FUNCTION_CONDITION".length // searchSize = "//ADD_FUNCTION_CONDITION".length
insertIndex = main_function.indexOf("//ADD_FUNCTION_CONDITION") + searchSize // insertIndex = main_function.indexOf("//ADD_FUNCTION_CONDITION") + searchSize
let inc_pkt_count = "function_packet_count["+fid+"-10000]++;" // let inc_pkt_count = "function_packet_count["+fid+"-10000]++;"
let if_else_cond = "else if( fid == "+fid + " ) {\n "+inc_pkt_count +"\nfunction_"+fid+"(mapHdr);\n}" // let if_else_cond = "else if( fid == "+fid + " ) {\n "+inc_pkt_count +"\nfunction_"+fid+"(mapHdr);\n}"
let main_function_full = main_function.slice(0, insertIndex) +"\n"+ if_else_cond +"\n"+ main_function.slice(insertIndex) // let main_function_full = main_function.slice(0, insertIndex) +"\n"+ if_else_cond +"\n"+ main_function.slice(insertIndex)
console.log(main_function_full); // console.log(main_function_full);
fs.writeFileSync(functionPath +"offload/"+ "static_dispatch_function.c", main_function_full) // fs.writeFileSync(functionPath +"offload/"+ "static_dispatch_function.c", main_function_full)
return hash // return 'xyz';
}); // return hash
// });
} }
/** /**
...@@ -251,6 +255,7 @@ function getPort(usedPort) { ...@@ -251,6 +255,7 @@ function getPort(usedPort) {
break break
} }
} while (usedPort.has(port)) } while (usedPort.has(port))
 usedPort.set(port, true)
return port return port
} }
...@@ -560,13 +565,33 @@ function unpackPacket(packet) { ...@@ -560,13 +565,33 @@ function unpackPacket(packet) {
base += 4 base += 4
function_count = struct.Unpack("B", packet, base) function_count = struct.Unpack("B", packet, base)
base += 1
autoscale = struct.Unpack("B", packet, base)
base += 1
fno = struct.Unpack("B", packet, base)
base += 1
f0 = struct.Unpack("B", packet, base)
base += 1
f1 = struct.Unpack("B", packet, base)
base += 1
f2 = struct.Unpack("B", packet, base)
base += 1
f3 = struct.Unpack("B", packet, base)
base += 1
f4 = struct.Unpack("B", packet, base)
return { return {
chain_id: chain_id[0], chain_id: chain_id[0],
exec_id: exec_id[0], exec_id: exec_id[0],
data: data[0], data: data[0],
function_count: function_count[0], function_count: function_count[0],
function_id: function_id[0] function_id: function_id[0],
echain: [f1[0], f2[0], f3[0], f4[0]],
autosacle: autoscale[0],
fno: fno[0]
} }
} }
...@@ -609,6 +634,41 @@ function packPacket(dataPacket) { ...@@ -609,6 +634,41 @@ function packPacket(dataPacket) {
return message return message
} }
function packPacketFromDictionary(dataPacket) {
let message = new Array(1024)
let base = 0, chain_id, exec_id, function_id, data, function_count
chain_id = struct.PackTo(">I", message, base, [dataPacket.chain_id])
base += 4
exec_id = struct.PackTo(">I", message, base, [dataPacket.exec_id])
base += 4
function_id = struct.PackTo(">I", message, base, [dataPacket.function_id])
base += 4
data = struct.PackTo(">I", message, base, [dataPacket.data])
base += 4
function_count = struct.PackTo("B", message, base, [dataPacket.function_count])
base += 1
// autoscale = struct.PackTo("B", message, base, [dataPacket.autostart])
// base += 1
// fno = struct.PackTo("B", message, base, [dataPacket.fno])
f0 = struct.PackTo("B", message, base, [0])
base += 1
f1 = struct.PackTo("B", message, base, [12])
base += 1
f2 = struct.PackTo("B", message, base, [0])
base += 1
f3 = struct.PackTo("B", message, base, [34])
base += 1
f4 = struct.PackTo("B", message, base, [0])
base += 1
message = Buffer.from(message)
return message
}
udpProxy.bind(constants.master_port); // starting UDP server for offloaded endpoints udpProxy.bind(constants.master_port); // starting UDP server for offloaded endpoints
...@@ -616,5 +676,5 @@ udpProxy.bind(constants.master_port); // starting UDP server for offloaded endpo ...@@ -616,5 +676,5 @@ udpProxy.bind(constants.master_port); // starting UDP server for offloaded endpo
makeid, generateExecutor, generateMicrocExecutor, reverseProxy, makeid, generateExecutor, generateMicrocExecutor, reverseProxy,
getPort, logger, compare, compare_uti, getPort, logger, compare, compare_uti,
logBroadcast, fetchData, metrics, logBroadcast, fetchData, metrics,
producer producer, packPacket, packPacketFromDictionary, unpackPacket
} }
# #
# Generated Makefile for orchestrator_speedo # Generated Makefile for orchestrator_autoscaling_design1
# #
ifndef SDKDIR ifndef SDKDIR
...@@ -122,7 +122,7 @@ ifneq ($(NFAS_FOUND),found) ...@@ -122,7 +122,7 @@ ifneq ($(NFAS_FOUND),found)
$(warning warning: nfas not found or not executable, on windows please run nfp4term.bat) $(warning warning: nfas not found or not executable, on windows please run nfp4term.bat)
endif endif
$(OUTDIR)/orchestrator_speedo.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \ $(OUTDIR)/orchestrator_autoscaling_design1.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \
$(OUTDIR)/nfd_pcie0_pci_in_issue1.list/nfd_pcie0_pci_in_issue1.list \ $(OUTDIR)/nfd_pcie0_pci_in_issue1.list/nfd_pcie0_pci_in_issue1.list \
$(OUTDIR)/nfd_pcie0_pci_out_me0.list/nfd_pcie0_pci_out_me0.list \ $(OUTDIR)/nfd_pcie0_pci_out_me0.list/nfd_pcie0_pci_out_me0.list \
$(OUTDIR)/nbi_init_csr.list/nbi_init_csr.list \ $(OUTDIR)/nbi_init_csr.list/nbi_init_csr.list \
...@@ -152,14 +152,7 @@ $(OUTDIR)/orchestrator_speedo.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.l ...@@ -152,14 +152,7 @@ $(OUTDIR)/orchestrator_speedo.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.l
-u i36.me7 $(OUTDIR)/nfd_pcie0_pd1.list/nfd_pcie0_pd1.list \ -u i36.me7 $(OUTDIR)/nfd_pcie0_pd1.list/nfd_pcie0_pd1.list \
-u pcie0.me2 $(OUTDIR)/nfd_pcie0_pci_in_issue0.list/nfd_pcie0_pci_in_issue0.list \ -u pcie0.me2 $(OUTDIR)/nfd_pcie0_pci_in_issue0.list/nfd_pcie0_pci_in_issue0.list \
-u i48.me1 $(OUTDIR)/gro1.list/gro1.list \ -u i48.me1 $(OUTDIR)/gro1.list/gro1.list \
-u i32.me0 i33.me0 i34.me0 i35.me0 i36.me0 i32.me1 i33.me1 \ -u i32.me0 i33.me0 i34.me0 i35.me0 i36.me0 $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list \
i34.me1 i35.me1 i36.me1 i32.me2 i33.me2 i34.me2 i35.me2 \
i36.me2 i32.me3 i33.me3 i34.me3 i35.me3 i36.me3 i32.me4 \
i33.me4 i34.me4 i35.me4 i36.me4 i32.me5 i33.me5 i34.me5 \
i35.me5 i36.me5 i32.me6 i33.me6 i34.me6 i35.me6 i32.me7 \
i33.me7 i34.me7 i35.me7 i32.me8 i33.me8 i34.me8 i35.me8 \
i32.me9 i33.me9 i34.me9 i35.me9 i32.me10 i33.me10 i34.me10 \
i35.me10 i32.me11 i33.me11 i34.me11 i35.me11 $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list \
-u i36.me11 $(OUTDIR)/app_master.list/app_master.list \ -u i36.me11 $(OUTDIR)/app_master.list/app_master.list \
-u i48.me2 $(OUTDIR)/nfd_svc.list/nfd_svc.list \ -u i48.me2 $(OUTDIR)/nfd_svc.list/nfd_svc.list \
-u i48.me3 $(OUTDIR)/blm0.list/blm0.list \ -u i48.me3 $(OUTDIR)/blm0.list/blm0.list \
...@@ -179,24 +172,24 @@ $(PIFOUTDIR)/build_info.json: $(MAKEFILE_LIST) ...@@ -179,24 +172,24 @@ $(PIFOUTDIR)/build_info.json: $(MAKEFILE_LIST)
@echo generating $@ @echo generating $@
@echo --------- @echo ---------
@mkdir -p $(PIFOUTDIR) @mkdir -p $(PIFOUTDIR)
@echo -n {\"sku\": \"nfp-4xxxc-b0\", \"worker_mes\": [\"i32.me0\", \"i33.me0\", \"i34.me0\", \"i35.me0\", \"i36.me0\", \"i32.me1\", \"i33.me1\", \"i34.me1\", \"i35.me1\", \"i36.me1\", \"i32.me2\", \"i33.me2\", \"i34.me2\", \"i35.me2\", \"i36.me2\", \"i32.me3\", \"i33.me3\", \"i34.me3\", \"i35.me3\", \"i36.me3\", \"i32.me4\", \"i33.me4\", \"i34.me4\", \"i35.me4\", \"i36.me4\", \"i32.me5\", \"i33.me5\", \"i34.me5\", \"i35.me5\", \"i36.me5\", \"i32.me6\", \"i33.me6\", \"i34.me6\", \"i35.me6\", \"i32.me7\", \"i33.me7\", \"i34.me7\", \"i35.me7\", \"i32.me8\", \"i33.me8\", \"i34.me8\", \"i35.me8\", \"i32.me9\", \"i33.me9\", \"i34.me9\", \"i35.me9\", \"i32.me10\", \"i33.me10\", \"i34.me10\", \"i35.me10\", \"i32.me11\", \"i33.me11\", \"i34.me11\", \"i35.me11\"], \"reduced_thread_usage\": true, \"debug_info\": true, \"simulation\": false} >$@ @echo -n {\"sku\": \"nfp-4xxxc-b0\", \"worker_mes\": [\"i32.me0\", \"i33.me0\", \"i34.me0\", \"i35.me0\", \"i36.me0\"], \"reduced_thread_usage\": true, \"debug_info\": true, \"simulation\": false} >$@
# #
# Generate IR from P4 # Generate IR from P4
# #
$(OUTDIR)/orchestrator_speedo.yml: p4src/orchestrator_speedo.p4 \ $(OUTDIR)/orchestrator_autoscaling_design1.yml: p4src/orchestrator_autoscaling_design1.p4 \
$(MAKEFILE_LIST) $(MAKEFILE_LIST)
@echo --------- @echo ---------
@echo compiling p4 $@ @echo compiling p4 $@
@echo --------- @echo ---------
@mkdir -p $(PIFOUTDIR) @mkdir -p $(PIFOUTDIR)
$(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/orchestrator_speedo.yml \ $(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/orchestrator_autoscaling_design1.yml \
--p4-version 16 \ --p4-version 16 \
--p4-compiler p4c-nfp \ --p4-compiler p4c-nfp \
--source_info \ --source_info \
p4src/orchestrator_speedo.p4 p4src/orchestrator_autoscaling_design1.p4
# #
...@@ -229,16 +222,16 @@ $(PIFOUTDIR)/pif_pkt_clone%h \ ...@@ -229,16 +222,16 @@ $(PIFOUTDIR)/pif_pkt_clone%h \
$(PIFOUTDIR)/pif_flcalc%c \ $(PIFOUTDIR)/pif_flcalc%c \
$(PIFOUTDIR)/pif_flcalc%h \ $(PIFOUTDIR)/pif_flcalc%h \
$(PIFOUTDIR)/pif_field_lists%h \ $(PIFOUTDIR)/pif_field_lists%h \
$(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/orchestrator_speedo%yml $(MAKEFILE_LIST) $(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/orchestrator_autoscaling_design1%yml $(MAKEFILE_LIST)
@echo --------- @echo ---------
@echo generating pif $@ @echo generating pif $@
@echo --------- @echo ---------
@mkdir -p $(PIFOUTDIR) @mkdir -p $(PIFOUTDIR)
$(SDKP4DIR)/bin/nfirc -o $(PIFOUTDIR)/ \ $(SDKP4DIR)/bin/nfirc -o $(PIFOUTDIR)/ \
--p4info $(OUTDIR)/orchestrator_speedo.p4info.json \ --p4info $(OUTDIR)/orchestrator_autoscaling_design1.p4info.json \
--debugpoints \ --debugpoints \
--mac_ingress_timestamp \ --mac_ingress_timestamp \
$(OUTDIR)/orchestrator_speedo.yml $(OUTDIR)/orchestrator_autoscaling_design1.yml
# #
...@@ -707,8 +700,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a ...@@ -707,8 +700,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \
$(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \ $(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \
p4src/static_dispatch_function.c \ ../../../../../../../opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h \
p4src/nic_function_test.c \
$(PIFOUTDIR)/pif_design.h \ $(PIFOUTDIR)/pif_design.h \
$(MAKEFILE_LIST) $(MAKEFILE_LIST)
@echo --------- @echo ---------
...@@ -762,6 +754,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a ...@@ -762,6 +754,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
-I$(SDKP4DIR)/components/dcfl/me/lib/dcfl \ -I$(SDKP4DIR)/components/dcfl/me/lib/dcfl \
-I$(SDKP4DIR)/components/dcfl/shared/include/dcfl \ -I$(SDKP4DIR)/components/dcfl/shared/include/dcfl \
-I$(SDKDIR)/components/standardlibrary/include \ -I$(SDKDIR)/components/standardlibrary/include \
-I../../../../../../../opt/netronome/p4/components/flowcache/me/lib/flowcache \
-FI$(SDKP4DIR)/components/nfp_pif/me/apps/pif_app_nfd/include/config.h \ -FI$(SDKP4DIR)/components/nfp_pif/me/apps/pif_app_nfd/include/config.h \
-Fo$(OUTDIR)/pif_app_nfd.list/ \ -Fo$(OUTDIR)/pif_app_nfd.list/ \
-Fe$(OUTDIR)/pif_app_nfd.list/pif_app_nfd $(NFCC_FLAGS) \ -Fe$(OUTDIR)/pif_app_nfd.list/pif_app_nfd $(NFCC_FLAGS) \
...@@ -818,8 +811,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a ...@@ -818,8 +811,7 @@ $(OUTDIR)/pif_app_nfd.list/pif_app_nfd.list: $(SDKP4DIR)/components/nfp_pif/me/a
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_flcalc_algorithms.c \
$(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \ $(SDKP4DIR)/components/nfp_pif/me/lib/pif/src/pif_memops.c \
$(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \ $(SDKP4DIR)/components/dcfl/me/lib/dcfl/libdcfl.c \
p4src/static_dispatch_function.c \ ../../../../../../../opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h
p4src/nic_function_test.c
# #
# APP_MASTER # APP_MASTER
......
...@@ -14,6 +14,8 @@ sudo ifconfig vf0_1 192.168.2.3/24 up ...@@ -14,6 +14,8 @@ sudo ifconfig vf0_1 192.168.2.3/24 up
echo "y" | docker system prune echo "y" | docker system prune
docker network create -d macvlan --subnet=192.168.2.0/24 --aux-address="vf0_0=192.168.2.2" --aux-address="vf0_1=192.168.2.3" --aux-address="vf0_2=192.168.2.4" -o parent=vf0_1 pub_net docker network create -d macvlan --subnet=192.168.2.0/24 --aux-address="vf0_0=192.168.2.2" --aux-address="vf0_1=192.168.2.3" --aux-address="vf0_2=192.168.2.4" -o parent=vf0_1 pub_net
# docker network create -d bridge --subnet=192.168.2.0/24 --aux-address="vf0_0=192.168.2.2" --aux-address="vf0_1=192.168.2.3" --aux-address="vf0_2=192.168.2.4" xanadu_kafka-serverless
# move vf0_0 into its own namespace # move vf0_0 into its own namespace
# sudo ip netns exec ns_server ip link set vf0_0 netns 1 # sudo ip netns exec ns_server ip link set vf0_0 netns 1
sudo ip netns delete ns_server sudo ip netns delete ns_server
...@@ -51,3 +53,12 @@ sudo ip netns exec ns_server ifconfig vf0_2 mtu 9000 ...@@ -51,3 +53,12 @@ sudo ip netns exec ns_server ifconfig vf0_2 mtu 9000
# sudo ip addr add 10.129.6.5/24 dev bridgek0 # sudo ip addr add 10.129.6.5/24 dev bridgek0
# sudo ip link set bridgek0 up # sudo ip link set bridgek0 up
# create veth cable for kafka
# sudo ip link add veth_nnic0 type veth peer name veth_nnic1
# sudo ip link set veth_nnic0 netns ns_server
# sudo ip netns exec ns_server ip addr add 10.128.2.201/24 dev veth_nnic0
# sudo ip netns exec ns_server ip link set dev veth_nnic0 up
# sudo ip addr add 10.128.2.200/24 dev veth_nnic1
# sudo ip link set dev veth_nnic1 up
...@@ -17,7 +17,9 @@ done ...@@ -17,7 +17,9 @@ done
if [[ $compile_flag -eq 1 ]] if [[ $compile_flag -eq 1 ]]
then then
# compile the nfp code # compile the nfp code
sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator.nffw -p ./p4src/out -4 ./p4src/orchestrator.p4 -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp # sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator_autoscaling_design1.nffw -p ./p4src/out -4 ./p4src/orchestrator_autoscaling_design1.p4 -c ./p4src/packet_counter.c /opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp -A 1 -I /opt/netronome/p4/components/flowcache/me/lib/flowcache/
sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator_autoscaling_design1.nffw -p ./p4src/out -4 ./p4src/orchestrator_autoscaling_design1.p4 -c /opt/netronome/p4/components/flowcache/me/lib/flowcache/flow_cache_global_c.h -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp -A 1 -I /opt/netronome/p4/components/flowcache/me/lib/flowcache/
#sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator.nffw -p ./p4src/out -4 ./p4src/orchestrator.p4 -c ./p4src/memory.c ./p4src/memory2.c -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp #sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator.nffw -p ./p4src/out -4 ./p4src/orchestrator.p4 -c ./p4src/memory.c ./p4src/memory2.c -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp
#cd $nic_function_loc #cd $nic_function_loc
#files=$(./generate_names.sh) #files=$(./generate_names.sh)
...@@ -33,7 +35,7 @@ then ...@@ -33,7 +35,7 @@ then
cd /opt/netronome/p4/bin/ cd /opt/netronome/p4/bin/
# offload # offload
sudo ./rtecli design-load -f $location/p4src/orchestrator.nffw -c $location/p4src/echo.p4cfg -p $location/p4src/out/pif_design.json sudo ./rtecli design-load -f $location/p4src/orchestrator_autoscaling_design1.nffw -c $location/p4src/echo_autoscaling1.p4cfg -p $location/p4src/out/pif_design.json
# returning back to base # returning back to base
cd $location cd $location
......
{
"registers": {
"configs": []
},
"tables": {
"ingress::fwd": {
"rules": [
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p0"
}
}
},
"name": "host_to_net",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p1"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.0"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.1"
}
}
}
]
},
"ingress::dispatch": {
"rules": [
{
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" }
}
},
"name": "dispatch_to_worker5a1",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
}
}
}
],
"default_rule": {
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "default"
}
}
},
"multicast": {},
"meters": {
"configs": []
}
}
{
"registers": {
"configs": []
},
"tables": {
"ingress::fwd": {
"rules": [
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p0"
}
}
},
"name": "host_to_net",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p1"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "v0.0"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "p0"
}
}
},
{
"action": {
"type": "ingress::fwd_act",
"data": {
"port": {
"value": "p1"
}
}
},
"name": "net_to_host",
"match": {
"standard_metadata.ingress_port": {
"value": "v0.1"
}
}
}
]
},
"ingress::dispatch": {
"rules": [
{
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" },
"autoscaling" : {"value" : "3"}
}
},
"name": "dispatch_to_worker5a1",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
}
}
}
],
"default_rule": {
"action": {
"type" : "ingress::dispatch_act",
"data" : {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8080" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" },
"autoscaling" : {"value" : "1"}
}
},
"name": "default"
}
},
"ingress::warmstart_dispatch": {
"rules": [
{
"action": {
"type": "ingress::warmstart_dispatch_act",
"data": {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8081" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "autodsfsff1",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
},
"map_hdr.fno" : {
"value" : "0"
}
}
},
{
"action": {
"type": "ingress::warmstart_dispatch_act",
"data": {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8082" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "autosjgjtyntyntcalef2",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
},
"map_hdr.fno" : {
"value" : "1"
}
}
},
{
"action": {
"type": "ingress::warmstart_dispatch_act",
"data": {
"dstAddr" : { "value" : "192.168.2.3" },
"dstPort" : { "value" : "8083" },
"egress_port" : { "value" : "v0.1" },
"ethernetAddr" : { "value" : "00:22:22:22:22:22" }
}
},
"name": "autosjgjtyntyntcalef2",
"match": {
"map_hdr.function_id" : {
"value" : "38813"
},
"map_hdr.fno" : {
"value" : "2"
}
}
}
]
}
},
"multicast": {},
"meters": {
"configs": []
}
}
...@@ -62,11 +62,14 @@ header map_hdr_t { ...@@ -62,11 +62,14 @@ header map_hdr_t {
bit<32> function_id; bit<32> function_id;
bit<32> data; bit<32> data;
bit<8> function_count; bit<8> function_count;
// bit<8> autoscaling;
// bit<8> fno;
bit<8> f0; bit<8> f0;
bit<8> f1; bit<8> f1;
bit<8> f2; bit<8> f2;
bit<8> f3; bit<8> f3;
bit<8> f4; bit<8> f4;
// bit<8> batch_count; // bit<8> batch_count;
} }
......
...@@ -4,7 +4,7 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout ...@@ -4,7 +4,7 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout
packet.extract<ipv4_t>(hdr.ipv4); packet.extract<ipv4_t>(hdr.ipv4);
transition select(hdr.ipv4.fragOffset, hdr.ipv4.ihl, hdr.ipv4.protocol) { transition select(hdr.ipv4.fragOffset, hdr.ipv4.ihl, hdr.ipv4.protocol) {
(13w0x0 &&& 13w0x0, 4w0x5 &&& 4w0xf, 8w0x11 &&& 8w0xff): parse_udp; (13w0x0 &&& 13w0x0, 4w0x5 &&& 4w0xf, 8w0x11 &&& 8w0xff): parse_udp;
// (13w0x0 &&& 13w0x0, 4w0x5, 8w0x6): parse_tcp; //(13w0x0 &&& 13w0x0, 4w0x5, 8w0x6): parse_tcp;
default: accept; default: accept;
} }
} }
...@@ -26,10 +26,10 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout ...@@ -26,10 +26,10 @@ parser ParserImpl(packet_in packet, out headers hdr, inout metadata meta, inout
} }
} }
// state parse_tcp { //state parse_tcp {
// packet.extract(hdr.tcp); // packet.extract(hdr.tcp);
// transition accept; // transition accept;
// } //}
@name(".parse_map_hdr") state parse_map_hdr { @name(".parse_map_hdr") state parse_map_hdr {
packet.extract(hdr.map_hdr); packet.extract(hdr.map_hdr);
......
...@@ -427,7 +427,7 @@ layout: ...@@ -427,7 +427,7 @@ layout:
########################################## ##########################################
source_info: source_info:
date: 2021/12/15 05:37:07 date: 2022/01/20 19:51:09
output_file: p4src/orchestrator.yml output_file: p4src/orchestrator.yml
p4_version: '16' p4_version: '16'
source_files: source_files:
......
#include <core.p4>
#define V1MODEL_VERSION 20200408
#include <v1model.p4>
#include "includes/defines.p4"
#include "includes/headers.p4"
#include "includes/parsers.p4"
//extern void prime();
//extern void prime2();
//extern void packet_counter();
//extern void static_dispatch_function();
// Minimal single-field digest payload; declared but not emitted anywhere
// in this pipeline (only digest_check_udp_port is used below).
struct small_dig{
    bit<16> port;
}
// Digest record sent to the control plane for every dispatched packet
// (populated in ingress.apply from the UDP/IPv4/map headers).
struct digest_check_udp_port{
    bit<16> udp_port;      // UDP destination port after dispatch rewrite
    bit<32> fid;           // map_hdr.function_id of the request
    bit<4> packet_count;   // stays 0 in this design (no register update)
    bit<32> src_ip;
    bit<32> dst_ip;
    // bit<8> autoscaling;
    // bit<8> fno;
}
// Ingress pipeline: plain port-to-port forwarding (fwd table), plus — for
// packets addressed to DISPATCHER_PORT — a function_id -> worker rewrite
// (dispatch table) and a digest report to the control plane.
control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
    //register<bit<8>>(10000) function_id_check;
    // Declared but never read or written in this design; the autoscaling
    // variant of this pipeline uses it as a dispatch-path packet counter.
    register<bit<4>>(1) fwd_checks;
    //bit<8> pc;
    // Value exported in dig.packet_count; remains 0 here.
    bit<4> pc2=0;
    bit<1> static=1w1;  // unused local
    // Set the egress port chosen by the matching fwd rule.
    @name(".fwd_act") action fwd_act(bit<16> port) {
        standard_metadata.egress_spec = port;
    }
    @name(".fwd") table fwd {
        actions = {
            fwd_act;
        }
        key = {
            standard_metadata.ingress_port : exact;
        }
    }
    // Rewrite IPv4 dst, UDP dst port and dst MAC to the worker serving the
    // matched function.  egress_port is carried in the table data but not
    // used here: the output port is still chosen by the fwd table below.
    @name(".dispatch_act") action dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr, bit<16>egress_port) {
        hdr.ipv4.dstAddr = dstAddr;
        hdr.udp.dstPort = dstPort;
        hdr.ethernet.dstAddr = ethernetAddr;
    }
    // #pragma netro no_lookup_caching dispatch_act
    @name(".dispatch") table dispatch {
        actions = {
            dispatch_act;
        }
        key = {
            hdr.map_hdr.function_id : exact;
        }
    }
    apply {
        // Function-invocation traffic: rewrite destination, report digest,
        // then forward; everything else is just forwarded.
        if (hdr.ipv4.isValid() && hdr.udp.dstPort == DISPATCHER_PORT) {
            dispatch.apply();
            digest_check_udp_port dig;
            dig.udp_port = hdr.udp.dstPort;
            dig.fid = hdr.map_hdr.function_id;
            dig.packet_count = pc2;
            dig.src_ip = hdr.ipv4.srcAddr;
            dig.dst_ip = hdr.ipv4.dstAddr;
            //dig.autoscaling = hdr.map_hdr.autoscaling;
            //dig.fno = hdr.map_hdr.fno;
            // Digest receiver id 0.
            digest<digest_check_udp_port>(0, dig );
            fwd.apply();
        } else {
            fwd.apply();
        }
        //bit<16>mod = 16w10;
        //hdr.udp.dstPort = 10000+(pc2 % mod);
    }
}
// Egress pipeline: only zeroes the UDP checksum.  Ingress rewrote UDP
// fields without updating the checksum; for UDP over IPv4 a checksum of 0
// means "not computed", so receivers accept the packet.
control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
    // @name(".ethernet_set_mac_act") action ethernet_set_mac_act(bit<48> smac, bit<48> dmac) {
    //     hdr.ethernet.srcAddr = smac;
    //     hdr.ethernet.dstAddr = dmac;
    // }
    // @name(".ethernet_set_mac") table ethernet_set_mac {
    //     actions = {
    //         ethernet_set_mac_act;
    //     }
    //     key = {
    //         standard_metadata.egress_port: exact;
    //     }
    // }
    @name("fix_checksum") action fix_checksum() {
        hdr.udp.checksum = 16w0;
    }
    apply {
        // if (hdr.udp.dstPort == MDS_PORT) {
        //     ethernet_set_mac.apply();
        // }
        fix_checksum();
    }
}
// Deparser: re-emit headers in wire order; emit() writes only headers
// that are valid, so non-map packets go out without a map_hdr.
control DeparserImpl(packet_out packet, in headers hdr) {
    apply {
        packet.emit<ethernet_t>(hdr.ethernet);
        packet.emit<ipv4_t>(hdr.ipv4);
        packet.emit<udp_t>(hdr.udp);
        packet.emit<map_hdr_t>(hdr.map_hdr);
    }
}
// Verify the IPv4 header checksum on ingress over the standard IPv4
// field list (v1model verify_checksum).
control verifyChecksum(inout headers hdr, inout metadata meta) {
    apply {
        verify_checksum(
            hdr.ipv4.isValid(),
            { hdr.ipv4.version,
              hdr.ipv4.ihl,
              hdr.ipv4.diffserv,
              hdr.ipv4.totalLen,
              hdr.ipv4.identification,
              hdr.ipv4.flags,
              hdr.ipv4.fragOffset,
              hdr.ipv4.ttl,
              hdr.ipv4.protocol,
              hdr.ipv4.srcAddr,
              hdr.ipv4.dstAddr },
            hdr.ipv4.hdrChecksum,
            HashAlgorithm.csum16);
    }
}
// Recompute the IPv4 header checksum on egress — required because
// ingress dispatch_act rewrites hdr.ipv4.dstAddr.
control computeChecksum(inout headers hdr, inout metadata meta) {
    apply {
        update_checksum(
            hdr.ipv4.isValid(),
            { hdr.ipv4.version,
              hdr.ipv4.ihl,
              hdr.ipv4.diffserv,
              hdr.ipv4.totalLen,
              hdr.ipv4.identification,
              hdr.ipv4.flags,
              hdr.ipv4.fragOffset,
              hdr.ipv4.ttl,
              hdr.ipv4.protocol,
              hdr.ipv4.srcAddr,
              hdr.ipv4.dstAddr },
            hdr.ipv4.hdrChecksum,
            HashAlgorithm.csum16);
    }
}
// v1model package instantiation wiring the pipeline stages together.
V1Switch<headers, metadata>(ParserImpl(), verifyChecksum(), ingress(), egress(), computeChecksum(), DeparserImpl()) main;
{
"tables": [
{
"preamble": {
"id": 33595533,
"name": "fwd",
"alias": "fwd"
},
"matchFields": [
{
"id": 1,
"name": "standard_metadata.ingress_port",
"bitwidth": 16,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16805069
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
},
{
"preamble": {
"id": 33612818,
"name": "dispatch",
"alias": "dispatch"
},
"matchFields": [
{
"id": 1,
"name": "map_hdr.function_id",
"bitwidth": 32,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16786857
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
}
],
"actions": [
{
"preamble": {
"id": 16800567,
"name": "NoAction",
"alias": "NoAction"
}
},
{
"preamble": {
"id": 16805069,
"name": "fwd_act",
"alias": "fwd_act"
},
"params": [
{
"id": 1,
"name": "port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16786857,
"name": "dispatch_act",
"alias": "dispatch_act"
},
"params": [
{
"id": 1,
"name": "dstAddr",
"bitwidth": 32
},
{
"id": 2,
"name": "dstPort",
"bitwidth": 16
},
{
"id": 3,
"name": "ethernetAddr",
"bitwidth": 48
},
{
"id": 4,
"name": "egress_port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16841338,
"name": "fix_checksum",
"alias": "fix_checksum"
}
}
]
}
#include <core.p4>
#define V1MODEL_VERSION 20200408
#include <v1model.p4>
#include "includes/defines.p4"
#include "includes/headers.p4"
#include "includes/parsers.p4"
//extern void prime();
//extern void prime2();
extern void packet_counter();
//extern void static_dispatch_function();
// Minimal single-field digest payload; declared but not emitted anywhere
// in this pipeline (only digest_check_udp_port is used below).
struct small_dig{
    bit<16> port;
}
// Digest record sent to the control plane for every dispatched packet
// (populated in ingress.apply from the UDP/IPv4/map headers).
struct digest_check_udp_port{
    bit<16> udp_port;      // UDP destination port after dispatch rewrite
    bit<32> fid;           // map_hdr.function_id of the request
    bit<4> packet_count;   // running dispatch-path count from fwd_checks
    bit<32> src_ip;
    bit<32> dst_ip;
    bit<8> autoscaling;    // replica count chosen by the dispatch rule
    bit<8> fno;            // replica selector within a scaled function
}
// Ingress pipeline (autoscaling design): forwards traffic between ports
// (fwd table) and, for packets addressed to DISPATCHER_PORT, rewrites the
// destination to the worker serving the requested function.  When the
// matched dispatch rule marks the function as scaled (autoscaling > 1), a
// second table (warmstart_dispatch) re-targets the packet to one of the
// warm replicas selected by map_hdr.fno.  Every dispatched packet bumps a
// counter register and is reported to the control plane via a digest.
control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
    // Running count of packets that took the dispatch path; read-modify-
    // written once per dispatched packet and exported in the digest.
    register<bit<4>>(1) fwd_checks;
    bit<4> pc2 = 0;
    // Set the egress port chosen by the matching fwd rule.
    @name(".fwd_act") action fwd_act(bit<16> port) {
        standard_metadata.egress_spec = port;
    }
    @name(".fwd") table fwd {
        actions = {
            fwd_act;
        }
        key = {
            standard_metadata.ingress_port : exact;
        }
    }
    // Rewrite IPv4 dst, UDP dst port and dst MAC to the primary worker for
    // the matched function, record the rule's autoscaling factor in the map
    // header, and tick the packet_counter() extern (implemented outside P4
    // in the firmware's C plugin).  egress_port is carried in the table
    // data but not used here: the output port is chosen by the fwd table.
    @name(".dispatch_act") action dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr, bit<16> egress_port, bit<8> autoscaling) {
        hdr.ipv4.dstAddr = dstAddr;
        hdr.udp.dstPort = dstPort;
        hdr.ethernet.dstAddr = ethernetAddr;
        hdr.map_hdr.autoscaling = autoscaling;
        @atomic {
            packet_counter();
        }
    }
    #pragma netro no_lookup_caching dispatch_act
    @name(".dispatch") table dispatch {
        actions = {
            dispatch_act;
        }
        key = {
            hdr.map_hdr.function_id : exact;
        }
    }
    #pragma netro no_lookup_caching dispatch
    // Replica dispatch for scaled functions: (function_id, fno) selects one
    // of the warm replicas and overwrites the primary target chosen above.
    @name(".warmstart_dispatch_act") action warmstart_dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr , bit<16> egress_port) {
        hdr.ipv4.dstAddr = dstAddr;
        hdr.udp.dstPort = dstPort;
        hdr.ethernet.dstAddr = ethernetAddr;
    }
    #pragma netro no_lookup_caching warmstart_dispatch_act
    @name(".warmstart_dispatch") table warmstart_dispatch {
        actions = {
            warmstart_dispatch_act;
        }
        key = {
            hdr.map_hdr.function_id : exact;
            hdr.map_hdr.fno : exact;
        }
    }
    // FIX: was "#pragma netro warmstart_dispatch no_lookup_caching" — the
    // option must precede the symbol, matching every other pragma above.
    #pragma netro no_lookup_caching warmstart_dispatch
    apply {
        if (hdr.ipv4.isValid() && hdr.udp.dstPort == DISPATCHER_PORT) {
            // FIX: annotation was misspelled "@atmoic", so it was ignored
            // as an unknown annotation and the register read-modify-write
            // below was not protected as an atomic section.
            @atomic {
                // fno == 255 appears to be a sentinel that redirects the
                // packet to the dispatch entry for function_id 1 — TODO
                // confirm against the controller that installs the rules.
                if (hdr.map_hdr.fno == 255)
                {
                    hdr.map_hdr.function_id = 1;
                }
                dispatch.apply();
                fwd_checks.read(pc2, 0);
                pc2 = pc2 + 1;
                fwd_checks.write(0, pc2);
                if (hdr.map_hdr.autoscaling > 1)
                {
                    warmstart_dispatch.apply();
                }
            }
            // Report per-packet metadata to the control plane.
            digest_check_udp_port dig;
            dig.udp_port = hdr.udp.dstPort;
            dig.fid = hdr.map_hdr.function_id;
            dig.packet_count = pc2;
            dig.src_ip = hdr.ipv4.srcAddr;
            dig.dst_ip = hdr.ipv4.dstAddr;
            dig.autoscaling = hdr.map_hdr.autoscaling;
            dig.fno = hdr.map_hdr.fno;
            digest<digest_check_udp_port>(0, dig );
            fwd.apply();
        } else {
            fwd.apply();
        }
    }
}
// Egress pipeline: only zeroes the UDP checksum.  Ingress rewrote UDP
// fields without updating the checksum; for UDP over IPv4 a checksum of 0
// means "not computed", so receivers accept the packet.
control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata) {
    // @name(".ethernet_set_mac_act") action ethernet_set_mac_act(bit<48> smac, bit<48> dmac) {
    //     hdr.ethernet.srcAddr = smac;
    //     hdr.ethernet.dstAddr = dmac;
    // }
    // @name(".ethernet_set_mac") table ethernet_set_mac {
    //     actions = {
    //         ethernet_set_mac_act;
    //     }
    //     key = {
    //         standard_metadata.egress_port: exact;
    //     }
    // }
    @name("fix_checksum") action fix_checksum() {
        hdr.udp.checksum = 16w0;
    }
    apply {
        // if (hdr.udp.dstPort == MDS_PORT) {
        //     ethernet_set_mac.apply();
        // }
        fix_checksum();
    }
}
// Deparser: serialize headers back onto the wire in stack order. emit()
// skips any header that is not valid, so packets that never carried the
// custom map_hdr are re-emitted unchanged.
control DeparserImpl(packet_out packet, in headers hdr) {
    apply {
        packet.emit<ethernet_t>(hdr.ethernet);
        packet.emit<ipv4_t>(hdr.ipv4);
        packet.emit<udp_t>(hdr.udp);
        // Custom serverless-dispatch header carried after UDP.
        packet.emit<map_hdr_t>(hdr.map_hdr);
    }
}
// Verify the received IPv4 header checksum (standard csum16 over all IPv4
// header fields except the checksum itself); only runs when ipv4 is valid.
control verifyChecksum(inout headers hdr, inout metadata meta) {
    apply {
        verify_checksum(
            hdr.ipv4.isValid(),
            { hdr.ipv4.version,
              hdr.ipv4.ihl,
              hdr.ipv4.diffserv,
              hdr.ipv4.totalLen,
              hdr.ipv4.identification,
              hdr.ipv4.flags,
              hdr.ipv4.fragOffset,
              hdr.ipv4.ttl,
              hdr.ipv4.protocol,
              hdr.ipv4.srcAddr,
              hdr.ipv4.dstAddr },
            hdr.ipv4.hdrChecksum,
            HashAlgorithm.csum16);
    }
}
// Recompute the IPv4 header checksum on the way out — required because the
// ingress dispatch tables rewrite dstAddr. Field list mirrors verifyChecksum.
control computeChecksum(inout headers hdr, inout metadata meta) {
    apply {
        update_checksum(
            hdr.ipv4.isValid(),
            { hdr.ipv4.version,
              hdr.ipv4.ihl,
              hdr.ipv4.diffserv,
              hdr.ipv4.totalLen,
              hdr.ipv4.identification,
              hdr.ipv4.flags,
              hdr.ipv4.fragOffset,
              hdr.ipv4.ttl,
              hdr.ipv4.protocol,
              hdr.ipv4.srcAddr,
              hdr.ipv4.dstAddr },
            hdr.ipv4.hdrChecksum,
            HashAlgorithm.csum16);
    }
}
V1Switch<headers, metadata>(ParserImpl(), verifyChecksum(), ingress(), egress(), computeChecksum(), DeparserImpl()) main;
{
"tables": [
{
"preamble": {
"id": 33595533,
"name": "fwd",
"alias": "fwd"
},
"matchFields": [
{
"id": 1,
"name": "standard_metadata.ingress_port",
"bitwidth": 16,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16805069
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
},
{
"preamble": {
"id": 33612818,
"name": "dispatch",
"alias": "dispatch"
},
"matchFields": [
{
"id": 1,
"name": "map_hdr.function_id",
"bitwidth": 32,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16786857
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
},
{
"preamble": {
"id": 33605697,
"name": "warmstart_dispatch",
"alias": "warmstart_dispatch"
},
"matchFields": [
{
"id": 1,
"name": "map_hdr.function_id",
"bitwidth": 32,
"matchType": "EXACT"
},
{
"id": 2,
"name": "map_hdr.fno",
"bitwidth": 8,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16819926
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
}
],
"actions": [
{
"preamble": {
"id": 16800567,
"name": "NoAction",
"alias": "NoAction"
}
},
{
"preamble": {
"id": 16805069,
"name": "fwd_act",
"alias": "fwd_act"
},
"params": [
{
"id": 1,
"name": "port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16786857,
"name": "dispatch_act",
"alias": "dispatch_act"
},
"params": [
{
"id": 1,
"name": "dstAddr",
"bitwidth": 32
},
{
"id": 2,
"name": "dstPort",
"bitwidth": 16
},
{
"id": 3,
"name": "ethernetAddr",
"bitwidth": 48
},
{
"id": 4,
"name": "egress_port",
"bitwidth": 16
},
{
"id": 5,
"name": "autoscaling",
"bitwidth": 8
}
]
},
{
"preamble": {
"id": 16819926,
"name": "warmstart_dispatch_act",
"alias": "warmstart_dispatch_act"
},
"params": [
{
"id": 1,
"name": "dstAddr",
"bitwidth": 32
},
{
"id": 2,
"name": "dstPort",
"bitwidth": 16
},
{
"id": 3,
"name": "ethernetAddr",
"bitwidth": 48
},
{
"id": 4,
"name": "egress_port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16841338,
"name": "fix_checksum",
"alias": "fix_checksum"
}
}
]
}
//=============================================================================================================
#include <stdint.h>
// #include <stdlib.h>
#include <nfp/me.h>
#include <nfp/mem_atomic.h>
#include <pif_common.h>
#include "pif_plugin.h"
//=============================================================================================================
// __export __emem uint8_t pkt_cntrs[20];
/*
 * Per-packet NFP plugin hook called from the P4 pipeline.
 *
 * When the map header requests autoscaling (autoscaling > 1), pick a replica
 * number for this request by folding the execution id over the replica count,
 * and store it in map_hdr.fno; the P4 warmstart_dispatch table then routes to
 * that replica. The chosen fno is also echoed in map_hdr.data (presumably for
 * client-side debugging — confirm with the dispatcher).
 *
 * Returns PIF_PLUGIN_RETURN_FORWARD unconditionally (never drops).
 *
 * FIX: removed the unused local `fid` and the dead store `mapHdr->fno = 0;`
 * that was immediately overwritten on the next line.
 */
int pif_plugin_packet_counter(EXTRACTED_HEADERS_T *headers, MATCH_DATA_T *match_data) {
    PIF_PLUGIN_map_hdr_T *mapHdr = pif_plugin_hdr_get_map_hdr(headers);
    if (mapHdr->autoscaling > 1)
    {
        uint32_t exec_id = mapHdr->exec_id;
        uint8_t maxf = mapHdr->autoscaling;  /* >= 2 here, so no div-by-zero */
        mapHdr->fno = exec_id % maxf;        /* replica index in [0, maxf) */
        // pkt_cntrs++;
        // mapHdr->function_id = mapHdr->function_id + mapHdr->fno;
    }
    mapHdr->data = mapHdr->fno;
    return PIF_PLUGIN_RETURN_FORWARD;
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Reconstructed from a diff mangled into single lines (old and new column
// fused per line). The post-commit version comments out the 100-iteration
// loop, leaving a single log statement.
// for(let i=0; i<100; i++)
// {
console.log("running test1 js")
// }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment