Commit 3a6cef8c authored by Shah Rinku

Working solution for dispatcher NIC offload

parent 2867ff9b
......@@ -7,3 +7,29 @@ secrets.json
resource_system/bin/**
resource_system/version.linux
local_experiments/
.vscode
p4src/Makefile-nfp4build
p4src/app_master.list/
p4src/blm0.list/
p4src/echo.nffw
p4src/echo.yml
p4src/flowcache_timeout_emu0.list/
p4src/gro0.list/
p4src/gro1.list/
p4src/nbi_init_csr.list/
p4src/nfd_pcie0_notify.list/
p4src/nfd_pcie0_pci_in_gather.list/
p4src/nfd_pcie0_pci_in_issue0.list/
p4src/nfd_pcie0_pci_in_issue1.list/
p4src/nfd_pcie0_pci_out_me0.list/
p4src/nfd_pcie0_pd0.list/
p4src/nfd_pcie0_pd1.list/
p4src/nfd_pcie0_sb.list/
p4src/nfd_svc.list/
p4src/out/
p4src/pif_app_nfd.list/
client/Makefile-nfp4build
*.list
p4src/out_dir
*.nffw
import socket
import struct
import time
import threading
import random
import argparse
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--fid', help='Function id',
type=int, action="store", required=False)
parser.add_argument('--c', help='Concurrency',
type=int, action="store", required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--rps', help='Requests per second',
type=int, action="store")
group.add_argument('--n', help='Number of requests to send',
type=int, action="store")
args = parser.parse_args()
PORT = 8000
dataInt = 0
fid = args.fid
SERVER_IP = "192.168.2.3"
egress_time = []
ingress_time = []
stop_thread = False
def receive():
global egress_time, stop_thread
CLIENT_IP = "0.0.0.0"
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((CLIENT_IP, 8080))
print "listening to {} at port {}".format(CLIENT_IP, 8080)
run_status = {}
while True:
if stop_thread:
break
packet, addr = s.recvfrom(1024)
# print packet
base = 0
chain_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
exec_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
data = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_count = struct.unpack("B", packet[base])[0]
t = int(time.time() * 1000) % 1000000000
data = int(data) - t
print "rec", chain_id, exec_id, data, function_id, function_count,
def genPacket():
global fid
packet = None
exec_id = random.randint(0, 2 ** 30)
print exec_id
chain_id = 1
function_count = 5
function_id = fid if (fid) else 1
f0 = 0; f1 = 1; f2 = 2; f3 = 0; f4 = 0
print chain_id, exec_id, "function_id", function_id, function_count, \
f0, f1, f2, f3, f4
chain_id = struct.pack(">I", chain_id) # chain id
exec_id = struct.pack(">I", exec_id) # execution id
dataInt = int(time.time() * 1000) % 1000000000
data = struct.pack(">I", dataInt) # data
function_count = struct.pack("B", function_count) # function count
function_id_packed = struct.pack(">I", function_id)
f0 = struct.pack("B", f0) # f0
f1 = struct.pack("B", f1) # f1
f2 = struct.pack("B", f2) # f2 -> f0
f3 = struct.pack("B", f3) # f3 -> f1 f2
f4 = struct.pack("B", f4) # f4 -> f3
packet = chain_id + exec_id + function_id_packed + data + function_count + f0 + f1 + f2 + f3 + f4
# print dataInt, offload_status
return packet, function_id
def sendThread(start_time, runtime, sleep_time):
global ingress_time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
packet, function_id = genPacket()
if time.time() - start_time > runtime:
break
s.sendto(packet, (SERVER_IP, PORT))
ingress_time.append(time.time())
time.sleep(sleep_time)
def send():
global egress_time, ingress_time, stop_thread
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print "Sending packet to %s at port %s" % (SERVER_IP, PORT)
print "chain id, exec id, data, function count, functions dependencies..."
# op = struct.unpack("B", packet[0])
packet, _ = genPacket()
if args.n is not None:
for i in range(args.n):
s.sendto(packet, (SERVER_IP, PORT))
ingress_time.append(time.time())
elif args.rps is not None:
runtime = 10
thread_count = args.c
start_time = time.time()
sleep_time = thread_count / float(args.rps)
print "calculated inter-arrival time, offload mode", sleep_time
for i in range(thread_count):
t = threading.Thread(target=sendThread, args=[
start_time, runtime, sleep_time])
t.daemon = True
t.start()
time.sleep(runtime)
stop_thread = True
# s.sendto(packet, (SERVER_IP, PORT))
# r.join()
r = threading.Thread(name="receive", target=receive)
r.daemon = True
r.start()
time.sleep(1)
send()
r.join()
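
For reference, the request wire format this client emits via genPacket() (and partially parses in receive()) is four big-endian 32-bit fields followed by six single bytes, 22 bytes in all. Below is a minimal standalone sketch of that layout; the helper names pack_request/unpack_request are illustrative and not part of the repository:

import struct

# chain_id, exec_id, function_id, data (4 bytes each, big-endian),
# then function_count and the five dependency slots f0..f4 (1 byte each).
REQ_FMT = ">IIIIB5B"          # struct.calcsize(REQ_FMT) == 22

def pack_request(chain_id, exec_id, function_id, data, function_count, deps):
    # deps is a sequence of five small ints, e.g. (0, 1, 2, 0, 0) as above
    return struct.pack(REQ_FMT, chain_id, exec_id, function_id, data,
                       function_count, *deps)

def unpack_request(packet):
    fields = struct.unpack(REQ_FMT, packet[:struct.calcsize(REQ_FMT)])
    return {"chain_id": fields[0], "exec_id": fields[1],
            "function_id": fields[2], "data": fields[3],
            "function_count": fields[4], "deps": fields[5:]}

The client itself is driven with --c (concurrency) plus either --rps or --n, and optionally --fid; it sends requests to 192.168.2.3:8000 and listens for replies on UDP port 8080.
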
......@@ -13,6 +13,7 @@
"body-parser": "^1.19.0",
"express": "^4.17.1",
"express-fileupload": "^1.1.6",
"jspack": "^0.0.4",
"kafka-node": "^5.0.0",
"morgan": "^1.9.1",
"mqtt": "^3.0.0",
......
......@@ -66,7 +66,7 @@ app.use(fileUpload())
app.use('/serverless/chain', chainHandler.router); // chain router (explicit_chain_handler.js) for handling explicit chains
let requestQueue = []
const WINDOW_SIZE = 10
const WINDOW_SIZE = 1
const port = constants.master_port
const registry_url = constants.registry_url
......@@ -220,6 +220,7 @@ app.post('/serverless/execute/:id', (req, res) => {
res.timestamp = Date.now()
if (functionToResource.has(id)) {
res.start = 'warmstart'
res.dispatch_time = Date.now()
libSupport.reverseProxy(req, res)
} else {
res.start = 'coldstart'
......
......@@ -77,6 +77,7 @@ function generateExecutor(functionPath, functionHash) {
* @param {JSON} res Object to use to return the response to the user
*/
async function reverseProxy(req, res) {
res.reverse_ingress = Date.now()
if (req.headers['x-chain-type'] !== 'explicit' && req.body.type === "tcp")
branchChainPredictor(req)
let runtime = req.body.runtime
......@@ -93,8 +94,10 @@ async function reverseProxy(req, res) {
let url = `http://${resource.node_id}:${resource.port}/serverless/function/execute`
// logger.info("Request received at reverseproxy. Forwarding to: " + url);
forwardTo.open_request_count += 1
heap.heapify(functionHeap, compare) // maintain loadbalancer by heapifying the Map
// forwardTo.open_request_count += 1
// TODO: stopping loadbalancer
// heap.heapify(functionHeap, compare) // maintain loadbalancer by heapifying the Map
res.lookup_time = Date.now()
var options = {
method: 'POST',
uri: url,
......@@ -153,6 +156,7 @@ async function reverseProxy(req, res) {
let payload = req.body
payload.request_id = request_id
let data = payload.data
res.data_set_time = Date.now()
let packet = packPacket({
chain_id: 0,
exec_id: request_id,
......@@ -160,8 +164,10 @@ async function reverseProxy(req, res) {
data,
function_count: 1
})
res.pack_time = Date.now()
udpProxy.send(packet, 0, packet.length, resource.port, resource.node_id, function (err, bytes) {
// logger.info(`forwarded request via UDP, IP 192.168.2.5 Port ${resource.port}`)
res.send_time = Date.now()
})
}
}
......@@ -415,6 +421,23 @@ udpProxy.on('message', (msg, rinfo) => {
let result = unpackPacket(msg)
let res = requestFlightQueue.get(result.exec_id)
res.json(result)
console.log("resource_lookup",
res.dispatch_time - res.timestamp,
"reverse_proxy_call",
res.reverse_ingress - res.dispatch_time,
"metadata_lookup",
res.lookup_time - res.reverse_ingress,
"data_set_time",
res.data_set_time - res.lookup_time,
"pack_time",
res.pack_time - res.data_set_time,
"network_send",
res.send_time - res.pack_time,
"total_dispatch_delay",
res.send_time - res.timestamp,
"E2E time:",
Date.now() - res.timestamp
)
});
udpProxy.on('listening', () => {
......@@ -459,6 +482,7 @@ function packPacket(dataPacket) {
let message = new Array(1024)
let base = 0, chain_id, exec_id, function_id, data, function_count
let f0, f1, f2, f3, f4, t1, t2, t3, t4
chain_id = struct.PackTo(">I", message, base, [dataPacket.chain_id])
base += 4
exec_id = struct.PackTo(">I", message, base, [dataPacket.exec_id])
......@@ -468,6 +492,27 @@ function packPacket(dataPacket) {
data = struct.PackTo(">I", message, base, [dataPacket.data])
base += 4
function_count = struct.PackTo("B", message, base, [dataPacket.function_count])
base += 1
f0 = struct.PackTo("B", message, base, [0])
base += 1
f1 = struct.PackTo("B", message, base, [12])
base += 1
f2 = struct.PackTo("B", message, base, [0])
base += 1
f3 = struct.PackTo("B", message, base, [34])
base += 1
f4 = struct.PackTo("B", message, base, [0])
base += 1
t1 = struct.PackTo(">I", message, base, [Date.now()])
base += 4
t2 = struct.PackTo("I", message, base, [1234])
base += 4
t3 = struct.PackTo("I", message, base, [0])
base += 8
t4 = struct.PackTo("I", message, base, [0])
message = Buffer.from(message)
return message
}
......
#
# Generated Makefile for echo
# Generated Makefile for orchestrator
#
ifndef SDKDIR
......@@ -122,7 +122,7 @@ ifneq ($(NFAS_FOUND),found)
$(warning warning: nfas not found or not executable, on windows please run nfp4term.bat)
endif
$(OUTDIR)/echo.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \
$(OUTDIR)/orchestrator.nffw: $(OUTDIR)/nfd_pcie0_pd0.list/nfd_pcie0_pd0.list \
$(OUTDIR)/nfd_pcie0_pci_in_issue1.list/nfd_pcie0_pci_in_issue1.list \
$(OUTDIR)/nfd_pcie0_pci_out_me0.list/nfd_pcie0_pci_out_me0.list \
$(OUTDIR)/nbi_init_csr.list/nbi_init_csr.list \
......@@ -186,17 +186,17 @@ $(PIFOUTDIR)/build_info.json: $(MAKEFILE_LIST)
# Generate IR from P4
#
$(OUTDIR)/echo.yml: p4src/echo.p4 \
$(OUTDIR)/orchestrator.yml: p4src/orchestrator.p4 \
$(MAKEFILE_LIST)
@echo ---------
@echo compiling p4 $@
@echo ---------
@mkdir -p $(PIFOUTDIR)
$(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/echo.yml \
$(SDKP4DIR)/bin/nfp4c -o $(OUTDIR)/orchestrator.yml \
--p4-version 16 \
--p4-compiler p4c-nfp \
--source_info \
p4src/echo.p4
p4src/orchestrator.p4
#
......@@ -229,15 +229,16 @@ $(PIFOUTDIR)/pif_pkt_clone%h \
$(PIFOUTDIR)/pif_flcalc%c \
$(PIFOUTDIR)/pif_flcalc%h \
$(PIFOUTDIR)/pif_field_lists%h \
$(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/echo%yml $(MAKEFILE_LIST)
$(PIFOUTDIR)/pif_parrep_pvs_sync%c : $(OUTDIR)/orchestrator%yml $(MAKEFILE_LIST)
@echo ---------
@echo generating pif $@
@echo ---------
@mkdir -p $(PIFOUTDIR)
$(SDKP4DIR)/bin/nfirc -o $(PIFOUTDIR)/ \
--p4info $(OUTDIR)/echo.p4info.json \
--p4info $(OUTDIR)/orchestrator.p4info.json \
--debugpoints \
$(OUTDIR)/echo.yml
--mac_ingress_timestamp \
$(OUTDIR)/orchestrator.yml
#
......
#!/bin/bash -x
sudo ifconfig vf0_0 down
sudo ifconfig vf0_0 hw ether 00:11:11:11:11:11
sudo ifconfig vf0_1 down
sudo ifconfig vf0_1 hw ether 00:22:22:22:22:22
sudo ifconfig vf0_0 192.168.2.2/24 up
sudo ifconfig vf0_1 192.168.2.3/24 up
......@@ -7,17 +11,15 @@ sudo ifconfig vf0_1 192.168.2.3/24 up
echo "y" | docker system prune
docker network create -d macvlan --subnet=192.168.2.0/24 --aux-address="vf0_0=192.168.2.2" --aux-address="vf0_1=192.168.2.3" -o parent=vf0_1 pub_net
# move vf0_0 into its own namespace
# sudo ip netns exec ns_server ip link set vf0_0 netns 1
# sudo ip netns delete ns_server
# sudo ip netns add ns_server
# move vf0_0 into its own namespace
sudo ip netns exec ns_server ip link set vf0_0 netns 1
sudo ip netns delete ns_server
sudo ip netns add ns_server
# sudo ip link set vf0_0 netns ns_server
# sudo ip netns exec ns_server ip addr add dev vf0_0 192.168.2.2/24
# sudo ip netns exec ns_server ip link set dev vf0_0 up
sudo ip link set vf0_0 netns ns_server
sudo ip netns exec ns_server ip addr add dev vf0_0 192.168.2.2/24
sudo ip netns exec ns_server ip link set dev vf0_0 up
# sudo ip netns exec ns_server arp -s 192.168.2.3 00:22:22:22:22:22 -i vf0_0
# sudo ip netns exec ns_server arp -s 192.168.2.4 00:33:33:33:33:33 -i vf0_0
sudo ip netns exec ns_server arp -s 192.168.2.3 00:22:22:22:22:22 -i vf0_0
# sudo arp -s 192.168.2.2 00:11:11:11:11:11 -i vf0_1
# sudo arp -s 192.168.2.4 00:33:33:33:33:33 -i vf0_1
\ No newline at end of file
sudo arp -s 192.168.2.2 00:11:11:11:11:11 -i vf0_1
sudo ip netns delete ns_server
sudo ip netns delete ns_client
sudo ip netns add ns_server
sudo ip netns add ns_client
echo "namespace created"
sudo ifconfig vf0_0 down
sudo ifconfig vf0_0 hw ether 00:11:11:11:11:11
sudo ip link set vf0_0 up
sudo ip address add 192.168.2.2/24 dev vf0_0
sudo ethtool --offload vf0_0 rx off tx off sg off
sudo ethtool -K vf0_0 gso off
sudo ifconfig vf0_1 down
sudo ifconfig vf0_1 hw ether 00:22:22:22:22:22
sudo ip link set vf0_1 up
sudo ip address add 192.168.2.3/24 dev vf0_1
sudo ethtool --offload vf0_1 rx off tx off sg off
sudo ethtool -K vf0_1 gso off
echo "IPs assigned"
#sudo arp -s 192.168.2.3 00:22:22:22:22:22 -i vf0_0
#sudo arp -s 192.168.2.4 00:33:33:33:33:33 -i vf0_0
#sudo arp -s 192.168.2.2 00:11:11:11:11:11 -i vf0_1
#sudo arp -s 192.168.2.4 00:33:33:33:33:33 -i vf0_1
sudo ip link set vf0_0 netns ns_server
sudo ip netns exec ns_server ip addr add dev vf0_0 192.168.2.2/24
sudo ip netns exec ns_server ip link set dev vf0_0 up
sudo ip netns exec ns_server arp -s 192.168.2.3 00:22:22:22:22:22 -i vf0_0
sudo ip netns exec ns_server arp -s 192.168.2.4 00:33:33:33:33:33 -i vf0_0
sudo ip link set vf0_1 netns ns_client
sudo ip netns exec ns_client ip addr add dev vf0_1 192.168.2.3/24
sudo ip netns exec ns_client ip link set dev vf0_1 up
sudo ip netns exec ns_client arp -s 192.168.2.2 00:11:11:11:11:11 -i vf0_1
sudo ip netns exec ns_client arp -s 192.168.2.4 00:33:33:33:33:33 -i vf0_1
\ No newline at end of file
......@@ -15,7 +15,7 @@ done
if [[ $compile_flag -eq 1 ]]
then
# compile the nfp code
sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/echo.nffw -p ./p4src/out -4 ./p4src/echo.p4 -l lithium --nfp4c_p4_version 16 --nfp4c_p4_compiler p4c-nfp -c ./p4src/prime.c
sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/orchestrator.nffw -p ./p4src/out -4 ./p4src/orchestrator.p4 -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp -c ./p4src/prime.c
fi
if [[ $offload_flag -eq 1 ]]
......@@ -24,7 +24,7 @@ then
cd /opt/netronome/p4/bin/
# offload
sudo ./rtecli design-load -f $location/p4src/echo.nffw -c $location/p4src/echo.p4cfg -p $location/p4src/out/pif_design.json
sudo ./rtecli design-load -f $location/p4src/orchestrator.nffw -c $location/p4src/echo.p4cfg -p $location/p4src/out/pif_design.json
# returning back to base
cd $location
......
#! /bin/bash -ex
compile_flag=0
offload_flag=0
assign_ip_flag=0
location=$(pwd)
while getopts 'coi' flag; do
case "${flag}" in
c) compile_flag=1 ;;
o) offload_flag=1 ;;
i) assign_ip_flag=1 ;;
esac
done
if [[ $compile_flag -eq 1 ]]
then
# compile the nfp code
sudo /opt/netronome/p4/bin/nfp4build -o ./p4src/test.nffw -p ./p4src/out -4 ./p4src/test.p4 -l lithium --nfp4c_p4_version 16 --nfirc_mac_ingress_timestamp --nfp4c_p4_compiler p4c-nfp -e
fi
if [[ $offload_flag -eq 1 ]]
then
# move to p4 bin
cd /opt/netronome/p4/bin/
# offload
sudo ./rtecli design-load -f $location/p4src/test.nffw -c $location/p4src/test.p4cfg -p $location/p4src/out/pif_design.json
# returning back to base
cd $location
fi
if [[ $assign_ip_flag -eq 1 ]]
then
#assigning IPs to network interfaces
sudo ./assign_ip_test.sh
fi
......@@ -643,35 +643,9 @@
"source_fragment" : "dispatch_act"
}
},
{
"name" : "act",
"id" : 4,
"runtime_data" : [],
"primitives" : [
{
"op" : "assign",
"parameters" : [
{
"type" : "field",
"value" : ["map_hdr", "data"]
},
{
"type" : "hexstr",
"value" : "0x00000064"
}
],
"source_info" : {
"filename" : "p4src/echo.p4",
"line" : 51,
"column" : 9,
"source_fragment" : "hdr.map_hdr.data = 32w100"
}
}
]
},
{
"name" : "fix_checksum",
"id" : 5,
"id" : 4,
"runtime_data" : [],
"primitives" : [
{
......@@ -714,32 +688,9 @@
},
"init_table" : "node_2",
"tables" : [
{
"name" : "tbl_act",
"id" : 0,
"key" : [],
"match_type" : "exact",
"type" : "simple",
"max_size" : 1024,
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [4],
"actions" : ["act"],
"base_default_next" : "dispatch",
"next_tables" : {
"act" : "dispatch"
},
"default_entry" : {
"action_id" : 4,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
}
},
{
"name" : "dispatch",
"id" : 1,
"id" : 0,
"source_info" : {
"filename" : "p4src/echo.p4",
"line" : 40,
......@@ -775,7 +726,7 @@
},
{
"name" : "fwd",
"id" : 2,
"id" : 1,
"source_info" : {
"filename" : "p4src/echo.p4",
"line" : 16,
......@@ -811,7 +762,7 @@
},
{
"name" : "fwd",
"id" : 3,
"id" : 2,
"source_info" : {
"filename" : "p4src/echo.p4",
"line" : 16,
......@@ -888,7 +839,7 @@
}
}
},
"true_next" : "tbl_act",
"true_next" : "dispatch",
"false_next" : "fwd"
}
]
......@@ -906,7 +857,7 @@
"tables" : [
{
"name" : "tbl_fix_checksum",
"id" : 4,
"id" : 3,
"key" : [],
"match_type" : "exact",
"type" : "simple",
......@@ -914,14 +865,14 @@
"with_counters" : false,
"support_timeout" : false,
"direct_meters" : null,
"action_ids" : [5],
"action_ids" : [4],
"actions" : ["fix_checksum"],
"base_default_next" : null,
"next_tables" : {
"fix_checksum" : null
},
"default_entry" : {
"action_id" : 5,
"action_id" : 4,
"action_const" : true,
"action_data" : [],
"action_entry_const" : true
......
......@@ -227,12 +227,6 @@ egress::fix_checksum:
src_lineno: 75
type: action
ingress::act:
implementation: modify_field(map_hdr.data, 0x00000064);
src_filename: ''
src_lineno: 1
type: action
ingress::dispatch_act:
implementation: |-
modify_field(ipv4.dstAddr, dstAddr);
......@@ -290,17 +284,6 @@ ingress::fwd:
src_lineno: 16
type: table
ingress::tbl_act:
allowed_actions:
- ingress::act
default_entry:
action: ingress::act
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
##########################################
# Ingress conditionals sets #
......@@ -324,9 +307,8 @@ ingress_flow:
implementation: |-
digraph {
"_condition_0" -> "ingress::fwd" [condition = false]
"_condition_0" -> "ingress::tbl_act" [condition = true]
"_condition_0" -> "ingress::dispatch" [condition = true]
"ingress::fwd" -> "exit_control_flow" [action = always]
"ingress::tbl_act" -> "ingress::dispatch" [action = always]
"ingress::dispatch" -> "ingress::fwd" [action = always]
}
start_state: _condition_0
......@@ -379,7 +361,7 @@ layout:
##########################################
source_info:
date: 2021/03/04 18:32:12
date: 2021/03/19 14:57:28
output_file: p4src/echo.yml
p4_version: '16'
source_files:
......
#define REPLY_PORT 9000
#define DISPATCHER_PORT 8000
#define MDS_PORT 8000
#define NUM_CACHE 128
#define CLUSTER_COUNT 2
#define DEAD_EGRESS_PORT 9
#define CONTROLLER_IP 0x0a000002
#define SERVER_IP 0x0a000001
#define CONTROLLER_IP 0xc0a80203
#define SERVER_IP 0xc0a80202
#define NC_READ_REQUEST 0
#define NC_READ_REPLY 1
......@@ -16,8 +17,8 @@
#define PKT_INSTANCE_TYPE_NORMAL 0
#define PKT_INSTANCE_TYPE_INGRESS_CLONE 1
#define PKT_INSTANCE_TYPE_EGRESS_CLONE 2
#define PKT_INSTANCE_TYPE_COALESCED 3
#define PKT_INSTANCE_TYPE_INGRESS_RECIRC 4
#define PKT_INSTANCE_TYPE_EGRESS_CLONE 9
#define PKT_INSTANCE_TYPE_COALESCED 10
#define PKT_INSTANCE_TYPE_INGRESS_RECIRC 3
#define PKT_INSTANCE_TYPE_REPLICATION 5
#define PKT_INSTANCE_TYPE_RESUBMIT 6
\ No newline at end of file
#define PKT_INSTANCE_TYPE_RESUBMIT 4
\ No newline at end of file
struct resubmit_meta_t {
bit<8> current_state;
bit<32> data;
}
struct ingress_metadata_t {
bit<1> drop;
bit<9> egress_port;
bit<4> packet_type;
}
register< bit<8>>(16384) current_state;
register< bit<8>>(16384) dispatch_state;
header ethernet_t {
bit<48> dstAddr;
bit<48> srcAddr;
bit<16> etherType;
}
header ipv4_t {
bit<4> version;
bit<4> ihl;
bit<8> diffserv;
bit<16> totalLen;
bit<16> identification;
bit<3> flags;
bit<13> fragOffset;
bit<8> ttl;
bit<8> protocol;
bit<16> hdrChecksum;
bit<32> srcAddr;
bit<32> dstAddr;
}
header udp_t {
bit<16> srcPort;
bit<16> dstPort;
bit<16> length_;
bit<16> checksum;
}
header map_hdr_t {
bit<32> chain_id;
bit<32> exec_id;
bit<8> function_id;
bit<32> data;
bit<8> function_count;
bit<8> f0;
bit<8> f1;
bit<8> f2;
bit<8> f3;
bit<8> f4;
}
struct exec_hdr_t {
bit<8> function_count;
bit<8> function;
}
struct metadata {
@name(".ing_metadata")
ingress_metadata_t ing_metadata;
@name(".resubmit_meta")
resubmit_meta_t resubmit_meta;
@name(".exec_hdr")
exec_hdr_t exec_hdr;
}
struct headers {
@name(".ethernet")
ethernet_t ethernet;
@name(".ipv4")
ipv4_t ipv4;
@name(".udp")
udp_t udp;
@name(".map_hdr")
map_hdr_t map_hdr;
}
\ No newline at end of file
......@@ -25,16 +25,7 @@ control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_
@name(".dispatch_act") action dispatch_act(bit<32> dstAddr, bit<16> dstPort, bit<48> ethernetAddr , bit<16> egress_port) {
hdr.ipv4.dstAddr = dstAddr;
hdr.udp.dstPort = dstPort;
// hdr.ethernet.dstAddr = ethernetAddr;
// standard_metadata.egress_spec = egress_port;
// bit<48> tempEth = hdr.ethernet.dstAddr;
// hdr.ethernet.dstAddr = hdr.ethernet.srcAddr;
// hdr.ethernet.srcAddr = tempEth;
// standard_metadata.egress_spec = standard_metadata.ingress_port;
// hdr.ipv4.ttl = hdr.ipv4.ttl - 8w1;
hdr.ethernet.dstAddr = ethernetAddr;
}
@name(".dispatch") table dispatch {
......@@ -48,7 +39,7 @@ control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_
apply {
if (hdr.ipv4.isValid() && hdr.udp.dstPort == DISPATCHER_PORT) {
hdr.map_hdr.data = 32w100;
dispatch.apply();
fwd.apply();
} else {
......
{
"tables": [
{
"preamble": {
"id": 33595533,
"name": "fwd",
"alias": "fwd"
},
"matchFields": [
{
"id": 1,
"name": "standard_metadata.ingress_port",
"bitwidth": 16,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16805069
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
},
{
"preamble": {
"id": 33612818,
"name": "dispatch",
"alias": "dispatch"
},
"matchFields": [
{
"id": 1,
"name": "map_hdr.function_id",
"bitwidth": 32,
"matchType": "EXACT"
}
],
"actionRefs": [
{
"id": 16786857
},
{
"id": 16800567,
"annotations": [
"@defaultonly()"
]
}
],
"size": "1024"
}
],
"actions": [
{
"preamble": {
"id": 16800567,
"name": "NoAction",
"alias": "NoAction"
}
},
{
"preamble": {
"id": 16805069,
"name": "fwd_act",
"alias": "fwd_act"
},
"params": [
{
"id": 1,
"name": "port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16786857,
"name": "dispatch_act",
"alias": "dispatch_act"
},
"params": [
{
"id": 1,
"name": "dstAddr",
"bitwidth": 32
},
{
"id": 2,
"name": "dstPort",
"bitwidth": 16
},
{
"id": 3,
"name": "ethernetAddr",
"bitwidth": 48
},
{
"id": 4,
"name": "egress_port",
"bitwidth": 16
}
]
},
{
"preamble": {
"id": 16841338,
"name": "fix_checksum",
"alias": "fix_checksum"
}
}
]
}
##########################################
# Header instance definitions #
##########################################
errors:
type: enum
values:
- NoError: 1
- PacketTooShort: 2
- NoMatch: 3
- StackOutOfBounds: 4
- HeaderTooShort: 5
- ParserTimeout: 6
ethernet:
fields:
- dstAddr: 48
- srcAddr: 48
- etherType: 16
type: header
exec_hdr:
fields:
- function_count: 8
- function: 8
type: metadata
ing_metadata:
fields:
- drop: 1
- egress_port: 9
- packet_type: 4
- _padding: 2
type: metadata
ipv4:
calculated_fields:
- condition: valid(ipv4)
field: hdrChecksum
func: calc
type: verify
- condition: valid(ipv4)
field: hdrChecksum
func: calc_0
type: update
fields:
- version: 4
- ihl: 4
- diffserv: 8
- totalLen: 16
- identification: 16
- flags: 3
- fragOffset: 13
- ttl: 8
- protocol: 8
- hdrChecksum: 16
- srcAddr: 32
- dstAddr: 32
type: header
map_hdr:
fields:
- chain_id: 32
- exec_id: 32
- function_id: 32
- data: 32
- function_count: 8
- f0: 8
- f1: 8
- f2: 8
- f3: 8
- f4: 8
type: header
resubmit_meta:
fields:
- current_state: 8
- data: 32
type: metadata
standard_metadata:
fields:
- ingress_port: 16
- packet_length: 14
- egress_spec: 16
- egress_port: 16
- egress_instance: 10
- instance_type: 4
- clone_spec: 32
- parser_error_location: 8
- parser_status: 3
- checksum_error: 1
type: metadata
udp:
fields:
- srcPort: 16
- dstPort: 16
- length_: 16
- checksum: 16
type: header
##########################################
# Register definitions #
##########################################
current_state:
class: global
fields:
- value: 8
instance_count: 16384
type: register
dispatch_state:
class: global
fields:
- value: 8
instance_count: 16384
type: register
##########################################
# Field list definitions #
##########################################
field_list_1:
fields:
- ipv4.version
- ipv4.ihl
- ipv4.diffserv
- ipv4.totalLen
- ipv4.identification
- ipv4.flags
- ipv4.fragOffset
- ipv4.ttl
- ipv4.protocol
- ipv4.srcAddr
- ipv4.dstAddr
type: field_list
##########################################
# Field list calculations #
##########################################
calc:
algorithm: csum16
inputs:
- field_list_1
output_width: 16
type: field_list_calculation
calc_0:
algorithm: csum16
inputs:
- field_list_1
output_width: 16
type: field_list_calculation
##########################################
# Parse states #
##########################################
parse_ipv4:
implementation: extract(ipv4);
select_value:
- ipv4.fragOffset
- ipv4.ihl
- ipv4.protocol
src_filename: p4src/includes/parsers.p4
src_lineno: 3
type: parse_state
parse_map_hdr:
implementation: extract(map_hdr);
src_filename: p4src/includes/parsers.p4
src_lineno: 20
type: parse_state
parse_udp:
implementation: extract(udp);
select_value:
- udp.dstPort
src_filename: p4src/includes/parsers.p4
src_lineno: 11
type: parse_state
start:
implementation: extract(ethernet);
select_value:
- ethernet.etherType
src_filename: p4src/includes/parsers.p4
src_lineno: 25
type: parse_state
##########################################
# Parser #
##########################################
parser:
format: dot
implementation: |-
digraph {
start -> parse_ipv4 [value="0x0800", mask="none", order="0"]
start -> exit [value="default", mask="none", order="1"]
parse_ipv4 -> parse_udp [value="0x00000511", mask="0x00000fff", order="0"]
parse_ipv4 -> exit [value="default", mask="none", order="1"]
parse_udp -> parse_map_hdr [value="0x1f40", mask="none", order="0"]
parse_udp -> parse_map_hdr [value="0x2328", mask="none", order="1"]
parse_udp -> exit [value="default", mask="none", order="2"]
parse_map_hdr -> exit [value="default", mask="none", order="0"]
}
start_state: start
type: parser
##########################################
# Action sets #
##########################################
egress::fix_checksum:
implementation: modify_field(udp.checksum, 0x0000);
src_filename: p4src/orchestrator.p4
src_lineno: 66
type: action
ingress::dispatch_act:
implementation: |-
modify_field(ipv4.dstAddr, dstAddr);
modify_field(udp.dstPort, dstPort);
modify_field(ethernet.dstAddr, ethernetAddr);
parameter_list:
- dstAddr: 32
- dstPort: 16
- ethernetAddr: 48
- egress_port: 16
src_filename: p4src/orchestrator.p4
src_lineno: 25
type: action
ingress::fwd_act:
implementation: modify_field(standard_metadata.egress_spec, port);
parameter_list:
- port: 16
src_filename: p4src/orchestrator.p4
src_lineno: 12
type: action
##########################################
# Ingress and Egress tables #
##########################################
egress::tbl_fix_checksum:
allowed_actions:
- egress::fix_checksum
default_entry:
action: egress::fix_checksum
const: true
max_entries: 1025
src_filename: ''
src_lineno: 1
type: table
ingress::dispatch:
allowed_actions:
- ingress::dispatch_act
match_on:
map_hdr.function_id: exact
max_entries: 1025
src_filename: p4src/orchestrator.p4
src_lineno: 31
type: table
ingress::fwd:
allowed_actions:
- ingress::fwd_act
match_on:
standard_metadata.ingress_port: exact
max_entries: 1025
src_filename: p4src/orchestrator.p4
src_lineno: 16
type: table
##########################################
# Ingress conditionals sets #
##########################################
_condition_0:
condition: (((valid(ipv4))) and (((udp.dstPort) == (8000))))
format: bracketed_expr
src_filename: p4src/orchestrator.p4
src_lineno: 41
type: conditional
##########################################
# Ingress control flow #
##########################################
ingress_flow:
doc: control flow for ingress
format: dot
implementation: |-
digraph {
"_condition_0" -> "ingress::fwd" [condition = false]
"_condition_0" -> "ingress::dispatch" [condition = true]
"ingress::fwd" -> "exit_control_flow" [action = always]
"ingress::dispatch" -> "ingress::fwd" [action = always]
}
start_state: _condition_0
type: control_flow
##########################################
# Egress control flow #
##########################################
egress_flow:
doc: control flow for egress
format: dot
implementation: |-
digraph {
"egress::tbl_fix_checksum" -> "exit_control_flow" [action = always]
}
start_state: egress::tbl_fix_checksum
type: control_flow
##########################################
# Deparsers #
##########################################
deparser:
order:
- ethernet
- ipv4
- udp
- map_hdr
type: deparser
##########################################
# Processor layout #
##########################################
layout:
format: list
implementation:
- parser
- ingress
- egress
type: processor_layout
##########################################
# Source info #
##########################################
source_info:
date: 2021/04/23 22:03:40
output_file: p4src/orchestrator.yml
p4_version: '16'
source_files:
- p4src/orchestrator.p4
- ''
- /opt/netronome/p4/include/16/p4include/core.p4
type: source_info
......@@ -2,7 +2,7 @@
#define V1MODEL_VERSION 20200408
#include <v1model.p4>
#include "includes/defines.p4"
#include "includes/headers.p4"
#include "includes/headers_test.p4"
#include "includes/parsers.p4"
......@@ -40,10 +40,15 @@ control ingress(inout headers hdr, inout metadata meta, inout standard_metadata_
chain_state = chain_state | (8w1 << hdr.map_hdr.function_id);
current_state.write(index, chain_state);
}
// hdr.map_hdr.data = (bit<32>)chain_state;
fwd.apply();
hdr.map_hdr.data = (bit<32>)chain_state;
// fwd.apply();
standard_metadata.egress_spec = standard_metadata.ingress_port;
} else {
fwd.apply();
}
} else {
fwd.apply();
}
}
}
......@@ -68,6 +73,14 @@ control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t
clone3(CloneType.E2E, 4, {standard_metadata, meta});
}
@name(".clone_packet") action clone_packet2() {
clone3(CloneType.E2E, 3, {standard_metadata, meta});
}
@name(".clone_packet") action clone_packet3() {
clone3(CloneType.E2E, 2, {standard_metadata, meta});
}
@name(".recirculate_packet") action recirculate_packet() {
recirculate({standard_metadata, meta});
}
......@@ -75,9 +88,9 @@ control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t
@name(".update_packet") action update_packet() {
hdr.udp.dstPort = MDS_PORT;
hdr.udp.srcPort = REPLY_PORT;
hdr.ipv4.dstAddr = CONTROLLER_IP;
hdr.ipv4.srcAddr = SERVER_IP;
standard_metadata.egress_port = 4;
hdr.ipv4.dstAddr = SERVER_IP;
// hdr.ipv4.srcAddr = SERVER_IP;
standard_metadata.egress_spec = standard_metadata.ingress_port;
}
@name(".reroute_packet_act") action reroute_packet_act(bit<32> dstAddr) {
......@@ -97,6 +110,7 @@ control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t
if (hdr.udp.dstPort == MDS_PORT) {
if (standard_metadata.instance_type == PKT_INSTANCE_TYPE_NORMAL || standard_metadata.instance_type == PKT_INSTANCE_TYPE_INGRESS_RECIRC) {
meta.exec_hdr.function_count = hdr.map_hdr.function_count;
hdr.map_hdr.function_id = hdr.map_hdr.function_count;
// hdr.map_hdr.data = (bit<32>)standard_metadata.egress_port;
} else if (standard_metadata.instance_type == PKT_INSTANCE_TYPE_EGRESS_CLONE) {
meta.exec_hdr.function_count = meta.exec_hdr.function_count - 8w1;
......@@ -106,52 +120,60 @@ control egress(inout headers hdr, inout metadata meta, inout standard_metadata_t
* create a packet replica for next function to be checked
**/
if (meta.exec_hdr.function_count > 0) {
if (meta.exec_hdr.function_count == 5)
clone_packet();
hdr.map_hdr.data = (bit<32>)standard_metadata.instance_type;
}
bit<8> dependency = 8w0;
bit<8> chain_state;
bit<8> function_state = 8w0;
@atomic {
current_state.read(chain_state, hdr.map_hdr.exec_id);
}
if (meta.exec_hdr.function_count == 8w0) {
dependency = hdr.map_hdr.f0;
function_state = 8w1 << 0;
} else if (meta.exec_hdr.function_count == 8w1) {
dependency = hdr.map_hdr.f1;
function_state = 8w1 << 1;
} else if (meta.exec_hdr.function_count == 8w2) {
dependency = hdr.map_hdr.f2;
function_state = 8w1 << 2;
} else if (meta.exec_hdr.function_count == 8w3) {
dependency = hdr.map_hdr.f3;
function_state = 8w1 << 3;
} else if (meta.exec_hdr.function_count == 8w4) {
dependency = hdr.map_hdr.f4;
function_state = 8w1 << 4;
}
// hdr.map_hdr.data = (bit<32>)(chain_state & function_state);
/**
* if current function under scanner has not executed
* and its dependency condition is met continue
**/
// hdr.map_hdr.data = (bit<32>)123;
bit<8> function_dispatch_state;
@atomic {
dispatch_state.read(function_dispatch_state, hdr.map_hdr.exec_id);
// hdr.map_hdr.data = (bit<32>) function_dispatch_state;
if (((function_dispatch_state & function_state) == 8w0) && ((chain_state & dependency) == dependency)) {
function_dispatch_state = function_dispatch_state | (function_state);
dispatch_state.write(hdr.map_hdr.exec_id, function_dispatch_state);
// reroute_packet.apply();
if (meta.exec_hdr.function_count == 4)
clone_packet3();
if (meta.exec_hdr.function_count == 3)
clone_packet2();
hdr.map_hdr.data = 32w1;
} else {
hdr.ipv4.dstAddr = 0x0a000009;
}
}
hdr.map_hdr.data = 32w2;
}
// bit<8> dependency = 8w0;
// bit<8> chain_state;
// bit<8> function_state = 8w0;
// @atomic {
// current_state.read(chain_state, hdr.map_hdr.exec_id);
// }
// if (meta.exec_hdr.function_count == 8w0) {
// dependency = hdr.map_hdr.f0;
// function_state = 8w1 << 0;
// } else if (meta.exec_hdr.function_count == 8w1) {
// dependency = hdr.map_hdr.f1;
// function_state = 8w1 << 1;
// } else if (meta.exec_hdr.function_count == 8w2) {
// dependency = hdr.map_hdr.f2;
// function_state = 8w1 << 2;
// } else if (meta.exec_hdr.function_count == 8w3) {
// dependency = hdr.map_hdr.f3;
// function_state = 8w1 << 3;
// } else if (meta.exec_hdr.function_count == 8w4) {
// dependency = hdr.map_hdr.f4;
// function_state = 8w1 << 4;
// }
// hdr.map_hdr.data = (bit<32>)(meta.exec_hdr.function_count);
// /**
// * if current function under scanner has not executed
// * and its dependency condition is met continue
// **/
// // hdr.map_hdr.data = (bit<32>)123;
// bit<8> function_dispatch_state;
// @atomic {
// dispatch_state.read(function_dispatch_state, hdr.map_hdr.exec_id);
// // hdr.map_hdr.data = (bit<32>) function_dispatch_state;
// if (((function_dispatch_state & function_state) == 8w0) && ((chain_state & dependency) == dependency)) {
// function_dispatch_state = function_dispatch_state | (function_state);
// dispatch_state.write(hdr.map_hdr.exec_id, function_dispatch_state);
// // reroute_packet.apply();
// hdr.udp.dstPort = MDS_PORT;
// } else {
// hdr.udp.dstPort = 5000;
// }
// }
} else if (hdr.udp.dstPort == REPLY_PORT) {
update_packet();
recirculate_packet();
......
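
In plain terms, the egress block above (including the parts now commented out) drives dispatch off two per-exec_id registers: current_state accumulates a completion bitmap (ingress sets bit function_id when a function reports back), and dispatch_state marks functions already handed out; a function is dispatched only when its dependency mask (f0..f4 carried in map_hdr) is fully covered by current_state and its own bit in dispatch_state is still clear. A small Python sketch of that check, with illustrative names (try_dispatch, deps) that do not exist in the repository:

def try_dispatch(i, deps, chain_state, dispatch_state):
    # bit i of chain_state: function i has completed
    # deps[i]: bitmask of functions that must have completed before function i
    # bit i of dispatch_state: function i has already been dispatched
    function_bit = 1 << i
    already_dispatched = bool(dispatch_state & function_bit)
    deps_satisfied = (chain_state & deps[i]) == deps[i]
    if not already_dispatched and deps_satisfied:
        dispatch_state |= function_bit     # mirrors dispatch_state.write(...)
        return True, dispatch_state
    return False, dispatch_state

For example, a dependency mask of 0b00001 means the function becomes dispatchable only after bit 0 of chain_state is set, i.e. after function 0 has completed.
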
......@@ -50,6 +50,13 @@
}
]
},
{
"preamble": {
"id": 16834936,
"name": "clone_packet",
"alias": "clone_packet"
}
},
{
"preamble": {
"id": 16806395,
......
......@@ -4,23 +4,37 @@ import time
import thread
import argparse
NC_PORT = 8000
NC_PORT = 8081
REPLY_PORT = 9000
SERVER_IP = "192.168.2.2"
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--client-ip', help='IP of client',
type=str, action="store", required=True)
parser.add_argument('--client-port', help='Port of the client',
type=int, action="store", required=False)
parser.add_argument('--reply-port', help='Port to reply to',
type=int, action="store", required=False)
parser.add_argument('--r', help='Reply back to the server if set',
type=int, action="store", required=False)
args = parser.parse_args()
CLIENT_IP = args.client_ip
toReply = True if args.r else False
NC_PORT = args.client_port if args.client_port else NC_PORT
REPLY_PORT = args.reply_port if args.reply_port else REPLY_PORT
# CLIENT_IP = "192.168.0.105"
run_status = {}
len_key = 16
counter = 0
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((CLIENT_IP, NC_PORT))
s.bind(("0.0.0.0", NC_PORT))
print REPLY_PORT
while True:
packet, addr = s.recvfrom(1024)
# print packet
# print packet
counter = counter + 1
base = 0
......@@ -28,13 +42,27 @@ while True:
base += 4
exec_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_id = struct.unpack(">I", packet[base:base + 4])[0]
function_id = struct.unpack("B", packet[base])[0]
base += 1
data = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_count = struct.unpack("B", packet[base])[0]
print chain_id, exec_id, data, "function_id", function_id, function_count
if exec_id not in run_status:
run_status[exec_id] = set()
if function_id not in run_status[exec_id]:
run_status[exec_id].add(function_id)
# else:
# continue
print chain_id, exec_id, "data=", data, "function_id=", function_id, function_count,
data = []
for i in range(1, 6):
data.append(int(struct.unpack("B", packet[i+base])[0]))
print data
chain_id = struct.pack(">I", chain_id) # chain_id
exec_id = struct.pack(">I", exec_id)
function_id = struct.pack("B", function_id)
current_state = struct.pack("B", 0)
packetout = chain_id + exec_id + function_id + packet[9:]
if toReply:
s.sendto(packet, (SERVER_IP, REPLY_PORT))
......@@ -36,7 +36,7 @@ print args.send_data
PORT = args.client_port
dataInt = args.send_data
fid = args.fid
SERVER_IP = "192.168.2.2"
SERVER_IP = "192.168.2.3"
egress_time = []
ingress_time = []
......@@ -76,12 +76,12 @@ def genPacket():
chain_id = 1
# data = 100
function_count = 5
function_id = fid if (fid) else 0
function_id = fid if (fid) else 1
f0 = 0
f1 = 0
f1 = 1
f2 = 2
f3 = 6
f4 = 2
f3 = 0
f4 = 0
print chain_id, exec_id, "function_id", function_id, function_count, \
f0, f1, f2, f3, f4
offload_status = False
......@@ -100,7 +100,7 @@ def genPacket():
data = struct.pack(">I", dataInt) # data
# print "{0:b}".format(data)
function_count = struct.pack("B", function_count) # function count
function_id = struct.pack(">I", function_id) # function count
function_id = struct.pack(">I", function_id) # function count (changed to byte for test was >I)
f0 = struct.pack("B", f0) # f0
f1 = struct.pack("B", f1) # f1
f2 = struct.pack("B", f2) # f2 -> f0
......
......@@ -34,11 +34,11 @@ def addRule(worker):
default_rule = False
rule = makeRule(ip, port, mac, functionHash, tableId, rule_name, default_rule)
ruleDictionary[functionHash] = rule
print ruleDictionary
print "rule added: ", ruleDictionary
RTEInterface.Tables.AddRule(
rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
ruleList = RTEInterface.Tables.ListRules(tableId)
print ruleList
print "new rule list: ", ruleList, "\n\n"
return 0
def deleteRule(worker):
......@@ -49,7 +49,8 @@ def deleteRule(worker):
rule["tableId"], rule["rule_name"], rule["default_rule"], rule["match"], rule["actions"])
del ruleDictionary[functionHash]
ruleList = RTEInterface.Tables.ListRules(tableId)
print ruleList
print "deleted rule"
print "new rule list: ", ruleList, "\n\n"
return 0
......
......@@ -15,7 +15,7 @@ struct = struct.jspack
const app = express()
let port = 5000, resource_id, functionHash, runtime, idleTime = 30, flagFirstRequest = true
let port = 5000, resource_id, functionHash, runtime, idleTime = 300, flagFirstRequest = true
let waitTime
resource_id = process.argv[2]
......@@ -139,12 +139,12 @@ server.on('error', (err) => {
server.on('message', (msg, rinfo) => {
// console.log("message", msg)
let payload = unpackPacket(msg)
console.log(payload, typeof payload);
// console.log(payload, typeof payload);
lastRequest = Date.now()
// console.log("network stack time", lastRequest - payload.t1)
totalRequest++
executor(payload).then(result => {
result = packPacket(payload)
console.log(result)
try {
udpProxy.send(result, 0, result.length, "8080", rinfo.address, function (err, bytes) {
if (err)
......@@ -161,7 +161,7 @@ server.on('message', (msg, rinfo) => {
function unpackPacket(packet) {
// let buffer = new Array(1024)
let chain_id = null, exec_id = null, function_count = null, function_id = null, data = null
let base = 0
let base = 0, f0, f1, f2, f3, f4, t1, t2, t3, t4
chain_id = struct.Unpack(">I", packet, base)
base += 4
exec_id = struct.Unpack(">I", packet, base)
......@@ -170,9 +170,30 @@ function unpackPacket(packet) {
base += 4
data = struct.Unpack(">I", packet, base)
base += 4
function_count = struct.Unpack("B", packet, base)
function_count = struct.Unpack("I", packet, base)
base += 4
f0 = struct.Unpack("B", packet, base)
base += 1
f1 = struct.Unpack("B", packet, base)
base += 1
f2 = struct.Unpack("B", packet, base)
base += 1
f3 = struct.Unpack("B", packet, base)
base += 1
f4 = struct.Unpack("B", packet, base)
base += 1
t1 = struct.Unpack("I", packet, base)
base += 8
t2 = struct.Unpack("I", packet, base)
base += 8
t3 = struct.Unpack("I", packet, base)
base += 8
t4 = struct.Unpack("I", packet, base)
console.log("chain_id", chain_id, "exec_id", exec_id, "data", data, "function_count", function_count, "function_id", function_id)
// console.log("chain_id", chain_id, "exec_id", exec_id, "data", data, "function_count", function_count, "function_id", function_id)
return {
......@@ -180,7 +201,8 @@ function unpackPacket(packet) {
exec_id: exec_id[0],
data: data[0],
function_count: function_count[0],
function_id: function_id[0]
function_id: function_id[0],
f0, f1, f2, f3, f4, t1, t2, t3, t4
}
}
......