Commit 0357774b authored by Mahendra Patel


parent 3a6cef8c
@@ -4,7 +4,7 @@ import time
import threading
import random
import time
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Mininet demo')
@@ -13,6 +13,8 @@ parser.add_argument('--fid', help='Function id',
type=int, action="store", required=False)
parser.add_argument('--c', help='Concurrency',
type=int, action="store", required=True)
parser.add_argument('--t', help='Runtime',
type=int, action="store", required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--rps', help='Requests per second',
type=int, action="store")
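For orientation, the two mutually exclusive load modes would be invoked roughly as follows (the script name send.py is an assumption; file names are not visible in this view):

# fixed-rate (open-loop) mode: 4 sender threads, ~100 req/s aggregate, for 10 s
python send.py --fid 1 --c 4 --t 10 --rps 100

# fixed-count mode: send 1000 requests back to back
python send.py --fid 1 --c 4 --t 10 --n 1000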
@@ -25,106 +27,100 @@ args = parser.parse_args()
PORT = 8000
dataInt = 0
fid = args.fid
runtime = args.t
concurrency = args.c
SERVER_IP = "192.168.2.3"
egress_time = []
ingress_time = []
packet_holder = [None] * 11 # one slot per receiver thread (ports 10000-10010)
ingress_time = {}
stop_thread = False
def receive():
global egress_time, stop_thread
def receive(i):
global stop_thread, packet_holder
CLIENT_IP = "0.0.0.0"
port = 10000 + i
print(i)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((CLIENT_IP, 8080))
print "listening to {} at port {}".format(CLIENT_IP, 8080)
s.bind((CLIENT_IP, port))
print("listening to {} at port {}".format(CLIENT_IP, port))
run_status = {}
packet_holder[i] = []
while True:
if stop_thread:
break
packet, addr = s.recvfrom(1024)
# print packet
base = 0
chain_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
exec_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
data = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_count = struct.unpack("B", packet[base])[0]
t = int(time.time() * 1000) % 1000000000
data = int(data) - t
print "rec", chain_id, exec_id, data, function_id, function_count,
packet_holder[i].append((packet, time.time() ))
# print "r", "{0:f}".format((time.time() * 1000)), "{0:f}".format(ingress_time[exec_id])
def genPacket():
global fid
packet = None
exec_id = random.randint(0, 2 ** 30)
print(exec_id)
chain_id = 1
function_count = 5
function_id = fid if (fid) else 1
f0 = 0; f1 = 1; f2 = 2; f3 = 0; f4 = 0
print(chain_id, exec_id, "function_id", function_id, function_count,
f0, f1, f2, f3, f4)
# print chain_id, exec_id, "function_id", function_id, function_count, \
# f0, f1, f2, f3, f4,
chain_id = struct.pack(">I", chain_id) # chain id
exec_id = struct.pack(">I", exec_id) # execution id
exec_id_packed = struct.pack(">I", exec_id) # execution id
dataInt = int(time.time() * 1000) % 1000000000
dataInt = 0
# print " dataInt", dataInt
data = struct.pack(">I", dataInt) # data
function_count = struct.pack("B", function_count) # function count
function_id_packed = struct.pack(">I", function_id)
function_id = struct.pack(">I", function_id)
f0 = struct.pack("B", f0) # f0
f1 = struct.pack("B", f1) # f1
f2 = struct.pack("B", f2) # f2 -> f0
f3 = struct.pack("B", f3) # f3 -> f1 f2
f4 = struct.pack("B", f4) # f4 -> f3
packet = chain_id + exec_id + function_id_packed + data + function_count + f0 + f1 + f2 + f3 + f4
packet = chain_id + exec_id_packed + function_id + data + function_count + f0 + f1 + f2 + f3 + f4
# print dataInt, offload_status
return packet, function_id
return packet, exec_id
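genPacket builds a 22-byte datagram: four big-endian uint32 fields (chain id, execution id, function id, data) followed by six uint8 fields (function count, then the dependency flags f0..f4). A minimal sketch of the inverse parse, assuming that layout:

import struct

def parse_packet(packet):
    # header: 4 x uint32 (big-endian), then the function count as uint8
    chain_id, exec_id, function_id, data = struct.unpack(">IIII", packet[:16])
    function_count = struct.unpack("B", packet[16:17])[0]
    # trailing five uint8 dependency flags
    f0, f1, f2, f3, f4 = struct.unpack("5B", packet[17:22])
    return chain_id, exec_id, function_id, data, function_count, (f0, f1, f2, f3, f4)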
def sendThread(start_time, runtime, sleep_time):
global ingress_time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
packet, function_id = genPacket()
if time.time() - start_time > runtime:
break
packet, exec_id = genPacket()
if exec_id in ingress_time:
continue
s.sendto(packet, (SERVER_IP, PORT))
ingress_time.append(time.time())
ingress_time[exec_id] = time.time()
time.sleep(sleep_time)
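Each sender thread sleeps concurrency / rps seconds between sends, so the threads together approximate the requested --rps. A quick worked example with illustrative values:

concurrency, rps = 4, 100              # illustrative values, not from the source
sleep_time = concurrency / float(rps)  # 0.04 s between sends per thread
# 4 threads * (1 send / 0.04 s) = ~100 requests/s aggregate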
def send():
global egress_time, ingress_time
global egress_time, ingress_time, concurrency, runtime, stop_thread
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print "Sending packet to %s at port %s" % (SERVER_IP, PORT)
print "chain id, exec id, data, function count, functions dependencies..."
print("Sending packet to %s at port %s" % (SERVER_IP, PORT))
print("Runtime: %d Concurrency %d" % (runtime, concurrency))
print("chain id, exec id, data, function count, functions dependencies...")
# op = struct.unpack("B", packet[0])
packet, _ = genPacket()
if args.n is not None:
for i in range(args.req_count):
for i in range(args.n):
packet, exec_id = genPacket()
s.sendto(packet, (SERVER_IP, PORT))
ingress_time.append(time.time())
ingress_time[exec_id] = time.time() # store seconds, matching sendThread; printStatistics scales the delta to ms
print("s", "{0:f}".format(ingress_time[exec_id] * 1000))
elif args.rps is not None:
runtime = 10
thread_count = args.c
start_time = time.time()
sleep_time = thread_count / float(args.rps)
print "calculated inter-arrival time, offload mode", sleep_time
for i in range(thread_count):
sleep_time = concurrency / float(args.rps)
print("calculated inter-arrival time, offload mode", sleep_time)
for i in range(concurrency):
t = threading.Thread(target=sendThread, args=[
start_time, runtime, sleep_time])
t.daemon = True
@@ -135,13 +131,37 @@ def send():
# s.sendto(packet, (SERVER_IP, PORT))
# r.join()
r = threading.Thread(name="receive", target=receive)
r.daemon = True
r.start()
def printStatistics():
global runtime
e2e_time = []
for packetThread in packet_holder:
for packetTuple in packetThread:
packet = packetTuple[0]
base = 0
chain_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
exec_id = struct.unpack(">I", packet[base:base + 4])[0]
e2e_time.append((packetTuple[1] - ingress_time[exec_id])* 1000)
print(e2e_time) # raw per-request latencies (ms)
data = np.array(e2e_time, dtype=float)
p50 = np.percentile(data, 50)
p95 = np.percentile(data, 95)
p99 = np.percentile(data, 99)
mean = np.mean(data)
print("mean \t p50 \t p95 \t p99")
print(mean, p50, p95, p99)
print("rps", len(e2e_time) / runtime, len(ingress_time))
return 0
for i in range(0, 11):
r = threading.Thread(name="receive", target=receive, args=[i])
r.daemon = True
r.start()
time.sleep(1)
send()
r.join()
time.sleep(2)
# r.join()
printStatistics()
import socket
import struct
import time
import threading
import random
import time
import argparse
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--fid', help='Function id',
type=int, action="store", required=False)
parser.add_argument('--c', help='Concurrency',
type=int, action="store", required=True)
parser.add_argument('--req_count', help='request count',
type=int, action="store", required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--rps', help='Requests per second',
type=int, action="store")
group.add_argument('--n', help='Number of requests to send',
type=int, action="store")
args = parser.parse_args()
PORT = 8000
dataInt = 0
fid = args.fid
SERVER_IP = "192.168.2.3"
egress_time = []
ingress_time = []
stop_thread = False
def receive():
global egress_time, stop_thread
CLIENT_IP = "0.0.0.0"
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((CLIENT_IP, 8080))
print "listening to {} at port {}".format(CLIENT_IP, 8080)
run_status = {}
while True:
if stop_thread:
break
packet, addr = s.recvfrom(1024)
# print packet
base = 0
chain_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
exec_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
data = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
function_count = struct.unpack("B", packet[base])[0]
t = int(time.time() * 1000) % 1000000000
data = int(data) - t
print "rec", chain_id, exec_id, data, function_id, function_count,
def genPacket():
global fid
packet = None
exec_id = random.randint(0, 2 ** 30)
print exec_id
chain_id = 1
function_count = 5
function_id = fid if (fid) else 1
f0 = 0; f1 = 1; f2 = 2; f3 = 0; f4 = 0
print chain_id, exec_id, "function_id", function_id, function_count, \
f0, f1, f2, f3, f4
chain_id = struct.pack(">I", chain_id) # chain id
exec_id = struct.pack(">I", exec_id) # execution id
dataInt = int(time.time() * 1000) % 1000000000
data = struct.pack(">I", dataInt) # data
function_count = struct.pack("B", function_count) # function count
function_id_packed = struct.pack(">I", function_id)
f0 = struct.pack("B", f0) # f0
f1 = struct.pack("B", f1) # f1
f2 = struct.pack("B", f2) # f2 -> f0
f3 = struct.pack("B", f3) # f3 -> f1 f2
f4 = struct.pack("B", f4) # f4 -> f3
packet = chain_id + exec_id + function_id_packed + data + function_count + f0 + f1 + f2 + f3 + f4
# print dataInt, offload_status
return packet, function_id
def sendThread(start_time, runtime, sleep_time):
global ingress_time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
packet, function_id = genPacket()
if time.time() - start_time > runtime:
break
s.sendto(packet, (SERVER_IP, PORT))
ingress_time.append(time.time())
time.sleep(sleep_time)
def send():
global egress_time, ingress_time
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print "Sending packet to %s at port %s" % (SERVER_IP, PORT)
print "chain id, exec id, data, function count, functions dependencies..."
# op = struct.unpack("B", packet[0])
packet, _ = genPacket()
if args.n is not None:
for i in range(args.req_count):
s.sendto(packet, (SERVER_IP, PORT))
ingress_time.append(time.time())
elif args.rps is not None:
runtime = 10
thread_count = args.c
start_time = time.time()
sleep_time = thread_count / float(args.rps)
print "calculated inter-arrival time, offload mode", sleep_time
for i in range(thread_count):
t = threading.Thread(target=sendThread, args=[
start_time, runtime, sleep_time])
t.daemon = True
t.start()
time.sleep(runtime)
stop_thread = True
# s.sendto(packet, (SERVER_IP, PORT))
# r.join()
r = threading.Thread(name="receive", target=receive)
r.daemon = True
r.start()
time.sleep(1)
send()
r.join()
import socket
import struct
import time
import threading
import random
import time
import numpy as np
import argparse
import signal
parser = argparse.ArgumentParser(description='Mininet demo')
packet_holder = [None] * 11
ingress_time = {}
stop_thread = False
runtime = 10
def receive(i):
global stop_thread, packet_holder
CLIENT_IP = "0.0.0.0"
port = 10000 + i
print(i)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((CLIENT_IP, port))
print("listening to {} at port {}".format(CLIENT_IP, port))
run_status = {}
packet_holder[i] = []
while True:
if stop_thread:
break
packet, addr = s.recvfrom(1024)
packet_holder[i].append((packet, time.time() ))
# print "r", "{0:f}".format((time.time() * 1000)), "{0:f}".format(ingress_time[exec_id])
def printStatistics():
global runtime
e2e_time = []
for packetThread in packet_holder:
for packetTuple in packetThread:
packet = packetTuple[0]
base = 0
chain_id = struct.unpack(">I", packet[base:base + 4])[0]
base += 4
exec_id = struct.unpack(">I", packet[base:base + 4])[0]
# e2e_time.append((packetTuple[1] - ingress_time[exec_id])* 1000)
# data = np.array(e2e_time, dtype=float)
# p50 = np.percentile(data, 50)
# p95 = np.percentile(data, 95)
# p99 = np.percentile(data, 99)
# mean = np.mean(data)
# print("mean \t p50 \t p95 \t p99")
# print(mean, p50, p95, p99)
print("rps", len(e2e_time) / runtime, len(ingress_time))
return 0
ri = []
for i in range(0, 11):
r = threading.Thread(name="receive", target=receive, args=[i])
r.daemon = True
r.start()
ri.append(r)
def signal_handler(sig, frame):
global stop_thread
print "sigint"
stop_thread = True
print "here"
time.sleep(15)
printStatistics()
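For the handler above to fire, it has to be registered against SIGINT and the main thread must stay alive; those lines are not visible in this view, so the following wiring is an assumption:

signal.signal(signal.SIGINT, signal_handler)  # assumed registration
signal.pause()  # block the main thread until SIGINT arrives (Unix only)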
@@ -4,7 +4,7 @@
"master_address": "localhost",
"grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt",
"couchdb_host": "10.129.6.5:5984",
"env": "env_udp.js",
"env": "env_cpp.js",
"db": {
"function_meta": "serverless",
"metrics": "metrics",
@@ -15,10 +15,10 @@
"network_bridge": "hybrid_kafka-serverless",
"use_bridge": false,
"internal": {
"kafka_host": "10.129.6.5:9092"
"kafka_host": "127.0.0.1:9092"
},
"external": {
"kafka_host": "10.129.6.5:9092"
"kafka_host": "127.0.0.1:29092"
}
},
"topics": {
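Reading the interleaved old/new pairs above (the second line of each pair is the post-commit value), the resulting network block of this config should be:

"network": {
    "network_bridge": "hybrid_kafka-serverless",
    "use_bridge": false,
    "internal": {
        "kafka_host": "127.0.0.1:9092"
    },
    "external": {
        "kafka_host": "127.0.0.1:29092"
    }
}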
@@ -4,6 +4,7 @@
"master_address": "localhost",
"grunt_host": "https://www.namandixit.net/lovecraftian_nightmares/grunt",
"couchdb_host": "localhost:5984",
"env": "env_udp.js",
"db": {
"function_meta": "serverless",
"metrics": "metrics",
@@ -11,13 +12,13 @@
"explicit_chain_meta": "explicit_chain"
},
"network": {
"network_bridge": "hybrid_kafka-serverless",
"network_bridge": "xanadu_kafka-serverless",
"use_bridge": true,
"internal": {
"kafka_host": "kafka:9092"
},
"external": {
"kafka_host": "localhost:29092"
"kafka_host": "10.129.2.201:9092"
}
},
"topics": {
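Likewise, under the same old/new pairing assumption, this second config's network block after the commit should read:

"network": {
    "network_bridge": "xanadu_kafka-serverless",
    "use_bridge": true,
    "internal": {
        "kafka_host": "kafka:9092"
    },
    "external": {
        "kafka_host": "10.129.2.201:9092"
    }
}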
@@ -2,7 +2,7 @@
// const isolateBackend = require('./isolate')
const fs = require('fs')
const { spawn } = require('child_process');
const constants = require("../constants.json")
const constants = require("../constants_local.json")
const libSupport = require('./lib')
const { Worker, isMainThread, workerData } = require('worker_threads');
const registry_url = constants.registry_url
@@ -38,6 +38,7 @@ function runIsolate(local_repository, metadata) {
}
function runProcess(local_repository, metadata) {
console.log("inside run process : ",metadata, local_repository)
let port = metadata.port,
functionHash = metadata.functionHash,
resource_id = metadata.resource_id,
@@ -76,7 +77,7 @@ function runContainer(metadata) {
memory = metadata.resources.memory
logger.info(imageName);
console.log('run container function : ', metadata, imageName, port, resource_id, memory)
return new Promise((resolve, reject) => {
let timeStart = Date.now()
@@ -111,7 +112,9 @@ function runContainer(metadata) {
logger.info(`stdout: ${data}`);
let timeDifference = Math.ceil((Date.now() - timeStart))
logger.info("container run time taken: ", timeDifference);
let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
// let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
let add_network = spawn('docker', ['network', 'connect', 'macvlantest', resource_id])
let _ = spawn('docker', ['start', resource_id])
_.on('data', (data) => {
@@ -140,7 +143,8 @@ function runContainer(metadata) {
})
} else {
logger.info("container starting at port", port);
logger.info("container starting at port", port,"to check");
console.log(port, "no to check!!")
let process = null;
/**
* create docker on the default bridge
@@ -162,7 +166,9 @@
/**
* attach smartnic interface
*/
let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
// let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
let add_network = spawn('docker', ['network', 'connect', 'pub_net', resource_id])
let _ = spawn('docker', ['start', resource_id])
_.stdout.on('data', (data) => {
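The add_network spawn above is the programmatic form of the Docker CLI call below (resource_id is the container name/id assigned by the platform); note that the first hunk switches the attached network from pub_net to macvlantest while the second keeps pub_net:

docker network connect macvlantest <resource_id>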
'use strict';
const constants = require(".././constants.json")
const constants = require(".././constants_local.json")
const secrets = require('./secrets.json')
const config = require('./config.json')
const libSupport = require('./lib')
@@ -131,6 +131,7 @@ function startWorker(local_repository, producer, metadata) {
}], () => { })
})
else if (runtime === "process")
// console.log("rutime is process : ",metadata)
execute.runProcess(local_repository, metadata)
.catch(err => {
logger.error("=====================deployment failed=========================");
@@ -2,7 +2,7 @@ const fetch = require('node-fetch');
const fs = require('fs');
const process = require('process')
const { spawnSync } = require('child_process');
const constants = require(".././constants.json")
const constants = require(".././constants_local.json")
const kafka = require('kafka-node')
const winston = require('winston')
const { createLogger, format, transports } = winston;
@@ -24,6 +24,7 @@ function updateConfig() {
file.id = data[data.length - 1]
fs.writeFileSync('./config.json', JSON.stringify(file));
console.log("Updated Config file");
console.log("updateconfig file ", file)
}
function makeTopic(id) {
@@ -5,7 +5,7 @@ const router = express.Router()
const fs = require('fs')
const { spawn } = require('child_process')
const fetch = require('node-fetch')
const constants = require('../constants.json')
const constants = require('../constants_local.json')
const operator = require('./operator')
const sharedMeta = require('./shared_meta')
const util = require('util')
@@ -2,7 +2,7 @@
const express = require('express');
const fileUpload = require('express-fileupload');
const constants = require('.././constants.json');
const constants = require('.././constants_local.json');
const chainHandler = require('./explicit_chain_handler');
const secrets = require('./secrets.json');
const fs = require('fs');
@@ -14,7 +14,7 @@ const fetch = require('node-fetch');
// const apiSpec = require('./swagger.json');
const util = require('util')
const sharedMeta = require('./shared_meta')
var bodyParser = require('body-parser'); // newcode
const app = express()
const libSupport = require('./lib')
@@ -58,7 +58,11 @@ app.use(morgan('combined', {
skip: function (req, res) { return res.statusCode < 400 }
}))
app.use(express.json());
app.use(express.urlencoded({ extended: true }));
//app.use(express.bodyParser());//newcode
//app.use(express.urlencoded({ extended: true }));//com
app.use(bodyParser.urlencoded({ extended: false }));//newcode
app.use(bodyParser.json());//newcode
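Note that express.json() (kept above) and bodyParser.json() both parse JSON bodies; on Express 4.16+ the express.* parsers are backed by body-parser itself, so registering both is redundant but harmless.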
const file_path = __dirname + "/repository/"
app.use('/repository', express.static(file_path)); // file server hosting deployed functions
app.use(fileUpload())
@@ -79,11 +83,17 @@ app.get('/metrics', (req, res) => {
* REST API to receive deployment requests
*/
app.post('/serverless/deploy', (req, res) => {
console.log("req = "+req+" ** "+req.body.runtime+" ** "+req.body.serverless,req.files,req.files.serverless)//newcode
console.log("res = "+res)//newcode
// console.log("req json = "+JSON.parse(req)) //newcode
console.log("baseurl : ",req.baseUrl)
console.log('Request URL:', req.originalUrl)
let runtime = req.body.runtime
let file = req.files.serverless
console.log("req = "+req)
let functionHash = file.md5
console.log("filepath: ",file_path,"hash: ",functionHash)
file.mv(file_path + functionHash, function (err) { // move function file to repository
functionHash = libSupport.generateExecutor(file_path, functionHash)
@@ -157,12 +167,16 @@ function deployContainer(path, imageName) {
/**
* Generating dockerfile for the received function
*/
let environmentCopy = ""
if (constants.env === "env_cpp.js")
environmentCopy = "COPY ./worker_env/server /app"
fs.writeFile('./repository/Dockerfile',
`FROM node:latest
WORKDIR /app
COPY ./worker_env/package.json /app
ADD ./worker_env/node_modules /app/node_modules
COPY ${imageName}.js /app
${environmentCopy}
ENTRYPOINT ["node", "${imageName}.js"]`
, function (err) {
if (err) {
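With "env" set to "env_cpp.js" (as in the first config hunk above), the template renders roughly as follows for a function whose imageName is, say, abc123 (illustrative value):

FROM node:latest
WORKDIR /app
COPY ./worker_env/package.json /app
ADD ./worker_env/node_modules /app/node_modules
COPY abc123.js /app
COPY ./worker_env/server /app
ENTRYPOINT ["node", "abc123.js"]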
@@ -214,16 +228,18 @@
* REST API to receive execute requests
*/
app.post('/serverless/execute/:id', (req, res) => {
console.log("executing called ", req.params.id, req.body.runtime)
let runtime = req.body.runtime
let id = req.params.id + runtime
res.timestamp = Date.now()
if (functionToResource.has(id)) {
res.start = 'warmstart'
console.log('warmstart')
res.dispatch_time = Date.now()
libSupport.reverseProxy(req, res)
} else {
res.start = 'coldstart'
console.log('coldstart')
/**
* Requests are queued up before being dispatched. To prevent requests coming in for the
* same function from starting too many workers, they are grouped together
@@ -4,7 +4,7 @@ const fs = require('fs')
const rp = require('request-promise');
const fetch = require('node-fetch');
const winston = require('winston')
const constants = require('.././constants.json')
const constants = require('.././constants_local.json')
const secrets = require('./secrets.json')
const metrics = require('./metrics')
const sharedMeta = require('./shared_meta')
@@ -77,6 +77,7 @@ function generateExecutor(functionPath, functionHash) {