Commit d80fbcc6 authored by Jayesh Kishor

Initial commit

{
"Server Configuration":
[
{
"IP address":"10.96.23.32",
"Username":"helen",
"Password":"helen@12",
"Location":"/home/helen/Desktop/thesis/Project/test.py",
"Name":"CPU-cores",
"Regexp":"(workers\",\\s\")[0-9]+(.*)",
"Range":["1"]
}
],
"Server Profiling":
[
{
"IP address":"10.96.23.32",
"Username":"helen",
"Password":"helen@12",
"Name":["apache2"],
"Path":"/home/helen/Desktop/thesis/Project/profile.py"
}
],
"Command":
[
],
"BaseURL":"http://10.96.23.32/",
"AutomaticLoadLevelSelection":"True",
"LoadLevels":[1,20,40,44,60,80],
"OTSLG":"jmeter",
"SessionDescriptionFile":"sample.jmx",
"ThinkTime":2,
"NumberOfCores":20,
"MasterConfiguration":
{
"IP address":"10.129.29.155",
"Username":"helen",
"Password":"helen@12"
}
}
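# A note on the "Regexp" entry above: SetServerConfiguration() passes it to
# re.sub() with a \g<1><value>\g<2> replacement, so the two capture groups
# preserve the surrounding text and only the number between them is rewritten.
# A minimal sketch of the mechanism (the sample line is hypothetical, not the
# actual contents of test.py):
#
#   import re
#   line = '"workers", "8", "threads", "4"'
#   pattern = r'(workers",\s")[0-9]+(.*)'
#   print(re.sub(pattern, r'\g<1>16\g<2>', line))
#   # -> '"workers", "16", "threads", "4"'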
import paramiko
import re
import json
import ast
class ServerSystem:
def SetServerConfiguration(self,nextServerConfiguration,ServerConfigurationInfo,serverConfigurationFilePath):
ioError="00"
for i in range(0,len(nextServerConfiguration)):
try:
ssh=paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ServerConfigurationInfo[i]["IP address"],username=ServerConfigurationInfo[i]["Username"],password=ServerConfigurationInfo[i]["Password"])
                sftp=ssh.open_sftp() # reuse one SFTP session instead of opening a new one per file operation
                try:
                    sftp.file(ServerConfigurationInfo[i]["Location"],"r+").close()
                except IOError:
                    ioError="12" # file is not writable over SFTP; fall back to sudo cp below
                if ioError=="12":
                    fileLocation=ServerConfigurationInfo[i]["Location"]
                    password=ServerConfigurationInfo[i]["Password"]
                    stdin, stdout, stderr =ssh.exec_command(f'cat {fileLocation}')
                    fileContent=stdout.read().decode('utf-8')
                    fileContent=re.sub(ServerConfigurationInfo[i]["Regexp"],rf'\g<1>{nextServerConfiguration[i]}\g<2>',fileContent)
                    tempFilePath=serverConfigurationFilePath+"/temp.txt"
                    with sftp.file(tempFilePath,"w") as tempFile:
                        tempFile.write(fileContent)
                    stdin, stdout, stderr =ssh.exec_command(f'echo {password} | sudo -S cp {tempFilePath} {fileLocation}')
                    stdout.channel.recv_exit_status() # wait for the copy to finish before moving on
                else:
                    with sftp.file(ServerConfigurationInfo[i]["Location"],"r+") as remoteFile:
                        fileContent=remoteFile.read().decode("utf-8")
                    fileContent=re.sub(ServerConfigurationInfo[i]["Regexp"],rf'\g<1>{nextServerConfiguration[i]}\g<2>',fileContent)
                    with sftp.file(ServerConfigurationInfo[i]["Location"],"w") as remoteFile:
                        remoteFile.write(fileContent)
                sftp.close()
except paramiko.AuthenticationException:
return "08"
except paramiko.SSHException as e:
return "09"
except Exception as e:
return "10"
finally:
ssh.close()
return "00"
def executeCommands(self,ServerCommands,serverConfigurationFilePath):
for i in range(0,len(ServerCommands)):
try:
ssh=paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ServerCommands[i]["IP address"],username=ServerCommands[i]["Username"],password=ServerCommands[i]["Password"])
if ServerCommands[i]["Inputs"]==[]:
command=ServerCommands[i]["Command"]
                    stdin, stdout, stderr =ssh.exec_command(f'{command}')
                    stdout.channel.recv_exit_status() # block until the remote command finishes instead of busy-waiting
else:
inputs=""
for j in ServerCommands[i]["Inputs"]:
inputs=inputs+j+" "
command=ServerCommands[i]["Command"]
stdin, stdout, stderr =ssh.exec_command(f'echo {inputs}| {command}')
channel=stdout.channel
while not channel.exit_status_ready():
pass
except paramiko.AuthenticationException:
return "08"
except paramiko.SSHException as e:
return "09"
except Exception as e:
return "10"
finally:
ssh.close()
return "00"
    def InitializeProfilers(self,ServerProfilingInfo,MasterConfiguration):
        ssh=[]
        self.backgroundProcessExecTime=[]
for i in range(0,len(ServerProfilingInfo)):
try:
ssh.append(paramiko.SSHClient())
ssh[i].set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh[i].connect(ServerProfilingInfo[i]["IP address"],username=ServerProfilingInfo[i]["Username"],password=ServerProfilingInfo[i]["Password"]) #Connect to the server
ServerProcessName=ServerProfilingInfo[i]["Name"] # Server process names
ServerProcessPath=ServerProfilingInfo[i]["Path"] # Server profiling agent execute file path
ServerProcessName=json.dumps(ServerProcessName)
masterConfiguration=json.dumps(MasterConfiguration) # Client Configuration
command=f"python3 {ServerProcessPath} '{ServerProcessName}' '{masterConfiguration}'" # Command to run profiling agent
                stdinTemp, stdoutTemp, stderrTemp =ssh[i].exec_command(command) # execute the profiling agent on the server shell
                channel=stdoutTemp.channel
                exit_status = channel.recv_exit_status() # blocks until the agent exits, replacing the busy-wait loop
                output = channel.recv(1024).decode().strip() # capture the output (resource usage data)
                output = ast.literal_eval(output) # the agent prints a Python-literal list
for j in output:
self.backgroundProcessExecTime.append(j)
except paramiko.AuthenticationException:
return "08"
except paramiko.SSHException as e:
return "09"
except Exception as e:
return "10"
for i in range(0,len(ServerProfilingInfo)): # close the connection
ssh[i].close()
return self.backgroundProcessExecTime # return the resource usage data
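    # Contract assumed by InitializeProfilers/GetProfilingData: the profiling
    # agent (profile.py) prints a single Python-literal list of per-process
    # rows on stdout, which ast.literal_eval() parses back into lists. A
    # minimal sketch of that round trip (the values are made up):
    #
    #   import ast
    #   agent_output = "[['apache2', 12.5], ['mysqld', 3.2]]"
    #   rows = ast.literal_eval(agent_output)
    #   print(rows[0][0], rows[0][1])  # apache2 12.5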
def startAPAgent(self,ServerProfilingInfo):
        self.ssh=[] # keep the clients on self so the sessions stay open after this method returns
        for i in range(0,len(ServerProfilingInfo)):
            try:
                self.ssh.append(paramiko.SSHClient())
                self.ssh[i].set_missing_host_key_policy(paramiko.AutoAddPolicy())
                self.ssh[i].connect(ServerProfilingInfo[i]["IP address"],username=ServerProfilingInfo[i]["Username"],password=ServerProfilingInfo[i]["Password"])
except paramiko.AuthenticationException:
return "08"
except paramiko.SSHException as e:
return "09"
            except Exception as e:
                return "10"
        return "00"
def GetProfilingData(self,ServerProfilingInfo,MasterConfiguration):
profileDataUNF=[]
        ssh=[]
for i in range(0,len(ServerProfilingInfo)):
try:
ssh.append(paramiko.SSHClient())
ssh[i].set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh[i].connect(ServerProfilingInfo[i]["IP address"],username=ServerProfilingInfo[i]["Username"],password=ServerProfilingInfo[i]["Password"]) #Connect to the server
ServerProcessName=ServerProfilingInfo[i]["Name"] # Server process names
ServerProcessPath=ServerProfilingInfo[i]["Path"] # Server profiling agent execute file path
ServerProcessName=json.dumps(ServerProcessName)
masterConfiguration=json.dumps(MasterConfiguration) # Client Configuration
command=f"python3 {ServerProcessPath} '{ServerProcessName}' '{masterConfiguration}'" # Command to run profiling agent
                stdinTemp, stdoutTemp, stderrTemp =ssh[i].exec_command(command) # execute the profiling agent on the server shell
                channel=stdoutTemp.channel
                exit_status = channel.recv_exit_status() # blocks until the agent exits, replacing the busy-wait loop
                output = channel.recv(1024).decode().strip() # capture the output (resource usage data)
                output = ast.literal_eval(output) # the agent prints a Python-literal list
for j in output:
profileDataUNF.append(j)
except paramiko.AuthenticationException:
return "08"
except paramiko.SSHException as e:
return "09"
except Exception as e:
return "10"
for i in range(0,len(ServerProfilingInfo)):
ssh[i].close()
return profileDataUNF # return the resource usage data
import pkg_resources
import json
import subprocess
import os
import re
import time
import psutil
import csv
import requests
from datetime import datetime
import matplotlib.pyplot as plt
import itertools
from ServerSystem import ServerSystem
class SystemUnderTest:
def __init__(self,apc,SessionDescription,combinations,ServerConfigurationCombinations,ServerInstance,throughputConvergence,process,servicedemandconvergence,serviceDemandProcessFlag,testFolderName,testConfigurationFolderName,loadLevel,metricsFilename,throughput,servicedemand):
#Checking if Taurus is installed or not
try:
pkg_resources.get_distribution('bzt')
except pkg_resources.DistributionNotFound:
self.errors("01")
#Reading AutoPerf configuration file into buffer
if not os.path.exists('AutoPerfConfFile.json'):
self.errors("02")
with open('AutoPerfConfFile.json','rb') as file:
AutoPerfConf=file.read()
#converting the file content into dictionary
json_content=AutoPerfConf.decode('utf-8')
try:
self.apc=json.loads(json_content)
except json.decoder.JSONDecodeError:
self.errors("06")
#Checking if OTSLG is installed or not
try:
otslg=self.apc["OTSLG"]
        except KeyError:
            self.errors("05")
try:
subprocess.run([otslg,"--version"])
except FileNotFoundError:
self.errors("03")
#Reading session description file into buffer
try:
sdf=self.apc["SessionDescriptionFile"]
        except KeyError: # "SessionDescriptionFile" key missing from the configuration
self.errors("07")
if not os.path.exists(sdf):
self.errors("04")
with open(sdf,'rb') as file:
self.SessionDescription=file.read()
        # Reading AutoPerf Development Configuration
if not os.path.exists('AutoPerfDevConfiguration.json'):
self.errors("11")
with open("AutoPerfDevConfiguration.json","rb") as file:
self.metricsConvergence=file.read()
json_content=self.metricsConvergence.decode('utf-8')
self.metricsConvergence=json.loads(json_content)
#creating a folder for each new load testing
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
self.testFolderName = "loadTesting_"+timestamp
os.makedirs(self.testFolderName)
self.testConfigurationFolderName=""
self.loadLevel=0
headers=[]
for i in self.apc["Server Configuration"]:
headers.append(i["Name"])
headers.append("LoadLevel")
headers.append("process Name")
headers.extend(self.metricsConvergence["metrics"])
headers.extend(["serviceDemand","Throughput","ResponseTime","FailureRate"])
metricsHeaders=[]
metricsHeaders.append(headers)
self.metricsFilename=self.testFolderName+"/"+"metrics.csv"
with open(self.metricsFilename,"w",newline="") as file:
writer=csv.writer(file)
writer.writerows(metricsHeaders)
#Initializing number of server configuration combinations
self.combinations=-1
#Modify TaurusSessionDescriptionFile
self.ModifyTaurusSessionDescriptionFile(otslg,sdf)
#ExtractServerDeployment
ServerDeploymentConfiguration=self.ExtractServerDeployment()
#GenerateServerConfigurations
self.ServerConfigurationCombinations=self.GenerateServerConfigurations(ServerDeploymentConfiguration)
#Create server instance
self.ServerInstance=ServerSystem()
        #Run all tests
self.process=""
self.servicedemandconvergence=0
self.serviceDemandProcessFlag={}
self.RunAllTests()
#Modify TaurusSessionDescriptionFile
def ModifyTaurusSessionDescriptionFile(self,otslg,sdf):
with open('TaurusSessionDescriptionFile.yaml','r+') as file:
tsdf=file.read()
        tsdf=re.sub(r'(executor:\s+)[A-Za-z]+(.*)',rf'\g<1>{otslg}\g<2>',tsdf) # load generator name ([A-Za-z] instead of [A-z], which also matches punctuation)
        tsdf=re.sub(r'(script:\s+)[A-Za-z]+\.[A-Za-z]+(.*)',rf'\g<1>{sdf}\g<2>',tsdf) # session description file (dot escaped so it matches literally)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(tsdf)
#ExtractServerDeployment
def ExtractServerDeployment(self):
ServerDeploymentConfiguration=self.apc["Server Configuration"]
return ServerDeploymentConfiguration
#GenerateServerConfigurations
def GenerateServerConfigurations(self,apscfb):
        data=[apscfb[i]["Range"] for i in range(0,len(apscfb))]
combinations=list(itertools.product(*data))
return combinations
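    # itertools.product expands the per-parameter "Range" lists into the full
    # cross product of server configurations. A quick sketch with hypothetical
    # ranges (the sample configuration above has a single one-value range):
    #
    #   import itertools
    #   ranges = [["1", "2"], ["on", "off"]]
    #   print(list(itertools.product(*ranges)))
    #   # [('1', 'on'), ('1', 'off'), ('2', 'on'), ('2', 'off')]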
#GetNextConfiguration
def GetNextConfiguration(self):
self.combinations=self.combinations+1
if(self.combinations==len(self.ServerConfigurationCombinations)):
return 0
return self.ServerConfigurationCombinations[self.combinations]
#Run all tests
def RunAllTests(self):
while True:
nextServerConfiguration=self.GetNextConfiguration() #Get next server configuration
if(not nextServerConfiguration):
break
status=self.ServerInstance.SetServerConfiguration(nextServerConfiguration,self.apc["Server Configuration"],self.metricsConvergence["serverConfigurationChangeFilePath"]) #Set server configuration
if status!="00":
self.errors(status)
            status=self.ServerInstance.executeCommands(self.apc["Command"],self.metricsConvergence["serverConfigurationChangeFilePath"]) #Execute commands
            if status!="00":
                self.errors(status)
while(True):
try:
response=requests.get(self.apc["BaseURL"])
if response.status_code==200:
break
else:
time.sleep(1)
continue
except requests.exceptions.RequestException:
time.sleep(1)
continue
self.testConfigurationFolderName = "_".join(map(str, nextServerConfiguration))
os.makedirs(self.testFolderName+"/"+self.testConfigurationFolderName)
self.throughput={}
self.servicedemand={}
if self.apc["AutomaticLoadLevelSelection"]=="False":
for i in self.apc["LoadLevels"]: #iterate over all the load levels
with open('TaurusSessionDescriptionFile.yaml','r') as file: # Change the concurrency/load level value in taurus session file
fileContent=file.read()
fileContent=re.sub(r'(concurrency:\s+)[0-9]+(.*)',rf'\g<1>{i}\g<2>',fileContent)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(fileContent)
self.loadLevel=i
(loadLevelTooLow,throughput,responseTime,failureRate,profileData,ServiceDemandProcess,bottleneckServiceDemand)=self.RunOneLoadLevelManual() # start load test at one load level
data=[]
for i in profileData: # Store the performance and server metrics
data1=[]
data1.extend(list(nextServerConfiguration))
data1.append(self.loadLevel)
data1.extend(i)
data1.append(ServiceDemandProcess[i[0]])
data1.extend([throughput,responseTime,failureRate])
data.append(data1)
with open(self.metricsFilename,"a",newline="") as file:
writer=csv.writer(file)
writer.writerows(data)
else:
minimumLoadLevel=self.findMinimumLoadLevel() #Finding Minimum Load Level
with open('TaurusSessionDescriptionFile.yaml','r') as file:
fileContent=file.read()
fileContent=re.sub(r'(concurrency:\s+)[0-9]+(.*)',rf'\g<1>{minimumLoadLevel}\g<2>',fileContent)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(fileContent)
self.loadLevel=minimumLoadLevel
(loadLevelTooLow,throughput,responseTime,failureRate,profileData,ServiceDemandProcess,bottleneckServiceDemand)=self.RunOneLoadLevel()
MaximumLoadLevel=self.saturationLoadLevel(minimumLoadLevel,ServiceDemandProcess) # Calculate maximum load level
rangeStart=int(minimumLoadLevel)
rangeEnd=int(MaximumLoadLevel)
while rangeStart<rangeEnd:
(rangeMid,ServiceDemandProcess)=self.RunOptimalInRange(nextServerConfiguration,rangeStart,rangeEnd) #run load between maximum and minimum load level
rangeStart=rangeEnd+1
rangeEnd=self.saturationLoadLevel(rangeMid,ServiceDemandProcess)
#Minimum Load Level
def findMinimumLoadLevel(self):
        loadLevel=1 #Assume the initial minimum load level is 1
        minFound=False # minimum load level not yet found
while not minFound:
minimumLoadLevel=0
with open('TaurusSessionDescriptionFile.yaml','r') as file: # Change the concurrency/load level value in taurus session file
fileContent=file.read()
fileContent=re.sub(r'(concurrency:\s+)[0-9]+(.*)',rf'\g<1>{loadLevel}\g<2>',fileContent)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(fileContent)
self.loadLevel=loadLevel
(loadLevelTooLow,throughput,responseTime,failureRate,profileData,ServiceDemandProcess,bottleneckServiceDemand)=self.RunOneLoadLevel() #Run one load level
if loadLevelTooLow==-1: # if load level is too low
if bottleneckServiceDemand==0.0:
minimumLoadLevel=self.metricsConvergence["minimumCPUUtilization"]*self.apc["NumberOfCores"]*((self.apc["ThinkTime"]+responseTime)/(1e-9))
minimumLoadLevel=int(minimumLoadLevel)
else:
minimumLoadLevel=self.metricsConvergence["minimumCPUUtilization"]*self.apc["NumberOfCores"]*((self.apc["ThinkTime"]+responseTime)/bottleneckServiceDemand)
minimumLoadLevel=int(minimumLoadLevel)
if minimumLoadLevel<=loadLevel:
minimumLoadLevel=int(max(1.1*loadLevel,loadLevel+1))
else:
loadLevel=minimumLoadLevel
            else: #minimum load level found
minFound=True
return int(loadLevel)
#Saturation Load Level
def saturationLoadLevel(self,minimumLoadLevel,ServiceDemandProcess):
        if max(ServiceDemandProcess.values())==0.0: #if every process has zero service demand, treat the bottleneck demand as 1e-9
            MaximumLoadLevel=((self.apc["ThinkTime"]+sum(ServiceDemandProcess.values()))/(1e-9))*self.apc["NumberOfCores"]
            return int(MaximumLoadLevel)
MaximumLoadLevel=((self.apc["ThinkTime"]+sum(ServiceDemandProcess.values()))/max(ServiceDemandProcess.values()))*self.apc["NumberOfCores"]
return int(MaximumLoadLevel) #return maximum load level
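    # Both load-level formulas follow from operational laws for a closed system
    # with think time Z, response time R, per-process service demands D, and
    # bottleneck demand Dmax: utilization is roughly N*Dmax/(Z+R), so the load
    # needed for a target utilization U is N = U*cores*(Z+R)/Dmax, and
    # throughput saturates near N* = cores*(Z+sum(D))/Dmax. A worked sketch
    # with made-up numbers:
    #
    #   Z, R, Dmax, cores, Umin = 2.0, 0.05, 0.01, 20, 0.3
    #   Nmin = Umin*cores*((Z+R)/Dmax)   # 0.3*20*205 = 1230 users
    #   Nsat = ((Z+Dmax)/Dmax)*cores     # 201*20     = 4020 users (one process)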
#Run Optimal In Range
def RunOptimalInRange(self,nextServerConfiguration,rangeStart,rangeEnd):
if rangeStart>=rangeEnd:
with open('TaurusSessionDescriptionFile.yaml','r') as file:
fileContent=file.read()
fileContent=re.sub(r'(concurrency:\s+)[0-9]+(.*)',rf'\g<1>{rangeStart}\g<2>',fileContent)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(fileContent)
self.loadLevel=rangeStart
(loadLevelTooLow1,throughput1,responseTime1,failureRate1,profileData1,ServiceDemandProcess1,bottleneckServiceDemand1)=self.RunOneLoadLevel()
return (rangeStart,ServiceDemandProcess1)
if rangeStart not in self.throughput: #if the load test is not done at this load level
data=[]
with open('TaurusSessionDescriptionFile.yaml','r') as file: #Change the taurus file
fileContent=file.read()
fileContent=re.sub(r'(concurrency:\s+)[0-9]+(.*)',rf'\g<1>{rangeStart}\g<2>',fileContent)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(fileContent)
self.loadLevel=rangeStart
(loadLevelTooLow1,throughput1,responseTime1,failureRate1,profileData1,ServiceDemandProcess1,bottleneckServiceDemand1)=self.RunOneLoadLevel() # Run one load level test
data=[]
for i in profileData1:
data1=[]
data1.extend(list(nextServerConfiguration))
data1.append(self.loadLevel)
data1.extend(i)
data1.append(ServiceDemandProcess1[i[0]])
data1.extend([throughput1,responseTime1,failureRate1])
data.append(data1)
with open(self.metricsFilename,"a",newline="") as file:
writer=csv.writer(file)
writer.writerows(data)
self.throughput[rangeStart]=throughput1
else:
throughput1=self.throughput[rangeStart]
if rangeEnd not in self.throughput: #if the load test is not done at this load level
with open('TaurusSessionDescriptionFile.yaml','r') as file: #Change the taurus file
fileContent=file.read()
fileContent=re.sub(r'(concurrency:\s+)[0-9]+(.*)',rf'\g<1>{rangeEnd}\g<2>',fileContent)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(fileContent)
self.loadLevel=rangeEnd
(loadLevelTooLow2,throughput2,responseTime2,failureRate2,profileData2,ServiceDemandProcess2,bottleneckServiceDemand2)=self.RunOneLoadLevel() # Run one load level test
data=[]
for i in profileData2:
data1=[]
data1.extend(list(nextServerConfiguration))
data1.append(self.loadLevel)
data1.extend(i)
data1.append(ServiceDemandProcess2[i[0]])
data1.extend([throughput2,responseTime2,failureRate2])
data.append(data1)
with open(self.metricsFilename,"a",newline="") as file:
writer=csv.writer(file)
writer.writerows(data)
self.throughput[rangeEnd]=throughput2
else:
throughput2=self.throughput[rangeEnd]
rangeMid=int((rangeStart+rangeEnd)/2) #calculate the mid load level value
with open('TaurusSessionDescriptionFile.yaml','r') as file: #Change the taurus file
fileContent=file.read()
fileContent=re.sub(r'(concurrency:\s+)[0-9]+(.*)',rf'\g<1>{rangeMid}\g<2>',fileContent)
with open('TaurusSessionDescriptionFile.yaml','w') as file:
file.write(fileContent)
self.loadLevel=rangeMid
(loadLevelTooLow3,throughput3,responseTime3,failureRate3,profileData3,ServiceDemandProcess3,bottleneckServiceDemand3)=self.RunOneLoadLevel() # Run one load level test
data=[]
for i in profileData3:
data1=[]
data1.extend(list(nextServerConfiguration))
data1.append(self.loadLevel)
data1.extend(i)
data1.append(ServiceDemandProcess3[i[0]])
data1.extend([throughput3,responseTime3,failureRate3])
data.append(data1)
        with open(self.metricsFilename,"a",newline="") as file:
            writer=csv.writer(file)
            writer.writerows(data)
        self.throughput[rangeMid]=throughput3
        if abs(throughput3-(throughput1+throughput2)/2)<self.metricsConvergence["errorThreshold"]: # stop when the midpoint throughput matches the average of the endpoints
            return (int((rangeStart+rangeEnd)/2),ServiceDemandProcess3)
        else: # otherwise repeat the load test in both halves
            self.RunOptimalInRange(nextServerConfiguration,rangeStart,int((rangeStart+rangeEnd)/2)) # lower half: start to mid
            (mid,ServiceDemandProcess)=self.RunOptimalInRange(nextServerConfiguration,int((rangeStart+rangeEnd)/2),rangeEnd) # upper half: mid to end
            return (mid,ServiceDemandProcess)
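    # RunOptimalInRange is a recursive bisection over load levels: measure
    # throughput at both endpoints and the midpoint, stop when the midpoint
    # matches the endpoint average (the curve is locally linear there), and
    # otherwise recurse into both halves. The same shape on a pure function,
    # as a hedged sketch:
    #
    #   def bisect_levels(f, lo, hi, eps):
    #       if hi - lo <= 1:
    #           return lo
    #       mid = (lo + hi) // 2
    #       if abs(f(mid) - (f(lo) + f(hi)) / 2) < eps:
    #           return mid
    #       bisect_levels(f, lo, mid, eps)         # refine the lower half (results recorded via f)
    #       return bisect_levels(f, mid, hi, eps)  # report the level found in the upper half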
#Run one load level
def RunOneLoadLevel(self):
        self.backgroundProcessExecTime=self.ServerInstance.InitializeProfilers(self.apc["Server Profiling"],self.apc["MasterConfiguration"]) # capture the server background-process resource usage baseline
self.StartTaurus() # Start Taurus
self.throughputRequest=self.CheckThroughputConvergence() # Throughput Convergence
#self.ServerInstance.startAPAgent(self.apc["Server Profiling"])
self.ServiceDemandProcess={}
self.servicedemandconvergence=0
serviceDemandAllValues={}
listTimestamps={}
for x in range(0,len(self.apc["Server Profiling"])): # Start profiling
i=self.apc["Server Profiling"][x]
for j in i["Name"]:
self.ServiceDemandProcess[j]=0
self.serviceDemandProcessFlag[j]=self.metricsConvergence["serviceDemandConvergenceCount"]
serviceDemandAllValues[j]=[]
serviceDemandAllValues[j].append(self.ServiceDemandProcess[j])
listTimestamps[j]=[]
listTimestamps[j].append(0)
while not self.servicedemandconvergence:
self.profileData=self.ServerInstance.GetProfilingData(self.apc["Server Profiling"],self.apc["MasterConfiguration"]) # Get the profile data
serviceDemandProcess={}
numberOfRequests=self.NumberOfRequestsCompleted(self.throughputRequest["NumberOfRequests"]) # Get the number of requests completed
self.throughputRequest["NumberOfRequests"]=numberOfRequests["NumberOfRequests"]
for j in self.profileData: #iterates over the profile data from each profiling agent
processName=j[0]
for i in self.backgroundProcessExecTime: #iterates over the process
if processName in i:
serviceDemand=(j[1]-i[1])/numberOfRequests["NumberOfRequests"]
serviceDemandProcess[processName]=serviceDemand
break
lowLoadLevel=-1
            for i in serviceDemandProcess: # if every process has service demand 0, the load level is too low
if serviceDemandProcess[i]!=0.0:
lowLoadLevel=0
break
if lowLoadLevel==-1: # return -1, load level too low
self.StopTaurus() # Stop Taurus
return (-1,self.throughputRequest["Throughput"],self.throughputRequest["ResponseTime"],self.throughputRequest["failureRate"],self.profileData,serviceDemandProcess,max(serviceDemandProcess.values()))
else:
self.servicedemandconvergence=self.serviceDemandConvergence(serviceDemandProcess) # Service demand convergence
for x in range(0,len(self.apc["Server Profiling"])):
i=self.apc["Server Profiling"][x]
for j in i["Name"]:
self.ServiceDemandProcess[j]=serviceDemandProcess[j]
serviceDemandAllValues[j].append(self.ServiceDemandProcess[j]) #Capture the service demand of all process
listTimestamps[j].append((numberOfRequests["timestamp"]-self.throughputRequest["LoadTestStartTime"]).total_seconds())
self.StopTaurus()
temp=0
for j in self.profileData:
processName=j[0]
for i in self.backgroundProcessExecTime:
if processName in i:
for k in range(1,len(i)):
self.profileData[temp][k]=(j[k]-i[k])/numberOfRequests["NumberOfRequests"]
break
temp=temp+1
for i in serviceDemandAllValues:
serviceDemandFileName=self.testFolderName+"/"+self.testConfigurationFolderName+"/"+"serviceDemand_"+str(i)+"_"+str(self.loadLevel)
plt.figure()
serviceDemandAllValues[i].pop(0)
listTimestamps[i].pop(0)
plt.plot(listTimestamps[i],serviceDemandAllValues[i],marker='o',linestyle='-', color='b', label='Line Plot')
plt.xlabel('Time(s)')
plt.ylabel("Service Demand "+str(i))
plt.title('Service Demand vs Time')
plt.ylim(0,max(serviceDemandAllValues[i])+0.001)
plt.legend()
plt.savefig(serviceDemandFileName)
return (1,self.throughputRequest["Throughput"],self.throughputRequest["ResponseTime"],self.throughputRequest["failureRate"],self.profileData,serviceDemandProcess,max(serviceDemandProcess.values()))
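    # The service demand computed above is the utilization law applied to
    # per-process CPU time: D = (cpu_time_now - cpu_time_baseline) / requests,
    # where the baseline is the background usage captured by
    # InitializeProfilers before the load test. A tiny sketch with made-up
    # numbers:
    #
    #   cpu_now, cpu_baseline, requests = 42.0, 12.0, 6000
    #   D = (cpu_now - cpu_baseline) / requests  # 0.005 s of CPU per request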
def RunOneLoadLevelManual(self):
        self.backgroundProcessExecTime=self.ServerInstance.InitializeProfilers(self.apc["Server Profiling"],self.apc["MasterConfiguration"])
print(self.backgroundProcessExecTime)
self.StartTaurus()
self.throughputRequest=self.CheckThroughputConvergence()
self.ServerInstance.startAPAgent(self.apc["Server Profiling"])
self.ServiceDemandProcess={}
self.servicedemandconvergence=0
serviceDemandAllValues={}
listTimestamps={}
for x in range(0,len(self.apc["Server Profiling"])):
i=self.apc["Server Profiling"][x]
for j in i["Name"]:
self.ServiceDemandProcess[j]=0
self.serviceDemandProcessFlag[j]=self.metricsConvergence["serviceDemandConvergenceCount"]
serviceDemandAllValues[j]=[]
serviceDemandAllValues[j].append(self.ServiceDemandProcess[j])
listTimestamps[j]=[]
listTimestamps[j].append(0)
while not self.servicedemandconvergence:
self.profileData=self.ServerInstance.GetProfilingData(self.apc["Server Profiling"],self.apc["MasterConfiguration"])
print(self.profileData)
serviceDemandProcess={}
numberOfRequests=self.NumberOfRequestsCompleted(self.throughputRequest["NumberOfRequests"])
self.throughputRequest["NumberOfRequests"]=numberOfRequests["NumberOfRequests"]
for j in self.profileData:
processName=j[0]
for i in self.backgroundProcessExecTime:
if processName in i:
serviceDemand=(j[1]-i[1])/numberOfRequests["NumberOfRequests"]
print(serviceDemand)
serviceDemandProcess[processName]=serviceDemand
break
self.servicedemandconvergence=self.serviceDemandConvergence(serviceDemandProcess)
for x in range(0,len(self.apc["Server Profiling"])):
i=self.apc["Server Profiling"][x]
for j in i["Name"]:
self.ServiceDemandProcess[j]=serviceDemandProcess[j]
serviceDemandAllValues[j].append(self.ServiceDemandProcess[j])
listTimestamps[j].append((numberOfRequests["timestamp"]-self.throughputRequest["LoadTestStartTime"]).total_seconds())
self.StopTaurus()
temp=0
for j in self.profileData:
processName=j[0]
for i in self.backgroundProcessExecTime:
if processName in i:
for k in range(1,len(i)):
self.profileData[temp][k]=(j[k]-i[k])/numberOfRequests["NumberOfRequests"]
break
temp=temp+1
for i in serviceDemandAllValues:
serviceDemandFileName=self.testFolderName+"/"+self.testConfigurationFolderName+"/"+"serviceDemand_"+str(i)+"_"+str(self.loadLevel)
plt.figure()
serviceDemandAllValues[i].pop(0)
listTimestamps[i].pop(0)
plt.plot(listTimestamps[i],serviceDemandAllValues[i],marker='o',linestyle='-', color='b', label='Line Plot')
plt.xlabel('Time(s)')
plt.ylabel("Service Demand "+str(i))
plt.title('Service Demand(sec) vs Time(sec)')
plt.ylim(0,max(serviceDemandAllValues[i])+1)
plt.legend()
plt.savefig(serviceDemandFileName)
return (1,self.throughputRequest["Throughput"],self.throughputRequest["ResponseTime"],self.throughputRequest["failureRate"],self.profileData,serviceDemandProcess,max(serviceDemandProcess.values()))
#Start taurus
def StartTaurus(self):
command="bzt TaurusSessionDescriptionFile.yaml"
self.process=subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,text=True)
#Check Throughput Convergence
def CheckThroughputConvergence(self):
numberOfRequests=0
timestampPattern=r"(\d{2}:\d{2}:\d{2})"
successPattern=r"(\d+) succ"
failurePattern=r"(\d+) fail"
ResponseTimePattern=r"(\d+\.\d+) avg rt"
loadTestStartTimePattern=r"(\d{2}:\d{2}:\d{2}) INFO: Current"
ThroughputConvergenceCount=self.metricsConvergence["ThroughputConvergenceCount"]
ThroughputConvergenceDifference=self.metricsConvergence["ThroughputConvergenceDifference"]
loadTestStartTime=timestamp=datetime.now()
throughput=throughput1=1e-9
count=0
listTimestamps=[]
listThroughputs=[]
listResponseTime=[]
failurerate=0
failureRequests=0
loadTestStartTimeMatch=""
for line in self.process.stdout: #iterates over each line of text produced by the standard output of the subprocess
if not loadTestStartTimeMatch: # Capture the timestamp when the load test has started
loadTestStartTimeMatch=re.search(loadTestStartTimePattern,line.strip())
if loadTestStartTimeMatch:
loadTestStartTime=datetime.strptime(loadTestStartTimeMatch.group(1),'%H:%M:%S')
timestampMatch=re.search(timestampPattern,line.strip())
successMatch=re.search(successPattern,line.strip())
failureMatch=re.search(failurePattern,line.strip())
if timestampMatch and successMatch: # Capture new data (timestamp and number of request completed)
timestamp=datetime.strptime(timestampMatch.group(1),'%H:%M:%S')
numberOfRequests=numberOfRequests+int(successMatch.group(1))
failureRequests=failureRequests+int(failureMatch.group(1))
throughput=throughput1 # store previous throughput
                elapsed=(timestamp-loadTestStartTime).total_seconds()
                if elapsed>0: # skip the very first sample to avoid division by zero
                    throughput1=numberOfRequests/elapsed #Calculate Throughput
                    failurerate=failureRequests/elapsed #Calculate failure rate
                listThroughputs.append(throughput1)
                listTimestamps.append(elapsed)
                ResponseMatch=re.search(ResponseTimePattern,line.strip())
                if ResponseMatch: # the "avg rt" field may be absent from a line
                    ResponseTime=float(ResponseMatch.group(1))
                    listResponseTime.append(ResponseTime)
                if (abs(throughput1-throughput))<ThroughputConvergenceDifference: # throughput has converged when the absolute difference between
                    count=count+1 # consecutive samples stays below the threshold for a set number of iterations in a row
if count==ThroughputConvergenceCount:
print("Throughput converged")
throughputFileName=self.testFolderName+"/"+self.testConfigurationFolderName+"/"+"Throughput_"+str(self.loadLevel)
plt.figure()
plt.plot(listTimestamps,listThroughputs,marker='o',linestyle='-', color='b', label='Line Plot')
plt.xlabel('Time')
plt.ylabel('Throughput(req/s)')
plt.title('Throughput(req/sec) vs Time(sec)')
plt.legend()
plt.savefig(throughputFileName)
output={"Throughput":throughput1,"NumberOfRequests":numberOfRequests,"ResponseTime":ResponseTime,"LoadTestStartTime":loadTestStartTime,"failureRate":failurerate}
responseTimeFileName=self.testFolderName+"/"+self.testConfigurationFolderName+"/"+"ResponseTime_"+str(self.loadLevel)
plt.figure()
plt.plot(listTimestamps,listResponseTime,marker='o',linestyle='-', color='b', label='Line Plot')
plt.xlabel('Time')
plt.ylabel('ResponseTime(sec)')
plt.title('ResponseTime(sec) vs Time(sec)')
plt.legend()
plt.savefig(responseTimeFileName)
return output
                else:
                    count=0 # reset the streak whenever the difference rises above the threshold
#Service Demand Convergence
def serviceDemandConvergence(self,serviceDemandProcess):
for i in serviceDemandProcess:
            if abs(serviceDemandProcess[i]-self.ServiceDemandProcess[i])<=self.metricsConvergence["serviceDemandConvergenceDifference"]: # the absolute change must stay below the threshold
self.serviceDemandProcessFlag[i]=self.serviceDemandProcessFlag[i]-1
else:
self.serviceDemandProcessFlag[i]=self.metricsConvergence["serviceDemandConvergenceCount"]
        if not sum(self.serviceDemandProcessFlag.values()): #if every process's service demand has converged
return 1
else:
return 0
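    # Convergence here is a consecutive-streak test: each process's service
    # demand must stay within the threshold for serviceDemandConvergenceCount
    # samples in a row, and any excursion resets its counter. The same idea in
    # isolation, as a sketch:
    #
    #   def converged(samples, threshold, needed):
    #       streak = 0
    #       for prev, cur in zip(samples, samples[1:]):
    #           streak = streak + 1 if abs(cur - prev) <= threshold else 0
    #           if streak >= needed:
    #               return True
    #       return False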
#NumberOfRequestsCompleted
def NumberOfRequestsCompleted(self,NumberOfRequests):
output={"NumberOfRequests":NumberOfRequests,"timestamp":datetime.now()}
timestampPattern=r"(\d{2}:\d{2}:\d{2})"
successPattern=r"(\d+) succ"
for line in self.process.stdout: #iterates over each line of text produced by the standard output of the subprocess
timestampMatch=re.search(timestampPattern,line.strip())
successMatch=re.search(successPattern,line.strip())
if timestampMatch and successMatch:
NumberOfRequests=NumberOfRequests+int(successMatch.group(1))
timestamp=datetime.strptime(timestampMatch.group(1),'%H:%M:%S')
output={"NumberOfRequests":NumberOfRequests,"timestamp":timestamp} # return timestamp and no of requests completed
return output
#Stop Taurus
def StopTaurus(self):
for proc in psutil.process_iter(['name']):
if proc.info['name']=="bzt":
proc.kill()
#errors
def errors(self,error):
switcher ={"00": "No error",
"01": "Taurus not present",
"02": "AutoPerf configuration file doesn't exists",
"03": "Off the shell load generator not present, need to be installed and check whether it is present by using command 'load-generator-name --version'",
"04": "Load generator session description file not present",
"05": "Load generator name not mentioned in Autoperf configuration file",
"06": "AutoPerf configuration file has error",
"07": "Session description file name not given in configuration file",
"08": "AuthenticationException",
"09": "SSHException",
"10": "Some error during remote connection",
"11": "AutoPerf Development Configuration doesn't exist",
"12": "can't open configuration file"
}
print(switcher.get(error,'nothing'))
exit()
if __name__=="__main__":
    obj=SystemUnderTest(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0) # placeholder arguments; __init__ rebuilds all state from the configuration files
execution:
- executor: jmeter
scenario: example
concurrency: 49
hold-for: 120m
scenarios:
example:
default-address: http://blazedemo.com
script: sample.jmx
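# Note: AutoPerf rewrites the concurrency value above via re.sub on every run
# (and the executor/script fields at startup), so manual edits to those fields
# are overwritten.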