Commit defaf09f authored by Santhosh Kumar

cleanup code

parent 4afabae0
 FROM python:3.9-slim
+RUN apt-get update && apt-get install -y procps
+RUN apt install -y stress-ng
 RUN mkdir /app
 # Set the working directory in the container
 WORKDIR /app
...
@@ -35,15 +35,21 @@ cpu_process = None
 def occupy_cpu(percentage):
     while True:
+        # Get the current CPU usage
+        cpu_percent = psutil.cpu_percent()
+        # Adjust the workload based on the difference between actual and desired CPU usage
+        workload = min(1.0, percentage / cpu_percent)
         # Start time
         start_time = time.time()
         # Perform CPU-bound task
-        while (time.time() - start_time) < (percentage / 100):
+        while (time.time() - start_time) < (workload):
             pass
         # Sleep to balance CPU usage
-        time.sleep(1 - (percentage / 100))
+        time.sleep(1 - workload)

 @app.route('/occupy_cpu/<int:percentage>', methods=['GET'])
 def start_cpu_occupier(percentage):
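For context, the duty-cycle scheme the new occupy_cpu code uses can be tried on its own. A minimal standalone sketch (the burn_cpu name, the 50% target, and the 5-second duration are illustrative, not part of the commit; psutil must be installed):

import time
import psutil

def burn_cpu(percentage, duration=5):
    # Busy-wait for `workload` seconds out of each ~1-second window,
    # scaling the duty cycle by the CPU usage psutil reports.
    end = time.time() + duration
    while time.time() < end:
        cpu_percent = psutil.cpu_percent() or 1.0  # guard against a 0.0 first sample
        workload = min(1.0, percentage / cpu_percent)
        start_time = time.time()
        while (time.time() - start_time) < workload:
            pass
        time.sleep(max(0.0, 1 - workload))

burn_cpu(50)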
@@ -105,6 +111,37 @@ def use_allocated_memory():
     return jsonify({'message': 'Data written to allocated memory successfully'}), 200

+stress_process = None
+
+@app.route('/mem')
+def index():
+    return 'Memory Usage: ' + get_memory_usage()
+
+def get_memory_usage():
+    # Call system command to get memory usage using top
+    result = subprocess.run(['top', '-b', '-n', '1'], capture_output=True, text=True)
+    return result.stdout
+
+@app.route('/memory')
+def manage_memory():
+    global stress_process
+    size = request.args.get('size')
+    if size:
+        if stress_process:
+            # Kill existing stress-ng process
+            subprocess.run(['pkill', 'stress-ng'])
+            stress_process = None
+        # Start stress-ng process with specified size
+        stress_process = subprocess.Popen(['stress-ng', '--vm', '1', '--vm-bytes', size])
+        return f'Started new stress-ng process with {size} memory size.'
+    else:
+        if stress_process:
+            # Kill existing stress-ng process
+            subprocess.run(['pkill', 'stress-ng'])
+            stress_process = None
+        return 'Not running stress-ng.'
+
 @app.route("/")
 def hello():
     time_invoked=time.time()
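As a usage note, the new /memory endpoint can be exercised from Python once the app is running; a quick sketch (the localhost:5000 address and the 256M size are assumptions, not part of the commit):

import urllib.request

# Ask the service to launch a stress-ng worker holding roughly 256 MB
print(urllib.request.urlopen('http://localhost:5000/memory?size=256M').read().decode())

# Calling the endpoint again without ?size= stops any running stress-ng worker
print(urllib.request.urlopen('http://localhost:5000/memory').read().decode())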
@@ -112,8 +149,8 @@ def hello():
     # Increment request counter
     function_requests_total.inc()
-    for i in range(1000000):
-        for j in range(10000):
+    for i in range(100000):
+        for j in range(1000):
             pass
...
@@ -125,8 +125,7 @@ def scale_deployment():
         scale_api.read_namespaced_horizontal_pod_autoscaler(name=f"{function_name}-hpa", namespace=namespace)
         hpa_exists = True
     except ApiException as e:
-        if e.status != 404:
-            raise
+        pass

     # Define HorizontalPodAutoscaler object
     hpa = {
@@ -166,9 +165,13 @@ def scale_deployment():
         }
     }

-    # Create or update HorizontalPodAutoscaler
-    scale_api.create_namespaced_horizontal_pod_autoscaler(namespace=namespace, body=hpa)
+    # Create or update HPA based on whether it already exists
+    if hpa_exists:
+        print("Updating HorizontalPodAutoscaler...")
+        scale_api.replace_namespaced_horizontal_pod_autoscaler(name=f"{function_name}-hpa", namespace=namespace, body=hpa)
+    else:
+        print("Creating HorizontalPodAutoscaler...")
+        scale_api.create_namespaced_horizontal_pod_autoscaler(namespace=namespace, body=hpa)

     print(f"HorizontalPodAutoscaler '{function_name}-hpa' created/updated successfully.")
@@ -203,8 +206,8 @@ def expose_service():
         printf("Invalid choice")
         return

-    src_port = int(input("Enter your source port: "))
-    dst_port = int(input("Enter your target port: "))
+    src_port = int(input("Enter your cluster port: "))
+    dst_port = int(input("Enter your port program is listening: "))
     namespace="default"

     try:
         # Check if service already exists
@@ -275,7 +278,6 @@ def get_deployment_info():
         pod_name = pod.metadata.name

         # Get resource metrics for the pod
-        print(pod_name)
         try:
             metrics = metrics_api.get_namespaced_custom_object(
                 group="metrics.k8s.io",
@@ -294,6 +296,8 @@ def get_deployment_info():
                 memory_usage_total += int(memory_usage[:-2]) # Convert string like "1234Ki" to int (removing "Ki" suffix)
         except ApiException as e:
             pass
+        except ValueError as value_error:
+            pass

     # Calculate average usage across all pods
     num_pods = len(pods)
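The new ValueError guard matters because int(memory_usage[:-2]) assumes every quantity ends in a two-character suffix such as "Ki"; metrics can also come back with other suffixes or as plain byte counts. A more defensive parse, purely as an illustration (the to_kib helper is not part of the commit):

import re

def to_kib(quantity: str) -> int:
    # Convert Kubernetes memory quantities like "1234Ki", "12Mi", or "123456" (bytes) to KiB.
    units = {'': 1 / 1024, 'Ki': 1, 'Mi': 1024, 'Gi': 1024 * 1024}
    match = re.fullmatch(r'(\d+)([A-Za-z]*)', quantity)
    if not match or match.group(2) not in units:
        raise ValueError(f'unsupported quantity: {quantity}')
    return int(int(match.group(1)) * units[match.group(2)])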
...
import psutil
import time

def hello():
    for i in range(100000):
        for j in range(1000):
            pass
    return "Hello from Python function"

hello()