Santhosh Kumar / dfaast / Commits / defaf09f

Commit defaf09f authored May 04, 2024 by Santhosh Kumar
cleanup code
Parent: 4afabae0
Showing 4 changed files with 71 additions and 12 deletions (+71, -12)
src/example2/Dockerfile    +3   -0
src/example2/function.py   +41  -4
src/faas.py                +12  -8
src/temp.py                +15  -0
src/example2/Dockerfile

 FROM python:3.9-slim
+RUN apt-get update && apt-get install -y procps
+RUN apt install -y stress-ng
 RUN mkdir /app
 # Set the working directory in the container
 WORKDIR /app
 ...
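The two new RUN lines back endpoints added to src/example2/function.py in this same commit: procps supplies the `top` binary that get_memory_usage() shells out to, and stress-ng is the tool the /memory route launches and kills. A minimal start-up check along those lines (the helper name and the fail-fast behaviour are assumptions, not part of the commit) could look like:

    import shutil

    def check_required_binaries(binaries=("top", "stress-ng")):
        # Fail fast if a tool the HTTP endpoints shell out to is missing from the image.
        missing = [name for name in binaries if shutil.which(name) is None]
        if missing:
            raise RuntimeError(f"missing required binaries: {', '.join(missing)}")

    check_required_binaries()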
src/example2/function.py

@@ -35,15 +35,21 @@ cpu_process = None
 def occupy_cpu(percentage):
     while True:
+        # Get the current CPU usage
+        cpu_percent = psutil.cpu_percent()
+        # Adjust the workload based on the difference between actual and desired CPU usage
+        workload = min(1.0, percentage / cpu_percent)
         # Start time
         start_time = time.time()
         # Perform CPU-bound task
-        while (time.time() - start_time) < (percentage / 100):
+        while (time.time() - start_time) < (workload):
             pass
         # Sleep to balance CPU usage
-        time.sleep(1 - (percentage / 100))
+        time.sleep(1 - workload)

 @app.route('/occupy_cpu/<int:percentage>', methods=['GET'])
 def start_cpu_occupier(percentage):
 ...
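Read as a hedged worked example (the numbers are illustrative, not from the commit): a request to /occupy_cpu/50 while psutil reports 80% usage gives workload = min(1.0, 50 / 80) = 0.625, so each one-second cycle busy-spins for about 0.625 s and sleeps for the remaining 0.375 s; if reported usage drops below the target, the ratio is clamped at 1.0 and the whole second is spent spinning.

    # Illustrative values only; not part of the commit.
    percentage = 50                                   # target requested via /occupy_cpu/50
    cpu_percent = 80.0                                # what psutil.cpu_percent() happened to return
    workload = min(1.0, percentage / cpu_percent)     # 0.625
    busy_s, sleep_s = workload, 1 - workload          # 0.625 s spinning, 0.375 s sleeping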
@@ -105,6 +111,37 @@ def use_allocated_memory():
     return jsonify({'message': 'Data written to allocated memory successfully'}), 200

+stress_process = None
+
+@app.route('/mem')
+def index():
+    return 'Memory Usage: ' + get_memory_usage()
+
+def get_memory_usage():
+    # Call system command to get memory usage using top
+    result = subprocess.run(['top', '-b', '-n', '1'], capture_output=True, text=True)
+    return result.stdout
+
+@app.route('/memory')
+def manage_memory():
+    global stress_process
+    size = request.args.get('size')
+    if size:
+        if stress_process:
+            # Kill existing stress-ng process
+            subprocess.run(['pkill', 'stress-ng'])
+            stress_process = None
+        # Start stress-ng process with specified size
+        stress_process = subprocess.Popen(['stress-ng', '--vm', '1', '--vm-bytes', size])
+        return f'Started new stress-ng process with {size} memory size.'
+    else:
+        if stress_process:
+            # Kill existing stress-ng process
+            subprocess.run(['pkill', 'stress-ng'])
+            stress_process = None
+        return 'Not running stress-ng.'
+
 @app.route("/")
 def hello():
     time_invoked = time.time()
 ...
@@ -112,8 +149,8 @@ def hello():
     # Increment request counter
     function_requests_total.inc()
-    for i in range(1000000):
-        for j in range(10000):
+    for i in range(100000):
+        for j in range(1000):
             pass
 ...
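A short client sketch for exercising the routes touched above; the requests dependency, host, and port are assumptions, not part of the commit:

    import requests

    BASE = "http://localhost:5000"   # assumed Flask host/port

    # Pin the CPU occupier at roughly 50%
    print(requests.get(f"{BASE}/occupy_cpu/50").text)

    # Start a stress-ng worker holding 256 MB, then read the `top` snapshot
    print(requests.get(f"{BASE}/memory", params={"size": "256M"}).text)
    print(requests.get(f"{BASE}/mem").text[:200])

    # Calling /memory without a size kills any running stress-ng worker
    print(requests.get(f"{BASE}/memory").text)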
src/faas.py

@@ -125,8 +125,7 @@ def scale_deployment():
         scale_api.read_namespaced_horizontal_pod_autoscaler(name=f"{function_name}-hpa", namespace=namespace)
         hpa_exists = True
     except ApiException as e:
-        if e.status != 404:
-            raise
+        pass
     # Define HorizontalPodAutoscaler object
     hpa = {
 ...
@@ -166,9 +165,13 @@ def scale_deployment():
         }
     }
-    # Create or update HPA based on whether it already exists
+    # Create or update HorizontalPodAutoscaler
     if hpa_exists:
-        scale_api.create_namespaced_horizontal_pod_autoscaler(namespace=namespace, body=hpa)
+        print("Updating HorizontalPodAutoscaler...")
+        scale_api.replace_namespaced_horizontal_pod_autoscaler(name=f"{function_name}-hpa", namespace=namespace, body=hpa)
     else:
+        print("Creating HorizontalPodAutoscaler...")
         scale_api.create_namespaced_horizontal_pod_autoscaler(namespace=namespace, body=hpa)
     print(f"HorizontalPodAutoscaler '{function_name}-hpa' created/updated successfully.")
 ...
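The branch above is the usual create-or-replace pattern for the kubernetes Python client; a self-contained, hedged sketch of that pattern (the function name, config loading, and the 404 check are illustrative, not copied from faas.py):

    from kubernetes import client, config
    from kubernetes.client.rest import ApiException

    def apply_hpa(name, namespace, body):
        # Create the HorizontalPodAutoscaler if absent, otherwise replace it in place.
        config.load_kube_config()
        autoscaling = client.AutoscalingV1Api()
        try:
            autoscaling.read_namespaced_horizontal_pod_autoscaler(name=name, namespace=namespace)
            exists = True
        except ApiException as e:
            if e.status != 404:
                raise            # anything other than "not found" is a real error
            exists = False
        if exists:
            autoscaling.replace_namespaced_horizontal_pod_autoscaler(name=name, namespace=namespace, body=body)
        else:
            autoscaling.create_namespaced_horizontal_pod_autoscaler(namespace=namespace, body=body)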
@@ -203,8 +206,8 @@ def expose_service():
         printf("Invalid choice")
         return
-    src_port = int(input("Enter your source port: "))
-    dst_port = int(input("Enter your target port: "))
+    src_port = int(input("Enter your cluster port: "))
+    dst_port = int(input("Enter your port program is listening: "))
     namespace = "default"
     try:
         # Check if service already exists
 ...
@@ -275,7 +278,6 @@ def get_deployment_info():
             pod_name = pod.metadata.name
             # Get resource metrics for the pod
-            print(pod_name)
             try:
                 metrics = metrics_api.get_namespaced_custom_object(
                     group="metrics.k8s.io",
 ...
@@ -294,6 +296,8 @@ def get_deployment_info():
                 memory_usage_total += int(memory_usage[:-2])  # Convert string like "1234Ki" to int (removing "Ki" suffix)
             except ApiException as e:
                 pass
+            except ValueError as value_error:
+                pass
         # Calculate average usage across all pods
         num_pods = len(pods)
 ...
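The new ValueError handler guards the "1234Ki" string-to-int conversion above; a hedged helper in the same spirit (the name and the Mi/Gi handling are assumptions, not in the commit) that returns KiB or None instead of silently passing:

    def parse_memory_kib(quantity):
        # Convert a Kubernetes memory quantity such as '1234Ki', '256Mi' or '1Gi' to KiB.
        factors = {"Ki": 1, "Mi": 1024, "Gi": 1024 * 1024}
        for suffix, factor in factors.items():
            if quantity.endswith(suffix):
                try:
                    return int(quantity[:-len(suffix)]) * factor
                except ValueError:
                    return None
        return None   # unrecognised or bare-byte quantity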
src/temp.py (new file, mode 100644)

+import psutil
+import time
+
+def hello():
+    for i in range(100000):
+        for j in range(1000):
+            pass
+    return "Hello from Python function"
+
+hello()
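src/temp.py imports psutil and time but does not use them yet; a plausible next step (purely an assumption, not in this commit) would be timing the busy loop:

    import time
    import psutil

    psutil.cpu_percent(interval=None)   # prime the counter so the next reading covers hello()
    start = time.time()
    hello()                             # the nested busy loop defined above
    elapsed = time.time() - start
    print(f"hello() took {elapsed:.3f}s at roughly {psutil.cpu_percent(interval=None)}% CPU")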