Commit 66f1e195 authored May 03, 2024 by Santhosh Kumar
my updates
parent 89f6ea52
Showing 5 changed files with 194 additions and 8 deletions (+194 −8)
test/.full.py.swp                 +0    −0
test/example2/.function.py.swp    +0    −0
test/example2/Dockerfile          +2    −0
test/example2/function.py         +182  −1
test/full.py                      +10   −7
test/.full.py.swp (view file @ 66f1e195)
No preview for this file type
test/example2/.function.py.swp (new file, 0 → 100644; view file @ 66f1e195)
File added
test/example2/Dockerfile (view file @ 66f1e195; +2 −0)

...
@@ -10,6 +10,8 @@ COPY function.py .
 # Install any dependencies
 RUN pip install -r requirements.txt
+RUN pip install prometheus_client
+RUN pip install psutil
 EXPOSE 5000
 # Define the command to run the function
...
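The two added RUN lines install prometheus_client and psutil, which the rewritten function.py below uses for metric export and CPU/network sampling. As a rough sanity check that both packages work inside the built image, a throwaway script along these lines could be run; the script, its metric name, and port 9100 are illustrative assumptions, not part of the commit:

# sanity_check.py -- illustrative only; not part of this commit.
# Verifies that the two newly installed dependencies import and respond.
from prometheus_client import Gauge, start_http_server
import psutil
import time

if __name__ == "__main__":
    cpu_gauge = Gauge("sanity_cpu_percent", "CPU utilization sampled by psutil")
    start_http_server(9100)                       # arbitrary port, only for this check
    cpu_gauge.set(psutil.cpu_percent(interval=1)) # one CPU sample
    print("bytes sent so far:", psutil.net_io_counters().bytes_sent)
    time.sleep(5)                                 # keep the /metrics endpoint up briefly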
test/example2/function.py (view file @ 66f1e195; +182 −1)

The previous single-line version of the file, "from flask import Flask", is replaced by:

from flask import Flask, request, jsonify
import ctypes
from prometheus_client import Counter, Gauge, start_http_server
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST, CollectorRegistry
import psutil
import time
import multiprocessing

app = Flask(__name__)

# Define Prometheus metrics
function_requests_total = Counter('function_requests_total', 'Total number of requests to the function')
cpu_utilization_gauge = Gauge('cpu_utilization_percentage', 'CPU utilization percentage')
request_rate_gauge = Gauge('function_request_rate', 'Function request rate (requests per second)')
service_time_gauge = Gauge('function_service_time_seconds', 'Service time of the function in seconds')
network_bytes_sent = Counter('network_bytes_sent_total', 'Total number of bytes sent over the network')
network_bytes_recv = Counter('network_bytes_received_total', 'Total number of bytes received over the network')

start_time = time.time()

@app.route("/clear_metrics")
def clear_metrics():
    function_requests_total._value.set(0)
    cpu_utilization_gauge.set(0)
    service_time_gauge.set(0)
    network_bytes_sent._value.set(0)
    network_bytes_recv._value.set(0)
    start_time = time.time()
    registry = CollectorRegistry()
    return generate_latest(registry), 200, {"Content-Type": CONTENT_TYPE_LATEST}

cpu_process = None

def occupy_cpu(percentage):
    while True:
        # Start time
        start_time = time.time()
        # Perform CPU-bound task
        while (time.time() - start_time) < (percentage / 100):
            pass
        # Sleep to balance CPU usage
        time.sleep(1 - (percentage / 100))

@app.route('/occupy_cpu/<int:percentage>', methods=['GET'])
def start_cpu_occupier(percentage):
    global cpu_process
    if 0 <= percentage <= 100:
        # Kill previous process if exists
        if cpu_process and cpu_process.is_alive():
            cpu_process.terminate()
        # Create a new process for occupying CPU
        cpu_process = multiprocessing.Process(target=occupy_cpu, args=(percentage,))
        cpu_process.start()
        return jsonify({'message': f'CPU is being occupied at {percentage}%'}), 200
    else:
        return jsonify({'error': 'Invalid percentage, must be between 0 and 100'}), 400

memory_process = None

def allocate_memory(memory_size):
    try:
        memory_size = int(memory_size) * 1024 * 1024
    except ValueError:
        return jsonify({'error': 'Invalid memory size'}), 400
    if memory_size <= 0:
        return jsonify({'error': 'Memory size must be greater than 0'}), 400
    # Allocate memory of specified size
    ptr = ctypes.c_char * memory_size
    memory_block = ptr()
    return jsonify({'message': 'Memory allocated successfully'}), 200

@app.route('/allocate_memory/<int:memory_size>', methods=['GET'])
def start_memory_allocator(memory_size):
    global memory_process
    # Kill previous process if exists
    if memory_process and memory_process.is_alive():
        memory_process.terminate()
    # Create a new process for memory allocation
    memory_process = multiprocessing.Process(target=allocate_memory, args=(memory_size,))
    memory_process.start()
    return jsonify({'message': f'Memory allocated with size {memory_size}'}), 200

@app.route("/")
def hello():
    time_invoked = time.time()
    # Increment request counter
    function_requests_total.inc()
    time.sleep(0.1)
    service_time = time.time() - time_invoked
    service_time_gauge.set(service_time)
    # Collect CPU utilization
    cpu_utilization = psutil.cpu_percent()
    cpu_utilization_gauge.set(cpu_utilization)
    return "Hello from Python function"

@app.route("/metrics")
def metrics():
    registry = CollectorRegistry()
    # Register your Prometheus metrics collectors here
    # For example:
    # registry.register(...)
    elapsed_time = time.time() - start_time
    registry.register(function_requests_total)
    registry.register(cpu_utilization_gauge)
    registry.register(service_time_gauge)
    registry.register(network_bytes_sent)
    registry.register(network_bytes_recv)
    metric = generate_latest(registry)
    for line in metric.decode().split("\n"):
        if "function_requests_total" in line:
            try:
                value = float(line.split()[1])
                request_rate = value / elapsed_time
            except:
                pass
        if "function_requests_total" in line:
            try:
                no_of_requests = float(line.split()[1])
            except:
                pass
        if "cpu_utilization_percentage" in line:
            try:
                cpu_utilization = float(line.split()[1])
            except:
                pass
        if "function_service_time_seconds" in line:
            try:
                service_time = float(line.split()[1])
            except:
                pass
        if "network_bytes_sent_total" in line:
            try:
                bytes_sent = float(line.split()[1])
            except:
                pass
        if "network_bytes_received_total" in line:
            try:
                bytes_received = float(line.split()[1])
            except:
                pass
    request_rate_gauge.set(request_rate)
    registry.register(request_rate_gauge)
    network_stats = psutil.net_io_counters()
    network_bytes_sent.inc(network_stats.bytes_sent)
    network_bytes_recv.inc(network_stats.bytes_recv)
    print("Number of Requests: ", no_of_requests)
    print("Request_rate: ", request_rate)
    print("Service Time: ", service_time)
    print("CPU utilization: ", cpu_utilization)
    print("Bytes send: ", bytes_sent)
    print("Bytes received: ", bytes_received)
    return generate_latest(registry), 200, {"Content-Type": CONTENT_TYPE_LATEST}

if __name__ == "__main__":
    # Start Prometheus HTTP server to expose metrics
    start_http_server(8000)
    # Run Flask app
    app.run(host='0.0.0.0')
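For reference, the endpoints defined above can be exercised with a short client script such as the sketch below. It is only illustrative: it assumes the container is reachable at localhost:5000 (the port the Dockerfile exposes) and that the requests package is installed on the client side.

# exercise_function.py -- illustrative client, not part of this commit.
# Assumes the Flask app is reachable at http://localhost:5000.
import requests

BASE = "http://localhost:5000"

print(requests.get(f"{BASE}/").text)                       # invoke the function once
print(requests.get(f"{BASE}/occupy_cpu/50").json())        # busy-loop ~50% of each second
print(requests.get(f"{BASE}/allocate_memory/128").json())  # allocate 128 MB in a child process
print(requests.get(f"{BASE}/metrics").text[:300])          # Prometheus exposition text (truncated)
print(requests.get(f"{BASE}/clear_metrics").status_code)   # reset the counters and gauges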
test/full.py (view file @ 66f1e195; +10 −7)

...
@@ -237,11 +237,11 @@ def get_deployment_info():
    deployments = apps_api.list_namespaced_deployment(namespace=namespace).items
    # Print header
    print("{:<30} {:<15} {:<20} {:<20} {:<20}".format(
        "Deployment", "Resource Limits", "Resource Usage", "Exposed URL", "HPDA Info"))
    # print("{:<30} {:<15} {:<20} {:<20} {:<20}".format(
    #     "Deployment", "Resource Limits", "Resource Usage", "Exposed URL", "HPDA Info"))
    # print("{:<30} {:<15} {:<15} {:<20} {:<20} {:<20}".format(
    #     "Deployment", "Replicas", "Resource Limits", "Resource Usage", "Exposed URL", "HPDA Info"))
    print("{:<30} {:<15} {:<15} {:<20} {:<20} {:<20}".format(
        "Deployment", "Replicas", "Resource Limits", "Resource Usage", "Exposed URL", "HPDA Info"))
    for deployment in deployments:
        deployment_name = deployment.metadata.name
...
@@ -303,6 +303,7 @@ def get_deployment_info():
        # Get HPDA (HorizontalPodAutoscaler) if exists
        hpa_name = f"{deployment_name}-hpa"
        print("Iam here")
        try:
            hpa = scale_api.read_namespaced_horizontal_pod_autoscaler(name=hpa_name, namespace=namespace)
            min_replicas = hpa.spec.min_replicas
...
@@ -320,14 +321,16 @@ def get_deployment_info():
            if e.status == 404:
                hpa_info = "No HPA found"
            else:
                raise
                hpa_info = "HPA Error"
        # Get replica sets
        print("Iam here")
        replica_sets = apps_api.list_namespaced_replica_set(namespace=deployment.metadata.namespace).items
        # Print deployment info
        print("{:<30} {:<15} {:<20} {:<20} {:<20}".format(
            deployment_name, json.dumps(limits), json.dumps(usage), exposed_url, json.dumps(hpa_info)))
        print("Iam here")
        print("{:<30} {:<15} {:<15} {:<20} {:<20} {:<20}".format(
            deployment_name, replicas, json.dumps(limits), json.dumps(usage), exposed_url, json.dumps(hpa_info)))
    except Exception as e:
        print(f"Error occurred while getting deployment info: {e}")
...
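The hunks above extend get_deployment_info(), which walks deployments via the Kubernetes Python client objects set up elsewhere in full.py (apps_api, scale_api) and looks up a matching <deployment-name>-hpa autoscaler. A minimal standalone sketch of that lookup pattern follows; it assumes kubeconfig access, the autoscaling/v1 API, and a hard-coded "default" namespace, none of which are taken from full.py itself.

# hpa_lookup_sketch.py -- illustrative only; mirrors the lookup pattern used in full.py.
# Assumes a reachable cluster and the "<deployment-name>-hpa" naming convention seen above.
from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()             # or config.load_incluster_config() inside the cluster
apps_api = client.AppsV1Api()
scale_api = client.AutoscalingV1Api()
namespace = "default"                 # assumption; full.py uses its own namespace variable

for deployment in apps_api.list_namespaced_deployment(namespace=namespace).items:
    name = deployment.metadata.name
    replicas = deployment.spec.replicas
    try:
        hpa = scale_api.read_namespaced_horizontal_pod_autoscaler(
            name=f"{name}-hpa", namespace=namespace)
        hpa_info = f"{hpa.spec.min_replicas}-{hpa.spec.max_replicas} replicas"
    except ApiException as e:
        hpa_info = "No HPA found" if e.status == 404 else "HPA Error"
    print(f"{name:<30} {replicas!s:<10} {hpa_info:<20}")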