Commit efcc7cd9 authored by Anshul

Newer device plugin

parent 9555df12
docker build -t xzaviourr/mps-device-plugin:v$1 .
docker push xzaviourr/mps-device-plugin:v$1
\ No newline at end of file
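Note: the DaemonSet further down now pins the image xzaviourr/mps-device-plugin:v6.1, so this build script is presumably invoked with 6.1 as its first argument, making v$1 expand to the matching tag.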
@@ -3,6 +3,7 @@ package main
 import (
 	"context"
 	"fmt"
+	"log"
 	"net"
 	"strconv"
 	"sync"
@@ -24,23 +25,20 @@ const (
 var (
 	computeResourceName = "nvidia-mps.com/vcore"
-	memResourceName     = "nvidia-mps.com/vmemory"
 )

 type mpsGPUManager struct {
 	grpcServer           *grpc.Server
 	devices              map[string]pluginapi.Device
 	computePartitionSize int // In utilization (eg. 1%)
-	memPartitionSize     int // In MB (eg. 256MB)
 	socket               string
 	stop                 chan bool
 }

-func NewMpsGPUManager(computePartitionSize, memPartitionSize int) *mpsGPUManager {
+func NewMpsGPUManager(computePartitionSize int) *mpsGPUManager {
 	return &mpsGPUManager{
 		devices:              make(map[string]pluginapi.Device),
 		computePartitionSize: computePartitionSize,
-		memPartitionSize:     memPartitionSize,
 		stop:                 make(chan bool),
 	}
 }
@@ -80,10 +78,10 @@ type pluginService struct {
 // }

 func (mgm *mpsGPUManager) Serve() {
-	glog.Infof("Starting MPS GPU Manager")
+	log.Println("Serve() called ...")
 	lis, err := net.Listen("unix", pluginEndpoint)
 	if err != nil {
-		glog.Fatal("starting device plugin server failed : %v", err)
+		log.Fatalf("starting device plugin server failed : %v", err)
 	}
 	mgm.socket = pluginEndpoint
 	mgm.grpcServer = grpc.NewServer()
@@ -106,14 +104,14 @@ func (mgm *mpsGPUManager) Serve() {
 		defer wg.Done()
 		// Blocking call to accept incoming connections.
 		err := mgm.grpcServer.Serve(lis)
-		glog.Errorf("device-plugin server stopped serving: %v", err)
+		log.Fatalf("device-plugin server stopped serving: %v", err)
 	}()

 	if !registeredWithKubelet {
 		for len(mgm.grpcServer.GetServiceInfo()) <= 0 {
 			time.Sleep(1 * time.Second)
 		}
-		glog.Infoln("device-plugin server started serving")
+		log.Println("device-plugin server started serving")

 		err = RegisterToKubelet()
 		if err != nil {
@@ -121,7 +119,7 @@ func (mgm *mpsGPUManager) Serve() {
 			wg.Wait()
 			glog.Fatal(err)
 		}
-		glog.Infoln("device plugin registered with kubelet")
+		log.Println("device plugin registered with kubelet")
 		registeredWithKubelet = true
 	}
 }
@@ -130,6 +128,7 @@ func (mgm *mpsGPUManager) Serve() {
 }

 func RegisterToKubelet() error {
+	log.Println("RegisterToKubelet() called ...")
 	conn, err := grpc.Dial(kubeletEndpoint, grpc.WithInsecure(),
 		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
 			return net.DialTimeout("unix", addr, timeout)
@@ -153,23 +152,24 @@ func RegisterToKubelet() error {
 }

 func (ps *pluginService) RegisterService() {
+	log.Println("RegisterService() called ...")
 	pluginapi.RegisterDevicePluginServer(ps.mgm.grpcServer, ps)
 }

 func (mgm *mpsGPUManager) Stop() {
+	log.Println("Stop() called ...")
 	if mgm.grpcServer != nil {
 		mgm.grpcServer.Stop()
 	}
 	mgm.stop <- true
 	<-mgm.stop
-	glog.Infof("MPS GPU Manager stopped")
+	log.Println("MPS GPU Manager stopped")
 }

 func (ps *pluginService) ListDevices() []*pluginapi.Device {
-	gpuMemoryAvailable := 16384 // Using static value for now
+	log.Println("ListDevices() called ...")
 	computeDevicesCount := 100 / ps.mgm.computePartitionSize
-	memoryDevicesCount := gpuMemoryAvailable / ps.mgm.memPartitionSize
-	virtualDevices := make([]*pluginapi.Device, computeDevicesCount+memoryDevicesCount)
+	virtualDevices := make([]*pluginapi.Device, computeDevicesCount)

 	for i := 0; i < computeDevicesCount; i++ {
 		virtualDeviceID := fmt.Sprintf("%s-%d", computeResourceName, i)
@@ -178,29 +178,23 @@ func (ps *pluginService) ListDevices() []*pluginapi.Device {
 			Health: pluginapi.Healthy,
 		}
 	}
-	for i := 0; i < memoryDevicesCount; i++ {
-		virtualDeviceID := fmt.Sprintf("%s-%d", memResourceName, i)
-		virtualDevices[computeDevicesCount+i] = &pluginapi.Device{
-			ID:     virtualDeviceID,
-			Health: pluginapi.Healthy,
-		}
-	}
 	return virtualDevices
 }

 func (ps *pluginService) ListAndWatch(empty *pluginapi.Empty, stream pluginapi.DevicePlugin_ListAndWatchServer) error {
+	log.Println("ListAndWatch() called ...")
 	resp := new(pluginapi.ListAndWatchResponse)
 	resp.Devices = ps.ListDevices()
 	if err := stream.Send(resp); err != nil {
-		glog.Infof("Error sending device list : %v", err)
+		log.Fatalf("error sending device list : %v", err)
 		return err
 	}
-	glog.Infof("Successfully sent the list of devices ...")
+	log.Println("successfully sent the list of devices ...")
 	select {}
 }

 func (ps *pluginService) Allocate(ctx context.Context, rqt *pluginapi.AllocateRequest) (*pluginapi.AllocateResponse, error) {
+	log.Println("Allocate() called ...")
 	allocateResponse := &pluginapi.AllocateResponse{}
 	for _, req := range rqt.ContainerRequests {
@@ -218,12 +212,13 @@ func (ps *pluginService) Allocate(ctx context.Context, rqt *pluginapi.AllocateRe
 		containerAllocateResponse.Envs = envs
 		containerAllocateResponse.Mounts = mounts
 		allocateResponse.ContainerResponses = append(allocateResponse.ContainerResponses, containerAllocateResponse)
-		glog.Infof("Successfully allocated the devices ...")
+		log.Println("successfully allocated the required devices ...")
 	}
 	return allocateResponse, nil
 }

 func (ps *pluginService) GetDevicePluginOptions(context.Context, *pluginapi.Empty) (*pluginapi.DevicePluginOptions, error) {
+	log.Println("GetDevicePluginOptions() called ...")
 	return &pluginapi.DevicePluginOptions{
 		PreStartRequired:                false,
 		GetPreferredAllocationAvailable: false,
@@ -231,6 +226,7 @@ func (ps *pluginService) GetDevicePluginOptions(context.Context, *pluginapi.Empt
 }

 func (ps *pluginService) GetPreferredAllocation(ctx context.Context, rqt *pluginapi.PreferredAllocationRequest) (*pluginapi.PreferredAllocationResponse, error) {
+	log.Println("GetPreferredAllocation() called ...")
 	preferredAllocateResponse := &pluginapi.PreferredAllocationResponse{}
 	for _, req := range rqt.ContainerRequests {
@@ -246,15 +242,13 @@ func (ps *pluginService) GetPreferredAllocation(ctx context.Context, rqt *plugin
 }

 func (ps *pluginService) PreStartContainer(context.Context, *pluginapi.PreStartContainerRequest) (*pluginapi.PreStartContainerResponse, error) {
+	log.Println("PreStartContainer() called ...")
 	preStartContainerResponse := pluginapi.PreStartContainerResponse{}
 	return &preStartContainerResponse, nil
 }

 func main() {
-	mgm := NewMpsGPUManager(1, 256)
+	mgm := NewMpsGPUManager(1)
 	defer mgm.Stop()
 	mgm.Serve()
-	// if err := mgm.Serve(); err != nil {
-	// 	glog.Fatalf("Error starting the MPS GPU Manager : %v", err)
-	// }
 }
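For context, the body of RegisterToKubelet() is truncated by the hunks above; it presumably boils down to a single Register call on the kubelet's registration gRPC service. A minimal sketch alongside the plugin code, where the socket basename and the use of computeResourceName are assumptions, not taken verbatim from this diff:

// Sketch only: mirrors the v1beta1 device plugin registration flow.
func register(conn *grpc.ClientConn) error {
	client := pluginapi.NewRegistrationClient(conn)
	_, err := client.Register(context.Background(), &pluginapi.RegisterRequest{
		Version:      pluginapi.Version,        // device plugin API version ("v1beta1")
		Endpoint:     "mps-device-plugin.sock", // basename under /var/lib/kubelet/device-plugins/ (assumed)
		ResourceName: computeResourceName,      // "nvidia-mps.com/vcore"
	})
	return err
}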
@@ -20,7 +20,7 @@ spec:
         mps-gpu-enabled: "true"
       containers:
       - name: mps-device-plugin
-        image: xzaviourr/mps-device-plugin:v4
+        image: xzaviourr/mps-device-plugin:v6.1
         securityContext:
           privileged: true
         volumeMounts:
...
apiVersion: v1
kind: Pod
metadata:
name: cuda-test
spec:
restartPolicy: OnFailure
containers:
- name: cuda-vector-add
image: "nvidia/samples:vectoradd-cuda10.2"
resources:
requests:
nvidia.com/gpu: 5
limits:
nvidia.com/gpu: 5
\ No newline at end of file
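Worth noting: this test pod requests the stock nvidia.com/gpu resource rather than nvidia-mps.com/vcore, which the plugin above advertises, so as written it would presumably be served by the default NVIDIA device plugin instead of this one.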
@@ -13,12 +13,6 @@ kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
 kubectl taint nodes ub-10 node-role.kubernetes.io/master- # Allow device plugins and pods to run on master
 kubectl label node ub-10 mps-gpu-enabled=true # Add device plugin label

-# Delete daemonset
-# kubectl delete daemonset gpu-device-plugin-daemonset
-# kubectl delete clusterrolebinding gpu-device-plugin-manager-role
-
-# Attach daemonset again
-# kubectl create namespace gpu-device-plugin-namespace
 kubectl create sa mps-device-plugin-manager -n kube-system
 kubectl create clusterrolebinding mps-device-plugin-manager-role --clusterrole=cluster-admin --serviceaccount=kube-system:mps-device-plugin-manager
 kubectl apply -f mps-manager.yaml
\ No newline at end of file
@@ -2,6 +2,7 @@ FROM golang:1.20.4-alpine3.18
 WORKDIR /app

 ENV KUBECONFIG=/root/.kube/config
+ENV CACERT=/root/.kube/ca.crt
 COPY scheduler_ext.go .
 RUN go mod init scheduler_ext
...
@@ -2,6 +2,11 @@ apiVersion: kubescheduler.config.k8s.io/v1beta1
 kind: KubeSchedulerConfiguration
 profiles:
 - schedulerName: gpu-scheduler-extender-profile
+  pluginConfig:
+  - name: gpu-scheduler-extender
+    args:
+      kubeConfigPath: "/etc/kubernetes/scheduler.conf"
+      pluginConfigPath: "/etc/kubernetes/gpu-scheduler-extender-plugin-config.conf"
   plugins:
     score:
       enabled:
@@ -15,8 +20,5 @@ profiles:
     postFilter:
       enabled:
       - name: "gpu-scheduler-extender"
-  pluginConfig:
-  - name: DevicePlugin
-  - args:
-      kubeConfigPath: "/etc/kubernetes/scheduler.conf"
-      pluginConfigPath: "/etc/kubernetes/gpu-scheduler-extender-plugin-config.conf"
+clientConnection:
+  kubeconfig: "/etc/kubernetes/scheduler.conf"
\ No newline at end of file
// package main
// import (
// "context"
// "fmt"
// "os"
// "time"
// "google.golang.org/grpc"
// corev1 "k8s.io/api/core/v1"
// v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// "k8s.io/apimachinery/pkg/util/wait"
// "k8s.io/client-go/kubernetes"
// "k8s.io/client-go/tools/cache"
// "k8s.io/client-go/tools/clientcmd"
// "k8s.io/client-go/util/workqueue"
// "k8s.io/klog"
// pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
// )
// const (
// pluginName = "scheduler_ext"
// gpuMemoryKey = "mps.iitb/gpu-vmemory"
// gpuCoresKey = "mps.iitb/gpu-vcore"
// resyncPeriod = 5 * time.Minute
// defaultWeight = 1
// )
// type MPSDevicePluginScheduler struct {
// clientset kubernetes.Interface
// queue workqueue.RateLimitingInterface
// deviceConn *grpc.ClientConn
// deviceClient pluginapi.DevicePluginClient
// }
// func main() {
// kubeconfig := os.Getenv("KUBECONFIG")
// config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
// if err != nil {
// klog.Fatalf("Error building kubeconfig: %v", err)
// }
// clientset, err := kubernetes.NewForConfig(config)
// if err != nil {
// klog.Fatalf("Error creating clientset: %v", err)
// }
// deviceConn, err := grpc.Dial("unix:/var/lib/kubelet/device-plugins/gpu-device-plugin.sock", grpc.WithInsecure())
// if err != nil {
// klog.Fatalf("Error connecting to device plugin: %v", err)
// }
// deviceClient := pluginapi.NewDevicePluginClient(deviceConn)
// scheduler := &MPSDevicePluginScheduler{
// clientset: clientset,
// queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
// deviceConn: deviceConn,
// deviceClient: deviceClient,
// }
// stopCh := make(chan struct{})
// defer close(stopCh)
// go scheduler.run(stopCh)
// wait.Until(func() {
// if err := scheduler.syncPods(); err != nil {
// klog.Errorf("Error syncing pods: %v", err)
// }
// }, resyncPeriod, stopCh)
// <-stopCh
// }
// func (s *MPSDevicePluginScheduler) run(stopCh <-chan struct{}) {
// go wait.Until(s.worker, time.Second, stopCh)
// <-stopCh
// }
// func (s *MPSDevicePluginScheduler) worker() {
// for s.processNextItem() {
// }
// }
// func (s *MPSDevicePluginScheduler) processNextItem() bool {
// obj, shutdown := s.queue.Get()
// if shutdown {
// return false
// }
// err := func(obj interface{}) error {
// defer s.queue.Done(obj)
// var key string
// var ok bool
// if key, ok = obj.(string); !ok {
// s.queue.Done(obj)
// klog.Errorf("Expected string in workqueue, but got %#v", obj)
// return nil
// }
// if err := s.syncPod(key); err != nil {
// return fmt.Errorf("error syncing pod '%s': %v", key, err)
// }
// s.queue.Forget(obj)
// klog.Infof("Successfully synced pod '%s'", key)
// return nil
// }(obj)
// if err != nil {
// klog.Error(err)
// return true
// }
// return true
// }
// func (s *MPSDevicePluginScheduler) syncPod(key string) error {
// namespace, name, err := cache.SplitMetaNamespaceKey(key)
// if err != nil {
// return err
// }
// pod, err := s.clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, v1.GetOptions{})
// if err != nil {
// return err
// }
// if isPodScheduled(pod) {
// return nil
// }
// nodeName, err := s.findBestNode(pod)
// if err != nil {
// return err
// }
// boundPod := &corev1.Pod{
// ObjectMeta: v1.ObjectMeta{
// Namespace: pod.Namespace,
// Name: pod.Name,
// },
// Spec: pod.Spec,
// Status: pod.Status,
// }
// boundPod.Spec.NodeName = nodeName
// if _, err := s.clientset.CoreV1().Pods(pod.Namespace).Update(context.TODO(), boundPod, v1.UpdateOptions{}); err != nil {
// return err
// }
// return nil
// }
// func (s *MPSDevicePluginScheduler) findBestNode(pod *corev1.Pod) (string, error) {
// nodes, err := s.clientset.CoreV1().Nodes().List(context.TODO(), v1.ListOptions{
// LabelSelector: "gpu=true",
// })
// if err != nil {
// return "", err
// }
// var bestNode string
// bestScore := -1
// for _, node := range nodes.Items {
// if !s.isMPSAvailable(node) {
// continue
// }
// score := s.calculateNodeScore(node, pod)
// if score > bestScore {
// bestNode = node.Name
// bestScore = score
// }
// }
// return bestNode, nil
// }
// func (s *MPSDevicePluginScheduler) isMPSAvailable(node corev1.Node) bool {
// if node.Labels == nil {
// return false
// }
// _, mpsEnabled := node.Labels["mps-enabled"]
// return mpsEnabled
// }
// func (s *MPSDevicePluginScheduler) calculateNodeScore(node corev1.Node, pod *corev1.Pod) int {
// score := 0
// for _, container := range pod.Spec.Containers {
// if container.Resources.Requests != nil {
// gpuMemoryReq, ok := container.Resources.Requests[corev1.ResourceName(gpuMemoryKey)]
// if !ok {
// continue
// }
// gpuMemoryNode, ok := node.Status.Allocatable[corev1.ResourceName(gpuMemoryKey)]
// if !ok {
// continue
// }
// if gpuMemoryReq.Value() <= gpuMemoryNode.Value() {
// score++
// } else {
// score--
// }
// }
// if container.Resources.Requests != nil {
// gpuCoresReq, ok := container.Resources.Requests[corev1.ResourceName(gpuCoresKey)]
// if !ok {
// continue
// }
// gpuCoresNode, ok := node.Status.Allocatable[corev1.ResourceName(gpuCoresKey)]
// if !ok {
// continue
// }
// if gpuCoresReq.Value() <= gpuCoresNode.Value() {
// score++
// } else {
// score--
// }
// }
// }
// return score
// }
// func (s *MPSDevicePluginScheduler) syncPods() error {
// pods, err := s.clientset.CoreV1().Pods("").List(context.TODO(), v1.ListOptions{
// FieldSelector: "status.phase!=Succeeded,status.phase!=Failed",
// })
// if err != nil {
// return err
// }
// for _, pod := range pods.Items {
// key, err := cache.MetaNamespaceKeyFunc(&pod)
// if err != nil {
// klog.Errorf("Error getting key for pod '%s/%s': %v", pod.Namespace, pod.Name, err)
// continue
// }
// s.queue.Add(key)
// }
// return nil
// }
// func isPodScheduled(pod *corev1.Pod) bool {
// return pod.Spec.NodeName != ""
// }
 package main

 import (
 	"context"
+	"crypto/tls"
+	"crypto/x509"
 	"fmt"
+	"io/ioutil"
 	"os"
 	"time"

 	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -45,7 +297,29 @@ func main() {
 		klog.Fatalf("Error creating clientset: %v", err)
 	}

-	deviceConn, err := grpc.Dial("unix:/var/lib/kubelet/device-plugins/gpu-device-plugin.sock", grpc.WithInsecure())
+	// Load the CA certificate
+	caCertFile := os.Getenv("CACERT")
+	caCert, err := ioutil.ReadFile(caCertFile)
+	if err != nil {
+		klog.Fatalf("Error loading CA certificate: %v", err)
+	}
+
+	// Create a certificate pool and add the CA certificate
+	certPool := x509.NewCertPool()
+	if !certPool.AppendCertsFromPEM(caCert) {
+		klog.Fatalf("Failed to append CA certificate to the certificate pool")
+	}
+
+	// Create TLS credentials with the certificate pool and skip certificate verification
+	creds := credentials.NewTLS(&tls.Config{
+		InsecureSkipVerify: true,
+		RootCAs:            certPool,
+	})
+
+	// Create a dial option with the TLS credentials
+	dialOption := grpc.WithTransportCredentials(creds)
+
+	deviceConn, err := grpc.Dial("unix:/var/lib/kubelet/device-plugins/mps-device-plugin.sock", dialOption) // grpc.WithInsecure()
 	if err != nil {
 		klog.Fatalf("Error connecting to device plugin: %v", err)
 	}
...
#!/bin/bash
# Setup kubernetes cluster
sudo kubeadm reset # Delete existing master
rm $HOME/.kube/config
sudo rm -rf /etc/cni/net.d
sudo swapoff -a # Swapoff
sudo kubeadm init --pod-network-cidr=10.244.0.0/16 # Initialize cluster
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml # Use flannel for networking
kubectl taint nodes ub-10 node-role.kubernetes.io/master- # Allow device plugins and pods to run on master
kubectl label node ub-10 mps-gpu-enabled=true # Add device plugin label
# Delete daemonset
# kubectl delete daemonset gpu-device-plugin-daemonset
# kubectl delete clusterrolebinding gpu-device-plugin-manager-role
# Attach daemonset again
# kubectl create namespace gpu-device-plugin-namespace
kubectl create sa mps-device-plugin-manager -n kube-system
kubectl create clusterrolebinding mps-device-plugin-manager-role --clusterrole=cluster-admin --serviceaccount=kube-system:mps-device-plugin-manager
kubectl apply -f mps-manager.yaml
\ No newline at end of file
module mpsmanager
go 1.20
require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.55.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
k8s.io/kubelet v0.27.2 // indirect
)
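Side note: google.golang.org/grpc and k8s.io/kubelet are imported directly by the plugin code above, so their // indirect markers here look stale; a go mod tidy would presumably promote them to direct requirements.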
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=