 

Go: get K8s API server health status

Tags: go, kubernetes

I have a Go program to which I need to add a call to the K8s API server's health (/livez) API to get the health status.

https://kubernetes.io/docs/reference/using-api/health-checks/

The program runs on the same cluster as the API server and needs to get the /livez status. I tried to find this API in the client-go library but didn't find a way to achieve it...

https://github.com/kubernetes/client-go

Is there a way to do this from a Go program running on the same cluster as the API server?

Asked by JJD on Mar 03 '23

1 Answer

Update (final answer)

The OP asked me to update my answer to show configs for "fine-tuned" or "specific" service accounts, without using cluster-admin.

As far as I can tell, each pod has permission to read /healthz by default. For example, the following CronJob works just fine without using a ServiceAccount at all:

# cronjob
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: is-healthz-ok-no-svc
spec:
  schedule: "*/5 * * * *" # at every fifth minute
  jobTemplate:
    spec:
      template:
        spec:
######### serviceAccountName: health-reader-sa
          containers:
            - name: is-healthz-ok-no-svc
              image: oze4/is-healthz-ok:latest
          restartPolicy: OnFailure

(screenshot of the CronJob above completing successfully)
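
On recent clusters this is expected: the default system:public-info-viewer ClusterRoleBinding lets any caller, even unauthenticated ones, GET /healthz, /livez, /readyz and /version. If you still want an explicit, fine-tuned service account (for example the health-reader-sa referenced in the commented-out line above), a minimal sketch could look like this (names are illustrative):

# service account for the health-check job
apiVersion: v1
kind: ServiceAccount
metadata:
  name: health-reader-sa
  namespace: default
---
# cluster role that only allows GET on the health endpoints
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: health-reader
rules:
  - nonResourceURLs: ["/healthz", "/livez", "/readyz"]
    verbs: ["get"]
---
# bind the cluster role to the service account
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: health-reader
subjects:
  - kind: ServiceAccount
    name: health-reader-sa
    namespace: default
roleRef:
  kind: ClusterRole
  name: health-reader
  apiGroup: rbac.authorization.k8s.io

Uncomment the serviceAccountName line in the CronJob above to run it under this account.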

Original

I went ahead and wrote a proof of concept for this. You can find the full repo here, but the code is below.

main.go

package main

import (
    "os"
    "errors"
    "fmt"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

func main() {
    client, err := newInClusterClient()
    if err != nil {
        panic(err.Error())
    }

    // Hit the API server's /healthz endpoint via the discovery REST client.
    path := "/healthz"
    content, err := client.Discovery().RESTClient().Get().AbsPath(path).DoRaw()
    if err != nil {
        fmt.Printf("ErrorBadRequest : %s\n", err.Error())
        os.Exit(1)
    }

    // The health endpoints return the plain string "ok" when the API server is healthy.
    contentStr := string(content)
    if contentStr != "ok" {
        fmt.Printf("ErrorNotOk : response != 'ok' : %s\n", contentStr)
        os.Exit(1)
    }

    fmt.Println("Success : ok!")
    os.Exit(0)
}

func newInClusterClient() (*kubernetes.Clientset, error) {
    config, err := rest.InClusterConfig()
    if err != nil {
        return &kubernetes.Clientset{}, errors.New("Failed loading client config")
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        return &kubernetes.Clientset{}, errors.New("Failed getting clientset")
    }
    return clientset, nil
}
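
Note: with newer versions of client-go (roughly v0.18 and later), DoRaw takes a context.Context, so if you build against a recent client-go the call above needs a small adjustment, for example:

// with newer client-go, pass a context (and add "context" to the imports)
content, err := client.Discovery().RESTClient().Get().AbsPath(path).DoRaw(context.TODO())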

Dockerfile

FROM golang:latest
RUN mkdir /app
ADD . /app
WORKDIR /app
RUN go build -o main .
CMD ["/app/main"]

deploy.yaml

(as a CronJob)

# cronjob
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: is-healthz-ok
spec:
  schedule: "*/5 * * * *" # at every fifth minute
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: is-healthz-ok
          containers:
            - name: is-healthz-ok
              image: oze4/is-healthz-ok:latest
          restartPolicy: OnFailure
---
# service account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: is-healthz-ok
  namespace: default
---
# cluster role binding
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: is-healthz-ok
subjects:
  - kind: ServiceAccount
    name: is-healthz-ok
    namespace: default
roleRef:
  kind: ClusterRole
  ##########################################################################
  # Instead of assigning cluster-admin you can create your own ClusterRole #
  # I used cluster-admin because this is a homelab                         #
  ##########################################################################
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---

(screenshot of a successful CronJob run)


Update 1

The OP asked how to deploy an "in-cluster-client-config", so I am providing an example deployment (one that I am using myself).

You can find the repo here

Example deployment (I am using a CronJob, but it could be anything):

cronjob.yaml

apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: remove-terminating-namespaces-cronjob
spec:
  schedule: "0 */1 * * *" # at minute 0 of each hour aka once per hour
  #successfulJobsHistoryLimit: 0
  #failedJobsHistoryLimit: 0
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: svc-remove-terminating-namespaces
          containers:
          - name: remove-terminating-namespaces
            image: oze4/service.remove-terminating-namespaces:latest
          restartPolicy: OnFailure

rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: svc-remove-terminating-namespaces
  namespace: default
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: crb-namespace-reader-writer
subjects:
- kind: ServiceAccount
  name: svc-remove-terminating-namespaces
  namespace: default
roleRef:
  kind: ClusterRole
  ##########################################################################
  # Instead of assigning cluster-admin you can create your own ClusterRole #
  # I used cluster-admin because this is a homelab                         #
  ##########################################################################
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
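
Here, too, you could avoid cluster-admin by pointing roleRef at a ClusterRole scoped to what this particular job actually touches. A rough sketch (the exact verb list is an assumption based on what cleaning up terminating namespaces typically needs):

# cluster role (sketch) limited to namespace cleanup
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: namespace-reader-writer
rules:
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "list", "update", "delete"]
  # the finalize subresource is what lets the job clear finalizers on stuck namespaces
  - apiGroups: [""]
    resources: ["namespaces/finalize"]
    verbs: ["update"]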

Original Answer

It sounds like what you are looking for is the "in-cluster-client-config" from client-go.

It is important to remember that when using an "in-cluster-client-config", the API calls within your Go code use the service account of the pod they run in, so make sure you are testing with an account that has permission to read /livez.

I tested the following code and was able to get the /livez status:

package main

import (
    "errors"
    "flag"
    "fmt"
    "path/filepath"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/util/homedir"
)

func main() {
    // I find it easiest to use "out-of-cluster" for testing
    // client, err := newOutOfClusterClient()

    client, err := newInClusterClient()
    if err != nil {
        panic(err.Error())
    }

    // Query the API server's /livez endpoint; it returns the plain string "ok" when healthy.
    livez := "/livez"
    content, err := client.Discovery().RESTClient().Get().AbsPath(livez).DoRaw()
    if err != nil {
        panic(err.Error())
    }

    fmt.Println(string(content))
}

func newInClusterClient() (*kubernetes.Clientset, error) {
    config, err := rest.InClusterConfig()
    if err != nil {
        return &kubernetes.Clientset{}, errors.New("Failed loading client config")
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        return &kubernetes.Clientset{}, errors.New("Failed getting clientset")
    }
    return clientset, nil
}

// I find it easiest to use "out-of-cluster" for testing
func newOutOfClusterClient() (*kubernetes.Clientset, error) {
    var kubeconfig *string
    if home := homedir.HomeDir(); home != "" {
        kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
    } else {
        kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
    }
    flag.Parse()

    // use the current context in kubeconfig
    config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
    if err != nil {
        return nil, err
    }

    // create the clientset
    client, err := kubernetes.NewForConfig(config)
    if err != nil {
        return nil, err
    }

    return client, nil
}
Answered by Matt Oestreich on Mar 05 '23