Kubernetes manifests

apiVersion: apps/v1
kind: Deployment
metadata:
  name: website
  labels:
    app: website
spec:
  replicas: 1
  minReadySeconds: 10
  progressDeadlineSeconds: 60
  revisionHistoryLimit: 5
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  selector:
    matchLabels:
      app: website
  template:
    metadata:
      name: website
      labels:
        app: website
    spec:
      restartPolicy: Always
      securityContext:
        runAsUser: 33
        runAsGroup: 33
      hostAliases:
      - ip: "127.0.0.1"
        hostnames:
          - "foo.local"
          - "bar.local"
      - ip: "10.1.2.3"
        hostnames:
          - "foo.remote"
          - "bar.remote"
      containers:
        - name: website
          image: nginx:latest
          imagePullPolicy: Always

          ports:
            - containerPort: 80

          env:
            - name: APP_TYPE
              value: application
            - name: APP_SECRET
              valueFrom:
                secretKeyRef:
                  key: APP_SECRET
                  name: db-secrets

          envFrom:
            - configMapRef:
                name: site-configurations
            - secretRef:
                name: site-secrets

          resources:
            requests:
              memory: "64Mi"
              cpu: "10m"
            limits:
              memory: "256Mi"
              cpu: "100m"

          livenessProbe:
            httpGet:
              path: /healthz
              port: 8080
              httpHeaders:
                - name: Custom-Header
                  value: Awesome
            initialDelaySeconds: 3
            periodSeconds: 3

#           Exec liveness probe (alternative)
#          livenessProbe:
#            exec:
#              command:
#                - cat
#                - /tmp/healthy
#            initialDelaySeconds: 5
#            periodSeconds: 5

          readinessProbe:
            exec:
              command:
                - cat
                - /tmp/healthy
            initialDelaySeconds: 5
            periodSeconds: 5

#           TCP readiness and liveness probes (alternative)
#          readinessProbe:
#            tcpSocket:
#              port: 8080
#            initialDelaySeconds: 5
#            periodSeconds: 10
#          livenessProbe:
#            tcpSocket:
#              port: 8080
#            initialDelaySeconds: 15
#            periodSeconds: 20

          # Protect slow-starting containers with startup probes
          startupProbe:
            httpGet:
              path: /healthz
              port: 8080 # same endpoint as the liveness probe; a named port like "liveness-port" would require a named containerPort, which this manifest does not define
            failureThreshold: 30
            periodSeconds: 10

          lifecycle:
            postStart:
              exec:
                command:
                  - "/bin/bash"
                  - "-c"
                  - 'curl -s -X GET --max-time 60 http://${SERVICE_NAME}.notifications.svc.cluster.local/start/${HOSTNAME}/php 2>&1; exit 0'
            preStop:
              exec:
                command:
                  - "/bin/bash"
                  - "-c"
                  - 'curl -s -X GET --max-time 60 http://${SERVICE_NAME}.notifications.svc.cluster.local/stop/${HOSTNAME}/php 2>&1; exit 0'

          volumeMounts:
            - mountPath: /app/public/thumbs
              name: thumbnails
            - mountPath: /app/uploads
              name: uploads
            - name: config
              mountPath: "/config"
              readOnly: true

      initContainers:
        - name: update-database
          image: php-container
          envFrom:
            - configMapRef:
                name: db-credentials
          command:
            - "bin/console"
            - "setup:install"
      volumes:
        - name: thumbnails
          emptyDir: {}
        - name: uploads
          persistentVolumeClaim:
            claimName: website-uploads
        - name: config
          configMap:
            name: my-app-config
            items:
              - key: "game.properties"
                path: "game.properties"
              - key: "user-interface.properties"
                path: "user-interface.properties"

More information

Create objects

kubectl apply -f ./my-manifest.yaml            # create resource(s)
kubectl apply -f ./my1.yaml -f ./my2.yaml      # create from multiple files
kubectl apply -f ./dir                         # create resource(s) in all manifest files in dir
kubectl apply -f https://git.io/vPieo          # create resource(s) from url

# start a single instance of nginx
kubectl create deployment nginx --image=nginx  

# create a Job which prints "Hello World"
kubectl create job hello --image=busybox -- echo "Hello World" 

# create a CronJob that prints "Hello World" every minute
kubectl create cronjob hello --image=busybox   --schedule="*/1 * * * *" -- echo "Hello World"

Updating resources

kubectl set image deployment/frontend www=image:v2               # Rolling update "www" containers of "frontend" deployment, updating the image
kubectl rollout history deployment/frontend                      # Check the history of deployments including the revision 
kubectl rollout undo deployment/frontend                         # Rollback to the previous deployment
kubectl rollout undo deployment/frontend --to-revision=2         # Rollback to a specific revision
kubectl rollout status -w deployment/frontend                    # Watch rolling update status of "frontend" deployment until completion
kubectl rollout restart deployment/frontend                      # Rolling restart of the "frontend" deployment


cat pod.json | kubectl replace -f -                              # Replace a pod based on the JSON passed into stdin

# Force replace, delete and then re-create the resource. Will cause a service outage.
kubectl replace --force -f ./pod.json

# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000
kubectl expose rc nginx --port=80 --target-port=8000

# Update a single-container pod's image version (tag) to v4
kubectl get pod mypod -o yaml | sed 's/\(image: myimage\):.*$/\1:v4/' | kubectl replace -f -

kubectl label pods my-pod new-label=awesome                      # Add a Label
kubectl annotate pods my-pod icon-url=http://goo.gl/XXBTWq       # Add an annotation
kubectl autoscale deployment foo --min=2 --max=10                # Auto scale the deployment "foo" between 2 and 10 replicas

Scaling resources

kubectl scale --replicas=3 rs/foo                                 # Scale a replicaset named 'foo' to 3
kubectl scale --replicas=3 -f foo.yaml                            # Scale a resource specified in "foo.yaml" to 3
kubectl scale --current-replicas=2 --replicas=3 deployment/mysql  # If the deployment named mysql's current size is 2, scale mysql to 3
kubectl scale --replicas=5 rc/foo rc/bar rc/baz                   # Scale multiple replication controllers

Deleting resources

kubectl delete -f ./pod.json                                      # Delete a pod using the type and name specified in pod.json
kubectl delete pod unwanted --now                                 # Delete a pod with no grace period
kubectl delete pod,service baz foo                                # Delete pods and services with same names "baz" and "foo"
kubectl delete pods,services -l name=myLabel                      # Delete pods and services with label name=myLabel
kubectl -n my-ns delete pod,svc --all                             # Delete all pods and services in namespace my-ns
# Delete all pods matching the awk pattern1 or pattern2
kubectl get pods  -n mynamespace --no-headers=true | awk '/pattern1|pattern2/{print $1}' | xargs  kubectl delete -n mynamespace pod

Copy files and directories to and from containers

kubectl cp /tmp/foo_dir my-pod:/tmp/bar_dir            # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in the current namespace
kubectl cp /tmp/foo my-pod:/tmp/bar -c my-container    # Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific container
kubectl cp /tmp/foo my-namespace/my-pod:/tmp/bar       # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace my-namespace
kubectl cp my-namespace/my-pod:/tmp/foo /tmp/bar       # Copy /tmp/foo from a remote pod to /tmp/bar locally
Note: kubectl cp requires that the 'tar' binary is present in your container image. If 'tar' is not present, kubectl cp will fail. For advanced use cases, such as symlinks, wildcard expansion or file mode preservation consider using kubectl exec.
tar cf - /tmp/foo | kubectl exec -i -n my-namespace my-pod -- tar xf - -C /tmp/bar     # Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace my-namespace
kubectl exec -n my-namespace my-pod -- tar cf - /tmp/foo | tar xf - -C /tmp/bar        # Copy /tmp/foo from a remote pod to /tmp/bar locally
# NodePort
apiVersion: v1
kind: Service
metadata:
  name: my-app-node-port
  namespace: default
spec:
  type: NodePort
  selector:
    app: my-app
  ports:
    - port: 80
      targetPort: 80
---
# ClusterIP
apiVersion: v1
kind: Service
metadata:
  name: my-app-cluster-ip
  namespace: default
spec:
  type: ClusterIP
  selector:
    app: my-app
  ports:
    - port: 80
      targetPort: 80
---
# LoadBalancer
apiVersion: v1
kind: Service
metadata:
  name: my-app-load-balancer
  namespace: default
spec:
  type: LoadBalancer
  selector:
    app: my-app
  ports:
    - port: 80
      targetPort: 80
---
# Headless service for StatefulSet
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None
  selector:
    app: nginx
---
# AWS LoadBalancer with TLS termination; the application listens on port 80
apiVersion: v1
kind: Service
metadata:
  name: classic-lb-k8s
  namespace: demo
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:eu-central-1:000000000000:certificate/11aa22bb-a1s2-1q2w-1234-q1w2e3r4t5t6
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: http
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https
#    service.beta.kubernetes.io/aws-load-balancer-access-log-enabled: "true"
#    # Specifies whether access logs are enabled for the load balancer
#    service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval: "60"
#    # The interval for publishing the access logs. You can specify an interval of either 5 or 60 (minutes).
#    service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-name: "my-bucket"
#    # The name of the Amazon S3 bucket where the access logs are stored
#    service.beta.kubernetes.io/aws-load-balancer-access-log-s3-bucket-prefix: "my-bucket-prefix/prod"
#    # The logical hierarchy you created for your Amazon S3 bucket, for example `my-bucket-prefix/prod`
  labels:
    app: my-awesome-application
spec:
  selector:
    app: my-awesome-application
  type: LoadBalancer
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: "tcp"
    - name: https
      port: 443
      targetPort: 80
      protocol: "tcp"
---
# External Name
apiVersion: v1
kind: Service
metadata:
  name: database-host
  namespace: default
spec:
  type: ExternalName
  externalName: "__RDS_HOST__"
---
# External IPs
apiVersion: v1
kind: Service
metadata:
  name: my-service
  namespace: default
spec:
  ports:
    - name: mysql
      protocol: TCP
      port: 3306
      targetPort: 3306
  externalIPs:
    - 10.10.10.10

More information

# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000
kubectl expose rc nginx --port=80 --target-port=8000

# Create a service of type LoadBalancer
kubectl expose deployment hello-world --type=LoadBalancer --name=my-service
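
# The cloud provider then provisions the load balancer; watch the service until EXTERNAL-IP is populated (it shows <pending> until then)
kubectl get service my-service -w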
apiVersion: v1
kind: ConfigMap
metadata:
  # config map name, will be used by deployments, cronjobs, etc
  name: my-app-config
  namespace: default
#immutable: true
data:
  APP_ENV: prod

  MYSQL_HOST: "127.0.0.1"
  MYSQL_USER: mysql_username_here

  UPLOAD_DIRECTORY: /data/uploads

  # file-like keys
  game.properties: |
    enemy.types=aliens,machines,humans
    player.maximum-lives=5

  user-interface.properties: |
    color.good=purple
    color.bad=yellow
    color.textmode=true
---
apiVersion: v1
kind: Secret
metadata:
  name: my-app-secrets
  namespace: default
data:
  my_db_password: a2lzcGhw # base64 encoded
  redis_password: a2lzcGhw # base64 encoded
---
apiVersion: v1
kind: Secret
metadata:
  name: secret-dockercfg
  namespace: default
type: kubernetes.io/dockercfg
data:
  .dockercfg: |
    "<base64 encoded ~/.dockercfg file>"
---
# Basic authentication secret
apiVersion: v1
kind: Secret
metadata:
  name: secret-basic-auth
type: kubernetes.io/basic-auth
stringData:
  username: admin
  password: t0p-Secret
---
# SSH authentication secret
apiVersion: v1
kind: Secret
metadata:
  name: secret-ssh-auth
type: kubernetes.io/ssh-auth
data:
  # the data is abbreviated in this example
  ssh-privatekey: |
    MIIEpQIBAAKCAQEAulqb/Y ...
---
# TLS secret
apiVersion: v1
kind: Secret
metadata:
  name: secret-tls
type: kubernetes.io/tls
data:
  # the data is abbreviated in this example
  tls.crt: |
    MIIC2DCCAcCgAwIBAgIBATANBgkqh ...
  tls.key: |
    MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ ...
---
# Bootstrap token secret
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-5emitj
  namespace: kube-system
type: bootstrap.kubernetes.io/token
data:
  auth-extra-groups: c3lzdGVtOmJvb3RzdHJhcHBlcnM6a3ViZWFkbTpkZWZhdWx0LW5vZGUtdG9rZW4=
  expiration: MjAyMC0wOS0xM1QwNDozOToxMFo=
  token-id: NWVtaXRq
  token-secret: a3E0Z2lodnN6emduMXAwcg==
  usage-bootstrap-authentication: dHJ1ZQ==
  usage-bootstrap-signing: dHJ1ZQ==

More information

Create generic secret

kubectl create secret generic secret-name \
  --from-literal=MYSQL_PASSWORD=mypass123 \
  --from-literal=APP_SECRET=qawsed1234 \
  -o yaml \
  --dry-run=client

Create docker registry secret

kubectl create secret docker-registry secret-tiger-docker \
  --docker-username=tiger \
  --docker-password=pass113 \
  --docker-email=tiger@acme.com
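
The resulting secret can then be referenced from a workload via imagePullSecrets; a minimal sketch (the image name is illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: private-reg-pod
  namespace: default
spec:
  containers:
    - name: app
      # hypothetical image hosted on the private registry
      image: registry.example.com/app:1.0
  imagePullSecrets:
    - name: secret-tiger-docker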

Create a secret with several keys

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
  namespace: mynamespace
type: Opaque
data:
  password: $(echo -n "s33msi4" | base64 -w0)
  username: $(echo -n "jane" | base64 -w0)
EOF
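
# Read a value back and decode it (names match the secret created above)
kubectl get secret mysecret -n mynamespace -o jsonpath='{.data.password}' | base64 -d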
# Documentation: https://kubernetes.io/docs/concepts/services-networking/ingress/
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: minimal-ingress
  namespace: default
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    controller.scope.enabled: "true"
    controller.scope.namespace: "default"
spec:
  # (optional) if you want to listen on port 443
  tls:
    - hosts:
        - www.example.com
      # secret defined for SSL certificate
      secretName: example.com-tls
  rules:
    - host: www.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-app
                port:
                  number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-resource-backend
  namespace: default
spec:
  defaultBackend:
    resource:
      apiGroup: k8s.example.com
      kind: StorageBucket
      name: static-assets
  rules:
    - http:
        paths:
          - path: /icons
            pathType: ImplementationSpecific
            backend:
              resource:
                apiGroup: k8s.example.com
                kind: StorageBucket
                name: icon-assets
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-wildcard-host
  namespace: default
spec:
  rules:
    - host: "foo.bar.com"
      http:
        paths:
          - pathType: Prefix
            path: "/bar"
            backend:
              service:
                name: service1
                port:
                  number: 80
    - host: "*.foo.com"
      http:
        paths:
          - pathType: Prefix
            path: "/foo"
            backend:
              service:
                name: service2
                port:
                  number: 80
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: my-cron-job
  namespace: default
  labels:
    app: my-cron-job
spec:
  # run every minute
  schedule: "* * * * *"
  # do not allow concurrent jobs
  concurrencyPolicy: Forbid
  # delete successful pods
  successfulJobsHistoryLimit: 0
  # keep last 2 failed pods for debug
  failedJobsHistoryLimit: 2
  startingDeadlineSeconds: 120
  suspend: false
  jobTemplate:
    spec:
      template:
        metadata:
          labels:
            app: my-cron-job
        spec:
          restartPolicy: Never
          nodeSelector:
            type: cron
          containers:
            # name of the container in pod
            - name: app-cron
              # docker image to load for this container
              image: my-php-container:tag_name
              # always download image from registry
              imagePullPolicy: Always
              # define necessary resources for this container
              resources:
                requests:
                  cpu: "1"
                  memory: "256Mi"
                limits:
                  cpu: "1"
                  memory: "1Gi"
              # load all variables defined in config map
              envFrom:
                - configMapRef:
                    name: my-app-config
              env:
                # set inline variable for container
                - name: CONTAINER_PURPOSE
                  value: cron
                # register a secret (password) to env vars from secrets manifests
                - name: MYSQL_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: my-app-secrets
                      # reference to variable name in secrets manifest
                      key: my_db_password
              command:
                - vendor/bin/codecept
              args:
                - run
                - "--dry-run"
              volumeMounts:
                - name: project-data
                  mountPath: /stored_data
          volumes:
            # using AWS EFS
            - name: project-data
              nfs:
                server: 1a2b3c4d.efs.eu-central-1.amazonaws.com
                path: /
            # using PVC
            - name: uploads
              persistentVolumeClaim:
                claimName: uploaded-files

More information

Create a job from a cronjob

kubectl create job --from=cronjob/<cronjob-name> <job-name>
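
# e.g. triggering a manual run of the CronJob defined above
kubectl create job --from=cronjob/my-cron-job my-cron-job-manual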

Cron schedule syntax

# ┌───────────── minute (0 - 59)
# │ ┌───────────── hour (0 - 23)
# │ │ ┌───────────── day of the month (1 - 31)
# │ │ │ ┌───────────── month (1 - 12)
# │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
# │ │ │ │ │                                   7 is also Sunday on some systems)
# │ │ │ │ │
# │ │ │ │ │
# * * * * *
Entry                    Description                                                   Equivalent to
@yearly (or @annually)   Run once a year at midnight of 1 January                      0 0 1 1 *
@monthly                 Run once a month at midnight of the first day of the month    0 0 1 * *
@weekly                  Run once a week at midnight on Sunday morning                 0 0 * * 0
@daily (or @midnight)    Run once a day at midnight                                    0 0 * * *
@hourly                  Run once an hour at the beginning of the hour                 0 * * * *
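
# Examples:
#   */15 * * * *    every 15 minutes
#   30 3 * * 1      at 03:30 every Monday
#   0 0 1 */3 *     at midnight on the first day of every third month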
apiVersion: batch/v1
kind: Job
metadata:
  name: thumbs-cleanup
  namespace: default
spec:
  ttlSecondsAfterFinished: 120 # delete the finished Job and its pods 2 minutes after completion
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: php
          image: my-custom-container
          imagePullPolicy: Always
          command:
            - vendor/bin/console
            - cleanup:thumbs
          envFrom:
            - configMapRef:
                name: my-config
          env:
            - name: CONTAINER_PURPOSE
              value: job
            - name: MY_DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mysql-password
                  key: password
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: uploaded-files
  # PVs don't have a namespace
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    path: /mnt/nfs/uploaded_files
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-app-uploads
  namespace: default
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
# if you define a PVC backed by AWS EFS, just set the storage to 1Gi; EFS is elastic, so the requested size is not enforced
---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: php-apache
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-apache
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 50
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: php-apache
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-apache
  minReplicas: 1
  maxReplicas: 10
  metrics:
    # Resource metrics
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 50
    # Pod metrics
    - type: Pods
      pods:
        metric:
          name: packets-per-second
        target:
          type: AverageValue
          averageValue: 1k
    # Object metrics
    - type: Object
      object:
        metric:
          name: requests-per-second
        describedObject:
          apiVersion: networking.k8s.io/v1
          kind: Ingress
          name: main-route
        target:
          type: Value
          value: 10k
    # External metrics
    - type: External
      external:
        metric:
          name: queue_messages_ready
          selector:
            matchLabels:
              queue: "worker_tasks"
        target:
          type: AverageValue
          averageValue: 30

More information

Create HPA

kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
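
# Check the current status of the autoscaler (targets, current/desired replicas)
kubectl get hpa php-apache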
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  minAvailable: 2
#  maxUnavailable: 1 # alternative to minAvailable; only one of the two may be set
  selector:
    matchLabels:
      app: zookeeper

More information

Get pod disruption budgets list

kubectl get poddisruptionbudgets
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
    - port: 80
      name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
#  podManagementPolicy: "Parallel"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: registry.k8s.io/nginx-slim:0.8
          ports:
            - containerPort: 80
              name: web
          volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi

More information

Using StatefulSets

StatefulSets are valuable for applications that require one or more of the following:

  • Stable, unique network identifiers.
  • Stable, persistent storage.
  • Ordered, graceful deployment and scaling.
  • Ordered, automated rolling updates.

In the above, stable is synonymous with persistence across Pod (re)scheduling. If an application doesn't require any stable identifiers or ordered deployment, deletion, or scaling, you should deploy your application using a workload object that provides a set of stateless replicas. Deployment or ReplicaSet may be better suited to your stateless needs.
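
For example, applying the manifest above and watching the Pods shows them being created in order (web-0 first, then web-1):

kubectl get pods -w -l app=nginx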

Limitations

  • The storage for a given Pod must either be provisioned by a PersistentVolume Provisioner based on the requested storage class, or pre-provisioned by an admin.
  • Deleting and/or scaling a StatefulSet down will not delete the volumes associated with the StatefulSet. This is done to ensure data safety, which is generally more valuable than an automatic purge of all related StatefulSet resources.
  • StatefulSets currently require a Headless Service to be responsible for the network identity of the Pods. You are responsible for creating this Service.
  • StatefulSets do not provide any guarantees on the termination of pods when a StatefulSet is deleted. To achieve ordered and graceful termination of the pods in the StatefulSet, it is possible to scale the StatefulSet down to 0 prior to deletion (see the sketch after this list).
  • When using Rolling Updates with the default Pod Management Policy (OrderedReady), it's possible to get into a broken state that requires manual intervention to repair.
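
A minimal sketch of the ordered-termination workaround mentioned above: scale to 0, wait for the Pods to terminate, then delete the StatefulSet.

kubectl scale statefulset web --replicas=0
kubectl wait pod -l app=nginx --for=delete --timeout=120s
kubectl delete statefulset web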