How would you test a Kubernetes operator? We figured we would never be truly confident unless we ran the tests against a real Kubernetes cluster, so we use kind (Kubernetes in Docker) to spin one up locally. The first step is to create a multi-node cluster with one control-plane node and four workers:
tmpDir=$(mktemp -d)
cat << EOF > ${tmpDir}/kind-cluster.yml
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
nodes:
- role: control-plane
- role: worker
- role: worker
- role: worker
- role: worker
EOF
kind create cluster --loglevel=info --config ${tmpDir}/kind-cluster.yml --image kindest/node:v1.12.10@sha256:e43003c6714cc5a9ba7cf1137df3a3b52ada5c3f2c77f8c94a4d73c82b64f6f3
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
kubectl config rename-context "kubernetes-admin@kind" "kind"
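A quick, optional sanity check that the renamed context points at the new cluster:
kubectl --context kind cluster-info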
node=kind-worker
zone=a
kubectl --context kind label --overwrite node ${node} failure-domain.beta.kubernetes.io/zone=eu-west-1${zone}
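The command above labels a single worker; a short loop like the following (a sketch matching the two-workers-per-zone layout visible in the output below) labels all four:
# Two workers per simulated zone, mirroring the node listing below.
for node in kind-worker kind-worker2; do
  kubectl --context kind label --overwrite node ${node} failure-domain.beta.kubernetes.io/zone=eu-west-1a
done
for node in kind-worker3 kind-worker4; do
  kubectl --context kind label --overwrite node ${node} failure-domain.beta.kubernetes.io/zone=eu-west-1b
done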
$ kubectl --context kind get node --show-labels
NAME                 STATUS   ROLES    AGE   VERSION    LABELS
kind-control-plane   Ready    master   93s   v1.12.10   kubernetes.io/hostname=kind-control-plane,node-role.kubernetes.io/master=
kind-worker          Ready    <none>   71s   v1.12.10   failure-domain.beta.kubernetes.io/zone=eu-west-1a,kubernetes.io/hostname=kind-worker
kind-worker2         Ready    <none>   71s   v1.12.10   failure-domain.beta.kubernetes.io/zone=eu-west-1a,kubernetes.io/hostname=kind-worker2
kind-worker3         Ready    <none>   71s   v1.12.10   failure-domain.beta.kubernetes.io/zone=eu-west-1b,kubernetes.io/hostname=kind-worker3
kind-worker4         Ready    <none>   71s   v1.12.10   failure-domain.beta.kubernetes.io/zone=eu-west-1b,kubernetes.io/hostname=kind-worker4
zone=a
cat <<EOF | kubectl --context kind apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard-zone-${zone}
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Delete
EOF
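The PV listing further down uses two zones, so the same StorageClass manifest is applied once per zone; a minimal loop for that, reusing the manifest above:
for zone in a b; do
  cat <<EOF | kubectl --context kind apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard-zone-${zone}
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Delete
EOF
done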
node=kind-worker
zone=a
pv_path="/mnt/pv-zone-${zone}"
docker exec ${node} mkdir -p /data/vol ${pv_path}/bindmount
docker exec ${node} mount -o bind /data/vol ${pv_path}/bindmount
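The two docker exec commands above prepare one discoverable volume on one worker; repeating them on every worker, with the zone that worker was labelled with, yields the four PVs listed later. A sketch:
# node:zone pairs matching the labels applied earlier.
for entry in kind-worker:a kind-worker2:a kind-worker3:b kind-worker4:b; do
  node=${entry%%:*}
  zone=${entry##*:}
  pv_path="/mnt/pv-zone-${zone}"
  docker exec ${node} mkdir -p /data/vol ${pv_path}/bindmount
  docker exec ${node} mount -o bind /data/vol ${pv_path}/bindmount
done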
kubectl --context kind create ns local-volume-provisioning
cat <<EOF | kubectl --context kind apply -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-storage-admin
  namespace: local-volume-provisioning
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-pv-binding
  namespace: local-volume-provisioning
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: local-volume-provisioning
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-storage-provisioner-node-clusterrole
  namespace: local-volume-provisioning
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-node-binding
  namespace: local-volume-provisioning
subjects:
- kind: ServiceAccount
  name: local-storage-admin
  namespace: local-volume-provisioning
roleRef:
  kind: ClusterRole
  name: local-storage-provisioner-node-clusterrole
  apiGroup: rbac.authorization.k8s.io
EOF
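Optionally, kubectl auth can-i can confirm the bindings took effect before the provisioner is deployed (both commands should print "yes"):
kubectl --context kind auth can-i get nodes \
  --as=system:serviceaccount:local-volume-provisioning:local-storage-admin
kubectl --context kind auth can-i create persistentvolumes \
  --as=system:serviceaccount:local-volume-provisioning:local-storage-admin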
zone=a
cat <<EOF | kubectl --context kind apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-provisioner-config-${zone}
  namespace: local-volume-provisioning
data:
  storageClassMap: |
    standard-zone-${zone}:
      hostDir: /mnt/pv-zone-${zone}
      mountDir: /mnt/pv-zone-${zone}
      volumeMode: Filesystem
      fsType: ext4
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: local-volume-provisioner-${zone}
  namespace: local-volume-provisioning
spec:
  selector:
    matchLabels:
      app: local-volume-provisioner-${zone}
  template:
    metadata:
      labels:
        app: local-volume-provisioner-${zone}
    spec:
      serviceAccountName: local-storage-admin
      containers:
      - image: "quay.io/external_storage/local-volume-provisioner:v2.3.3"
        name: provisioner
        securityContext:
          privileged: true
        env:
        - name: MY_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: MY_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - mountPath: /etc/provisioner/config
          name: provisioner-config
          readOnly: true
        - mountPath: /mnt/pv-zone-${zone}
          name: pv-zone-${zone}
          mountPropagation: "HostToContainer"
      nodeSelector:
        failure-domain.beta.kubernetes.io/zone: eu-west-1${zone}
      volumes:
      - name: provisioner-config
        configMap:
          name: local-provisioner-config-${zone}
      - name: pv-zone-${zone}
        hostPath:
          path: /mnt/pv-zone-${zone}
EOF
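The ConfigMap and DaemonSet above are parameterised on ${zone}, so the same apply is run again with zone=b; the pod listing below shows provisioners running in both zones. A quick way to confirm both DaemonSets have all their pods ready:
kubectl --context kind -n local-volume-provisioning get daemonsets -o wide
kubectl --context kind -n local-volume-provisioning wait --for=condition=Ready pod --all --timeout=120s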
$ kubectl --context kind -n local-volume-provisioning get pods -o wide
NAME                               READY   STATUS    RESTARTS   AGE    IP           NODE           NOMINATED NODE
local-volume-provisioner-a-2fn87   1/1     Running   0          115s   10.244.3.2   kind-worker2   <none>
local-volume-provisioner-a-h2k8w   1/1     Running   0          115s   10.244.4.2   kind-worker    <none>
local-volume-provisioner-b-96qnj   1/1     Running   0          115s   10.244.1.2   kind-worker4   <none>
local-volume-provisioner-b-p76v6   1/1     Running   0          115s   10.244.2.2   kind-worker3   <none>
$ kubectl --context kind get pv
NAME                CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS      REASON   AGE
local-pv-a7c00434   245Gi      RWO            Delete           Available           standard-zone-a            35s
local-pv-b6f98e8    245Gi      RWO            Delete           Available           standard-zone-b            35s
local-pv-c3b01b62   245Gi      RWO            Delete           Available           standard-zone-a            38s
local-pv-cfe437df   245Gi      RWO            Delete           Available           standard-zone-b            36s
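Each PV discovered by the provisioner is pinned to the node that hosts its directory; inspecting one of the PVs from the listing above shows the node affinity the scheduler will honour:
kubectl --context kind get pv local-pv-a7c00434 -o jsonpath='{.spec.nodeAffinity}{"\n"}'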
cat <<EOF | kubectl --context kind apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-claim
spec:
  storageClassName: standard-zone-a
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
EOF
cat <<EOF | kubectl --context kind apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: pv-pod
spec:
  volumes:
  - name: pv-storage
    persistentVolumeClaim:
      claimName: pv-claim
  containers:
  - name: pv-container
    image: nginx
    volumeMounts:
    - mountPath: "/path/to/storage"
      name: pv-storage
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: failure-domain.beta.kubernetes.io/zone
            operator: In
            values:
            - eu-west-1a
EOF
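Once the pod is applied, a short check confirms it was scheduled onto a zone-a node and that the volume is writable (the path matches the mountPath in the pod spec above):
kubectl --context kind wait --for=condition=Ready pod/pv-pod --timeout=120s
kubectl --context kind get pod pv-pod -o wide
kubectl --context kind exec pv-pod -- sh -c 'echo hello > /path/to/storage/hello && cat /path/to/storage/hello'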
$ kubectl --context kind get pvc
NAME       STATUS   VOLUME              CAPACITY   ACCESS MODES   STORAGECLASS      AGE
pv-claim   Bound    local-pv-a7c00434   245Gi      RWO            standard-zone-a   2m31s
docker run -d --name=kind-registry --restart=always -p 5000:5000 registry:2
# Quote the heredoc delimiter so the $(...) below is evaluated inside the container at runtime, not by the host shell.
cat <<'EOF' | kubectl --context kind apply -f -
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: registry-proxy
  namespace: kube-system
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app: registry-proxy
  template:
    metadata:
      labels:
        app: registry-proxy
    spec:
      hostNetwork: true
      containers:
      - image: "tecnativa/tcp-proxy"
        name: tcp-proxy
        command: ["/bin/sh", "-c"]
        args:
        - export TALK=$(/sbin/ip route | awk '/default/ { print $3 ":5000"}');
          export LISTEN=:5000;
          /magic-entrypoint /docker-entrypoint.sh haproxy -f /usr/local/etc/haproxy/haproxy.cfg;
        ports:
        - containerPort: 5000
EOF
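With the registry and the proxy in place, images built on the host can be pushed to localhost:5000 and pulled under the same name from inside the cluster, which is how an operator image under test can reach kind. A sketch, with my-operator:dev standing in for whatever tag the operator build produces:
# Hypothetical image name; substitute the tag produced by the operator build.
docker tag my-operator:dev localhost:5000/my-operator:dev
docker push localhost:5000/my-operator:dev
# Pods can now reference image: localhost:5000/my-operator:dev; each node resolves
# localhost:5000 through the registry-proxy DaemonSet above.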