debug-network.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: debug-network-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: praqma/network-multitool
      name: debug-network-container
dns-debug.yaml
---
# https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/
apiVersion: v1
kind: Pod
metadata:
  name: pod-dns-debug
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
      name: dnsutils
simple.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-simple-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: pods-simple-container
See: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/#adding-additional-entries-with-hostaliases
spec.containers.livenessProbe/
advanced-liveness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
    - args:
        - /server
      image: k8s.gcr.io/liveness
      livenessProbe:
        httpGet:
          httpHeaders:
            - name: X-Custom-Header
              value: Awesome
          # when "host" is not defined, "PodIP" will be used
          # host: my-host
          # when "scheme" is not defined, "HTTP" scheme will be used. Only "HTTP" and "HTTPS" are allowed
          # scheme: HTTPS
          path: /healthz
          port: 8080
        initialDelaySeconds: 15
        timeoutSeconds: 1
      name: liveness
liveness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-liveness-exec-pod
spec:
  containers:
    - args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      image: busybox
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
        periodSeconds: 5
      name: pods-liveness-exec-container
liveness/
advanced-liveness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
    - args:
        - /server
      image: k8s.gcr.io/liveness
      livenessProbe:
        httpGet:
          httpHeaders:
            - name: X-Custom-Header
              value: Awesome
          # when "host" is not defined, "PodIP" will be used
          # host: my-host
          # when "scheme" is not defined, "HTTP" scheme will be used. Only "HTTP" and "HTTPS" are allowed
          # scheme: HTTPS
          path: /healthz
          port: 8080
        initialDelaySeconds: 15
        timeoutSeconds: 1
      name: liveness
liveness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-liveness-exec-pod
spec:
  containers:
    - args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      image: busybox
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
        periodSeconds: 5
      name: pods-liveness-exec-container
spec.dnsConfig/
dns-config.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: dns-config-dns-config-pod
spec:
  containers:
    - name: test
      image: nginx
  dnsPolicy: "None"
  dnsConfig:
    nameservers:
      - 1.2.3.4
    searches:
      - ns1.svc.cluster-domain.example
      - my.dns.search.suffix
    options:
      - name: ndots
        value: "2"
      - name: edns0
spec.volumes.emptyDir/
emptydir.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: volumes-emptydir-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: volumes-emptydir-container
      volumeMounts:
        - mountPath: /volumes-emptydir-mount-path
          name: volumes-emptydir-volume
  volumes:
    - name: volumes-emptydir-volume
      emptyDir: {}
spec.volumes.hostPath.type/
file-or-create.yaml
---
# https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-fileorcreate
apiVersion: v1
kind: Pod
metadata:
  name: volumes-file-or-create-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      name: busybox
      image: busybox
      volumeMounts:
        - mountPath: /var/local/aaa
          name: volumes-file-or-create-dir
        - mountPath: /var/local/aaa/1.txt
          name: volumes-file-or-create-file
  volumes:
    - name: volumes-file-or-create-dir
      hostPath:
        # Ensure the file directory is created.
        path: /var/local/aaa
        type: DirectoryOrCreate
    - name: volumes-file-or-create-file
      hostPath:
        path: /var/local/aaa/1.txt
        type: FileOrCreate
spec.hostAliases/
host-aliases.yaml
---
# https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/#adding-additional-entries-with-hostaliases
apiVersion: v1
kind: Pod
metadata:
  name: pods-host-aliases-pod
spec:
  hostAliases:
    - ip: "127.0.0.1"
      hostnames:
        - "foo.local"
        - "bar.local"
    - ip: "10.1.2.3"
      hostnames:
        - "foo.remote"
        - "bar.remote"
  containers:
    - name: cat-hosts
      image: busybox
      command:
        - cat
      args:
        - "/etc/hosts"
spec.volumes.hostPath/
hostdir.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: volumes-hostdir-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: volumes-hostdir-container
      volumeMounts:
        - mountPath: /volumes-hostdir-mount-path
          name: volumes-hostdir-volume
  volumes:
    - hostPath:
        # directory location on host
        path: /tmp
      name: volumes-hostdir-volume
spec.initContainers/
init-container.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: init-container-pod
spec:
  containers:
    - name: init-container-container
      image: busybox
      command: ['sh', '-c', 'echo The app is running! && sleep 3600']
  initContainers:
    - name: init-container-init-container
      image: busybox
      command: ['sh', '-c', "until nslookup pods-init-container-service.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
spec.containers.lifecycle/
lifecycle.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-pod
spec:
  containers:
    - image: nginx
      lifecycle:
        postStart:
          exec:
            command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
        preStop:
          exec:
            command: ["/bin/sh", "-c", "nginx -s quit; while killall -0 nginx; do sleep 1; done"]
      name: lifecycle-container
resources_and_limits/
memory-request-limit.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: memory-request-limit-pod
spec:
  containers:
    - command: ["sleep", "3600"]
      image: busybox
      name: memory-request-limit-container
      resources:
        limits:
          memory: "200Mi"
        requests:
          memory: "100Mi"
resource-limit.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: resource-limit-pod
spec:
  containers:
    - name: resource-limit-container
      image: busybox
      args:
        - sleep
        - "600"
      resources:
        limits:
          cpu: "30m"
          memory: "200Mi"
resource-request.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: resource-request-pod
spec:
  containers:
    - name: resource-request-container
      image: busybox
      args:
        - sleep
        - "600"
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
        periodSeconds: 5
      resources:
        requests:
          memory: "20000Mi"
          cpu: "99999m"
spec.containers.resources/
memory-request-limit.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: memory-request-limit-pod
spec:
  containers:
    - command: ["sleep", "3600"]
      image: busybox
      name: memory-request-limit-container
      resources:
        limits:
          memory: "200Mi"
        requests:
          memory: "100Mi"
resource-limit.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: resource-limit-pod
spec:
  containers:
    - name: resource-limit-container
      image: busybox
      args:
        - sleep
        - "600"
      resources:
        limits:
          cpu: "30m"
          memory: "200Mi"
resource-request.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: resource-request-pod
spec:
  containers:
    - name: resource-request-container
      image: busybox
      args:
        - sleep
        - "600"
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
        periodSeconds: 5
      resources:
        requests:
          memory: "20000Mi"
          cpu: "99999m"
spec.containers/
multi-container.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-multi-container-pod
spec:
  containers:
    - image: busybox
      command:
        - sleep
        - "3600"
      name: pods-multi-container-container-1
    - image: busybox
      command:
        - sleep
        - "3601"
      name: pods-multi-container-container-2
spec.affinity.nodeAffinity/
node-affinity.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/hostname
                operator: Exists
  containers:
    - command: ["sleep", "3600"]
      name: pod-node-affinity-container
      image: busybox
spec.volumes.persistentVolumeClaim/
pod-pvc.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: volume-pvc
spec:
  containers:
    - name: frontend
      image: nginx
      volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: volume-pvc
  volumes:
    - name: volume-pvc
      persistentVolumeClaim:
        claimName: persistent-volume-claim
spec.dnsPolicy/
policy.yaml
---
# Adapted from: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
# "Default": The Pod inherits the name resolution configuration from the node that the pods run on. See related discussion for more details.
# "ClusterFirst": Any DNS query that does not match the configured cluster domain suffix, such as "www.kubernetes.io", is forwarded to the upstream nameserver inherited from the node. Cluster administrators may have extra stub-domain and upstream DNS servers configured. See related discussion for details on how DNS queries are handled in those cases.
# "ClusterFirstWithHostNet": For Pods running with hostNetwork, you should explicitly set its DNS policy "ClusterFirstWithHostNet".
# "None": It allows a Pod to ignore DNS settings from the Kubernetes environment. All DNS settings are supposed to be provided using the dnsConfig field in the Pod Spec. See Pod's DNS config subsection below.
apiVersion: v1
kind: Pod
metadata:
  name: dns-config-policy-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: dns-config-policy-container
  hostNetwork: true
  dnsPolicy: ClusterFirstWithHostNet
spec.containers.securityContext/
privileged-namespace.yaml
---
# Namespace here refers to the container namespaces, not kubernetes
apiVersion: v1
kind: Pod
metadata:
  name: privileged-namespace-pod
spec:
  hostPID: true
  hostIPC: true
  hostNetwork: true
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: privileged-namespace-container
      securityContext:
        privileged: true
privileged-simple.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: privileged-simple-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: privileged-simple-pod
      securityContext:
        privileged: true
spec.containers.volumes.projected/
projected.yaml
---
# https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-with-a-secret-a-downward-api-and-a-configmap
apiVersion: v1
kind: Pod
metadata:
  name: volumes-projected-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: volumes-projected-container
      volumeMounts:
        - name: volumes-projected-volume-mount
          mountPath: "/volumes-projected-volume-path"
          readOnly: true
  volumes:
    - name: volumes-projected-volume-mount
      projected:
        sources:
          - secret:
              items:
                - key: username
                  path: my-group/my-username
                  mode: 511
              name: volumes-projected-secret
          - downwardAPI:
              items:
                - path: "labels"
                  fieldRef:
                    fieldPath: metadata.labels
                - path: "cpu_limit"
                  resourceFieldRef:
                    # must name a container in this Pod (was "container-test",
                    # which does not exist here)
                    containerName: volumes-projected-container
                    resource: limits.cpu
          - configMap:
              items:
                - key: config
                  path: my-group/my-config
              name: volumes-projected-configmap
readiness/
readiness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-readiness-exec-pod
spec:
  containers:
    - args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      image: busybox
      readinessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
      name: pods-readiness-exec-container
spec.containers.readinessProbe/
readiness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-readiness-exec-pod
spec:
  containers:
    - args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      image: busybox
      readinessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
      name: pods-readiness-exec-container
spec.containers.volumes.projected.sources.serviceAccountToken/
sa-token.yaml
---
# https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-with-a-secret-a-downward-api-and-a-configmap
apiVersion: v1
kind: Pod
metadata:
  name: volumes-sa-token-pod
spec:
  containers:
    - name: container-test
      image: busybox
      volumeMounts:
        - mountPath: "/service-account"
          name: volumes-sa-token-volume
          readOnly: true
  volumes:
    - name: volumes-sa-token-volume
      projected:
        sources:
          - serviceAccountToken:
              audience: api
              expirationSeconds: 3600
              path: token
spec.terminationGracePeriodSeconds/
simple.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-termination-grace-period-seconds
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: pods-termination-grace-period-seconds
  terminationGracePeriodSeconds: 5  # Time to wait before moving from a TERM signal to the pod's main process to a KILL signal.
spec.containers.volumeMounts.subPath/
subpath.yaml
---
# https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath
# Sometimes, it is useful to share one volume for multiple uses in a single Pod.
# The volumeMounts.subPath property can be used to specify a sub-path inside the
# referenced volume instead of its root.
apiVersion: v1
kind: Pod
metadata:
  name: volumes-subpath-pod
spec:
  containers:
    - env:
        - name: MYSQL_ROOT_PASSWORD
          value: "rootpasswd"
      image: mysql
      name: mysql
      volumeMounts:
        - mountPath: /var/lib/mysql
          name: site-data
          subPath: mysql
    - image: php:7.0-apache
      name: php
      volumeMounts:
        - mountPath: /var/www/html
          name: site-data
          subPath: html
  volumes:
    - name: site-data
      persistentVolumeClaim:
        claimName: my-lamp-site-data
spec.containers.volumeMounts.subPathExpr/
subpathexpr.yaml
---
# https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath-with-expanded-environment-variables
apiVersion: v1
kind: Pod
metadata:
  name: volumes-subpathexpr-pod
spec:
  containers:
    - command: ["sleep", "3600"]
      env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
      image: busybox
      name: volumes-subpathexpr-container
      volumeMounts:
        - name: volumes-subpathexpr-volume
          mountPath: /logs
          subPathExpr: $(POD_NAME)
  restartPolicy: Never
  volumes:
    - name: volumes-subpathexpr-volume
      hostPath:
        path: /var/log/pods
spec.topologySpreadConstraints/
topology-spread-constraints-with-node-affinity.yaml
---
# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
kind: Pod
apiVersion: v1
metadata:
  name: topology-spread-constraints-with-node-affinity-pod
  labels:
    label1: value1
spec:
  topologySpreadConstraints:
    - labelSelector:
        matchLabels:
          label1: value1
      maxSkew: 1
      topologyKey: zone
      whenUnsatisfiable: DoNotSchedule
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: zone
                operator: NotIn
                values:
                  - zoneC
  containers:
    - name: pause
      image: k8s.gcr.io/pause:3.1
topology-spread-constraints.yaml
---
# https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
kind: Pod
apiVersion: v1
metadata:
  name: topology-spread-constraints-pod
  labels:
    label1: value1
spec:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          label1: value1
  containers:
    - name: pause
      image: k8s.gcr.io/pause:3.1
spec.containers.imagePullPolicy/
image-pull-policy.yaml
# imagePullPolicies: IfNotPresent, Always, Never
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-image-pull-policy-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      imagePullPolicy: IfNotPresent
      name: pods-image-pull-policy-container
spec.imagePullSecrets/
image-pull-secrets.yaml
# https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
---
apiVersion: v1
kind: Pod
metadata:
  name: pods-image-pull-secrets-pod
spec:
  containers:
    - command:
        - sleep
        - "3600"
      image: busybox
      name: pods-image-pull-secrets-container
  imagePullSecrets:
    - name: regcred  # does not exist, create with instructions above
spec.nodeSelector/
simple.yaml
# Assumes the existence of the label: node-role.kubernetes.io/master, and tries to assign the pod to the labelled node.
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-selector-simple
spec:
  containers:
    - command: ["sleep", "3600"]
      image: busybox
      name: pod-node-selector-simple-container
  nodeSelector:
    node-role.kubernetes.io/master: ""
spec.tolerations/
toleration.yaml
# IMPORTANT:
#
# This example makes some assumptions:
#
# - There is one single node that is also a master (called 'master')
# - The following command has been run: `kubectl taint nodes master pod-toleration:NoSchedule`
#
# Once the master node is tainted, a pod will not be scheduled on there (you can try the below yaml _without_ the spec.toleration to test this).
#
# CLEANUP:
#
# kubectl delete pod pod-toleration # delete pod
# kubectl taint nodes master pod-toleration:NoSchedule- # delete taint
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-toleration
spec:
  containers:
    - command: ["sleep", "3600"]
      image: busybox
      name: pod-toleration-container
  tolerations:
    - key: ""  # empty means match all taint keys
      operator: Exists
taints_and_tolerations/
toleration.yaml
# IMPORTANT:
#
# This example makes some assumptions:
#
# - There is one single node that is also a master (called 'master')
# - The following command has been run: `kubectl taint nodes master pod-toleration:NoSchedule`
#
# Once the master node is tainted, a pod will not be scheduled on there (you can try the below yaml _without_ the spec.toleration to test this).
#
# CLEANUP:
#
# kubectl delete pod pod-toleration # delete pod
# kubectl taint nodes master pod-toleration:NoSchedule- # delete taint
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-toleration
spec:
  containers:
    - command: ["sleep", "3600"]
      image: busybox
      name: pod-toleration-container
  tolerations:
    - key: ""  # empty means match all taint keys
      operator: Exists
tolerations/
toleration.yaml
# IMPORTANT:
#
# This example makes some assumptions:
#
# - There is one single node that is also a master (called 'master')
# - The following command has been run: `kubectl taint nodes master pod-toleration:NoSchedule`
#
# Once the master node is tainted, a pod will not be scheduled on there (you can try the below yaml _without_ the spec.toleration to test this).
#
# CLEANUP:
#
# kubectl delete pod pod-toleration # delete pod
# kubectl taint nodes master pod-toleration:NoSchedule- # delete taint
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-toleration
spec:
  containers:
    - command: ["sleep", "3600"]
      image: busybox
      name: pod-toleration-container
  tolerations:
    - key: ""  # empty means match all taint keys
      operator: Exists