broken-pods/

bad-command.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-bad-command-pod
spec:
  containers:
  - command:
    - thiscommanddoesnotexist
    image: busybox
    name: broken-pods-bad-command-container

default-shell-command.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-default-shell-command-pod
spec:
  containers:
  - image: busybox
    name: broken-pods-default-shell-command-container

failed-command.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-failed-command-pod
spec:
  containers:
  - image: busybox
    command:
    - /bin/sh
    - -c
    - "exit 1"
    name: broken-pods-failed-command-container

misused-command.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-misused-command-pod
spec:
  containers:
  - image: busybox
    command:
    - /bin/sh
    - -c
    name: broken-pods-misused-command-container

multi-container-no-command.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-pod-multi-container-no-command-pod
spec:
  containers:
  # this container has no command or entrypoint specified
  - image: mstormo/suse
    imagePullPolicy: IfNotPresent
    name: broken-pod-multi-container-no-command-1
  - image: busybox
    command:
    - sleep
    - "3600"
    name: broken-pod-multi-container-no-command-2

no-command.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-no-command-pod
spec:
  containers:
  # this container has no command or entrypoint specified
  - image: mstormo/suse
    name: broken-pods-no-command-container

oom-killed.yaml
---
# https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/ claims that this invokes the OOM killer, but it runs fine on MicroK8s?
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-oom-killed-pod
spec:
  containers:
  - args: ["--vm", "1", "--vm-bytes", "250M", "--vm-hang", "1"]
    command: ["stress"]
    image: polinux/stress
    name: broken-pods-oom-killed-container
    resources:
      limits:
        memory: "100Mi"
      requests:
        memory: "50Mi"

private-repo.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-private-repo-pod
spec:
  containers:
  # this image is in a private repository, so the pull should fail without credentials
  - image: imiell/bad-dockerfile-private
    name: broken-pods-private-repo-container

too-much-mem.yaml
---
# https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/ claims that this invokes the OOM killer, but it runs fine on MicroK8s?
apiVersion: v1
kind: Pod
metadata:
  name: broken-pods-too-much-mem-pod
spec:
  containers:
  - command:
    - sleep
    - "3600"
    image: busybox
    name: broken-pods-too-much-mem-container
    resources:
      requests:
        memory: "1000Gi"

spec.initContainers/

init-container.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-init-container-pod
spec:
  containers:
  - name: broken-init-container-container
    image: busybox
    command: ['sh', '-c', 'echo The app is running! && sleep 3600']
  initContainers:
  - name: broken-init-container-init-container
    image: busybox
    command: ['sh', '-c', "until nslookup pods-init-container-service-nonexistent.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]

spec.containers.livenessProbe/

liveness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-liveness-pod
spec:
  containers:
  - args:
    - /bin/sh
    - -c
    - "sleep 3600"
    image: busybox
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5
    name: broken-liveness-container

spec.containers.readinessProbe/

readiness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: broken-readiness-pod
spec:
  containers:
  - args:
    - /bin/sh
    - -c
    - "sleep 3600"
    image: busybox
    readinessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5
    name: broken-readiness-container
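
For contrast with the probe manifests above, here is a minimal sketch of a pod whose exec probe does pass. The file name working-liveness.yaml and the resource names are illustrative and not part of the collection above; the only difference that matters is that /tmp/healthy is created before the long sleep, so the same cat /tmp/healthy check succeeds.

working-liveness.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: working-liveness-pod
spec:
  containers:
  - name: working-liveness-container
    image: busybox
    # create the file the probe checks, then keep the container alive
    command:
    - /bin/sh
    - -c
    - "touch /tmp/healthy && sleep 3600"
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5

After kubectl apply -f working-liveness.yaml, kubectl get pod should show it Running with no restarts, whereas broken-liveness-pod is expected to keep restarting because its probe never finds /tmp/healthy.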