kubernetes.io > Documentation > Concepts > Overview > Working with Kubernetes Objects > Labels and Selectors
kubectl run nginx1 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx2 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx3 --image=nginx --restart=Never --labels=app=v1
# or
for i in `seq 1 3`; do kubectl run nginx$i --image=nginx -l app=v1 ; done
kubectl get po --show-labels
kubectl label po nginx2 app=v2 --overwrite
kubectl get po -L app
# or
kubectl get po --label-columns=app
kubectl get po -l app=v2
# or
kubectl get po -l 'app in (v2)'
# or
kubectl get po --selector=app=v2
kubectl label po -l "app in(v1,v2)" tier=web
kubectl annotate po -l "app=v2" owner=marketing
kubectl label po nginx1 nginx2 nginx3 app-
# or
kubectl label po nginx{1..3} app-
# or
kubectl label po -l app app-
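To confirm the label was removed:
kubectl get po --show-labels # 'app' should no longer be listed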
kubectl annotate po nginx1 nginx2 nginx3 description='my description'
# or
kubectl annotate po nginx{1..3} description='my description'
kubectl annotate pod nginx1 --list
# or
kubectl describe po nginx1 | grep -i 'annotations'
# or
kubectl get po nginx1 -o custom-columns=Name:metadata.name,ANNOTATIONS:metadata.annotations.description
As an alternative to using | grep, you can use JSONPath:
kubectl get po nginx1 -o jsonpath='{.metadata.annotations}{"\n"}'
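The same JSONPath approach can pull out a single annotation, for example the description set above:
kubectl get po nginx1 -o jsonpath='{.metadata.annotations.description}{"\n"}'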
kubectl annotate po nginx{1..3} description-
kubectl delete po nginx{1..3}
Add the label to a node:
kubectl label nodes <your-node-name> accelerator=nvidia-tesla-p100
kubectl get nodes --show-labels
We can use the 'nodeSelector' property on the Pod YAML:
apiVersion: v1
kind: Pod
metadata:
  name: cuda-test
spec:
  containers:
    - name: cuda-test
      image: "k8s.gcr.io/cuda-vector-add:v0.1"
  nodeSelector: # add this
    accelerator: nvidia-tesla-p100 # the selection label
You can easily find out where it should be placed in the YAML by running:
kubectl explain po.spec
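kubectl explain also takes the full path to a field, which confirms the placement directly:
kubectl explain po.spec.nodeSelector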
OR: Use node affinity (https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/#schedule-a-pod-using-required-node-affinity)
apiVersion: v1
kind: Pod
metadata:
  name: affinity-pod
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: accelerator
            operator: In
            values:
            - nvidia-tesla-p100
  containers:
    ...
Taint a node with key tier and value frontend, with the effect NoSchedule. Then, create a pod that tolerates this taint.
Taint a node:
kubectl taint node node1 tier=frontend:NoSchedule # key=value:Effect
kubectl describe node node1 # view the taints on a node
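A JSONPath query shows the same taints as structured output:
kubectl get node node1 -o jsonpath='{.spec.taints}{"\n"}'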
And to tolerate the taint:
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: nginx
    image: nginx
  tolerations:
  - key: "tier"
    operator: "Equal"
    value: "frontend"
    effect: "NoSchedule"
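Note that a toleration only allows the pod onto the tainted node; it does not force it there. To check where the pod actually landed:
kubectl get po frontend -o wide # the NODE column shows where it was scheduled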
Create a pod that will be scheduled on node controlplane. Use nodeSelector and tolerations.
vi pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: nginx
    image: nginx
  nodeSelector:
    kubernetes.io/hostname: controlplane
  tolerations:
  - key: "node-role.kubernetes.io/control-plane"
    operator: "Exists"
    effect: "NoSchedule"
kubectl create -f pod.yaml
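To verify the pod was placed on the intended node:
kubectl get po frontend -o wide # NODE should be controlplane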
kubernetes.io > Documentation > Concepts > Workloads > Workload Resources > Deployments
kubectl create deployment nginx --image=nginx:1.18.0 --dry-run=client -o yaml > deploy.yaml
vi deploy.yaml
# change the replicas field from 1 to 2
# add this section to the container spec and save the deploy.yaml file
# ports:
# - containerPort: 80
kubectl apply -f deploy.yaml
or, do something like:
kubectl create deployment nginx --image=nginx:1.18.0 --dry-run=client -o yaml | sed 's/replicas: 1/replicas: 2/g' | sed 's/image: nginx:1.18.0/image: nginx:1.18.0\n        ports:\n        - containerPort: 80/g' | kubectl apply -f -
or,
kubectl create deploy nginx --image=nginx:1.18.0 --replicas=2 --port=80
kubectl get deploy nginx -o yaml
kubectl describe deploy nginx # you'll see the name of the replica set on the Events section and in the 'NewReplicaSet' property
# OR you can find rs directly by:
kubectl get rs -l run=nginx # if you created deployment by 'run' command
kubectl get rs -l app=nginx # if you created deployment by 'create' command
# you could also just do kubectl get rs
kubectl get rs nginx-7bf7478b77 -o yaml
kubectl get po # get all the pods
# OR you can find pods directly by:
kubectl get po -l run=nginx # if you created deployment by 'run' command
kubectl get po -l app=nginx # if you created deployment by 'create' command
kubectl get po nginx-7bf7478b77-gjzp8 -o yaml
kubectl rollout status deploy nginx
kubectl set image deploy nginx nginx=nginx:1.19.8
# alternatively...
kubectl edit deploy nginx # change the .spec.template.spec.containers[0].image
The syntax of the ‘kubectl set image’ command is kubectl set image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N [options]
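So if the deployment ran more than one container, several images could be updated in a single call; a sketch, assuming a hypothetical second container named 'sidecar':
kubectl set image deploy nginx nginx=nginx:1.19.8 sidecar=busybox:1.35 # 'sidecar' is a hypothetical container name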
kubectl rollout history deploy nginx
kubectl get deploy nginx
kubectl get rs # check that a new replica set has been created
kubectl get po
kubectl rollout undo deploy nginx
# wait a bit
kubectl get po # select one 'Running' Pod
kubectl describe po nginx-5ff4457d65-nslcl | grep -i image # should be nginx:1.18.0
kubectl set image deploy nginx nginx=nginx:1.91
# or
kubectl edit deploy nginx
# change the image to nginx:1.91
# vim tip: type (without quotes) '/image' and Enter, to navigate quickly
kubectl rollout status deploy nginx
# or
kubectl get po # you'll see 'ErrImagePull' or 'ImagePullBackOff'
kubectl rollout undo deploy nginx --to-revision=2
kubectl describe deploy nginx | grep Image:
kubectl rollout status deploy nginx # Everything should be OK
kubectl rollout history deploy nginx --revision=4 # You'll also see the wrong image displayed here
kubectl scale deploy nginx --replicas=5
kubectl get po
kubectl describe deploy nginx
kubectl autoscale deploy nginx --min=5 --max=10 --cpu-percent=80
# view the horizontalpodautoscalers.autoscaling for nginx
kubectl get hpa nginx
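The HPA needs the metrics API to act on CPU usage; if its TARGETS column shows <unknown>, the metrics-server add-on is likely missing. A quick check that metrics are being collected:
kubectl top po # requires metrics-server; an error here usually means it is not installed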
kubectl rollout pause deploy nginx
kubectl set image deploy nginx nginx=nginx:1.19.9
# or
kubectl edit deploy nginx
# change the image to nginx:1.19.9
kubectl rollout history deploy nginx # no new revision
kubectl rollout resume deploy nginx
kubectl rollout history deploy nginx
kubectl rollout history deploy nginx --revision=6 # insert the number of your latest revision
kubectl delete deploy nginx
kubectl delete hpa nginx
#Or
kubectl delete deploy/nginx hpa/nginx
Deploy 3 replicas of v1:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app-v1
  labels:
    app: my-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-app
      version: v1
  template:
    metadata:
      labels:
        app: my-app
        version: v1
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: workdir
          mountPath: /usr/share/nginx/html
      initContainers:
      - name: install
        image: busybox:1.28
        command:
        - /bin/sh
        - -c
        - "echo version-1 > /work-dir/index.html"
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
      volumes:
      - name: workdir
        emptyDir: {}
Create the service:
apiVersion: v1
kind: Service
metadata:
  name: my-app-svc
  labels:
    app: my-app
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
  selector:
    app: my-app
Test if the deployment was successful from within a Pod:
# run a wget to the Service my-app-svc
kubectl run -it --rm --restart=Never busybox --image=gcr.io/google-containers/busybox --command -- wget -qO- my-app-svc
version-1
Deploy 1 replica of v2:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app-v2
  labels:
    app: my-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
      version: v2
  template:
    metadata:
      labels:
        app: my-app
        version: v2
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: workdir
          mountPath: /usr/share/nginx/html
      initContainers:
      - name: install
        image: busybox:1.28
        command:
        - /bin/sh
        - -c
        - "echo version-2 > /work-dir/index.html"
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
      volumes:
      - name: workdir
        emptyDir: {}
Observe that when calling the IP exposed by the service, requests are load balanced across the two versions:
# run a busybox pod that makes a wget call to the service my-app-svc every second and prints the version of the pod it reached.
kubectl run -it --rm --restart=Never busybox --image=gcr.io/google-containers/busybox -- /bin/sh -c 'while sleep 1; do wget -qO- my-app-svc; done'
version-1
version-1
version-1
version-2
version-2
version-1
If v2 is stable, scale it up to 4 replicas and shut down v1:
kubectl scale --replicas=4 deploy my-app-v2
kubectl delete deploy my-app-v1
while sleep 0.1; do curl $(kubectl get svc my-app-svc -o jsonpath="{.spec.clusterIP}"); done
version-2
version-2
version-2
version-2
version-2
version-2
kubectl create job pi --image=perl:5.34 -- perl -Mbignum=bpi -wle 'print bpi(2000)'
kubectl get jobs -w # wait till 'COMPLETIONS' is 1/1 (will take some time, perl image might be big)
kubectl get po # get the pod name
kubectl logs pi-**** # get the pi numbers
kubectl delete job pi
OR
kubectl get jobs -w # wait till 'COMPLETIONS' is 1/1 (will take some time, perl image might be big)
kubectl logs job/pi
kubectl delete job pi
OR
kubectl wait --for=condition=complete --timeout=300s job pi
kubectl logs job/pi
kubectl delete job pi
kubectl create job busybox --image=busybox -- /bin/sh -c 'echo hello;sleep 30;echo world'
kubectl get po # find the job pod
kubectl logs busybox-ptx58 -f # follow the logs
kubectl get jobs
kubectl describe jobs busybox
kubectl logs job/busybox
kubectl delete job busybox
kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'while true; do echo hello; sleep 10;done' > job.yaml
vi job.yaml
Add job.spec.activeDeadlineSeconds=30 (the job and its pods are terminated once it has been active for 30 seconds)
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  activeDeadlineSeconds: 30 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - while true; do echo hello; sleep 10;done
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
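A quick way to see the deadline being enforced (assuming you saved the file as job.yaml): after roughly 30 seconds the job and its pods are terminated.
kubectl create -f job.yaml
kubectl get job busybox -w # watch it being terminated after ~30s
kubectl describe job busybox | grep -i deadline # the Failed condition reason should be DeadlineExceeded
kubectl delete job busybox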
kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'echo hello;sleep 30;echo world' > job.yaml
vi job.yaml
Add job.spec.completions=5 (the job runs its pod to completion 5 times, one after another)
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  completions: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
kubectl create -f job.yaml
Verify that it has been completed:
kubectl get job busybox -w # will take two and a half minutes
kubectl delete jobs busybox
vi job.yaml
Add job.spec.parallelism=5 (the job runs 5 pods at the same time)
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  parallelism: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
kubectl create -f job.yaml
kubectl get jobs
It will take some time for the 5 parallel pods to finish (>= 30 seconds)
kubectl delete job busybox
kubernetes.io > Documentation > Tasks > Run Jobs > Running Automated Tasks with a CronJob
kubectl create cronjob busybox --image=busybox --schedule="*/1 * * * *" -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster'
kubectl get po # copy the ID of the pod whose container was just created
kubectl logs <busybox-***> # you will see the date and message
kubectl delete cj busybox --force # cj stands for cronjob and --force to delete immediately
kubectl get cj
kubectl get jobs --watch
kubectl get po --show-labels # observe that the pods have a label that mentions their 'parent' job
kubectl logs busybox-1529745840-m867r
# Bear in mind that Kubernetes will run a new job/pod for each scheduled run of the cron job
kubectl delete cj busybox
kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml
vi time-limited-job.yaml
Add cronjob.spec.startingDeadlineSeconds=17 (the run is skipped if the job cannot start within 17 seconds of its scheduled time)
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: null
  name: time-limited-job
spec:
  startingDeadlineSeconds: 17 # add this line
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: time-limited-job
    spec:
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
          - args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
            image: busybox
            name: time-limited-job
            resources: {}
          restartPolicy: Never
  schedule: '* * * * *'
status: {}
kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml
vi time-limited-job.yaml
Add cronjob.spec.jobTemplate.spec.activeDeadlineSeconds=12 (each job run is terminated if it is active for more than 12 seconds)
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: null
  name: time-limited-job
spec:
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: time-limited-job
    spec:
      activeDeadlineSeconds: 12 # add this line
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
          - args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
            image: busybox
            name: time-limited-job
            resources: {}
          restartPolicy: Never
  schedule: '* * * * *'
status: {}
kubectl create job --from=cronjob/sample-cron-job sample-job
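Assuming a CronJob named sample-cron-job exists in the cluster, you can verify the one-off job it spawned and inspect its output:
kubectl get job sample-job
kubectl logs job/sample-job
kubectl delete job sample-job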