Kubernetes Commands & Concepts
RUN Commands
GET Commands
CREATE Commands
DESCRIBE Commands
PATCH Commands
DELETE Commands
EDIT Commands
EXEC Commands
APPLY Commands
WAIT Commands
ROLLOUT restart Commands
LOGS Commands
TLS Termination
TLS redirect
Kubernetes Management Commands
RUN Commands
# Test service connectivity from within cluster
kubectl run test-curl --image=curlimages/curl -it --rm -- sh
GET Commands
kubectl get pods -A
kubectl get pods -o wide -A
kubectl get pods -n elk-stack -o wide
kubectl get svc logstash -n elk-stack
kubectl get svc -n elk-stack -o wide
kubectl get deploy -n=kube-system
kubectl get deployment -n=kube-system
kubectl get svc -n kube-system
# Check if services have endpoints
kubectl get endpoints nginx-service -n default
kubectl get endpoints elasticsearch -n elk-stack
kubectl get httproute -A
kubectl get certificate -n nginx-gateway
kubectl get certificate -n haproxy-lb
kubectl get configmap -n elk-stack
kubectl get lease -n kube-system | grep scheduler
# Get detailed info
kubectl get svc nginx-gateway -n nginx-gateway -o yaml
# Check what ports are actually open
kubectl get svc nginx-gateway -n nginx-gateway -o jsonpath='{.spec.ports[*].nodePort}'
CREATE Commands
kubectl create secret tls nginx-kishoreweb-tls \
--cert=nginx-cert.pem \
--key=nginx-key.pem \
-n haproxy-lb
DESCRIBE Commands
kubectl describe pod -n elk-stack logstash-65694d9756-gzrpq
kubectl describe gateway nginx-gateway -n nginx-gateway
PATCH Commands
kubectl patch svc logstash -n elk-stack -p '{"spec":{"ports":[{"name":"beats","port":5044,"targetPort":5044},{"name":"api","port":9600,"targetPort":9600},{"name":"http","port":8080,"targetPort":8080}]}}'
kubectl patch svc kibana -n elk-stack -p '{"spec":{"type":"NodePort"}}'
#If we want Gateway Affinity to specific node use this patch
kubectl patch deployment nginx-gateway -n nginx-gateway -p '
{
"spec": {
"template": {
"spec": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "kubernetes.io/hostname",
"operator": "In",
"values": ["data2"]
}
]
}
]
}
}
}
}
}
}
}'
DELETE Commands
kubectl delete configmap logstash-config -n elk-stack
# Delete the conflicting routes
kubectl delete httproute nginx-route -n default
kubectl delete httproute elasticsearch-route -n elk-stack
EDIT Commands
kubectl edit configmap logstash-config -n elk-stack
EXEC Commands
kubectl exec -it elasticsearch-5bdbccc746-8ctgb -n elk-stack -- bash
kubectl exec -it elasticsearch -- curl elasticsearch-5bdbccc746-8ctgb
APPLY Commands
# Apply the configuration
kubectl apply -f complete-setup.yaml
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.yaml
WAIT Commands
# (The original notes duplicated the APPLY commands here by mistake.) Example wait usage:
# Wait until the cert-manager pods are Ready before applying Certificate resources
kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=cert-manager -n cert-manager --timeout=120s
rollout restart Commands
kubectl rollout restart deployment logstash -n elk-stack # Output => deployment.apps/logstash restarted
LOGS Commands
kubectl logs -f logstash-65694d9756-gzrpq -n elk-stack            # Check current Logstash logs
kubectl logs logstash-65694d9756-gzrpq -n elk-stack --previous    # Check previous container logs to see why it crashed
kubectl logs -n nginx-gateway -l app.kubernetes.io/name=nginx-gateway-fabric --tail=50
TLS Termination
TLS Termination is the process where a server or a dedicated component (such as a load balancer or reverse proxy) decrypts the incoming secure HTTPS connection. The sequence of events is:
1. A user's browser sends an HTTPS request (encrypted with TLS) to the server/proxy.
2. The server/proxy performs TLS termination, meaning it decrypts the request.
3. After decryption, the request inside the server/proxy environment is often handled as plain HTTP before being sent to the backend application servers.
4. The backend server processes the HTTP request and sends an HTTP response back to the proxy.
5. The proxy then encrypts the response again with TLS and sends it back to the user's browser as HTTPS.
TLS redirect
"TLS redirect" typically refers to the practice of redirecting HTTP traffic to HTTPS, ensuring that all communication between a client (like a web browser) and a server is encrypted via TLS (Transport Layer Security).
Kubernetes Management Commands
sudo apt install plocate
locate *.yaml
locate *.yaml | grep logstash
kubectl port-forward -n elk-stack svc/elasticsearch 9200:9200 & curl http://localhost:9200/_cluster/health
kubectl get pods -A
kubectl get pods -o wide -A
kubectl get pods -n elk-stack -o wide
kubectl get svc logstash -n elk-stack
kubectl get svc -n elk-stack -o wide
kubectl exec -it elasticsearch-5bdbccc746-8ctgb -n elk-stack -- bash
kubectl get deploy -n=kube-system
kubectl get deployment -n=kube-system
kubectl get svc -n kube-system
kubectl exec -it elasticsearch -- curl elasticsearch-5bdbccc746-8ctgb
kubectl describe pod -n elk-stack logstash-65694d9756-gzrpq #
kubectl edit configmap logstash-config -n elk-stack
kubectl logs -f logstash-65694d9756-gzrpq -n elk-stack # Check current Logstash logs
kubectl logs logstash-65694d9756-gzrpq -n elk-stack --previous # Check previous container logs to see why it crashed
kubectl patch svc logstash -n elk-stack -p '{"spec":{"ports":[{"name":"beats","port":5044,"targetPort":5044},{"name":"api","port":9600,"targetPort":9600},{"name":"http","port":8080,"targetPort":8080}]}}'
-> Output => service/logstash patched
kubectl patch svc kibana -n elk-stack -p '{"spec":{"type":"NodePort"}}' # -> Output => service/kibana patched
kubectl rollout restart deployment logstash -n elk-stack # -> Output => deployment.apps/logstash restarted
kubectl get configmap -n elk-stack
kubectl delete configmap logstash-config -n elk-stack # -> Output => configmap "logstash-config" deleted
kubectl get deployment -n elk-stack
kubectl delete deployment logstash -n elk-stack # -> Output => deployment.apps "logstash" deleted
kubectl get lease -n kube-system | grep scheduler
sudo systemctl daemon-reload
sudo systemctl enable --now containerd
systemctl status containerd
Kubernetes Master Node Commands
1 history
2 ifconfig
3 sudo apt install net-tools
4 ifconfig
5 getmac
6 swapoff -a
7 sudo swapoff -a
8 sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
9 cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf    # fixed: the page source garbled the heredoc operator '<<' into '$$'
10 overlay
11 br_netfilter
12 EOF
13 sudo modprobe overlay
14 sudo modprobe br_netfilter
15 # sysctl params required by setup, params persist across reboots
16 cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf    # fixed: the page source garbled the heredoc operator '<<' into '$$'
17 net.bridge.bridge-nf-call-iptables = 1
18 net.bridge.bridge-nf-call-ip6tables = 1
19 net.ipv4.ip_forward = 1
20 EOF
21 # Apply sysctl params without reboot
22 sudo sysctl --system
23 # Verify that the br_netfilter, overlay modules are loaded by running the following commands:
24 lsmod | grep br_netfilter
25 lsmod | grep overlay
26 # Verify that the net.bridge.bridge-nf-call-iptables, net.bridge.bridge-nf-call-ip6tables, and net.ipv4.ip_forward system variables are set to 1 in your sysctl config by running the following command:
27 sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
28 curl -LO https://github.com/containerd/containerd/releases/download/v1.7.14/containerd-1.7.14-linux-amd64.tar.gz
29 sudo tar Cxzvf /usr/local containerd-1.7.14-linux-amd64.tar.gz
30 ls -lrt
31 sudo apt install curl
32 curl -LO https://github.com/containerd/containerd/releases/download/v1.7.14/containerd-1.7.14-linux-amd64.tar.gz
33 sudo tar Cxzvf /usr/local containerd-1.7.14-linux-amd64.tar.gz
34 ls -lrt
35 curl -LO https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
36 sudo mkdir -p /usr/local/lib/systemd/system/
37 sudo mv containerd.service /usr/local/lib/systemd/system/
38 sudo mkdir -p /etc/containerd
39 containerd config default | sudo tee /etc/containerd/config.toml
40 sudo sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/g' /etc/containerd/config.toml
41 sudo systemctl daemon-reload
42 sudo systemctl enable --now containerd
43 systemctl status containerd
44 curl -LO https://github.com/opencontainers/runc/releases/download/v1.1.12/runc.amd64
45 sudo install -m 755 runc.amd64 /usr/local/sbin/runc
46 curl -LO https://github.com/containernetworking/plugins/releases/download/v1.5.0/cni-plugins-linux-amd64-v1.5.0.tgz
47 sudo mkdir -p /opt/cni/bin
48 sudo tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.5.0.tgz
49 sudo apt-get update
50 sudo apt-get install -y apt-transport-https ca-certificates curl gpg
51 curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
52 echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
53 sudo apt-get update
54 sudo apt-get install -y kubelet=1.29.6-1.1 kubeadm=1.29.6-1.1 kubectl=1.29.6-1.1 --allow-downgrades --allow-change-held-packages
55 sudo apt-mark hold kubelet kubeadm kubectl
56 kubeadm version
57 kubelet --version
58 kubectl version --client
59 sudo crictl config runtime-endpoint unix:///var/run/containerd/containerd.sock
60 crictl ps
61 crictl ps -a
62 sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=192.168.1.17 --node-name master
63 kubectl get pod
64 mkdir -p $HOME/.kube
65 kubectl get pod
66 kubectl get pod -all
67 kubectl get pod -a
68 kubectl get pod -n default
69 kubectl get ns
70 kubectl get pod -n kube-system
71 kubectl get nodes
72 kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/tigera-operator.yaml
73 curl https://raw.githubusercontent.com/projectcalico/calico/v3.28.0/manifests/custom-resources.yaml -O
74 kubectl apply -f custom-resources.yaml
75 kubectl get nodes
76 kubectl get pods -A
77 kubectl get nodes
78 history
79 kubectl get nodes
80 ifconfig
81 systemctl status containerd
82 kubectl get nodes
83 systemctl status kubelet
84 kubectl get pods -A
85 sudo systemctl status kubelet --no-pager
86 sudo systemctl restart containerd
87 sudo systemctl restart kubelet
88 sudo journalctl -u kubelet -b --no-pager -n 200
89 sudo swapoff -a
90 sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
91 free -h
92 sudo systemctl restart kubelet
93 sudo systemctl status kubelet
94 kubectl get nodes
95 sudo kubectl run nginx-pod --image=nginx:latest --port=80
96 sudo kubectl get pod
97 sudo systemctl restart containerd
98 sudo systemctl restart kubelet
99 sudo systemctl status kubelet --no-pager
100 sudo journalctl -u kubelet -n 200 --no-pager
101 sudo ss -ltnp | grep 6443 || sudo netstat -ltnp | grep 6443
102 kubectl get pods -A
103 sudo kubectl run nginx-pod --image=nginx:latest --port=80
104 kubectl run nginx-pod --image=nginx:latest --port=80
105 kubectl get pods -O
106 kubectl get pods -A
107 kubectl get pods -O wide
108 kubectl get pods -o wide
109 kubectl expose pod nginx-pod --type=NodePort --port=80 --target-port=80 --name=nginx-service
110 kubectl get pods
111 kubectl get svc -o wide
112 kubectl get pods
113 kubectl get pod
114 cd $HOME/.kube/
115 ls
116 cd config
117 cat config
118 cd .
119 cd ..
120 kubectl create namespace argocd
121 kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
122 kubectl port-forward svc/argocd-server -n argocd 8080:443
123 kubectl get pod -n argocd
124 kubectl port-forward svc/argocd-server -n argocd 8080:443
125 kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath=”{.data.password}” | base64 -d; echo    # fails: curly “smart quotes” break the jsonpath argument — corrected form on the next line
126 kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo
127 kubectl port-forward svc/argocd-server -n argocd 8080:443
128 history
Kubernetes Worker Node Commands
1 sudo apt-get install open-vm-tools-desktop -y
2 free -h
3 swapoff -a
4 sudo swapoff -a
5 sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
6 free -h
7 cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf    # fixed: the page source garbled the heredoc operator '<<' into '$$'
overlay
br_netfilter
EOF
8 sudo modprobe overlay
9 sudo modprobe br_netfilter
10 # sysctl params required by setup, params persist across reboots
11 cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf    # fixed: the page source garbled the heredoc operator '<<' into '$$'
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
12 # Apply sysctl params without reboot
13 sudo sysctl --system
14 # Verify that the br_netfilter, overlay modules are loaded by running the following commands:
15 lsmod | grep br_netfilter
16 lsmod | grep overlay
17 # Verify that the net.bridge.bridge-nf-call-iptables, net.bridge.bridge-nf-call-ip6tables, and net.ipv4.ip_forward system variables are set to 1 in your sysctl config by running the following command:
18 sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
19 curl -LO https://github.com/containerd/containerd/releases/download/v1.7.14/containerd-1.7.14-linux-amd64.tar.gz
20 sudo tar Cxzvf /usr/local containerd-1.7.14-linux-amd64.tar.gz
21 curl -LO https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
22 sudo mkdir -p /usr/local/lib/systemd/system/
23 sudo mv containerd.service /usr/local/lib/systemd/system/
24 sudo mkdir -p /etc/containerd
25 containerd config default | sudo tee /etc/containerd/config.toml
26 sudo sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/g' /etc/containerd/config.toml
27 sudo systemctl daemon-reload
28 sudo systemctl enable --now containerd
29 # Check that containerd service is up and running
30 systemctl status containerd
31 sudo apt install curl
32 curl -LO https://github.com/containerd/containerd/releases/download/v1.7.14/containerd-1.7.14-linux-amd64.tar.gz
33 sudo tar Cxzvf /usr/local containerd-1.7.14-linux-amd64.tar.gz
34 curl -LO https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
35 sudo mkdir -p /usr/local/lib/systemd/system/
36 sudo mv containerd.service /usr/local/lib/systemd/system/
37 sudo mkdir -p /etc/containerd
38 containerd config default | sudo tee /etc/containerd/config.toml
39 sudo sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/g' /etc/containerd/config.toml
40 sudo systemctl daemon-reload
41 sudo systemctl enable --now containerd
42 # Check that containerd service is up and running
43 systemctl status containerd
44 curl -LO https://github.com/opencontainers/runc/releases/download/v1.1.12/runc.amd64
45 sudo install -m 755 runc.amd64 /usr/local/sbin/runc
46 curl -LO https://github.com/containernetworking/plugins/releases/download/v1.5.0/cni-plugins-linux-amd64-v1.5.0.tgz
47 sudo mkdir -p /opt/cni/bin
48 sudo tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.5.0.tgz
49 sudo apt-get update
50 sudo apt-get install -y apt-transport-https ca-certificates curl gpg
51 curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
52 echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
53 sudo apt-get update
54 sudo apt-get install -y kubelet=1.29.6-1.1 kubeadm=1.29.6-1.1 kubectl=1.29.6-1.1 --allow-downgrades --allow-change-held-packages
55 sudo apt-mark hold kubelet kubeadm kubectl
56 kubeadm version
57 kubelet --version
58 kubectl version --client
59 sudo crictl config runtime-endpoint unix:///var/run/containerd/containerd.sock
60 sudo crictl ps
61 ping 192.168.1.17
62 kubeadm join 192.168.1.17:6443 --token m2nkd9.l1ir7saoqjktw1d0 --discovery-token-ca-cert-hash sha256:776608160328af88861f5b3215012d99d0cdb03b31ebb4e7aa532750c0c219ee
63 sudo kubeadm join 192.168.1.17:6443 --token m2nkd9.l1ir7saoqjktw1d0 --discovery-token-ca-cert-hash sha256:776608160328af88861f5b3215012d99d0cdb03b31ebb4e7aa532750c0c219ee
64 sudo kubectl run nginx-pod --image=nginx:latest --port=80
65 kubectl get node
66 sudo kubectl run nginx-pod --image=nginx:latest --port=80
67 kubelet get pod    # typo in history: should be 'kubectl get pod' (corrected on the next line)
68 kubectl get pod
69 cd /etc/
70 cd kubernetes/
71 ls
72 cat kubelet.conf
73 sudo cat kubelet.conf
74 history