kubectl config use-context k8s
kubectl create clusterrole deployment-clusterrole --verb=create --resource=deployments,statefulsets,daemonsets
kubectl create sa cicd-token --namespace app-team1
kubectl create clusterrolebinding deploy-b --clusterrole=deployment-clusterrole --serviceaccount=app-team1:cicd-token
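# Optional check (not part of the original steps): verify that the service account can create deployments in app-team1; it should print "yes".
kubectl auth can-i create deployments --as=system:serviceaccount:app-team1:cicd-token -n app-team1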
kubectl drain ek8s-node-1 --delete-emptydir-data --ignore-daemonsets --force
# Upgrade kubeadm:
apt-mark unhold kubeadm && apt-get update && apt-get install -y kubeadm=1.21.2-00 && apt-mark hold kubeadm
# Now drain the master node.
kubectl drain master --ignore-daemonsets
# Now run the kubeadm upgrade command with the required version.
kubeadm upgrade apply v1.21.2
# Now upgrade kubelet and kubectl with the below command.
apt-mark unhold kubelet kubectl && apt-get install -y kubelet=1.21.2-00 kubectl=1.21.2-00 && apt-mark hold kubelet kubectl
# Then restart the kubelet and uncordon the master node.
systemctl daemon-reload && systemctl restart kubelet
kubectl uncordon master
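# Optional check: the master node should now report VERSION v1.21.2 and be schedulable again.
kubectl get nodes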
# Backing up etcd to a snapshot file.
ETCDCTL_API=3 etcdctl --endpoints="https://127.0.0.1:2379" --cacert=/opt/KUIN000601/ca.crt --cert=/opt/KUIN000601/etcd-client.crt --key=/opt/KUIN000601/etcd-client.key snapshot save /etc/data/etcd-snapshot.db
# Restoring etcd from a previous snapshot:
ETCDCTL_API=3 etcdctl --endpoints="https://127.0.0.1:2379" --cacert=/opt/KUIN000601/ca.crt --cert=/opt/KUIN000601/etcd-client.crt --key=/opt/KUIN000601/etcd-client.key snapshot restore /var/lib/backup/etcd-snapshot-previous.db
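# Note (assumption, not part of the original steps): without --data-dir, etcdctl restores into a new local data directory (default.etcd). A common pattern is to restore into a fresh directory and then point the etcd static-pod manifest at it; the path below is illustrative.
ETCDCTL_API=3 etcdctl snapshot restore /var/lib/backup/etcd-snapshot-previous.db --data-dir=/var/lib/etcd-restore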
# vim netpol.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: all-port-from-namespace
  namespace: echo
spec:
  podSelector:
    matchLabels: {}
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: internal
    - podSelector: {}
    ports:
    - port: 9000
#Now run the below command to create the network policy.
kubectl create -f netpol.yaml
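# Optional check: confirm the policy exists in the echo namespace and shows the expected ingress rule.
kubectl describe networkpolicy all-port-from-namespace -n echo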
# Change the deployment configuration to add the named container port (e.g. via kubectl edit deployment front-end).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: front-end
spec:
  template:
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
          name: http
# Create a new service named front-end-svc to expose the container port http.
kubectl expose deployment front-end --name front-end-svc --port 80 --target-port 80
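# Optional check: the service should exist and have endpoints pointing at the front-end pods.
kubectl get svc front-end-svc -o wide
kubectl get endpoints front-end-svc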
# Create the namespace ing-internal.
kubectl create ns ing-internal
# Now prepare the manifest for the ingress resource.
# vi ingress-resource.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: pong
  namespace: ing-internal
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /hi
        pathType: Prefix
        backend:
          service:
            name: hi
            port:
              number: 5678
# Now create the ingress resource with the below kubectl command.
kubectl create -f ingress-resource.yaml
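# Optional check: the ingress should appear in the ing-internal namespace.
kubectl get ingress pong -n ing-internal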
kubectl scale deployment loadbalancer --replicas=6
apiVersion: v1
kind: Pod
metadata:
  name: nginx-kusc00401
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: Always
  nodeSelector:
    disk: ssd
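# Assuming the manifest above was saved to a file, e.g. pod-nginx.yaml (the filename is illustrative), create the pod and confirm which node it was scheduled to.
kubectl apply -f pod-nginx.yaml
kubectl get pod nginx-kusc00401 -o wide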
# Count the nodes that are not tainted NoSchedule and write the number to the file.
kubectl describe nodes | grep Taint | grep -v NoSchedule | wc -l > /opt/kubernetes/nodenum
# Run the command to generate the manifest file for the pod
kubectl run kucc4 --image=redis --dry-run=client -o yaml > multipod.yaml
# Now edit the multipod.yaml file
# vi multipod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: kucc4
spec:
  containers:
  - image: redis
    name: redis
  - image: consul
    name: consul
# Now run the command to create the pod.
kubectl apply -f multipod.yaml
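# Optional check: both containers should be running (READY 2/2).
kubectl get pod kucc4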
#Create the manifest file for the pv.
#vi pv-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
  labels:
    type: local
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/srv/app-config"
#Now create the persistent volume with below kubectl command.
kubectl create -f pv-volume.yaml
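# Optional check: the persistent volume should be listed (typically as Available until it is claimed).
kubectl get pv app-config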
#Check the available storage-class for the pvc.
kubectl get sc
Output: csi-hostpath-sc
#Now create the pvc with below manifest.
# vi pv-vol.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-volume
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 10Mi
  storageClassName: csi-hostpath-sc
# Now run the below command to create the pvc from the manifest.
kubectl create -f pv-vol.yaml
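# Optional check: the claim should reach the Bound state once the csi-hostpath-sc provisioner creates a volume for it.
kubectl get pvc pv-volume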
# Now create the pod to use the above pvc.
# vi web-server.yaml
apiVersion: v1
kind: Pod
metadata:
  name: web-server
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: pv-volume
  volumes:
  - name: pv-volume
    persistentVolumeClaim:
      claimName: pv-volume
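# Now create the pod from the manifest and confirm it is running with the claim mounted (assumes the file name used above).
kubectl create -f web-server.yaml
kubectl get pod web-server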
#Now edit pvc pv-volume to change the capacity.
kubectl edit pvc pv-volume --save-config
# Change the storage request from 10Mi to 70Mi. To verify the change, run:
kubectl get pvc
# Run the below command to fetch the pod log, grep for the string, and write the matching lines to the desired file.
kubectl logs loadbalancer | grep Error | grep unable-to-access-website > /opt/KUTR00101/loadbalancer
# ssh into worker node which is in NotReady state.
ssh wk8s-node-0
sudo -i
# Check the status of the kubelet.
systemctl status kubelet.service
# You will see that the kubelet is stopped. Start it and enable it so it comes back after a reboot.
systemctl start kubelet.service && systemctl enable kubelet.service
# The kubelet is now running; after a short while the node should report Ready.
systemctl status kubelet.service
# Exit back to the base terminal (kubectl is typically run there, not on the worker) and confirm the node status.
kubectl get nodes
# Find the pod with the highest CPU usage among pods with the given label and write its name to the file.
kubectl top pod --sort-by=cpu --selector name=app | head -2 | tail -1 | cut -d' ' -f1 > /opt/KUT00401/KUT0001.txt
# Get the manifest of the pod legacy-app and save it into the pod-legacy.yaml file; then edit the file according to the question.
kubectl get pod legacy-app -o yaml > pod-legacy.yaml
# vi pod-legacy.yaml
apiVersion: v1
kind: Pod
metadata:
  name: legacy-app
spec:
  containers:
  - name: count
    image: busybox
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while true;
      do
        echo "$(date) INFO $i" >> /var/log/legacy-ap.log;
        i=$((i+1));
        sleep 1;
      done
    volumeMounts:
    - name: logs
      mountPath: /var/log
  - name: count-log-1
    image: busybox
    args: [/bin/sh, -c, 'tail -n+1 -f /var/log/legacy-ap.log']
    volumeMounts:
    - name: logs
      mountPath: /var/log
  volumes:
  - name: logs
    emptyDir: {}
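# Containers cannot be added to a running pod in place, so one workflow (an assumption, not spelled out in the original notes) is to recreate the pod from the edited manifest and then read the sidecar's output.
kubectl delete pod legacy-app
kubectl apply -f pod-legacy.yaml
kubectl logs legacy-app -c count-log-1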