
1. sudo swapoff -a (to disable swap immediately)

2. disable swap in /etc/fstab (comment out or remove the swap entry so it stays off after reboot)
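
One common way to do this (a sketch, assuming a standard fstab with the swap entry on its own uncommented line) is:

sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab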

3. sudo swapon --show (to check)

4. sudo hostnamectl set-hostname "hostname" (to set hostname)

5. exec bash (to reload the shell so the new hostname takes effect)

6. add entries for all nodes in /etc/hosts
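
For example, /etc/hosts might look like the following (the master IP 192.168.163.134 appears later in these notes; the worker IPs and hostnames here are only illustrative):

192.168.163.134 k8s
192.168.163.135 node1
192.168.163.136 node2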

7. ping -c 4 "hostname" (to check connectivity between nodes)

8. configure the required kernel modules (on all nodes)

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

9. load the modules now

sudo modprobe overlay

sudo modprobe br_netfilter

10. set the required sysctl params; these persist across reboots

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF

11. apply the sysctl changes without a reboot

sudo sysctl --system


12. sudo apt update

13. install required packages

sudo apt install curl ca-certificates apt-transport-https
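
On some Ubuntu releases the /etc/apt/keyrings directory used in the next step does not exist by default; if it is missing, create it first:

sudo mkdir -p /etc/apt/keyrings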

14. fetch the public GPG key for the Kubernetes apt repository from Google

curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg

Note: if the Google-hosted repository above is not working, fetch the key from the community-owned pkgs.k8s.io repository instead:

curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.26/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

15. add the Kubernetes apt repository, signed with the key above

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/


kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list

Note: again, if the Google-hosted repository is not working, add the pkgs.k8s.io repository instead:

echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.26/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

Package version for reference: kubeadm | 1.26.5-00 | http://apt.kubernetes.io kubernetes-xenial/main amd64 Packages

16. sudo apt update

17. sudo apt install kubelet=1.26.5-00 kubeadm=1.26.5-00 kubectl=1.26.5-00
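
Optionally (not part of the original steps), the packages can be held at this version so a routine apt upgrade does not bump them unexpectedly:

sudo apt-mark hold kubelet kubeadm kubectl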

18. install Docker (the docker.io package also pulls in containerd, which is used as the container runtime)

sudo apt install docker.io


19. configure containerd

sudo mkdir -p /etc/containerd

20. generate the default containerd config file:

sudo sh -c "containerd config default > /etc/containerd/config.toml"

21. modify the configuration file to use the systemd cgroup driver

sudo sed -i 's/ SystemdCgroup = false/ SystemdCgroup = true/' /etc/containerd/config.toml
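
A quick sanity check that the edit took effect (it should print SystemdCgroup = true):

sudo grep SystemdCgroup /etc/containerd/config.toml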

22. sudo systemctl restart containerd.service

23. sudo systemctl restart kubelet.service

24. sudo systemctl enable kubelet.service
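
To confirm both services are up before initializing the cluster (a quick check, not in the original notes; kubelet may keep restarting until the node is initialized or joined, which is expected at this point):

sudo systemctl status containerd kubelet --no-pager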

The remaining commands are run on the master node only.

25. on master only: pre-pull the images needed for the Kubernetes control plane

sudo kubeadm config images pull

26. initialize the Kubernetes control plane, specifying the pod network CIDR

sudo kubeadm init --pod-network-cidr=10.10.0.0/16

27. run the three commands printed in the kubeadm init output:

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config
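
At this point kubectl should be able to reach the API server; the control-plane node will typically show NotReady until the pod network (Calico, below) is installed:

kubectl get nodes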


28. copy and save the join command printed by kubeadm init (it will be run on the worker nodes later):

kubeadm join 192.168.163.134:6443 --token nl1fjh.op99i7xhbfd0k7kb \
--discovery-token-ca-cert-hash sha256:91b7274ff12f4f4179dffa22c8bdbaf8bcd51034412c50afdd266badac7ebf7d

29. on master only: install the Calico container network solution so that Kubernetes nodes and pods can communicate.

To deploy the Calico operator on the master node:

kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/tigera-operator.yaml

30. download the Calico custom resources file

curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.1/manifests/custom-resources.yaml -O

31. ls -l

32. change the default CIDR in the file to the pod network CIDR used during kubeadm init

sed -i 's/cidr: 192\.168\.0\.0\/16/cidr: 10.10.0.0\/16/g' custom-resources.yaml
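
To confirm the replacement (it should now show cidr: 10.10.0.0/16):

grep cidr custom-resources.yaml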

33. create the resources defined in the custom resources file:

kubectl create -f custom-resources.yaml
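
Calico can take a few minutes to come up; progress can be watched with something like:

watch kubectl get pods -n calico-system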

34. on each worker node: join it to the cluster

kubeadm join 192.168.163.134:6443 --token nl1fjh.op99i7xhbfd0k7kb \
--discovery-token-ca-cert-hash sha256:91b7274ff12f4f4179dffa22c8bdbaf8bcd51034412c50afdd266badac7ebf7d

35. kubectl get pods -A (on the master, to verify that all pods are running)

Results:

root@k8s:/home/user# sudo kubeadm config images pull

I0304 14:25:24.481213 2074 version.go:256] remote version is much newer: v1.29.2; falling back to:
stable-1.28

[config/images] Pulled registry.k8s.io/kube-apiserver:v1.28.7

[config/images] Pulled registry.k8s.io/kube-controller-manager:v1.28.7

[config/images] Pulled registry.k8s.io/kube-scheduler:v1.28.7

[config/images] Pulled registry.k8s.io/kube-proxy:v1.28.7

[config/images] Pulled registry.k8s.io/pause:3.9

[config/images] Pulled registry.k8s.io/etcd:3.5.9-0

[config/images] Pulled registry.k8s.io/coredns/coredns:v1.10.1

On one of the worker nodes:

user@node2:~$ sudo kubeadm config images pull

I0305 12:38:42.382799 19523 version.go:256] remote version is much newer: v1.29.2; falling back to:
stable-1.26

[config/images] Pulled registry.k8s.io/kube-apiserver:v1.26.14

[config/images] Pulled registry.k8s.io/kube-controller-manager:v1.26.14

[config/images] Pulled registry.k8s.io/kube-scheduler:v1.26.14

[config/images] Pulled registry.k8s.io/kube-proxy:v1.26.14

[config/images] Pulled registry.k8s.io/pause:3.9


[config/images] Pulled registry.k8s.io/etcd:3.5.10-0

[config/images] Pulled registry.k8s.io/coredns/coredns:v1.9.3

root@k8s:/home/user#

root@k8s:/home/user# sudo kubeadm init --pod-network-cidr=10.10.0.0/16

I0304 14:39:36.814369 2706 version.go:256] remote version is much newer: v1.29.2; falling back to:
stable-1.28

[init] Using Kubernetes version: v1.28.7

[preflight] Running pre-flight checks

[preflight] Pulling images required for setting up a Kubernetes cluster

[preflight] This might take a minute or two, depending on the speed of your internet connection

[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'

W0304 14:39:39.257202 2706 checks.go:835] detected that the sandbox image "registry.k8s.io/pause:3.8" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.k8s.io/pause:3.9" as the CRI sandbox image.

[certs] Using certificateDir folder "/etc/kubernetes/pki"

[certs] Generating "ca" certificate and key

[certs] Generating "apiserver" certificate and key

[certs] apiserver serving cert is signed for DNS names [k8s kubernetes kubernetes.default
kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.163.134]

[certs] Generating "apiserver-kubelet-client" certificate and key

[certs] Generating "front-proxy-ca" certificate and key

[certs] Generating "front-proxy-client" certificate and key

[certs] Generating "etcd/ca" certificate and key

[certs] Generating "etcd/server" certificate and key

[certs] etcd/server serving cert is signed for DNS names [k8s localhost] and IPs [192.168.163.134
127.0.0.1 ::1]

[certs] Generating "etcd/peer" certificate and key

[certs] etcd/peer serving cert is signed for DNS names [k8s localhost] and IPs [192.168.163.134 127.0.0.1
::1]

[certs] Generating "etcd/healthcheck-client" certificate and key


[certs] Generating "apiserver-etcd-client" certificate and key

[certs] Generating "sa" key and public key

[kubeconfig] Using kubeconfig folder "/etc/kubernetes"

[kubeconfig] Writing "admin.conf" kubeconfig file

[kubeconfig] Writing "kubelet.conf" kubeconfig file

[kubeconfig] Writing "controller-manager.conf" kubeconfig file

[kubeconfig] Writing "scheduler.conf" kubeconfig file

[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"

[control-plane] Using manifest folder "/etc/kubernetes/manifests"

[control-plane] Creating static Pod manifest for "kube-apiserver"

[control-plane] Creating static Pod manifest for "kube-controller-manager"

[control-plane] Creating static Pod manifest for "kube-scheduler"

[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"

[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"

[kubelet-start] Starting the kubelet

[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory
"/etc/kubernetes/manifests". This can take up to 4m0s

[apiclient] All control plane components are healthy after 19.505938 seconds

[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace

[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for
the kubelets in the cluster

[upload-certs] Skipping phase. Please see --upload-certs

[mark-control-plane] Marking the node k8s as control-plane by adding the labels: [node-
role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]

[mark-control-plane] Marking the node k8s as control-plane by adding the taints [node-
role.kubernetes.io/control-plane:NoSchedule]

[bootstrap-token] Using token: 15783p.y234zn73ostwsz1l

[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles

[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes
to get long term certificate credentials

[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs
from a Node Bootstrap Token

[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the
cluster

[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace

[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key

[addons] Applied essential addon: CoreDNS

[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.

Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:

https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.163.134:6443 --token 15783p.y234zn73ostwsz1l \
--discovery-token-ca-cert-hash sha256:7d9f3eac51ad5554502ef99165f4a345a9d852f86cc30606d2b71aeb858d9825

root@k8s:/home/user# kubectl get nodes

NAME STATUS ROLES AGE VERSION

k8s Ready control-plane 33h v1.26.5

node1 Ready <none> 32h v1.26.5

root@k8s:/home/user# kubectl get pods -A

NAMESPACE NAME READY STATUS RESTARTS AGE

calico-apiserver calico-apiserver-7ff99d89dc-6bhnh 1/1 Running 3 (11m ago) 32h

calico-apiserver calico-apiserver-7ff99d89dc-w5m59 1/1 Running 3 (11m ago) 32h

calico-system calico-kube-controllers-6fdc57f96f-rqqgg 1/1 Running 1 (11m ago) 32h

calico-system calico-node-288jt 1/1 Running 0 32h

calico-system calico-node-gp6q4 1/1 Running 1 (11m ago) 32h

calico-system calico-typha-5978b87797-qdvgj 1/1 Running 1 (11m ago) 32h

calico-system csi-node-driver-jx82b 2/2 Running 0 32h

calico-system csi-node-driver-vqmgq 2/2 Running 2 (11m ago) 32h

kube-system coredns-787d4945fb-gclqr 1/1 Running 1 (11m ago) 33h

kube-system coredns-787d4945fb-lxxj5 1/1 Running 1 (11m ago) 33h

kube-system etcd-k8s 1/1 Running 1 (11m ago) 33h

kube-system kube-apiserver-k8s 1/1 Running 1 (11m ago) 33h

kube-system kube-controller-manager-k8s 1/1 Running 1 (11m ago) 33h

kube-system kube-proxy-448rw 1/1 Running 0 32h

kube-system kube-proxy-w59m4 1/1 Running 1 (11m ago) 33h

kube-system kube-scheduler-k8s 1/1 Running 2 (4m34s ago) 33h

tigera-operator tigera-operator-78d7857c44-gv6ql 1/1 Running 2 (11m ago) 32h

tigera-operator tigera-operator-78d7857c44-qpwn9 0/1 Completed 0 32h


root@k8s:/home/user# kubectl get svc -n kube-system

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE

kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 33h

root@k8s:/home/user# kubectl cluster-info

Kubernetes control plane is running at https://192.168.163.134:6443

CoreDNS is running at https://192.168.163.134:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

root@k8s:/home/user# kubectl get cs

Warning: v1 ComponentStatus is deprecated in v1.19+

NAME STATUS MESSAGE ERROR

controller-manager Healthy ok

scheduler Healthy ok

etcd-0 Healthy

root@k8s:/home/user#


root@k8s:/home/user# curl -v localhost:10249/proxyMode

* Trying 127.0.0.1:10249...

* Connected to localhost (127.0.0.1) port 10249 (#0)

> GET /proxyMode HTTP/1.1

> Host: localhost:10249

> User-Agent: curl/7.81.0

> Accept: */*

>

* Mark bundle as not supporting multiuse

< HTTP/1.1 200 OK

< Content-Type: text/plain; charset=utf-8

< X-Content-Type-Options: nosniff

< Date: Mon, 04 Mar 2024 15:39:20 GMT

< Content-Length: 8

<

* Connection #0 to host localhost left intact

iptables
root@k8s:/home/user# kubectl apply -f https://k8s.io/examples/pods/commands.yaml

pod/command-demo created

root@k8s:/home/user# kubectl get pods

NAME READY STATUS RESTARTS AGE

command-demo 0/1 ContainerCreating 0 17s

root@k8s:/home/user# kubectl get pods -n kube-system

NAME READY STATUS RESTARTS AGE

coredns-787d4945fb-gclqr 1/1 Running 1 (24m ago) 33h

coredns-787d4945fb-lxxj5 1/1 Running 1 (24m ago) 33h

etcd-k8s 1/1 Running 1 (24m ago) 33h

kube-apiserver-k8s 1/1 Running 1 (24m ago) 33h

kube-controller-manager-k8s 1/1 Running 1 (24m ago) 33h


kube-proxy-448rw 1/1 Running 0 33h

kube-proxy-w59m4 1/1 Running 1 (24m ago) 33h

kube-scheduler-k8s 1/1 Running 2 (17m ago) 33h

To get a fresh join command for adding nodes:

root@k8s:/home/user# kubeadm token create --print-join-command

kubeadm join 192.168.163.134:6443 --token xn7bzf.c5si9r2f64v0w8v7 --discovery-token-ca-cert-hash sha256:91b7274ff12f4f4179dffa22c8bdbaf8bcd51034412c50afdd266badac7ebf7d

##### After a reboot, if the cluster is not responding to kubectl:

export KUBECONFIG=/etc/kubernetes/admin.conf

root@k8s:/home/user# kubectl get nodes --show-labels

NAME STATUS ROLES AGE VERSION LABELS

k8s Ready control-plane 2d2h v1.26.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node.kubernetes.io/exclude-from-external-load-balancers=

node1 Ready <none> 2d2h v1.26.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux

node2 Ready <none> 110m v1.26.14 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2,kubernetes.io/os=linux

root@k8s:/home/user#

root@k8s:/home/user# kubectl get nodes

NAME STATUS ROLES AGE VERSION

k8s Ready control-plane 2d2h v1.26.5

node1 Ready <none> 2d2h v1.26.5

node2 Ready <none> 114m v1.26.14

root@k8s:/home/user# kubectl label node node1 node-role.kubernetes.io/worker=worker

node/node1 labeled

root@k8s:/home/user# kubectl label node node2 node-role.kubernetes.io/worker=worker

node/node2 labeled

root@k8s:/home/user# kubectl get nodes

NAME STATUS ROLES AGE VERSION

k8s Ready control-plane 2d3h v1.26.5

node1 Ready worker 2d2h v1.26.5

node2 Ready worker 118m v1.26.14

root@k8s:/home/user#
