We are going to use Calico for the overlay network. Calico supports a flexible set of networking options, so you can choose the most efficient option for your setup. We are going to use containerd (a lightweight container runtime) in our setup instead of Docker.
Adding the overlay and br_netfilter kernel modules to the containerd modules-load configuration file (/etc/modules-load.d/containerd.conf).
root@workernode:/etc/modules-load.d# vi containerd.conf
root@workernode:/etc/modules-load.d# more containerd.conf
overlay
br_netfilter
Loading the overlay and br_netfilter kernel modules.
root@workernode:/etc/modules-load.d# modprobe overlay
root@workernode:/etc/modules-load.d# modprobe br_netfilter
root@workernode:/etc/modules-load.d#
root@control-plane:/home/thiru_strive# cd /etc/sysctl.d/
root@control-plane:/etc/sysctl.d# ls
10-console-messages.conf 10-link-restrictions.conf 10-network-security.conf 60-gce-network-security.conf 99-sysctl.conf
10-ipv6-privacy.conf 10-lxd-inotify.conf 10-ptrace.conf 99-cloudimg-ipv6.conf README
10-kernel-hardening.conf 10-magic-sysrq.conf 10-zeropage.conf 99-kubernetes-cri.conf
root@control-plane:/etc/sysctl.d# vi 99-kubernetes-cri.conf
root@workernode:/etc/sysctl.d# more 99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
root@workernode:/etc/sysctl.d#
-----------------
root@control-plane:/etc/sysctl.d# sudo sysctl --system
* Applying /etc/sysctl.d/10-console-messages.conf ...
kernel.printk = 4 4 1 7
* Applying /etc/sysctl.d/10-ipv6-privacy.conf ...
net.ipv6.conf.all.use_tempaddr = 2
net.ipv6.conf.default.use_tempaddr = 2
* Applying /etc/sysctl.d/10-kernel-hardening.conf ...
kernel.kptr_restrict = 1
* Applying /etc/sysctl.d/10-link-restrictions.conf ...
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/10-lxd-inotify.conf ...
fs.inotify.max_user_instances = 1024
* Applying /etc/sysctl.d/10-magic-sysrq.conf ...
kernel.sysrq = 176
* Applying /etc/sysctl.d/10-network-security.conf ...
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.tcp_syncookies = 1
* Applying /etc/sysctl.d/10-ptrace.conf ...
kernel.yama.ptrace_scope = 1
* Applying /etc/sysctl.d/10-zeropage.conf ...
vm.mmap_min_addr = 65536
* Applying /usr/lib/sysctl.d/50-default.conf ...
net.ipv4.conf.all.promote_secondaries = 1
net.core.default_qdisc = fq_codel
* Applying /etc/sysctl.d/60-gce-network-security.conf ...
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 1
net.ipv4.conf.default.secure_redirects = 1
net.ipv4.ip_forward = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.icmp_ignore_bogus_error_responses = 1
net.ipv4.conf.all.log_martians = 1
net.ipv4.conf.default.log_martians = 1
kernel.randomize_va_space = 2
kernel.panic = 10
* Applying /etc/sysctl.d/99-cloudimg-ipv6.conf ...
net.ipv6.conf.all.use_tempaddr = 0
net.ipv6.conf.default.use_tempaddr = 0
* Applying /etc/sysctl.d/99-kubernetes-cri.conf ...
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.conf ...
root@control-plane:/etc/sysctl.d# exit
exit
Downloading the Docker convenience install script:
thiru_strive@control-plane:~$ curl -fsSL https://get.docker.com -o get-docker.sh
thiru_strive@control-plane:~$ ls
get-docker.sh
thiru_strive@control-plane:~$ curl -fsSL https://download.docker.com/linux/ubuntu/gp | sudo apt-key add -
curl: (22) The requested URL returned error: 404
gpg: no valid OpenPGP data found.
thiru_strive@control-plane:~$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
OK
thiru_strive@control-plane:~$ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu
> ^C
thiru_strive@control-plane:~$ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
Hit:1 http://us-west4.gce.archive.ubuntu.com/ubuntu bionic InRelease
Hit:2 http://us-west4.gce.archive.ubuntu.com/ubuntu bionic-updates InRelease
Hit:3 https://download.docker.com/linux/ubuntu bionic InRelease
Hit:5 http://us-west4.gce.archive.ubuntu.com/ubuntu bionic-backports InRelease
Hit:4 https://packages.cloud.google.com/apt kubernetes-xenial InRelease
Get:6 http://security.ubuntu.com/ubuntu bionic-security InRelease [88.7 kB]
Fetched 88.7 kB in 1s (109 kB/s)
Reading package lists... Done
W: Target Packages (stable/binary-amd64/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Packages (stable/binary-all/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Translations (stable/i18n/Translation-en) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-amd64) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-all) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Packages (stable/binary-amd64/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Packages (stable/binary-all/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Translations (stable/i18n/Translation-en) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-amd64) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-all) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
thiru_strive@control-plane:~$ apt-get update -y
Reading package lists... Done
E: Could not open lock file /var/lib/apt/lists/lock - open (13: Permission denied)
E: Unable to lock directory /var/lib/apt/lists/
W: Problem unlinking the file /var/cache/apt/pkgcache.bin - RemoveCaches (13: Permission denied)
W: Problem unlinking the file /var/cache/apt/srcpkgcache.bin - RemoveCaches (13: Permission denied)
thiru_strive@control-plane:~$ sudo apt-get update -y
Hit:1 https://download.docker.com/linux/ubuntu bionic InRelease
Hit:3 http://us-west4.gce.archive.ubuntu.com/ubuntu bionic InRelease
Hit:4 http://us-west4.gce.archive.ubuntu.com/ubuntu bionic-updates InRelease
Hit:5 http://us-west4.gce.archive.ubuntu.com/ubuntu bionic-backports InRelease
Hit:2 https://packages.cloud.google.com/apt kubernetes-xenial InRelease
Hit:6 http://security.ubuntu.com/ubuntu bionic-security InRelease
Reading package lists... Done
W: Target Packages (stable/binary-amd64/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Packages (stable/binary-all/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Translations (stable/i18n/Translation-en) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-amd64) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-all) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Packages (stable/binary-amd64/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Packages (stable/binary-all/Packages) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target Translations (stable/i18n/Translation-en) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-amd64) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
W: Target CNF (stable/cnf/Commands-all) is configured multiple times in /etc/apt/sources.list:58 and /etc/apt/sources.list.d/docker.list:1
thiru_strive@control-plane:~$ apt install -y containerd.io
E: Could not open lock file /var/lib/dpkg/lock-frontend - open (13: Permission denied)
E: Unable to acquire the dpkg frontend lock (/var/lib/dpkg/lock-frontend), are you root?
thiru_strive@control-plane:~$ sduo apt install -y containerd.io
Command 'sduo' not found, did you mean:
command 'sudo' from deb sudo
command 'sudo' from deb sudo-ldap
Try: apt install <deb name>
thiru_strive@control-plane:~$ sudo apt install -y containerd.io
Reading package lists... Done
Building dependency tree
Reading state information... Done
containerd.io is already the newest version (1.6.8-1).
The following package was automatically installed and is no longer required:
libnuma1
Use 'sudo apt autoremove' to remove it.
0 upgraded, 0 newly installed, 0 to remove and 10 not upgraded.
thiru_strive@control-plane:~$ sudo ls /etc/containerd
config.toml
thiru_strive@control-plane:~$ containerd config default | sudo tee /etc/containerd/config.toml
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2
[cgroup]
path = ""
[debug]
address = ""
format = ""
gid = 0
level = ""
uid = 0
[grpc]
address = "/run/containerd/containerd.sock"
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
tcp_address = ""
tcp_tls_ca = ""
tcp_tls_cert = ""
tcp_tls_key = ""
uid = 0
[metrics]
address = ""
grpc_histogram = false
[plugins]
[plugins."io.containerd.gc.v1.scheduler"]
deletion_threshold = 0
mutation_threshold = 100
pause_threshold = 0.02
schedule_delay = "0s"
startup_delay = "100ms"
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = false
disable_apparmor = false
disable_cgroup = false
disable_hugetlb_controller = true
disable_proc_mount = false
disable_tcp_service = true
enable_selinux = false
enable_tls_streaming = false
enable_unprivileged_icmp = false
enable_unprivileged_ports = false
ignore_image_defined_volumes = false
max_concurrent_downloads = 3
max_container_log_line_size = 16384
netns_mounts_under_state_dir = false
restrict_oom_score_adj = false
sandbox_image = "k8s.gcr.io/pause:3.6"
selinux_category_range = 1024
stats_collect_period = 10
stream_idle_timeout = "4h0m0s"
stream_server_address = "127.0.0.1"
stream_server_port = "0"
systemd_cgroup = false
tolerate_missing_hugetlb_controller = true
unset_seccomp_profile = ""
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = ""
ip_pref = ""
max_conf_num = 1
[plugins."io.containerd.grpc.v1.cri".containerd]
default_runtime_name = "runc"
disable_snapshot_annotations = true
discard_unpacked_layers = false
ignore_rdt_not_enabled_errors = false
no_pivot = false
snapshotter = "overlayfs"
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
BinaryName = ""
CriuImagePath = ""
CriuPath = ""
CriuWorkPath = ""
IoGid = 0
IoUid = 0
NoNewKeyring = false
NoPivotRoot = false
Root = ""
ShimCgroup = ""
SystemdCgroup = false
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
base_runtime_spec = ""
cni_conf_dir = ""
cni_max_conf_num = 0
container_annotations = []
pod_annotations = []
privileged_without_host_devices = false
runtime_engine = ""
runtime_path = ""
runtime_root = ""
runtime_type = ""
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
[plugins."io.containerd.grpc.v1.cri".image_decryption]
key_model = "node"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
[plugins."io.containerd.grpc.v1.cri".registry.auths]
[plugins."io.containerd.grpc.v1.cri".registry.configs]
[plugins."io.containerd.grpc.v1.cri".registry.headers]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins."io.containerd.internal.v1.opt"]
path = "/opt/containerd"
[plugins."io.containerd.internal.v1.restart"]
interval = "10s"
[plugins."io.containerd.internal.v1.tracing"]
sampling_ratio = 1.0
service_name = "containerd"
[plugins."io.containerd.metadata.v1.bolt"]
content_sharing_policy = "shared"
[plugins."io.containerd.monitor.v1.cgroups"]
no_prometheus = false
[plugins."io.containerd.runtime.v1.linux"]
no_shim = false
runtime = "runc"
runtime_root = ""
shim = "containerd-shim"
shim_debug = false
[plugins."io.containerd.runtime.v2.task"]
platforms = ["linux/amd64"]
sched_core = false
[plugins."io.containerd.service.v1.diff-service"]
default = ["walking"]
[plugins."io.containerd.service.v1.tasks-service"]
rdt_config_file = ""
[plugins."io.containerd.snapshotter.v1.aufs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.btrfs"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.devmapper"]
async_remove = false
base_image_size = ""
discard_blocks = false
fs_options = ""
fs_type = ""
pool_name = ""
root_path = ""
[plugins."io.containerd.snapshotter.v1.native"]
root_path = ""
[plugins."io.containerd.snapshotter.v1.overlayfs"]
root_path = ""
upperdir_label = false
[plugins."io.containerd.snapshotter.v1.zfs"]
root_path = ""
[plugins."io.containerd.tracing.processor.v1.otlp"]
endpoint = ""
insecure = false
protocol = ""
[proxy_plugins]
[stream_processors]
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar"
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
path = "ctd-decoder"
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
[timeouts]
"io.containerd.timeout.bolt.open" = "0s"
"io.containerd.timeout.shim.cleanup" = "5s"
"io.containerd.timeout.shim.load" = "5s"
"io.containerd.timeout.shim.shutdown" = "3s"
"io.containerd.timeout.task.state" = "2s"
[ttrpc]
address = ""
gid = 0
uid = 0
thiru_strive@control-plane:~$ systemctl restart containerd
==== AUTHENTICATING FOR org.freedesktop.systemd1.manage-units ===
Authentication is required to restart 'containerd.service'.
Authenticating as: Ubuntu (ubuntu)
Password:
thiru_strive@control-plane:~$ sudo systemctl restart containerd
thiru_strive@control-plane:~$ sudo systemctl status containerd
● containerd.service - containerd container runtime
Loaded: loaded (/lib/systemd/system/containerd.service; enabled; vendor preset: enabled)
Active: active (running) since Sat 2022-09-24 06:51:31 UTC; 11s ago
Docs: https://containerd.io
Process: 30365 ExecStartPre=/sbin/modprobe overlay (code=exited, status=0/SUCCESS)
Main PID: 30371 (containerd)
Tasks: 9
CGroup: /system.slice/containerd.service
└─30371 /usr/bin/containerd
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.762791844Z" level=info msg=serving... address=/run/containerd/containerd.sock.ttrpc
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.763046925Z" level=info msg=serving... address=/run/containerd/containerd.sock
Sep 24 06:51:31 control-plane systemd[1]: Started containerd container runtime.
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.764616269Z" level=info msg="Start subscribing containerd event"
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.767021667Z" level=info msg="Start recovering state"
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.768727463Z" level=info msg="Start event monitor"
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.768799239Z" level=info msg="Start snapshots syncer"
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.768820472Z" level=info msg="Start cni network conf syncer for default"
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.768861547Z" level=info msg="Start streaming server"
Sep 24 06:51:31 control-plane containerd[30371]: time="2022-09-24T06:51:31.779908528Z" level=info msg="containerd successfully booted in 0.065294s"
thiru_strive@control-plane:~$ sudo systemctl enable containerd
thiru_strive@control-plane:~$ sudo kubeadm init
[init] Using Kubernetes version: v1.25.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [control-plane kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.182.0.2]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [control-plane localhost] and IPs [10.182.0.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [control-plane localhost] and IPs [10.182.0.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 10.504975 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node control-plane as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node control-plane as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: bp7u7h.yd41xhaja24z7l86
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.182.0.2:6443 --token bp7u7h.yd41xhaja24z7l86 \
--discovery-token-ca-cert-hash sha256:081170aeba0d24d093b39838f549c60f379a0d0b67861a90e91ea2a41f9d816d
kubeadm join 10.182.0.2:6443 --token kui2lz.qqv1xjk05k54nn26 \
--discovery-token-ca-cert-hash sha256:081170aeba0d24d093b39838f549c60f379a0d0b67861a90e91ea2a41f9d816d
thiru_strive@control-plane:~$
thiru_strive@control-plane:~$ kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
thiru_strive@control-plane:~$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-565d847f94-p8lqw 0/1 Pending 0 3d8h
kube-system coredns-565d847f94-qdhkl 0/1 Pending 0 3d8h
kube-system etcd-control-plane 1/1 Running 2 (3m7s ago) 3d8h
kube-system kube-apiserver-control-plane 1/1 Running 2 (3m7s ago) 3d8h
kube-system kube-controller-manager-control-plane 1/1 Running 2 (3m7s ago) 3d8h
kube-system kube-proxy-v7bwd 1/1 Running 2 (3m7s ago) 3d8h
kube-system kube-scheduler-control-plane 1/1 Running 2 (3m7s ago) 3d8h
kube-system weave-net-88qdk 1/2 Running 1 (5s ago) 12s
thiru_strive@control-plane:~$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-565d847f94-p8lqw 0/1 Pending 0 3d8h
kube-system coredns-565d847f94-qdhkl 0/1 Pending 0 3d8h
kube-system etcd-control-plane 1/1 Running 2 (3m11s ago) 3d8h
kube-system kube-apiserver-control-plane 1/1 Running 2 (3m11s ago) 3d8h
kube-system kube-controller-manager-control-plane 1/1 Running 2 (3m11s ago) 3d8h
kube-system kube-proxy-v7bwd 1/1 Running 2 (3m11s ago) 3d8h
kube-system kube-scheduler-control-plane 1/1 Running 2 (3m11s ago) 3d8h
kube-system weave-net-88qdk 2/2 Running 1 (9s ago) 16s
thiru_strive@control-plane:~$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-565d847f94-p8lqw 1/1 Running 0 3d8h
kube-system coredns-565d847f94-qdhkl 1/1 Running 0 3d8h
kube-system etcd-control-plane 1/1 Running 2 (3m46s ago) 3d8h
kube-system kube-apiserver-control-plane 1/1 Running 2 (3m46s ago) 3d8h
kube-system kube-controller-manager-control-plane 1/1 Running 2 (3m46s ago) 3d8h
kube-system kube-proxy-v7bwd 1/1 Running 2 (3m46s ago) 3d8h
kube-system kube-scheduler-control-plane 1/1 Running 2 (3m46s ago) 3d8h
kube-system weave-net-88qdk 2/2 Running 1 (44s ago) 51s
**********************************
thiru_strive@control-plane:~$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
control-plane Ready control-plane 3d8h v1.25.0
thiru_strive@control-plane:~$
***************************************
Pod creation:
thiru_strive@control-plane:~$ kubectl delete pod nginx-deployment-7fb96c846b-m2mqn
pod "nginx-deployment-7fb96c846b-m2mqn" deleted
thiru_strive@control-plane:~$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-7fb96c846b-9x87x 1/1 Running 0 41s
nginx-deployment-7fb96c846b-jhms8 1/1 Running 0 4s
thiru_strive@control-plane:~$ kubectl exec -it nginx-deployment-7fb96c846b-jhms8 /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-7fb96c846b-jhms8:/# ls
bin boot dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var
root@nginx-deployment-7fb96c846b-jhms8:/# ps -ef | grep httpd
bash: ps: command not found
root@nginx-deployment-7fb96c846b-jhms8:/# service status httpd
***********************************
thiru_strive@control-plane:~$ kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-7fb96c846b-9x87x 1/1 Running 0 5m39s 10.44.0.1 workernode <none> <none>
nginx-deployment-7fb96c846b-jhms8 1/1 Running 0 5m2s 10.44.0.3 workernode <none> <none>
thiru_strive@control-plane:~$
*****************
Shutting down a worker node:
thiru_strive@control-plane:~$ kubectl exec -it nginx-deployment-7fb96c846b-jhms8 /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
thiru_strive@control-plane:~$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-deployment-7fb96c846b-88kf5 0/1 Pending 0 3m15s
nginx-deployment-7fb96c846b-9x87x 1/1 Terminating 0 17m
nginx-deployment-7fb96c846b-jhms8 1/1 Terminating 0 16m
nginx-deployment-7fb96c846b-lbsxz 0/1 Pending 0 3m15s
thiru_strive@control-plane:~$
**********************
No comments:
Post a Comment