
k3s in an LXC container


Run k3s inside an LXD container. First raise the kernel limits on the host:

vim /etc/sysctl.d/90-lxd-limits.conf
fs.aio-max-nr = 524288
fs.inotify.max_queued_events = 1048576
fs.inotify.max_user_instances = 1048576
fs.inotify.max_user_watches = 1048576
kernel.dmesg_restrict = 1
kernel.keys.maxbytes = 2000000
kernel.keys.maxkeys = 2000
net.ipv4.neigh.default.gc_thresh3 = 8192
net.ipv6.neigh.default.gc_thresh3 = 8192
vm.max_map_count = 262144
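
Apply the new limits without rebooting (sysctl --system re-reads everything under /etc/sysctl.d/):

sudo sysctl --system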

lxc profile create k3s
wget https://raw.githubusercontent.com/ubuntu/microk8s/master/tests/lxc/microk8s.profile -O k3s.profile
cat k3s.profile

name: k3s
config:
  boot.autostart: "true"
  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
  raw.lxc: |
    lxc.apparmor.profile=unconfined
    lxc.mount.auto=proc:rw sys:rw cgroup:rw
    lxc.cgroup.devices.allow=a
    lxc.cap.drop=
  security.nesting: "true"
  security.privileged: "true"
description: ""
devices:
  aadisable:
    path: /sys/module/nf_conntrack/parameters/hashsize
    source: /sys/module/nf_conntrack/parameters/hashsize
    type: disk
  aadisable2:
    path: /dev/kmsg
    source: /dev/kmsg
    type: unix-char
  aadisable3:
    path: /sys/fs/bpf
    source: /sys/fs/bpf
    type: disk
  aadisable4:
    path: /proc/sys/net/netfilter/nf_conntrack_max
    source: /proc/sys/net/netfilter/nf_conntrack_max
    type: disk
cat k3s.profile | lxc profile edit k3s
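
Confirm the profile took effect:

lxc profile show k3s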

CNAME="$(hostname)-k3s"
lxc launch -p default -p k3s ubuntu:20.04 ${CNAME}

Fix DNS and the host name inside the container:

lxc exec ${CNAME} -- unlink /etc/resolv.conf
lxc exec ${CNAME} -- bash -c "echo 'nameserver 1.1.1.1' > /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo '127.0.1.1 ${CNAME}' >> /etc/hosts"

lxc exec ${CNAME} -- apt install -y apparmor-utils avahi-daemon
lxc exec ${CNAME} -- bash -c "echo 'L /dev/kmsg - - - - /dev/console' > /etc/tmpfiles.d/kmsg.conf"

lxc exec ${CNAME} -- bash -c "curl -sfL https://get.k3s.io | sh -s - server --snapshotter=native --disable traefik"
lxc exec ${CNAME} -- k3s kubectl get pods --all-namespaces

NAME          STATUS   ROLES                  AGE   VERSION
my-node-k3s   Ready    control-plane,master   19m   v1.27.7+k3s2
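
k3s registers itself as a systemd service inside the container; its state can be checked with:

lxc exec ${CNAME} -- systemctl status k3s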

Leftover k3s processes keep the container from shutting down cleanly; kill them on shutdown with a oneshot unit created inside the container:

vim /etc/systemd/system/[email protected]
[Unit]
Description=Kill cgroup procs on shutdown for %i
DefaultDependencies=false
Before=shutdown.target umount.target
[Service]
# Instanced units are not part of system.slice for some reason
# without this, the service isn't started at shutdown
Slice=system.slice
ExecStart=/bin/bash -c "/usr/local/bin/k3s-killall.sh"
Type=oneshot
[Install]
WantedBy=shutdown.target

lxc exec ${CNAME} -- systemctl enable [email protected]
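
With the unit enabled, the container should now stop and restart without hanging:

lxc restart ${CNAME}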

Helm install:
lxc exec ${CNAME} -- snap install helm --classic
helm 3.10.1 from Snapcrafters✪ installed

lxc exec ${CNAME} -- bash -c "mkdir -p \${HOME}/.kube/; cat /etc/rancher/k3s/k3s.yaml > \${HOME}/.kube/config"
lxc exec ${CNAME} -- bash -c "chmod 600 \${HOME}/.kube/config"

LoadBalancer (k3s bundles ServiceLB, which assigns the external IP):

helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.service.type=LoadBalancer

kubectl --namespace ingress-nginx get services -o wide -w ingress-nginx-controller
NAME                       TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE   SELECTOR
ingress-nginx-controller   LoadBalancer   10.43.128.233   10.71.214.196   80:31957/TCP,443:30437/TCP   51s   app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx
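
With no Ingress rules defined yet, the controller's default backend answers on the EXTERNAL-IP above with an nginx 404:

curl http://10.71.214.196/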

Install dashboard:
GITHUB_URL=https://github.com/kubernetes/dashboard/releases
VERSION_KUBE_DASHBOARD=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||')
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/${VERSION_KUBE_DASHBOARD}/aio/deploy/recommended.yaml

Or, if that fails, pin a specific version:
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml

vim dashboard.admin-user.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

vim dashboard.admin-user-role.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
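
Logging in to the dashboard requires a bearer token for the admin-user ServiceAccount; on kubectl 1.24+ a short-lived one can be minted with:

k3s kubectl -n kubernetes-dashboard create token admin-user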

Ingress for the dashboard:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: dashboard-nginx-ingress
  namespace: kubernetes-dashboard
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  rules:
  - host: e7470-k3s.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
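
Save the manifest (the file name dashboard-ingress.yml is arbitrary) and apply it; the avahi-daemon installed earlier makes the container reachable as ${CNAME}.local:

k3s kubectl apply -f dashboard-ingress.yml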

Map your current host user into the container:
lxc config set ${CNAME} security.idmap.isolated true
lxc config set ${CNAME} security.idmap.size 200000
printf "uid $(id -u) $(id -u)\ngid $(id -g) $(id -g)" | lxc config set ${CNAME} raw.idmap -
lxc restart ${CNAME}
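
Check the mapping after the restart:

lxc config get ${CNAME} raw.idmap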

Create a matching user inside the container:
lxc exec ${CNAME} -- bash -c "groupadd -r k3s"
lxc exec ${CNAME} -- bash -c "chown root:k3s /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "chmod g+r /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "userdel ubuntu"
lxc exec ${CNAME} -- bash -c "groupadd -g $(id -g) $(id -gn)"
lxc exec ${CNAME} -- bash -c "useradd -u $(id -u) -g $(id -g) -m -G sudo $(id -un)"
lxc exec ${CNAME} -- bash -c "usermod -a -G k3s $(id -un)"
lxc exec ${CNAME} -- bash -c "mkdir /home/$(id -un)/.kube"
lxc exec ${CNAME} -- bash -c "cat /etc/rancher/k3s/k3s.yaml > /home/$(id -un)/.kube/config"
lxc exec ${CNAME} -- bash -c "chown -R $(id -u):$(id -g) /home/$(id -un)/.kube/"

Map a host directory into the container:
lxc config device add ${CNAME} Projects disk source=/home/vit/1 path=/home/vit/1

Wrapper scripts:
vim ${HOME}/.local/bin/k3s
#!/usr/bin/env bash

CNAME="${CNAME:-e7470-k3s}"

lxc exec ${CNAME} --mode interactive --cwd "${PWD}" --user "$(id -u)" --group "$(
  lxc exec ${CNAME} -- getent group k3s | awk -F: '{print $3}'
)" --env "HOME=/home/$(id -un)" -- \
  "$(basename "$0")" "$@"

chmod +x ${HOME}/.local/bin/k3s
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/kubectl
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/helm
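
Now kubectl and helm invoked on the host transparently run inside the container as your user:

kubectl get nodes
helm list --all-namespaces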

kubelet fails to start; systemctl status kubelet shows:

ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=255)

journalctl -u kubelet
kubelet[25843]: F0310 21:08:58.395379 25843 server.go:189] failed to load Kubelet config file /var/lib/kubelet/config.yaml, error
kubelet.service: main process exited, code=exited, status=255/n/a

Fix: run kubeadm init, which writes the missing /var/lib/kubelet/config.yaml:

kubeadm init

[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
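
Once kubeadm init completes, /var/lib/kubelet/config.yaml exists and the kubelet should stay running:

systemctl status kubelet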