
k3s lxc container


Run k3s inside an LXD container. First raise the kernel limits on the host:

vim /etc/sysctl.d/90-lxd-limits.conf
fs.aio-max-nr = 524288
fs.inotify.max_queued_events = 1048576
fs.inotify.max_user_instances = 1048576
fs.inotify.max_user_watches = 1048576
kernel.dmesg_restrict = 1
kernel.keys.maxbytes = 2000000
kernel.keys.maxkeys = 2000
net.ipv4.neigh.default.gc_thresh3 = 8192
net.ipv6.neigh.default.gc_thresh3 = 8192
vm.max_map_count = 262144
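
Apply the new limits without a reboot:
sudo sysctl --system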

lxc profile create k3s
wget https://raw.githubusercontent.com/ubuntu/microk8s/master/tests/lxc/microk8s.profile -O k3s.profile
cat k3s.profile

name: k3s
config:
  boot.autostart: "true"
  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
  raw.lxc: |
    lxc.apparmor.profile=unconfined
    lxc.mount.auto=proc:rw sys:rw cgroup:rw
    lxc.cgroup.devices.allow=a
    lxc.cap.drop=
  security.nesting: "true"
  security.privileged: "true"
description: ""
devices:
  aadisable:
    path: /sys/module/nf_conntrack/parameters/hashsize
    source: /sys/module/nf_conntrack/parameters/hashsize
    type: disk
  aadisable2:
    path: /dev/kmsg
    source: /dev/kmsg
    type: unix-char
  aadisable3:
    path: /sys/fs/bpf
    source: /sys/fs/bpf
    type: disk
  aadisable4:
    path: /proc/sys/net/netfilter/nf_conntrack_max
    source: /proc/sys/net/netfilter/nf_conntrack_max
    type: disk
cat k3s.profile | lxc profile edit k3s
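
Verify the profile was applied:
lxc profile show k3s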

CNAME="$(hostname)-k3s"
lxc launch -p default -p k3s ubuntu:20.04 ${CNAME}

lxc exec ${CNAME} -- unlink /etc/resolv.conf
lxc exec ${CNAME} -- bash -c "echo 'nameserver 1.1.1.1' > /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo '127.0.1.1 ${CNAME}' >> /etc/hosts"

lxc exec ${CNAME} -- apt install -y apparmor-utils avahi-daemon
lxc exec ${CNAME} -- bash -c "echo 'L /dev/kmsg - - - - /dev/console' > /etc/tmpfiles.d/kmsg.conf"

lxc exec ${CNAME} -- bash -c "curl -sfL https://get.k3s.io | sh -s - server --snapshotter=native --disable traefik"
lxc exec ${CNAME} -- k3s kubectl get pods --all-namespaces
lxc exec ${CNAME} -- k3s kubectl get nodes

NAME          STATUS   ROLES                  AGE   VERSION
my-node-k3s   Ready    control-plane,master   19m   v1.27.7+k3s2

Kill leftover k3s processes on shutdown to fix the container hanging on lxc stop. Inside the container, create:

vim /etc/systemd/system/kill-cgroup-procs@.service
[Unit]
Description=Kill cgroup procs on shutdown for %i
DefaultDependencies=false
Before=shutdown.target umount.target
[Service]
# Instanced units are not part of system.slice for some reason
# without this, the service isn't started at shutdown
Slice=system.slice
ExecStart=/bin/bash -c "/usr/local/bin/k3s-killall.sh"
Type=oneshot
[Install]
WantedBy=shutdown.target

Enable it with an instance name (template units cannot be enabled without one):
lxc exec ${CNAME} -- systemctl enable kill-cgroup-procs@k3s.service

Helm install:
lxc exec ${CNAME} -- snap install helm --classic
helm 3.10.1 from Snapcrafters✪ installed

lxc exec ${CNAME} -- bash -c "mkdir -p \${HOME}/.kube/; cat /etc/rancher/k3s/k3s.yaml > \${HOME}/.kube/config"
lxc exec ${CNAME} -- bash -c "chmod 600 \${HOME}/.kube/config"

LoadBalancer:
helm upgrade --install ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
--namespace ingress-nginx --create-namespace \
--set controller.service.type=LoadBalancer

kubectl --namespace ingress-nginx get services -o wide -w ingress-nginx-controller
NAME                       TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE   SELECTOR
ingress-nginx-controller   LoadBalancer   10.43.128.233   10.71.214.196   80:31957/TCP,443:30437/TCP   51s   app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx

Install dashboard:
GITHUB_URL=https://github.com/kubernetes/dashboard/releases
VERSION_KUBE_DASHBOARD=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||')
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/${VERSION_KUBE_DASHBOARD}/aio/deploy/recommended.yaml

Or, if that does not work, pin a specific version:
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml

vim dashboard.admin-user.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

vim dashboard.admin-user-role.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
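
Get a bearer token to log in to the dashboard (kubectl 1.24+):
k3s kubectl -n kubernetes-dashboard create token admin-user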

Ingress:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: dashboard-nginx-ingress
  namespace: kubernetes-dashboard
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  rules:
  - host: e7470-k3s.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
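
Save the manifest and apply it (the file name here is arbitrary):
k3s kubectl apply -f dashboard-ingress.yml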

Map your current host user into the container:
lxc config set ${CNAME} security.idmap.isolated true
lxc config set ${CNAME} security.idmap.size 200000
printf "uid $(id -u) $(id -u)\ngid $(id -g) $(id -g)" | lxc config set ${CNAME} raw.idmap -
lxc restart ${CNAME}

Create your host user inside the container and give it access to the k3s kubeconfig:
lxc exec ${CNAME} -- bash -c "groupadd -r k3s"
lxc exec ${CNAME} -- bash -c "chown root:k3s /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "chmod g+r /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "userdel ubuntu"
lxc exec ${CNAME} -- bash -c "groupadd -g $(id -g) $(id -gn)"
lxc exec ${CNAME} -- bash -c "useradd -u $(id -u) -g $(id -g) -m -G sudo $(id -un)"
lxc exec ${CNAME} -- bash -c "usermod -a -G k3s $(id -un)"
lxc exec ${CNAME} -- bash -c "mkdir /home/$(id -un)/.kube"
lxc exec ${CNAME} -- bash -c "cat /etc/rancher/k3s/k3s.yaml > /home/$(id -un)/.kube/config"
lxc exec ${CNAME} -- bash -c "chown -R $(id -u):$(id -g) /home/$(id -un)/.kube/"

Map a host directory into the container:
lxc config device add ${CNAME} Projects disk source=/home/vit/1 path=/home/vit/1

Wrapper scripts:
vim ${HOME}/.local/bin/k3s
#!/usr/bin/env bash

CNAME="${CNAME:-e7470-k3s}"

lxc exec ${CNAME} --mode interactive --cwd "${PWD}" --user $(id -u) --group $(
  lxc exec ${CNAME} -- getent group k3s | awk -F: '{print $3}'
) --env "HOME=/home/$(id -un)" -- \
  "$(basename "$0")" "$@"

chmod +x ${HOME}/.local/bin/k3s
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/kubectl
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/helm
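
Now kubectl and helm invoked on the host run transparently inside the container:
kubectl get nodes
helm list --all-namespaces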

Convert OpenVZ to LXC


Convert an OpenVZ VM into an LXC container.

Install the same OS version in LXC:
lxc launch ubuntu:20.04 lxc-server
lxc stop lxc-server

rm -rf /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs
mkdir /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs

On the Ubuntu 20.04 OpenVZ VM, run:
rsync -avz -e ssh --exclude=etc/inittab --exclude=etc/network/interfaces --exclude=dev/ --exclude=sys/ --exclude=proc/ / [email protected]:/var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/

vim /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/etc/network/interfaces
auto lo
iface lo inet loopback
source /etc/network/interfaces.d/*.cfg

mkdir /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/etc/network/interfaces.d/
vim /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/etc/network/interfaces.d/eth0.cfg
# The primary network interface
auto eth0
iface eth0 inet dhcp

lxc start lxc-server

Be sure to stop MySQL/PostgreSQL before the rsync. For a huge database you can keep the services running during the first pass, then stop them and re-run rsync so only the changed data is transferred.
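
A minimal sketch of that two-pass approach, assuming a MySQL data directory at /var/lib/mysql and the mysql service name:

# first pass while MySQL is still running
rsync -avz -e ssh /var/lib/mysql/ [email protected]:/var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/var/lib/mysql/
# stop the database, then re-run to copy only the deltas
service mysql stop
rsync -avz -e ssh /var/lib/mysql/ [email protected]:/var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/var/lib/mysql/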

Error: /usr/bin/apt-get failed, exitcode=100

Downloading packages
Failed to register the CT: PRL_ERR_VZCTL_OPERATION_FAILED (Details: Creating OS template cache for ubuntu-20.04-x86_64 template

tune2fs 1.42.9 (28-Dec-2013)
Setting maximal mount count to -1
Setting error behavior to 2
Setting interval between checks to 0 seconds
E: Unable to determine a suitable packaging system type
Error: /usr/bin/apt-get failed, exitcode=100
Error: Failed to umount ploop image /vz/tmp//vzpkg.2WxExF/cache-private/root.hdd: Error in ploop_umount_image (ploop.c:2804): Image /vz/tmp/vzpkg.2WxExF/cache-private/root.hdd/root.hds is not mounted 40
VE_PRIVATE is not set
Creation of Container private area failed

The fix is to restart the OpenVZ service:
service vz restart

docker detach from running container

To detach the tty without exiting the shell, use the escape sequence Ctrl+P followed by Ctrl+Q.

Detachability depends on how the container was started:

docker run -t -i → can be detached with ^P^Q and reattached with docker attach
docker run -i → cannot be detached with ^P^Q; it would disrupt stdin
docker run → cannot be detached with ^P^Q; you can SIGKILL the client and reattach with docker attach
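
The escape sequence is configurable per attach; the key combination and container name below are only examples:

docker attach --detach-keys="ctrl-x,x" mycontainer

You can also set "detachKeys" in ~/.docker/config.json to change the default.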