Tag Archives: LXC

k3s lxc container


Run k3s inside LXD:

vim /etc/sysctl.d/90-lxd-limits.conf
fs.aio-max-nr = 524288
fs.inotify.max_queued_events = 1048576
fs.inotify.max_user_instances = 1048576
fs.inotify.max_user_watches = 1048576
kernel.dmesg_restrict = 1
kernel.keys.maxbytes = 2000000
kernel.keys.maxkeys = 2000
net.ipv4.neigh.default.gc_thresh3 = 8192
net.ipv6.neigh.default.gc_thresh3 = 8192
vm.max_map_count = 262144
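
Apply the settings without a reboot; sysctl --system re-reads all files under /etc/sysctl.d/:

sysctl --system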

lxc profile create k3s
wget https://raw.githubusercontent.com/ubuntu/microk8s/master/tests/lxc/microk8s.profile -O k3s.profile
cat k3s.profile

name: k3s
config:
  boot.autostart: "true"
  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
  raw.lxc: |
    lxc.apparmor.profile=unconfined
    lxc.mount.auto=proc:rw sys:rw cgroup:rw
    lxc.cgroup.devices.allow=a
    lxc.cap.drop=
  security.nesting: "true"
  security.privileged: "true"
description: ""
devices:
  aadisable:
    path: /sys/module/nf_conntrack/parameters/hashsize
    source: /sys/module/nf_conntrack/parameters/hashsize
    type: disk
  aadisable2:
    path: /dev/kmsg
    source: /dev/kmsg
    type: unix-char
  aadisable3:
    path: /sys/fs/bpf
    source: /sys/fs/bpf
    type: disk
  aadisable4:
    path: /proc/sys/net/netfilter/nf_conntrack_max
    source: /proc/sys/net/netfilter/nf_conntrack_max
    type: disk
cat k3s.profile | lxc profile edit k3s
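
As a quick check, confirm the settings landed in the profile:

lxc profile show k3s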

CNAME="$(hostname)-k3s"
lxc launch -p default -p k3s ubuntu:20.04 ${CNAME}

lxc exec ${CNAME} -- unlink /etc/resolv.conf
lxc exec ${CNAME} -- bash -c "echo 'nameserver 1.1.1.1' > /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo 'nameserver 8.8.8.8' >> /etc/resolv.conf"
lxc exec ${CNAME} -- bash -c "echo '127.0.1.1 ${CNAME}' >> /etc/hosts"

lxc exec ${CNAME} -- bash -c "apt update && apt install -y apparmor-utils avahi-daemon"
lxc exec ${CNAME} -- bash -c "echo 'L /dev/kmsg - - - - /dev/console' > /etc/tmpfiles.d/kmsg.conf"

lxc exec ${CNAME} -- bash -c "curl -sfL https://get.k3s.io | sh -s - server --snapshotter=native --disable traefik"
lxc exec ${CNAME} -- k3s kubectl get nodes

NAME          STATUS   ROLES                  AGE   VERSION
my-node-k3s   Ready    control-plane,master   19m   v1.27.7+k3s2

Kill the k3s processes on shutdown to fix the LXC container shutdown hang (create this unit inside the container):

vim /etc/systemd/system/[email protected]
[Unit]
Description=Kill cgroup procs on shutdown for %i
DefaultDependencies=false
Before=shutdown.target umount.target
[Service]
# Instanced units are not part of system.slice for some reason
# without this, the service isn't started at shutdown
Slice=system.slice
ExecStart=/bin/bash -c "/usr/local/bin/k3s-killall.sh"
Type=oneshot
[Install]
WantedBy=shutdown.target

lxc exec ${CNAME} -- systemctl enable [email protected]

Helm install:
lxc exec ${CNAME} -- snap install helm --classic
helm 3.10.1 from Snapcrafters✪ installed

lxc exec ${CNAME} -- bash -c "mkdir -p \${HOME}/.kube/; cat /etc/rancher/k3s/k3s.yaml > \${HOME}/.kube/config"
lxc exec ${CNAME} -- bash -c "chmod 600 \${HOME}/.kube/config"

LoadBalancer:
helm upgrade --install ingress-nginx ingress-nginx \
--repo https://kubernetes.github.io/ingress-nginx \
--namespace ingress-nginx --create-namespace \
--set controller.service.type=LoadBalancer

kubectl --namespace ingress-nginx get services -o wide -w ingress-nginx-controller
NAME                       TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE   SELECTOR
ingress-nginx-controller   LoadBalancer   10.43.128.233   10.71.214.196   80:31957/TCP,443:30437/TCP   51s   app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx,app.kubernetes.io/name=ingress-nginx
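
The EXTERNAL-IP is assigned by the servicelb load balancer that ships with k3s. A quick curl against it (IP taken from the output above) should return the controller's default 404:

curl -sI http://10.71.214.196
# expect HTTP/1.1 404 Not Found from the default backend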

Install dashboard:
GITHUB_URL=https://github.com/kubernetes/dashboard/releases
VERSION_KUBE_DASHBOARD=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||')
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/${VERSION_KUBE_DASHBOARD}/aio/deploy/recommended.yaml

Or, if that doesn't work, pin a specific version:
sudo k3s kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml

vim dashboard.admin-user.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

vim dashboard.admin-user-role.yml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
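
Logging in to the dashboard requires a bearer token. On Kubernetes v1.24+ (this cluster runs v1.27) one can be issued for the service account:

k3s kubectl -n kubernetes-dashboard create token admin-user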

Ingress:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: dashboard-nginx-ingress
  namespace: kubernetes-dashboard
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  rules:
  - host: e7470-k3s.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 443
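
Save the manifest (the filename dashboard-ingress.yml below is arbitrary) and apply it; the avahi-daemon installed earlier should make the e7470-k3s.local name resolvable on the LAN:

k3s kubectl apply -f dashboard-ingress.yml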

Add your current user:
lxc config set ${CNAME} security.idmap.isolated true
lxc config set ${CNAME} security.idmap.size 200000
printf "uid $(id -u) $(id -u)\ngid $(id -g) $(id -g)" | lxc config set ${CNAME} raw.idmap -
lxc restart ${CNAME}

Create your host user inside the container and give it access to the k3s kubeconfig:
lxc exec ${CNAME} -- bash -c "groupadd -r k3s"
lxc exec ${CNAME} -- bash -c "chown root:k3s /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "chmod g+r /etc/rancher/k3s/k3s.yaml"
lxc exec ${CNAME} -- bash -c "userdel ubuntu"
lxc exec ${CNAME} -- bash -c "groupadd -g $(id -g) $(id -gn)"
lxc exec ${CNAME} -- bash -c "useradd -u $(id -u) -g $(id -g) -m -G sudo $(id -un)"
lxc exec ${CNAME} -- bash -c "usermod -a -G k3s $(id -un)"
lxc exec ${CNAME} -- bash -c "mkdir /home/$(id -un)/.kube"
lxc exec ${CNAME} -- bash -c "cat /etc/rancher/k3s/k3s.yaml > /home/$(id -un)/.kube/config"
lxc exec ${CNAME} -- bash -c "chown -R $(id -u):$(id -g) /home/$(id -un)/.kube/"

Map directory:
lxc config device add ${CNAME} Projects disk source=/home/vit/1 path=/home/vit/1

Wrapper scripts:
vim ${HOME}/.local/bin/k3s
#!/usr/bin/env bash

CNAME="${CNAME:-e7470-k3s}"

K3S_GID="$(lxc exec ${CNAME} -- getent group k3s | awk -F: '{print $3}')"

lxc exec ${CNAME} --mode interactive --cwd "${PWD}" \
    --user "$(id -u)" --group "${K3S_GID}" \
    --env "HOME=/home/$(id -un)" -- \
    "$(basename "$0")" "$@"

chmod +x ${HOME}/.local/bin/k3s
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/kubectl
ln -s ${HOME}/.local/bin/k3s ${HOME}/.local/bin/helm
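
With the wrapper and symlinks in place, kubectl and helm run from the host as if installed locally, as long as the current directory also exists inside the container (e.g. the mapped one):

kubectl get nodes
helm list --all-namespaces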

Convert OpenVZ to LXC


Convert an OpenVZ VM to an LXC VM.

Install the same OS version as an LXC container:
lxc launch ubuntu:20.04 lxc-server
lxc stop lxc-server

rm -rf /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs
mkdir /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs

On Ubuntu 20.04 OpenVZ VM run:
rsync -avz -e ssh --exclude=etc/inittab --exclude=etc/network/interfaces --exclude=dev/ --exclude=sys/ --exclude=proc/ / [email protected]:/var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/

vim /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/etc/network/interfaces
auto lo
iface lo inet loopback
source /etc/network/interfaces.d/*.cfg

mkdir /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/etc/network/interfaces.d/
vim /var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/etc/network/interfaces.d/eth0.cfg
# The primary network interface
auto eth0
iface eth0 inet dhcp

lxc start lxc-server

Be sure to stop MySQL/PostgreSQL before the rsync; if the database is huge, you can sync everything else first and re-sync just the database from the OpenVZ server afterwards, as sketched below.
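
For MySQL, for example, the re-sync could look like this (lxc-host is a placeholder for the LXD server's address):

# on the OpenVZ side, with the database stopped
service mysql stop
rsync -avz -e ssh /var/lib/mysql/ root@lxc-host:/var/snap/lxd/common/lxd/storage-pools/default/containers/lxc-server/rootfs/var/lib/mysql/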

error: unpacking of archive failed on file /usr/sbin/suexec;5e440e82: cpio: cap_set_file

Running transaction
  Installing : httpd-2.4.6-90.el7.centos.x86_64    1/1
Error unpacking rpm package httpd-2.4.6-90.el7.centos.x86_64
error: unpacking of archive failed on file /usr/sbin/suexec;5e440e82: cpio: cap_set_file
  Verifying  : httpd-2.4.6-90.el7.centos.x86_64    1/1

Failed:
  httpd.x86_64 0:2.4.6-90.el7.centos

The container config, /var/lib/lxc/new.server.lt/config, drops a set of capabilities by default:
lxc.cap.drop =
lxc.cap.drop = mac_admin mac_override setpcap
lxc.cap.drop = sys_module sys_nice sys_pacct
lxc.cap.drop = sys_rawio sys_time

The workaround that worked here is to mark /sys and /proc as net-shared paths for rpm:

echo "%_netsharedpath /sys:/proc" >> /etc/rpm/macros.dist

Transaction test succeeded
Running transaction
Installing : httpd-2.4.6-90.el7.centos.x86_64

lxc Error creating container test

sudo lxc-create -t download -n test
Error creating container test

sudo lxc-create -t download -n test -o aaa.log
lxc-create: test: confile.c: set_config_net: 258 lxc.net must not have a value
lxc-create: test: parse.c: lxc_file_for_each_line_mmap: 103 Failed to parse config: lxc.network.type = veth

vi /etc/lxc/default.conf
#lxc.network.type = veth
lxc.net.0.type = veth

#lxc.network.link = virbr0
lxc.net.0.link = virbr0

lxc.net.0.flags = up
lxc.net.0.hwaddr = 00:16:3e:xx:xx:xx
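
With the keys renamed, creation should succeed. Everything after -- is passed to the download template; the distribution/release/arch below are just an example:

sudo lxc-create -t download -n test -- -d centos -r 7 -a amd64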

lxc-start: lxccontainer.c: wait_on_daemonized_start: 754 Received container state "STOPPING" instead of "RUNNING"

It could be caused by this libvirt error: libvirtd[18329]: Failed to open file '/sys/class/net/vethSUT3FA/operstate': No such file or directory

Fix:
brctl show
bridge name     bridge id               STP enabled     interfaces
virbr0          8000.525400a97813       yes             virbr0-nic

vim /var/lib/lxc/my_server/config
lxc.network.link = virbr0

lxc-start -n my_server -d

LXC container on CentOS

LXC isn't a real virtualization technique; it is more like a chroot environment on "steroids". It's similar to OpenVZ virtualization, but it can use your native kernel version, which in some cases is very important.

mkdir /var/lib/libvirt/lxc/centos-6-x86_64/etc/yum.repos.d/ -p  
cat /etc/yum.repos.d/CentOS-Base.repo |sed s/'$releasever'/6/g > /var/lib/libvirt/lxc/centos-6-x86_64/etc/yum.repos.d/CentOS-Base.repo
yum groupinstall core --installroot=/var/lib/libvirt/lxc/centos-6-x86_64/ --nogpgcheck -y
yum install plymouth libselinux-python --installroot=/var/lib/libvirt/lxc/centos-6-x86_64/ --nogpgcheck -y

You should create an SELinux rule:

module lxc 1.0;

require {
    type hald_t;
    type virtd_lxc_t;
    class dbus send_msg;
}

#============= hald_t ==============
allow hald_t virtd_lxc_t:dbus send_msg;

You need to create this custom SELinux rule manually to allow virtd_lxc_t to talk to the D-Bus daemon. How to create custom SELinux rules is covered in another of my articles here.
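
A typical way to compile and load the module, assuming the rule above is saved as lxc.te:

checkmodule -M -m -o lxc.mod lxc.te
semodule_package -o lxc.pp -m lxc.mod
semodule -i lxc.pp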

chroot /var/lib/libvirt/lxc/centos-6-x86_64/ 

echo your_password_there |passwd root --stdin

#Fix root login on console

echo "pts/0" >>/etc/securetty

sed -i s/"session    required     pam_selinux.so close"/"#session    required     pam_selinux.so close"/g /etc/pam.d/login

sed -i s/"session    required     pam_selinux.so open"/"#session    required     pam_selinux.so open"/g /etc/pam.d/login

sed -i s/"session    required     pam_loginuid.so"/"#session    required     pam_loginuid.so"/g /etc/pam.d/login

#Configuring basic networking

cat > /etc/sysconfig/network << EOF
NETWORKING=yes
HOSTNAME=lxc.linux4you.tk
EOF

cat > /etc/sysconfig/network-scripts/ifcfg-eth0 << EOF
DEVICE=eth0
BOOTPROTO=dhcp
ONBOOT=yes
EOF

#Enabling sshd

chkconfig sshd on

# Fixing root login for sshd

sed -i s/"session    required     pam_selinux.so close"/"#session    required     pam_selinux.so close"/g /etc/pam.d/sshd

sed -i s/"session    required     pam_loginuid.so"/"#session    required     pam_loginuid.so"/g /etc/pam.d/sshd

sed -i s/"session    required     pam_selinux.so open env_params"/"#session    required     pam_selinux.so open env_params"/g /etc/pam.d/sshd

# Leaving the chroot'ed filesystem

exit
virt-install --connect lxc:/// --name test --ram 512 --vcpus 1 --filesystem /var/lib/libvirt/lxc/centos-6-x86_64/,/ --noautoconsole
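
The container can then be listed and attached to with the usual virsh commands:

virsh --connect lxc:/// list --all
virsh --connect lxc:/// console test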