Specifically for installing on my 3-node Zima Blades cluster
Prep
Storage
Carve out space on the external drive to store snap data, and create a blank partition to use as a Ceph OSD
# /dev/sda1 for snap storage
# /dev/sda2 as OSD
sudo parted /dev/sda -s -a optimal -- \
  mklabel gpt \
  mkpart primary 1MiB 100GiB \
  mkpart primary 100GiB 100%
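Optionally, sanity-check the new layout before moving on (device name assumed from above):
# verify the two partitions exist
lsblk /dev/sda
sudo parted /dev/sda print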
Create LVM stuff
sudo pvcreate /dev/sda1 # "y" if prompted to overwrite ext4 signature
sudo vgcreate ext /dev/sda1 
sudo lvcreate -L 20G -n snaplib ext
sudo mkfs.ext4 /dev/ext/snaplib
echo "/dev/mapper/ext-snaplib /var/lib/snapd ext4 defaults 0 2" \
  | sudo tee -a /etc/fstab
sudo lvcreate -L 40G -n snap ext
sudo mkfs.ext4 /dev/ext/snap
echo "/dev/mapper/ext-snap /var/snap ext4 defaults 0 2" \
  | sudo tee -a /etc/fstab
sudo mount -a
sudo systemctl daemon-reload
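Optionally confirm both logical volumes are mounted where expected:
# verify the snap mounts
df -h /var/lib/snapd /var/snap
sudo lvs ext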
Networking
Only have a single NIC on these bad boys so can’t do really fancy stuff, but want to at least make sure the IPs don’t change:
In /etc/network/interfaces:
auto lo
iface lo inet loopback
auto enp2s0
iface enp2s0 inet static
  # other nodes: .62, .63
  address 192.168.1.61
  netmask 255.255.255.0
  gateway 192.168.1.1
Restart networking
sudo systemctl restart networking.service
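Quick sanity check that the static address stuck (interface name and IP from the config above):
# verify the address and default route
ip addr show enp2s0
ip route show default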
Snaps
The handy micro* tools (MicroCeph, MicroK8s) are distributed as snaps, so snap support needs to be installed first
# update all the things for good measure
sudo apt-get update && sudo apt-get dist-upgrade
 
sudo apt-get install snapd
 
sudo reboot # to make sure all the snap components get registered
 
sudo snap install snapd # to update snapd to the version required by microk8s
To be able to run snaps with sudo, you may need to sudo visudo and add /snap/bin to the secure_path line in /etc/sudoers.
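For reference, the resulting line in /etc/sudoers ends up looking roughly like this (default Debian paths assumed; edit only via sudo visudo):
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"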
Microceph
Initial setup
Install
sudo snap install microceph
sudo snap refresh --hold microceph
Create cluster (on first node)
# bootstrap
sudo microceph cluster bootstrap
 
# add other nodes
sudo microceph cluster add blade2
sudo microceph cluster add blade3
Copy the token generated by each cluster add command, then run the join on the corresponding node:
sudo microceph cluster join $token
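Once all nodes have joined, a quick status check from any node should list all three blades:
# verify cluster membership
sudo microceph status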
Disks
On all three
sudo microceph disk add /dev/sda2
On one
sudo ceph osd pool create cephfs_meta
sudo ceph osd pool create cephfs_data
sudo ceph fs new cephFs cephfs_meta cephfs_data
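Optionally verify the filesystem and pools before moving on:
# confirm the new filesystem and its pools
sudo ceph fs ls
sudo ceph osd pool ls
sudo ceph status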
CephFS share
Not needed for k8s functionality, but a nice-to-have if you’d like to access CephFS files from outside the cluster
On all three
sudo apt-get install ceph-common 
cd /var/snap/microceph/current/conf
sudo ln -s $(pwd)/ceph.conf /etc/ceph/ceph.conf
sudo ln -s $(pwd)/ceph.keyring /etc/ceph/ceph.keyring
 
sudo mkdir /cephfs 
sudo mount -t ceph :/ /cephfs/ -o name=admin,fs=cephFs
 
# verify
df -h /cephfs
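If you want this mount to survive reboots, an fstab entry along these lines should work (same name/fs options as the manual mount above; _netdev and nofail are my additions to avoid hanging boot if Ceph isn’t up yet):
echo ":/ /cephfs ceph name=admin,fs=cephFs,_netdev,nofail 0 0" \
  | sudo tee -a /etc/fstab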
Microk8s
Install
On all nodes:
sudo snap install microk8s --classic
sudo snap refresh --hold microk8s
 
# add user to microk8s group
sudo usermod -a -G microk8s john
# create ~/.kube
mkdir -p ~/.kube
# reload group membership
newgrp microk8s
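Before clustering, it’s worth waiting for each node’s instance to come up cleanly:
# blocks until the local microk8s node is ready
microk8s status --wait-ready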
Cluster bootstrap
On first node:
microk8s add-node # repeat for each node
Copy the join command and run it on the other nodes, e.g.:
microk8s join 192.168.1.61:25000/92b2db237428470dc4fcfc4ebbd9dc81/2c0cb3284b05
Check out new membership:
microk8s kubectl get nodes
#> NAME     STATUS   ROLES    AGE    VERSION
#> blade1   Ready    <none>   108m   v1.32.3
#> blade2   Ready    <none>   100m   v1.32.3
#> blade3   Ready    <none>   27m    v1.32.3
Convenience aliases
echo "alias kubectl='microk8s kubectl'" >> ~/.bashrc
echo "alias k='microk8s kubectl'" >> ~/.bashrc
source ~/.bashrc
 
k get nodes # so much more convenient!
#>  NAME     STATUS   ROLES    AGE   VERSION
#>  blade1   Ready    <none>   16h   v1.32.3
#>  blade2   Ready    <none>   13h   v1.32.3
#>  blade3   Ready    <none>   13h   v1.32.3
Configure storage
# enable
microk8s enable rook-ceph
 
# connect to microceph cluster
sudo microk8s connect-external-ceph
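Before creating the test PVCs below, you can optionally confirm which storage classes rook-ceph registered (the cephfs and ceph-rbd names are what the manifest below uses):
# list available storage classes
k get storageclass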
Test-create block and filesystem PVCs
ceph-pvcs.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-fs-pvc
  namespace: default
spec:
  storageClassName: cephfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-rbd-pvc
  namespace: default
spec:
  storageClassName: ceph-rbd
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
Deploy:
k apply -f ceph-pvcs.yaml
#> persistentvolumeclaim/ceph-fs-pvc created
#> persistentvolumeclaim/ceph-rbd-pvc created
 
 
k get pvc
#> NAME           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
#> ceph-fs-pvc    Bound    pvc-467d33e2-5067-4f6f-8586-46d645421cd5   1Gi        RWX            cephfs         <unset>                 3s
#> ceph-rbd-pvc   Bound    pvc-9655944b-e6b8-431b-b55c-82eac881ba04   1Gi        RWO            ceph-rbd       <unset>                 3s
 
k get pv
#> NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                  STORAGECLASS   VOLUMEATTRIBUTESCLASS   REASON   AGE
#> pvc-467d33e2-5067-4f6f-8586-46d645421cd5   1Gi        RWX            Delete           Bound    default/ceph-fs-pvc    cephfs         <unset>                          5m12s
#> pvc-9655944b-e6b8-431b-b55c-82eac881ba04   1Gi        RWO            Delete           Bound    default/ceph-rbd-pvc   ceph-rbd       <unset>                          5m12s
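Bound PVCs are a good sign; for an end-to-end check you can also run a throwaway pod that writes to both volumes. A minimal sketch (pod name, image, and mount paths are arbitrary):
# create a test pod that mounts both PVCs and writes a file to each
k apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pvc-test
  namespace: default
spec:
  restartPolicy: Never
  containers:
    - name: test
      image: busybox
      command: ["sh", "-c", "echo hello > /mnt/fs/hello && echo hello > /mnt/rbd/hello && sleep 3600"]
      volumeMounts:
        - { name: fs-vol, mountPath: /mnt/fs }
        - { name: rbd-vol, mountPath: /mnt/rbd }
  volumes:
    - name: fs-vol
      persistentVolumeClaim: { claimName: ceph-fs-pvc }
    - name: rbd-vol
      persistentVolumeClaim: { claimName: ceph-rbd-pvc }
EOF

# wait for it to start, read the files back, then clean up
k wait --for=condition=Ready pod/ceph-pvc-test --timeout=120s
k exec ceph-pvc-test -- cat /mnt/fs/hello /mnt/rbd/hello
k delete pod ceph-pvc-test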
Load balancer
Microk8s makes it incredibly easy to deploy a reliable load balancer right in the cluster so you can expose services without having to use NodePorts.
microk8s enable metallb
Then just input your desired IP range(s) when prompted. Boom, done.
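After that, any Service of type LoadBalancer gets an external IP from the pool. A quick smoke test (deployment name and image are arbitrary):
# expose a throwaway nginx deployment via MetalLB, then clean up
k create deployment lb-test --image=nginx
k expose deployment lb-test --port=80 --type=LoadBalancer
k get svc lb-test   # EXTERNAL-IP should come from the MetalLB range
k delete service,deployment lb-test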