Kubernetes Cluster Bootstrap

Introduction

Kubernetes components

Bootstrap on baremetal

PXE network boot

Dnsmasq


sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
  -d -q \
  --dhcp-range=192.168.1.3,192.168.1.254 \
  --enable-tftp --tftp-root=/var/lib/tftpboot \
  --dhcp-match=set:bios,option:client-arch,0 \
  --dhcp-boot=tag:bios,undionly.kpxe \
  --dhcp-match=set:efi32,option:client-arch,6 \
  --dhcp-boot=tag:efi32,ipxe.efi \
  --dhcp-match=set:efibc,option:client-arch,7 \
  --dhcp-boot=tag:efibc,ipxe.efi \
  --dhcp-match=set:efi64,option:client-arch,9 \
  --dhcp-boot=tag:efi64,ipxe.efi \
  --dhcp-userclass=set:ipxe,iPXE \
  --dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
  --address=/matchbox.example.com/192.168.1.2 \
  --log-queries \
  --log-dhcp
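
With dnsmasq up, a couple of quick checks from another machine on the 192.168.1.0/24 segment confirm that name resolution and TFTP work before any node tries to PXE boot (a sketch; 192.168.1.2 is the host running dnsmasq in this example, and the quay.io/coreos/dnsmasq image is assumed to ship the common iPXE binaries in its TFTP root):

# matchbox.example.com should resolve to the matchbox host via dnsmasq
dig +short matchbox.example.com @192.168.1.2
# the BIOS iPXE binary should be downloadable over TFTP
curl -sO tftp://192.168.1.2/undionly.kpxe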

CoreOS and Ignition

---
systemd:
  units:
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
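
The YAML above is a Container Linux Config; Ignition itself consumes JSON, so the config is transpiled before a machine uses it. Matchbox, described below, performs that step when serving .yaml configs, but it can also be done locally to catch typos early. A minimal sketch, assuming the Container Linux Config Transpiler (ct) is installed and the config above is saved as worker.yaml:

# render the Container Linux Config to Ignition JSON and surface syntax errors early
ct < worker.yaml > worker.ign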

Matchbox

sudo docker run --rm \
  -p 8080:8080 \
  -v /var/lib/matchbox:/var/lib/matchbox:Z \
  quay.io/coreos/matchbox:latest \
  -address=0.0.0.0:8080 \
  -log-level=debug
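
Once the container is running, matchbox should answer over HTTP (substitute localhost:8080 on the docker host if matchbox.example.com does not resolve yet):

# the root endpoint returns a simple banner; boot.ipxe is the script dnsmasq chainloads
curl http://matchbox.example.com:8080
curl http://matchbox.example.com:8080/boot.ipxe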

Groups

{
  "id": "node1",
  "name": "Worker Node",
  "profile": "worker",
  "selector": {
    "mac": "52:54:00:b2:2f:86"
  },
  "metadata": {
    "domain_name": "node1.example.com",
    "k8s_dns_service_ip": "10.3.0.10",
    "pxe": "true",
    "ssh_authorized_keys": [
      "ssh-rsa XXXXXXXXXXXX fake-test-key-REMOVE-ME"
    ]
  }
}

Profiles

{
  "id": "worker",
  "name": "Worker",
  "boot": {
    "kernel": "/assets/coreos/1465.8.0/coreos_production_pxe.vmlinuz",
    "initrd": ["/assets/coreos/1465.8.0/coreos_production_pxe_image.cpio.gz"],
    "args": [
      "initrd=coreos_production_pxe_image.cpio.gz",
      "root=/dev/sda1",
      "coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
      "coreos.first_boot=yes",
      "console=tty0",
      "console=ttyS0",
      "coreos.autologin"
    ]
  },
  "ignition_id": "worker.yaml"
}
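
Groups and profiles are plain JSON files read from matchbox's data directory. A sketch of the expected layout, assuming the default data path /var/lib/matchbox as mounted into the container above:

sudo mkdir -p /var/lib/matchbox/{groups,profiles,ignition,assets}
# groups/node1.json    - the group above, selected by MAC address
# profiles/worker.json - the profile above
# ignition/worker.yaml - the Container Linux Config referenced by "ignition_id"
# assets/              - kernel and initrd images (see Assets below)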

Assets

$ tree /var/lib/matchbox/assets
/var/lib/matchbox/assets/
├── coreos
│   └── 1465.8.0
│       ├── CoreOS_Image_Signing_Key.asc
│       ├── coreos_production_image.bin.bz2
│       ├── coreos_production_image.bin.bz2.sig
│       ├── coreos_production_pxe_image.cpio.gz
│       ├── coreos_production_pxe_image.cpio.gz.sig
│       ├── coreos_production_pxe.vmlinuz
│       └── coreos_production_pxe.vmlinuz.sig
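
One way to populate the assets directory is the helper script shipped with the matchbox repository, which downloads the Container Linux PXE kernel/initrd for a given channel and version and verifies the GPG signatures (a sketch; the repository location and script path are assumptions):

git clone https://github.com/coreos/matchbox.git
cd matchbox
sudo ./scripts/get-coreos stable 1465.8.0 /var/lib/matchbox/assets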

Ignition

---
systemd:
  units:
    - name: docker.service
      enable: true
    - name: locksmithd.service
      mask: true
    - name: kubelet.path
      enable: true
      contents: |
        [Unit]
        Description=Watch for kubeconfig
        [Path]
        PathExists=/etc/kubernetes/kubeconfig
        [Install]
        WantedBy=multi-user.target
    - name: wait-for-dns.service
      enable: true
      contents: |
        [Unit]
        Description=Wait for DNS entries
        Wants=systemd-resolved.service
        Before=kubelet.service
        [Service]
        Type=oneshot
        RemainAfterExit=true
        ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
        [Install]
        RequiredBy=kubelet.service
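
With the group, profile, and the config above in place, matchbox renders per-machine Ignition at its /ignition endpoint, selecting the group by the labels a machine reports. A sketch of checking what node1 would receive:

# pass the MAC in hyphenated form, matching the ${mac:hexhyp} kernel argument in the profile
curl 'http://matchbox.example.com:8080/ignition?mac=52-54-00-b2-2f-86'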

Kubernetes node boot process

kubelet.service

- name: kubelet.service
  command: start
  runtime: true
  content: |
    [Unit]
    Description=Kubelet via Hyperkube ACI

    [Service]
    Environment=KUBELET_IMAGE=quay.io/coreos/hyperkube:v1.9.2_coreos.0
    ExecStart=/usr/lib/coreos/kubelet-wrapper \
      --kubeconfig=/etc/kubernetes/kubelet-kubeconfig.yaml \
      --require-kubeconfig \
      --cni-conf-dir=/etc/kubernetes/cni/net.d \
      --network-plugin=cni \
      --lock-file=/var/run/lock/kubelet.lock \
      --exit-on-lock-contention \
      --pod-manifest-path=/etc/kubernetes/manifests \
      --allow-privileged \
      --node-labels="node-role.kubernetes.io/node",type=worker,cluster=baremetal \
      --cni-bin-dir=/var/lib/cni/bin \
      --minimum-container-ttl-duration=6m0s \
      --cluster_dns=10.5.0.10 \
      --cluster-domain=cluster.local \
      --client-ca-file=/etc/kubernetes/ssl/ca.pem \
      --anonymous-auth=false \
      --register-node=true
    ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid

    Restart=always
    RestartSec=10

    [Install]
    WantedBy=multi-user.target
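
The kubelet above is started with --network-plugin=cni and --cni-conf-dir=/etc/kubernetes/cni/net.d, so it expects a CNI network configuration in that directory. The Calico configuration below, still containing its __PLACEHOLDER__ tokens, is the kind of file that ends up there:
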
{
  "name": "k8s-pod-network",
  "type": "calico",
  "etcd_endpoints": "__ETCD_ENDPOINTS__",
  "etcd_key_file": "__ETCD_KEY_FILE__",
  "etcd_cert_file": "__ETCD_CERT_FILE__",
  "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
  "log_level": "__LOG_LEVEL__",
  "ipam": {
    "type": "calico-ipam"
  },
  "policy": {
    "type": "k8s",
    "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
    "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
  },
  "kubernetes": {
    "kubeconfig": "__KUBECONFIG_FILEPATH__"
  }
}
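
The placeholders are not filled in by hand: in a self-hosted Calico deployment the install-cni container typically renders this template from a ConfigMap, substituting the etcd endpoints, certificates and service account token, and writes the result onto the host.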

kube-proxy

- path: /etc/kubernetes/manifests/kube-proxy.yaml
  content: |
    apiVersion: v1
    kind: Pod
    metadata:
      name: kube-proxy
      namespace: kube-system
      labels:
        k8s-app: kube-proxy
    spec:
      containers:
      - name: kube-proxy
        image: quay.io/coreos/hyperkube:v1.9.2_coreos.0
        command:
        - ./hyperkube
        - proxy
        - --kubeconfig=/etc/kubernetes/kubelet-kubeconfig.yaml
        - --proxy-mode=iptables
        - --cluster-cidr=10.123.0.0/16
        securityContext:
          privileged: true
        volumeMounts:
        - mountPath: /etc/ssl/certs
          name: ssl-certs-host
          readOnly: true
        - name: etc-kubernetes
          mountPath: /etc/kubernetes
          readOnly: true
      hostNetwork: true
      volumes:
      - hostPath:
          path: /usr/share/ca-certificates
        name: ssl-certs-host
      - name: etc-kubernetes
        hostPath:
          path: /etc/kubernetes
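
After a node has PXE-booted and the kubelet has registered it, the result can be checked from any machine with a kubeconfig pointing at the cluster's API server:

kubectl get nodes -o wide
kubectl -n kube-system get pods -l k8s-app=kube-proxy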

Bootstrap on cloud

Summary


Thanks to Jan Paw.

Written by Bartłomiej Antoniak, Software Development Manager @VirtusLab
Virtus Lab company blog