一、集群规划
1、机器规划
注意:master、etcd节点尽量打散到不同机柜和交换机下
| 主机名 | IP | 系统版本 | 集群角色 | 部署服务 |
| --- | --- | --- | --- | --- |
| common-01 | 192.168.86.81 | Ubuntu 22.04.2 LTS | master | kubeadm、kubelet、kubectl、kube-apiserver、etcd、kube-scheduler、kube-controller-manager |
| common-02 | 192.168.86.82 | Ubuntu 22.04.2 LTS | master | kubeadm、kubelet、kubectl、kube-apiserver、etcd、kube-scheduler、kube-controller-manager |
| common-03 | 192.168.86.83 | Ubuntu 22.04.2 LTS | master | kubeadm、kubelet、kubectl、kube-apiserver、etcd、kube-scheduler、kube-controller-manager |
| mgmt-01 | 192.168.86.84 | Ubuntu 22.04.2 LTS | node | kubelet、kube-proxy |
| mgmt-02 | 192.168.86.85 | Ubuntu 22.04.2 LTS | node | kubelet、kube-proxy |
| mgmt-03 | 192.168.86.86 | Ubuntu 22.04.2 LTS | node | kubelet、kube-proxy |
2、网络规划
| 网络类型 | 网段 |
| --- | --- |
| podSubnet | 10.10.0.0/12 |
| serviceSubnet | 10.20.0.0/12 |
| controlPlaneEndpoint | k8s.disallow.cn |
3、系统规划
| 类型 | 版本名称 | 版本号 |
| --- | --- | --- |
| 集群版本 | kubernetes | 1.32.6 |
| 容器版本 | containerd | 1.7.8 |
| 系统版本 | Ubuntu | 22.04.2 LTS |
| 内核版本 | kernel | 5.15.0-70-generic |
二、集群搭建
1、节点初始化
1、修改主机名、同步时间、DNS
修改主机名
root@common-01:~# hostnamectl set-hostname common-01
root@common-02:~# hostnamectl set-hostname common-02
root@common-03:~# hostnamectl set-hostname common-03
root@mgmt-01:~# hostnamectl set-hostname mgmt-01
root@mgmt-02:~# hostnamectl set-hostname mgmt-02
root@mgmt-03:~# hostnamectl set-hostname mgmt-03
同步时间
ntpdate ntp.aliyun.com
DNS解析
cat /etc/netplan/00-installer-config.yaml
network:
  ethernets:
    eth0:
      dhcp4: no
      dhcp6: no
      addresses: [192.168.86.81/24]
      gateway4: 192.168.86.2
      nameservers:
        addresses: [114.114.114.114, 8.8.8.8]
      optional: true
  version: 2
生效配置
netplan try
netplan apply
2、系统优化
cp -f /etc/security/limits.conf /etc/security/limits.conf.bak
cat >> /etc/security/limits.conf <<'EOF'
root soft nofile 655350
root hard nofile 655350
* soft nofile 655350
* hard nofile 655350
EOF
cp /etc/sysctl.conf /etc/sysctl.conf.bak
cat >> /etc/sysctl.conf <<'EOF'
fs.inotify.max_user_watches = 1048576
fs.inotify.max_user_instances = 8192
net.core.somaxconn = 40960
fs.file-max = 9223372036854775807
EOF
3、关闭swap
swapoff --all --verbose
sed -i "/^\/swap.img/d" /etc/fstab
4、禁止系统和Linux内核版本自动升级
cp /etc/apt/apt.conf.d/20auto-upgrades /etc/apt/apt.conf.d/20auto-upgrades.bak
cp /etc/apt/apt.conf.d/50unattended-upgrades /etc/apt/apt.conf.d/50unattended-upgrades.bak
cat > /etc/apt/apt.conf.d/20auto-upgrades <<'EOF'
APT::Periodic::Update-Package-Lists "0";
APT::Periodic::Download-Upgradeable-Packages "0";
APT::Periodic::AutocleanInterval "0";
APT::Periodic::Unattended-Upgrade "0";
EOF
cat /etc/apt/apt.conf.d/50unattended-upgrades
Unattended-Upgrade::Package-Blacklist {
};
Unattended-Upgrade::DevRelease "auto";
systemctl restart unattended-upgrades
mv /etc/update-motd.d /etc/update-motd.d.bak
mkdir -p /etc/update-motd.d
dpkg -l | awk '{print $2}' | grep -E '^linux-' | xargs apt-mark hold
DEBIAN_FRONTEND=noninteractive apt update
5、修改时区、优化history
timedatectl set-timezone Asia/Shanghai
cat >> /etc/profile <<'EOF'
export HISTTIMEFORMAT="%Y-%m-%d %H:%M:%S `whoami` "
export HISTFILESIZE=100000
export HISTSIZE=100000
EOF
cat /root/.bashrc | grep HIS
HISTCONTROL=ignoredups:ignorespace
HISTSIZE=100000
HISTFILESIZE=100000
source /root/.bashrc
6、安装基础软件
DEBIAN_FRONTEND=noninteractive apt-get install -y \
ipset netplan.io curl net-tools iftop fping iperf ntp iputils-ping mtr traceroute tcptraceroute ipvsadm socat conntrack ctorrent \
pigz zip rsyslog unzip vim nano less tree git lrzsz tmux wget jq lynx \
freeipmi ipmitool smartmontools \
libvirt-clients libnuma-dev \
nfs-common nvme-cli
2、初始化k8s节点
1、安装containerd
wget https://github.com/containerd/containerd/releases/download/v1.7.8/containerd-1.7.8-linux-amd64.tar.gz
tar -xf containerd-1.7.8-linux-amd64.tar.gz -C /usr/local/
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image
sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10"
tee /etc/systemd/system/containerd.service <<- 'EOF'
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStart=/usr/local/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable containerd && systemctl start containerd
2、安装crictl、nerdctl、etcdctl
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.25.0/crictl-v1.25.0-linux-amd64.tar.gz
tar -zxvf crictl-*-linux-amd64.tar.gz -C /usr/local/bin/
cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
wget https://github.com/containerd/nerdctl/releases/download/v1.7.5/nerdctl-full-1.7.5-linux-amd64.tar.gz
tar -xf nerdctl-full-1.7.5-linux-amd64.tar.gz -C /usr/local/
mkdir -p /opt/cni
ln -s /usr/local/libexec/cni /opt/cni/bin
curl -LO https://github.com/coreos/etcd/releases/download/v3.5.4/etcd-v3.5.4-linux-amd64.tar.gz
tar -xf etcd-v3.5.4-linux-amd64.tar.gz
cp -a etcd-v3.5.4-linux-amd64/etcd* /usr/local/bin/
3、安装kubeadm、kubectl、kubelet、helm等二进制文件
wget https://dl.k8s.io/v1.32.6/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cp kubernetes/server/bin/kubeadm /usr/bin/
cp kubernetes/server/bin/kubectl /usr/bin/
cp kubernetes/server/bin/kubelet /usr/bin/
wget https://get.helm.sh/helm-v3.14.2-linux-amd64.tar.gz
tar -xf helm-v3.14.2-linux-amd64.tar.gz
mv linux-amd64/helm /usr/bin/
4、创建kubelet配置文件
cat > /lib/systemd/system/kubelet.service <<'EOF'
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=https://kubernetes.io/docs/
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/usr/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF
5、创建kubeadm配置文件
mkdir -p /usr/lib/systemd/system/kubelet.service.d/
cat > /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf <<'EOF'
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
EOF
6、创建99-kubernetes-cri.conf文件
cat > /etc/sysctl.d/99-kubernetes-cri.conf <<'EOF'
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# 注意:必须先加载 br_netfilter 模块,否则 net.bridge.* 内核参数不存在,sysctl --system 会报错
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/k8s.conf
sysctl --system
systemctl enable kubelet
3、初始化k8s集群
1、配置kubeadm.yaml
cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 root
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
127.0.0.1 ubuntu
192.168.86.81 common-01
192.168.86.82 common-02
192.168.86.83 common-03
192.168.86.84 mgmt-01
192.168.86.85 mgmt-02
192.168.86.86 mgmt-03
192.168.86.81 k8s.disallow.cn
mkdir /etc/kubeadm
cat > /etc/kubeadm/kubeadm.yaml <<'EOF'
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
kubernetesVersion: v1.32.6
clusterName: kubernetes
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
controlPlaneEndpoint: k8s.disallow.cn:6443
certificatesDir: /etc/kubernetes/pki
apiServer:
  extraArgs:
    - name: authorization-mode
      value: "Node,RBAC"
    - name: max-mutating-requests-inflight
      value: "1000"
    - name: max-requests-inflight
      value: "2000"
    - name: watch-cache
      value: "true"
    - name: watch-cache-sizes
      value: "node#2000,pod#10000,service#10000"
controllerManager:
  extraArgs:
    - name: bind-address
      value: "0.0.0.0"
    - name: kube-api-qps
      value: "100"
    - name: kube-api-burst
      value: "150"
scheduler:
  extraArgs:
    - name: bind-address
      value: "0.0.0.0"
    - name: kube-api-qps
      value: "100"
    - name: kube-api-burst
      value: "150"
etcd:
  local:
    dataDir: /var/lib/etcd
    extraArgs:
      - name: pre-vote
        value: "true"
      - name: auto-compaction-retention
        value: "1"
      - name: auto-compaction-mode
        value: "periodic"
      - name: quota-backend-bytes
        value: "17179869184"
      - name: heartbeat-interval
        value: "500"
      - name: election-timeout
        value: "3000"
      - name: snapshot-count
        value: "50000"
      - name: max-request-bytes
        value: "31457280"
      - name: max-txn-ops
        value: "256"
      - name: listen-metrics-urls
        value: "http://0.0.0.0:2381"
networking:
  dnsDomain: cluster.local
  podSubnet: 10.10.0.0/12
  serviceSubnet: 10.20.0.0/12
EOF
2、初始化集群
kubeadm config images list --config /etc/kubeadm/kubeadm.yaml
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.32.6
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.32.6
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.32.6
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.32.6
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.11.3
registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.16-0
kubeadm config images list --config /etc/kubeadm/kubeadm.yaml | grep hangzhou | awk '{print "nerdctl -n k8s.io pull "$1 " &"}' | bash
cd /etc/kubeadm/
kubeadm init --config kubeadm.yaml
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
3、部署calico网络插件
wget https://raw.githubusercontent.com/projectcalico/calico/v3.27.2/manifests/tigera-operator.yaml
cat > custom-resources.yaml <<'EOF'
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    bgp: Enabled
    containerIPForwarding: Enabled
    hostPorts: Enabled
    ipPools:
      - blockSize: 26
        cidr: 10.10.0.0/16
        disableBGPExport: false
        encapsulation: IPIP
        natOutgoing: Enabled
        nodeSelector: all()
    linuxDataplane: Iptables
    multiInterfaceMode: None
    nodeAddressAutodetectionV4:
      kubernetes: NodeInternalIP
  cni:
    ipam:
      type: Calico
    type: Calico
  controlPlaneReplicas: 2
  flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
  kubeletVolumePluginPath: /var/lib/kubelet
  logging:
    cni:
      logFileMaxAgeDays: 30
      logFileMaxCount: 10
      logFileMaxSize: 1024Mi
      logSeverity: Info
  nodeUpdateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  nonPrivileged: Disabled
  registry: docker.m.daocloud.io
  variant: Calico
EOF
kubectl create -f tigera-operator.yaml
kubectl create -f custom-resources.yaml
4、扩展master节点
kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs --config /etc/kubeadm/kubeadm.yaml
b1c46bbd0ef245eee826ff55693ada5e9980168051a08fc10a790c882bf356a8
kubeadm join k8s.disallow.cn:6443 --token lqbr2f.4365wbbnh1kjj8zi --discovery-token-ca-cert-hash sha256:5f1843f7e76b3213c8868d5820112964413b0d3db8755511d0830c464e37e20e --control-plane --certificate-key b1c46bbd0ef245eee826ff55693ada5e9980168051a08fc10a790c882bf356a8
5、加入mgmt节点
kubeadm token create --print-join-command