k8s architecture
Installing a k8s cluster
Official documentation and reference links
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#downloads-for-v1131
https://kubernetes.io/docs/home/?path=users&persona=app-developer&level=foundational
https://github.com/etcd-io/etcd
https://shengbao.org/348.html
https://github.com/coreos/flannel
http://www.cnblogs.com/blogscc/p/10105134.html
https://blog.csdn.net/xiegh2014/article/details/84830880
https://blog.csdn.net/tiger435/article/details/85002337
https://www.cnblogs.com/wjoyxt/p/9968491.html
https://blog.csdn.net/zhaihaifei/article/details/79098564
http://blog.51cto.com/jerrymin/1898243
http://www.cnblogs.com/xuxinkun/p/5696031.html
Environment preparation
#Prepare three freshly optimized virtual machines
#Size memory to what your computer can spare, at least 1G each
10.0.0.11 k8s-master 1G
10.0.0.12 k8s-node1 1G
10.0.0.13 k8s-node2 1G
#All nodes need hosts resolution
[root@k8s-master ~]# vim /etc/hosts
10.0.0.11 k8s-master
10.0.0.12 k8s-node1
10.0.0.13 k8s-node2
scp -rp /etc/hosts 10.0.0.12:/etc/hosts
scp -rp /etc/hosts 10.0.0.13:/etc/hosts
After basic optimization, take a snapshot and clone it
#Local hosts hijack; this step applies only to the author's local network environment
rm -rf /etc/yum.repos.d/local.repo
echo "192.168.37.202 mirrors.aliyun.com" >>/etc/hosts
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
Configuration on k8s-master
yum install etcd -y
vim /etc/etcd/etcd.conf
Line 6: ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
Line 21: ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
systemctl restart etcd.service
systemctl enable etcd.service
netstat -lntup
127.0.0.1:2380
:::2379
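Before moving on, it is worth confirming etcd actually answers on 2379. A minimal sanity check, assuming the etcd v2 API that the CentOS 7 etcd package ships:
etcdctl -C http://10.0.0.11:2379 cluster-health   # should report "cluster is healthy"
etcdctl set /test "hello"                        # write a throwaway key
etcdctl get /test                                # should print: hello
etcdctl rm /test                                 # clean up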
Installing kubernetes on the master node
#Install kubernetes-master
[root@k8s-master ~]# yum install -y kubernetes-master.x86_64
#Edit the apiserver config file
[root@k8s-master ~]# vim /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBE_ETCD_SERVERS="--etcd-servers=http://10.0.0.11:2379"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
[root@k8s-master ~]# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://10.0.0.11:8080"
#Restart the services and enable them at boot
systemctl restart kube-apiserver.service
systemctl enable kube-apiserver.service
systemctl restart kube-controller-manager.service
systemctl enable kube-controller-manager.service
systemctl restart kube-scheduler.service
systemctl enable kube-scheduler.service
#Check that every service came up healthy
[root@k8s-master ~]# kubectl get componentstatus
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
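The componentstatus output only covers the scheduler, controller-manager, and etcd; you can also poke the apiserver itself. A quick check (the /healthz and /version endpoints are standard on the insecure 8080 port):
curl -s http://10.0.0.11:8080/healthz   # should print: ok
curl -s http://10.0.0.11:8080/version   # JSON with the server's gitVersion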
Installing kubernetes on the node(s)
yum install kubernetes-node.x86_64 -y
vim /etc/kubernetes/config
Line 22: KUBE_MASTER="--master=http://10.0.0.11:8080"
vim /etc/kubernetes/kubelet
Line 5: KUBELET_ADDRESS="--address=0.0.0.0"
Line 8: KUBELET_PORT="--port=10250"
Line 11: KUBELET_HOSTNAME="--hostname-override=10.0.0.12"   (use each node's own IP; 10.0.0.13 on k8s-node2)
Line 14: KUBELET_API_SERVER="--api-servers=http://10.0.0.11:8080"
#Restart the services
systemctl enable kubelet.service
systemctl restart kubelet.service
systemctl enable kube-proxy.service
systemctl restart kube-proxy.service
#docker has been started as well (kubelet pulls it in)
systemctl status docker
Verify on the k8s-master node
[root@k8s-master ~]# kubectl get node
NAME STATUS AGE
k8s-node1 Ready 2m
k8s-node2 Ready 6s
Configure the flannel network on all nodes
yum install flannel -y
sed -i 's#http://127.0.0.1:2379#http://10.0.0.11:2379#g' /etc/sysconfig/flanneld
##On the master node:
etcdctl mk /atomic.io/network/config '{ "Network": "172.18.0.0/16" }'
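It doesn't hurt to confirm the key actually landed in etcd (v2 syntax again):
etcdctl get /atomic.io/network/config   # should echo back the JSON above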
yum install docker -y
systemctl enable flanneld.service
systemctl restart flanneld.service
service docker restart
systemctl enable docker
systemctl restart kube-apiserver.service
systemctl restart kube-controller-manager.service
systemctl restart kube-scheduler.service
##On the node(s):
systemctl enable flanneld.service
systemctl restart flanneld.service
service docker restart
systemctl restart kubelet.service
systemctl restart kube-proxy.service
vim /usr/lib/systemd/system/docker.service
#Add one line under the [Service] section
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
systemctl daemon-reload
systemctl restart docker
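At this point each node should have been handed its own /24 out of 172.18.0.0/16, and docker0 should sit inside it. A quick way to check; the file path and variable names below are what the stock flanneld unit on CentOS 7 writes, and the interface is flannel0 with the default UDP backend (flannel.1 with vxlan):
cat /run/flannel/subnet.env      # FLANNEL_SUBNET=172.18.x.1/24 and friends
ip addr show flannel0            # flannel's tunnel interface
ip addr show docker0             # must fall inside FLANNEL_SUBNET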
====================================================
#Run on all three machines: pull down an image of your own
wget http://192.168.37.202/linux59/docker_busybox.tar.gz
docker load -i docker_busybox.tar.gz
#Set the firewall rule on all nodes and make it stick across docker restarts
iptables -P FORWARD ACCEPT
vim /usr/lib/systemd/system/docker.service
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
systemctl daemon-reload
#Reboot all nodes
reboot
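After the reboot you can confirm the FORWARD policy survived; the first line of the chain listing shows the policy:
iptables -nL FORWARD | head -1   # expect: Chain FORWARD (policy ACCEPT)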
#Start a container on every node and test that they can ping each other
docker run -it docker.io/busybox:latest
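Inside each busybox shell, note the container's 172.18.x.x address and ping a container on another node; the addresses below are illustrative, yours will differ:
/ # ip addr show eth0       # e.g. 172.18.42.2/24
/ # ping -c 3 172.18.49.2   # a container IP on the other node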
Why the iptables rule is needed: docker 1.13+ sets the FORWARD chain policy to DROP on startup, which silently blocks cross-node pod traffic; the ExecStartPost line resets it to ACCEPT every time docker restarts. For ExecStartPost semantics, see Ruan Yifeng's introductory Systemd tutorial.
Configure the master as an image registry
On all nodes:
vim /etc/sysconfig/docker
OPTIONS='--selinux-enabled --log-driver=journald --signature-verification=false --registry-mirror=https://registry.docker-cn.com --insecure-registry=10.0.0.11:5000'
systemctl restart docker
On the master node:
[root@k8s-master ~]# vim /etc/docker/daemon.json
{
"registry-mirrors": ["https://registry.docker-cn.com"],
"insecure-registries": ["10.0.0.11:5000"]
}
#Upload the registry.tar.gz image archive
#Download link (extraction code: h9cg):
#https://pan.baidu.com/s/1OONeJ_pa1WnYjkvdYqjLnw
#Load the image, then start the registry container
docker load -i registry.tar.gz
docker run -d -p 5000:5000 --restart=always --name registry -v /opt/myregistry:/var/lib/registry registry
On the node(s):
#Tag the image and push it
docker images
docker tag docker.io/busybox:latest 10.0.0.11:5000/busybox:latest
docker images
docker push 10.0.0.11:5000/busybox:latest
Check on the master node
[root@k8s-master ~]# ll /opt/myregistry/docker/registry/v2/repositories/
total 0
drwxr-xr-x 5 root root 55 Sep 11 12:18 busybox
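The registry's HTTP API gives a cleaner view than poking the filesystem; /v2/_catalog and /v2/<name>/tags/list are part of the standard Docker Registry v2 API:
curl http://10.0.0.11:5000/v2/_catalog            # {"repositories":["busybox"]}
curl http://10.0.0.11:5000/v2/busybox/tags/list   # {"name":"busybox","tags":["latest"]}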
What is k8s and what can it do?
k8s is a management tool for docker clusters (it can also drive other container runtimes such as rkt).
Core features of k8s
Self-healing:
Restarts failed containers; replaces and reschedules containers when a node becomes unavailable; kills containers that fail a user-defined health check; and does not advertise a container to clients until it is ready to serve.
Elastic scaling:
Monitors container CPU load: if the average rises above 80% the container count is increased, and if it falls below 10% it is decreased.
Service discovery and load balancing:
No need to modify your application to use an unfamiliar service discovery mechanism; Kubernetes gives containers their own IP addresses and a single DNS name for a set of containers, and load-balances across them.
Rolling upgrades and one-click rollback:
Kubernetes rolls out changes to an application or its configuration progressively, monitoring application health to make sure it never kills all instances at once. If something goes wrong, Kubernetes rolls the change back for you, drawing on a growing ecosystem of deployment solutions.
A short history of k8s
2014: Google starts the project as a docker container-orchestration tool
July 2015: kubernetes 1.0 is released and the project joins the CNCF
2016: kubernetes sees off its two rivals, docker swarm and mesos (around version 1.2)
2017: version 1.5
2018: k8s graduates from the CNCF
2019: versions 1.13, 1.14, 1.15
CNCF: Cloud Native Computing Foundation
kubernetes (k8s): Greek for helmsman or pilot; the leader in container orchestration
Google distilled 15 years of container experience from its Borg management platform, rewriting Borg in golang as kubernetes
Ways to install k8s
yum install: version 1.5; the easiest to get working and the best fit for learning
Build from source: the hardest; can install the latest version
Binary install: tedious steps; can install the latest version; automate with shell, ansible, or saltstack
kubeadm: the easiest installer; needs internet access; can install the latest version
minikube: good for developers who want to try out k8s; needs internet access
k8s use cases
k8s is best suited to running microservice projects!
Microservices plus k8s give you elastic scaling.
Benefits of microservices:
Handle higher concurrency
Business robustness and high availability
Shorter recompile times after code changes
Continuous integration and continuous delivery
Automated code releases with jenkins
Common k8s resources
Creating a pod resource
The pod is the smallest resource unit.
Main sections of a k8s yaml file
apiVersion: v1    API version (current k8s releases are still v1.1x; v2 would only come with a major version bump)
kind: Pod         resource type (later you will meet deployment, service, and other types)
metadata:         attributes (name, labels, and so on)
spec:             details (which image, which ports to expose, startup command, and so on)
k8s_pod.yaml
apiVersion: v1                        ---version
kind: Pod                             ---resource type
metadata:                             ---attributes go below (name, labels, etc.)
  name: nginx                         ---resource name
  labels:                             ---labels (the plural form lets you attach more than one)
    app: web                          ---key-value pair; this is a web page
spec:                                 ---details
  containers:                         ---container info goes below
  - name: nginx                       ---container name
    image: 10.0.0.11:5000/nginx:1.13  ---the image this container uses
    ports:                            ---port info goes below
    - containerPort: 80               ---the port this container uses
    command: ["sleep","10000"]        ---the container's startup command (keeps it alive for 10000 seconds)
A pod always holds at least two containers: the pod infrastructure container plus your business containers (at most 1+4).
k8s_test.yaml:
apiVersion: v1
kind: Pod
metadata:
  name: test
  labels:
    app: web
spec:
  containers:
  - name: nginx
    image: 10.0.0.11:5000/nginx:1.13
    ports:
    - containerPort: 80
  - name: busybox
    image: 10.0.0.11:5000/busybox:latest
    command: ["sleep","10000"]
mkdir k8s_yaml
cd k8s_yaml/
mkdir pod
cd pod/
[root@k8s-master pod]# vim k8s_pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: web
spec:
  containers:
  - name: nginx
    image: 10.0.0.11:5000/nginx:1.13
    ports:
    - containerPort: 80
==================================================
Run on the master
[root@k8s-master pod]# kubectl create -f k8s_pod.yaml
pod "nginx" created
[root@k8s-master pod]# kubectl get pod ---view pod information
NAME READY STATUS RESTARTS AGE
nginx 0/1 ContainerCreating 0 7m
[root@k8s-master pod]# kubectl get pods -o wide ---view detailed pod info (which node each pod landed on)
NAME READY STATUS RESTARTS AGE IP NODE
nginx 0/1 ContainerCreating 0 2m <none> k8s-node2
#The pod hangs in ContainerCreating because the nodes cannot pull its images; load and push nginx first
wget http://192.168.37.202/linux59/docker_nginx1.13.tar.gz
docker load -i docker_nginx1.13.tar.gz
docker tag docker.io/nginx:1.13 10.0.0.11:5000/nginx:1.13
docker push 10.0.0.11:5000/nginx:1.13
kubectl describe pod nginx
kubectl get nodes
#Upload the pod-infrastructure-latest.tar.gz image archive
[root@k8s-master ~]# ls pod-infrastructure-latest.tar.gz
pod-infrastructure-latest.tar.gz
#Load, tag, and push the image
docker load -i pod-infrastructure-latest.tar.gz
docker tag docker.io/tianyebj/pod-infrastructure:latest 10.0.0.11:5000/rhel7/pod-infrastructure:latest
docker push 10.0.0.11:5000/rhel7/pod-infrastructure:latest
Run on the node(s)
#Edit the kubelet config file
[root@k8s-node2 ~]# vim /etc/kubernetes/kubelet
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=10.0.0.11:5000/rhel7/pod-infrastructure:latest"
#Restart kubelet
systemctl restart kubelet.service
Check on the master
[root@k8s-master pod]# kubectl describe pod nginx ---view the detailed description of the nginx pod
[root@k8s-master pod]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 0 27m
#Create the second pod's config file
[root@k8s-master pod]# vim k8s_test.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test
  labels:
    app: web
spec:
  containers:
  - name: nginx
    image: 10.0.0.11:5000/nginx:1.13
    ports:
    - containerPort: 80
  - name: busybox
    image: 10.0.0.11:5000/busybox:latest
    command: ["sleep","10000"]
[root@k8s-master pod]# kubectl create -f k8s_test.yaml
[root@k8s-master pod]# kubectl describe pod test
[root@k8s-master pod]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 1/1 Running 0 55m 172.18.49.2 k8s-node2
test 2/2 Running 0 11m 172.18.42.2 k8s-node1
#View the containers on node1
[root@k8s-node1 ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
142ce61f2cbb 10.0.0.11:5000/busybox:latest "sleep 10000" 9 minutes ago Up 9 minutes k8s_busybox.7e7ae56a_test_default_6b11a096-d478-11e9-b324-000c29b2785a_dde70056
f09e9c10deda 10.0.0.11:5000/nginx:1.13 "nginx -g 'daemon ..." 9 minutes ago Up 9 minutes k8s_nginx.91390390_test_default_6b11a096-d478-11e9-b324-000c29b2785a_0d95902d
eec2c8045724 10.0.0.11:5000/rhel7/pod-infrastructure:latest "/pod" 10 minutes ago Up 10 minutes k8s_POD.e5ea03c1_test_default_6b11a096-d478-11e9-b324-000c29b2785a_4df2c4f4
The pod is the smallest resource unit in k8s; note the extra pod-infrastructure container above, which owns the pod's shared network namespace.
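Because all containers in a pod share that one network namespace, the busybox container can reach nginx on localhost. A quick check, assuming the test pod above is Running (busybox's wget applet fetches the nginx welcome page):
kubectl exec test -c busybox -- wget -qO- http://127.0.0.1:80   # prints the nginx index page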
The ReplicationController resource
Replication controller
rc:
Keeps the specified number of pods alive at all times; the rc finds its pods through the label selector.
#Create an rc
[root@k8s-master k8s_yaml]# vim k8s_rc.yaml
apiVersion: v1                          ---version
kind: ReplicationController             ---resource type
metadata:                               ---attributes (an rc's metadata only carries a name)
  name: nginx                           ---resource name
spec:                                   ---details
  replicas: 5                           ---replica count
  selector:                             ---label selector (lets the rc manage its pod count)
    app: myweb                          ---selector label
  template:                             ---pod template
    metadata:
      labels:                           ---pod labels (the pod name may be left empty; it is auto-generated)
        app: myweb                      ---pod label; must match the selector
    spec:
      containers:
      - name: myweb
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80
#If the resource already exists and you edit the yaml file, run kubectl apply -f k8s_rc.yaml afterwards and the rc configuration is updated in place
[root@k8s-master k8s_yaml]# kubectl create -f k8s_rc.yaml
replicationcontroller "nginx" created
[root@k8s-master k8s_yaml]# kubectl get rc
NAME DESIRED CURRENT READY AGE
nginx 5 5 0 6s
DESIRED ---desired replica count
CURRENT ---current replica count
READY ---replicas ready to serve
AGE ---age of the rc
[root@k8s-master k8s_yaml]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx 1/1 Running 2 15h
nginx-b2l78 1/1 Running 0 15s
nginx-gh210 1/1 Running 0 15s
nginx-gs025 1/1 Running 0 15s
nginx-k4hp5 1/1 Running 0 15s
nginx-twf7x 1/1 Running 0 15s
test 2/2 Running 4 15h
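The rc's guarantee is easy to demonstrate: kill one of its pods and it is immediately replaced. The pod name below is taken from the listing above; pick whichever one your cluster shows:
kubectl delete pod nginx-b2l78   # delete one managed pod
kubectl get pods                 # a new nginx-xxxxx pod appears within seconds, keeping DESIRED=5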
Common operations on k8s resources:
create, delete, update, query
kubectl get pod|rc ---list resources of the given type
kubectl describe pod nginx ---detailed description of the nginx pod (the go-to command for troubleshooting)
kubectl delete pod nginx, or kubectl delete -f xxx.yaml ---delete a pod
kubectl edit pod nginx ---edit the live yaml of a resource
If a node misbehaves after an edit, restart kubelet.service on that node to recover.
[root@k8s-master k8s_yaml]# kubectl edit rc nginx
spec:
  replicas: 10
[root@k8s-master k8s_yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx 1/1 Running 2 16h 172.18.49.2 k8s-node2
nginx-4dht9 0/1 ContainerCreating 0 6s <none> k8s-node1
nginx-9661w 0/1 ContainerCreating 0 6s <none> k8s-node1
nginx-9ntg5 1/1 Running 0 1m 172.18.49.4 k8s-node2
nginx-b2l78 1/1 Running 0 11m 172.18.42.3 k8s-node1
nginx-gh210 1/1 Running 0 11m 172.18.49.3 k8s-node2
nginx-gs025 1/1 Running 0 11m 172.18.42.4 k8s-node1
nginx-jfg7f 0/1 ContainerCreating 0 6s <none> k8s-node2
nginx-l8l6h 0/1 ContainerCreating 0 6s <none> k8s-node1
nginx-nl4s0 1/1 Running 0 1m 172.18.49.5 k8s-node2
nginx-sld3s 0/1 ContainerCreating 0 6s <none> k8s-node2
test 2/2 Running 4 15h 172.18.42.2 k8s-node1
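Editing replicas by hand works, but the same scale operation has a one-liner; kubectl scale has supported replication controllers since early releases. A sketch:
kubectl scale rc nginx --replicas=3   # shrink back from 10 to 3; surplus pods are terminated
kubectl get rc nginx                  # DESIRED/CURRENT drop to 3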