Day 11 - K8s Log Collection and Building a Highly Available Kubernetes Cluster (Hands-on)
0. Review of Yesterday's Content
- Homework walkthrough
- Ingress resources
- Integrating Jenkins with K8s
1. Log Collection
1.1 K8s log collection project: diagrams of the three architecture options
1.2 Deploying ES
[root@k8s231 ~]# docker pull elasticsearch:7.17.5
[root@k8s231 ~]# docker tag elasticsearch:7.17.5 harbor.oldboyedu.com/project/elasticsearch:7.17.5
[root@k8s231 ~]# docker login -u admin -p 1 harbor.oldboyedu.com
[root@k8s231 ~]# docker push harbor.oldboyedu.com/project/elasticsearch:7.17.5
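To confirm the push landed, you can query Harbor's repository list (a quick sanity check; the API path assumes Harbor v2.x):
[root@k8s231 ~]# curl -s -u admin:1 'http://harbor.oldboyedu.com/api/v2.0/projects/project/repositories' | grep elasticsearch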
[root@k8s231 elasticstack]# cat deploy-es.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: oldboyedu-efk
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch
  namespace: oldboyedu-efk
  labels:
    k8s-app: elasticsearch
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: elasticsearch
  template:
    metadata:
      labels:
        k8s-app: elasticsearch
    spec:
      containers:
      # Pin the ES version to deploy
      # - image: elasticsearch:7.17.5
      - image: harbor.oldboyedu.com/project/elasticsearch:7.17.5
        name: elasticsearch
        resources:
          limits:
            cpu: 2
            memory: 3Gi
          requests:
            cpu: 0.5
            memory: 500Mi
        env:
        # Cluster discovery mode; since this is a lab, run a single node
        - name: "discovery.type"
          value: "single-node"
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        volumeMounts:
        - name: elasticsearch-data
          mountPath: /usr/share/elasticsearch/data
      volumes:
      - name: elasticsearch-data
        persistentVolumeClaim:
          claimName: es-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: es-pvc
  namespace: oldboyedu-efk
spec:
  storageClassName: "managed-nfs-storage"
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: oldboyedu-efk
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    k8s-app: elasticsearch
[root@k8s231 elasticstack]#
[root@k8s231 elasticstack]# kubectl apply -f deploy-es.yaml
namespace/oldboyedu-efk created
deployment.apps/elasticsearch created
persistentvolumeclaim/es-pvc created
service/elasticsearch created
[root@k8s231 elasticstack]#
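Before moving on, verify that the ES Pod is Running and answering on port 9200. A minimal check (it relies on the curl binary bundled in the official ES 7.x image):
[root@k8s231 elasticstack]# kubectl -n oldboyedu-efk get pods,svc,pvc
[root@k8s231 elasticstack]# kubectl -n oldboyedu-efk exec deploy/elasticsearch -- curl -s http://127.0.0.1:9200
A JSON banner reporting version 7.17.5 means ES is up and the PVC mounted correctly.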
1.3 Deploying Kibana
[root@k8s231 ~]# docker pull kibana:7.17.5
[root@k8s231 ~]# docker tag kibana:7.17.5 harbor.oldboyedu.com/project/kibana:7.17.5
[root@k8s231 ~]# docker push harbor.oldboyedu.com/project/kibana:7.17.5
[root@k8s231 elasticstack]# cat deploy-kibana.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: oldboyedu-efk
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: kibana
  template:
    metadata:
      labels:
        k8s-app: kibana
    spec:
      containers:
      - name: kibana
        # image: kibana:7.17.5
        image: harbor.oldboyedu.com/project/kibana:7.17.5
        resources:
          limits:
            cpu: 2
            memory: 2Gi
          requests:
            cpu: 0.5
            memory: 500Mi
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch.oldboyedu-efk.svc.oldboyedu.com:9200
        - name: I18N_LOCALE
          value: zh-CN
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: oldboyedu-kibana
  namespace: oldboyedu-efk
spec:
  # type: NodePort
  ports:
  - port: 5601
    protocol: TCP
    targetPort: ui
    # nodePort: 35601
  selector:
    k8s-app: kibana
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: oldboyedu-kibana-ing
  namespace: oldboyedu-efk
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: kibana.oldboyedu.com
    http:
      paths:
      - backend:
          service:
            name: oldboyedu-kibana
            port:
              number: 5601
        path: "/"
        pathType: "Prefix"
[root@k8s231 elasticstack]#
[root@k8s231 elasticstack]# kubectl apply -f deploy-kibana.yaml
deployment.apps/kibana created
service/oldboyedu-kibana created
ingress.networking.k8s.io/oldboyedu-kibana-ing created
[root@k8s231 elasticstack]#
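To reach Kibana through the Traefik Ingress, kibana.oldboyedu.com must resolve to a node where Traefik is listening. A quick client-side check (the node IP 10.0.0.231 is an assumption; point it at whichever node exposes Traefik):
[root@k8s231 elasticstack]# echo '10.0.0.231 kibana.oldboyedu.com' >> /etc/hosts
[root@k8s231 elasticstack]# curl -s -o /dev/null -w '%{http_code}\n' http://kibana.oldboyedu.com/
A 200 (or a 302 redirect into the Kibana app) confirms the Ingress -> Service -> Pod chain.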
1.4 Deploying Filebeat
[root@k8s231 ~]# docker pull filebeat:7.10.2
Error response from daemon: pull access denied for filebeat, repository does not exist or may require 'docker login': denied: requested access to the resource is denied
[root@k8s231 ~]# docker pull elastic/filebeat:7.10.2
[root@k8s231 ~]# docker tag elastic/filebeat:7.10.2 harbor.oldboyedu.com/project/filebeat:7.10.2
[root@k8s231 ~]# docker push harbor.oldboyedu.com/project/filebeat:7.10.2
[root@k8s231 elasticstack]# cat deploy-filebeat.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: oldboyedu-efk
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false
    output.elasticsearch:
      # hosts: ['elasticsearch.oldboyedu-efk:9200']
      # hosts: ['elasticsearch.oldboyedu-efk.svc.oldboyedu.com:9200']
      hosts: ['elasticsearch:9200']
      # Changing the index name is not recommended: once it is changed, Pod logs will no longer be collected!
      # Only uncomment the line below if you are sure you don't need Pod logs and want a custom index name.
      # index: 'oldboyedu-linux-elk-%{+yyyy.MM.dd}'
    # Index template settings
    # setup.ilm.enabled: false
    # setup.template.name: "oldboyedu-linux-elk"
    # setup.template.pattern: "oldboyedu-linux-elk*"
    # setup.template.overwrite: true
    # setup.template.settings:
    #   index.number_of_shards: 3
    #   index.number_of_replicas: 0
---
# Note: the docker input type was deprecated upstream in Filebeat 7.2; switch to the container type later.
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: oldboyedu-efk
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: oldboyedu-efk
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
        operator: Exists
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        # Note: the officially recommended Filebeat version is "elastic/filebeat:7.10.2".
        # Newer versions may fail to collect the Pod log metrics of the K8s cluster.
        # In my testing, the issue was still unresolved as of 7.12.2 (released 2022-04-01)!
        # Filebeat and ES versions do not have to match; the ES version I tested against was 7.17.2.
        #
        # TODO: try a newer image later and switch the input type to container, since the docker input type was deprecated in Filebeat 7.2!
        # image: elastic/filebeat:7.10.2
        image: harbor.oldboyedu.com/project/filebeat:7.10.2
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        # Useful for temporary debugging when something goes wrong; remember to comment out args.
        # command: ["sleep","3600"]
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: oldboyedu-efk
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: oldboyedu-efk
  labels:
    k8s-app: filebeat
[root@k8s231 elasticstack]#
[root@k8s231 elasticstack]# kubectl apply -f deploy-filebeat.yaml
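Once applied, one filebeat Pod should be scheduled per node, and filebeat-* indices should start showing up in ES. A quick verification sketch:
[root@k8s231 elasticstack]# kubectl -n oldboyedu-efk get ds,pods -o wide
[root@k8s231 elasticstack]# kubectl -n oldboyedu-efk exec deploy/elasticsearch -- curl -s 'http://127.0.0.1:9200/_cat/indices?v' | grep filebeat
In Kibana, create an index pattern matching "filebeat-*" to browse the collected Pod logs.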
2. Monitoring System
2.1 Deploying Prometheus
(1) Download the resource manifests
[root@k8s231 project]# wget http://192.168.15.253/Kubernetes/day11-/prometheus.zip
[root@k8s231 project]# unzip prometheus.zip
[root@k8s231 project]# cd prometheus/
[root@k8s231 prometheus]#
(2) Adjust the Endpoints (ep) resource configuration to match your own cluster, as shown in the figure above
[root@k8s231 prometheus]# cd serviceMonitor/ && ls | xargs grep ip
kube-state-metrics-serviceMonitor.yaml: insecureSkipVerify: true
kube-state-metrics-serviceMonitor.yaml: insecureSkipVerify: true
node-exporter-serviceMonitor.yaml: insecureSkipVerify: true
prometheus-EtcdService.yaml: - ip: 10.0.0.151
prometheus-kubeControllerManagerService.yaml: - ip: 10.0.0.151
prometheus-KubeProxyService.yaml: - ip: 10.0.0.151
prometheus-KubeProxyService.yaml: - ip: 10.0.0.152
prometheus-KubeProxyService.yaml: - ip: 10.0.0.153
prometheus-kubeSchedulerService.yaml: - ip: 10.0.0.151
prometheus-serviceMonitorKubelet.yaml: insecureSkipVerify: true
prometheus-serviceMonitorKubelet.yaml: insecureSkipVerify: true
[root@k8s231 prometheus]# sed -i 's#10.0.0.151#10.0.0.231#' `ls serviceMonitor/*`
[root@k8s231 prometheus]#
[root@k8s231 prometheus]# sed -i 's#10.0.0.152#10.0.0.232#' `ls serviceMonitor/*`
[root@k8s231 prometheus]#
[root@k8s231 prometheus]# sed -i 's#10.0.0.153#10.0.0.233#' `ls serviceMonitor/*`
[root@k8s231 prometheus]#
[root@k8s231 prometheus]# grep ip serviceMonitor/*
serviceMonitor/kube-state-metrics-serviceMonitor.yaml: insecureSkipVerify: true
serviceMonitor/kube-state-metrics-serviceMonitor.yaml: insecureSkipVerify: true
serviceMonitor/node-exporter-serviceMonitor.yaml: insecureSkipVerify: true
serviceMonitor/prometheus-EtcdService.yaml: - ip: 10.0.0.231
serviceMonitor/prometheus-kubeControllerManagerService.yaml: - ip: 10.0.0.231
serviceMonitor/prometheus-KubeProxyService.yaml: - ip: 10.0.0.231
serviceMonitor/prometheus-KubeProxyService.yaml: - ip: 10.0.0.232
serviceMonitor/prometheus-KubeProxyService.yaml: - ip: 10.0.0.233
serviceMonitor/prometheus-kubeSchedulerService.yaml: - ip: 10.0.0.231
serviceMonitor/prometheus-serviceMonitorKubelet.yaml: insecureSkipVerify: true
serviceMonitor/prometheus-serviceMonitorKubelet.yaml: insecureSkipVerify: true
[root@k8s231 prometheus]#
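For reference, each of the ep-style files above pairs a Service with a manually maintained Endpoints object, which is how Prometheus scrapes control-plane components running outside the Pod network. A minimal sketch of the pattern (names and the port are illustrative; the actual files in the zip may differ):
apiVersion: v1
kind: Service
metadata:
  name: kube-scheduler
  namespace: kube-system
  labels:
    k8s-app: kube-scheduler
spec:
  clusterIP: None
  ports:
  - name: https-metrics
    port: 10259
---
apiVersion: v1
kind: Endpoints
metadata:
  # Must share the Service's name so it backs that Service
  name: kube-scheduler
  namespace: kube-system
  labels:
    k8s-app: kube-scheduler
subsets:
- addresses:
  - ip: 10.0.0.231
  ports:
  - name: https-metrics
    port: 10259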
(3) Create the custom resource definitions
kubectl apply -f setup
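The setup directory registers the Prometheus Operator CRDs; make sure they are Established before applying the remaining manifests, or those will fail with "no matches for kind" errors. A quick check (the CRD group assumes the upstream kube-prometheus stack):
kubectl get crd | grep monitoring.coreos.com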
(4) Create the alertmanager service
kubectl apply -f alertmanager
(5) Create the node-exporter service
kubectl apply -f node-exporter
(6) Create the grafana service
kubectl apply -f grafana
(7) Create the prometheus service
kubectl apply -f prometheus
(8) Create the serviceMonitor resources
kubectl apply -f serviceMonitor
(9) Access grafana to check the monitoring data
kubectl get svc -A | grep grafana
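If the grafana Service is ClusterIP-only, a port-forward is the quickest way to open the UI (the "monitoring" namespace is the kube-prometheus default and an assumption here):
kubectl -n monitoring port-forward svc/grafana 3000:3000
Then browse to http://127.0.0.1:3000 and log in (the default credentials are admin/admin).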
(10) Import a dashboard to visualize the data
Omitted; see the figure below.
Tips:
I have already shared several dashboard templates with you; the one imported in the figure below is the "node-exporter_rev17.json" file.
Prepare 5 machines for a binary deployment of a highly available K8s cluster:
10.0.0.201 k8s-master01
10.0.0.202 k8s-master02
10.0.0.203 k8s-master03
10.0.0.204 k8s-node01
10.0.0.205 k8s-node02
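These names must resolve on every machine; a minimal sketch is to append the list above to /etc/hosts on all five nodes:
cat >> /etc/hosts <<'EOF'
10.0.0.201 k8s-master01
10.0.0.202 k8s-master02
10.0.0.203 k8s-master03
10.0.0.204 k8s-node01
10.0.0.205 k8s-node02
EOF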
3. Binary Deployment of K8s
3.1 Preparing the environment for the binary K8s deployment
1. Download the software package on all nodes
[root@k8s-master01 ~]# curl -o softwares.zip http://192.168.15.253/Kubernetes/day11-/softwares
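Alternatively, fetch the archive once and fan it out to the other nodes (a sketch; it assumes passwordless root SSH between the nodes is already configured):
[root@k8s-master01 ~]# for host in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp softwares.zip ${host}:~; done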