Warm-up:
Kubernetes documentation (Chinese)
Kubernetes cluster deployment
Source repository
[root@Fone7 dashboard]# kubectl create -f dashboard-configmap.yaml
configmap/kubernetes-dashboard-settings created
[root@Fone7 dashboard]# kubectl create -f dashboard-rbac.yaml
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
[root@Fone7 dashboard]# kubectl create -f dashboard-secret.yaml
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-key-holder created
Use the Aliyun image registry for the dashboard image:
# vim dashboard-controller.yaml
image: registry.cn-beijing.aliyuncs.com/kubernetes2s/kubernetes-dashboard-amd64
# kubectl create -f dashboard-controller.yaml
serviceaccount/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
[root@Fone7 dashboard]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
kubernetes-dashboard-77fd5947f-gqgft 1/1 Running 0 5m3s
# vim dashboard-service.yaml
spec:
  # add the following line
  type: NodePort
...
# kubectl create -f dashboard-service.yaml
service/kubernetes-dashboard created
# vim k8s-admin.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dashboard-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
# kubectl create -f k8s-admin.yaml
serviceaccount/dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
[root@Fone7 k8s]# kubectl get secret -n kube-system
NAME TYPE DATA AGE
dashboard-admin-token-vzmfz kubernetes.io/service-account-token 3 49s
default-token-9slj4 kubernetes.io/service-account-token 3 23h
kubernetes-dashboard-certs Opaque 0 43m
kubernetes-dashboard-key-holder Opaque 2 43m
kubernetes-dashboard-token-jmk6l kubernetes.io/service-account-token 3 20m
[root@Fone7 k8s]# kubectl describe secret dashboard-admin-token-vzmfz -n kube-system
Name: dashboard-admin-token-vzmfz
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: dashboard-admin
kubernetes.io/service-account.uid: 103c7142-9973-11ea-b60d-080027b6e76f
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1359 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tdnptZnoiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMTAzYzcxNDItOTk3My0xMWVhLWI2MGQtMDgwMDI3YjZlNzZmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.twUnFS7avAu4B8IuozgYDbic8GxkrIyc7P205-pG0h5giiQeU-sIJNWc-fKR0DLDzb98QZqAILH6CNCUNwSJwynUxIBoKIkJqaA-ljfGeHh4xSCCoNb7vG66UPjP1mC5woxyIRMg5TTeAWpkMKUm21sp6HVsZHLxyMUk99EtpXa13vWsv2HSN_LWG5zN2zndKFQQ-57K_p5DoJxqHGDLoSJOQ1_DSuFs1wydH15ot0PORaU0nLGNHlPrtWYlCyARhC4tiUmwMsx0c6LqTh3ZbFmXiswFwGAhSVNMgfAS0YIBGwTAndEi_lPsmA_1cV0k2Gn7GoHIxNvKZtYtWe735g
[root@Fone7 dashboard]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.0.0.19 <none> 443:49208/TCP 80s
Open https://192.168.33.8:49208 in Firefox. (Chrome cannot open it.)
Paste the token from step 4 into the token field and log in.
kubectl get secret | awk '/service-account-token/{print $1}'
KUBE_TOKEN=$(kubectl get secret SERVICEACCOUNT_SECRET_NAME -o jsonpath='{.data.token}' | base64 -d)
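Putting the two together, a one-liner to print the dashboard login token (assuming the dashboard-admin ServiceAccount created above):
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/dashboard-admin-token/{print $1}') | awk '/^token:/{print $2}'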
kubectl config set-cluster kubernetes --kubeconfig=/root/def-ns-admin.conf --server="https://192.168.33.6:6443" --certificate-authority=/etc/kubernetes/pki/ca.crt --embed-certs=true
kubectl config set-credentials def-ns-admin --token=$KUBE_TOKEN --kubeconfig=/root/def-ns-admin.conf
kubectl config set-context def-ns-admin@kubernetes --cluster=kubernetes --user=def-ns-admin --kubeconfig=/root/def-ns-admin.conf
kubectl config view --kubeconfig=/root/def-ns-admin.conf
kubectl config use-context def-ns-admin@kubernetes --kubeconfig=/root/def-ns-admin.conf
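A quick check that the new kubeconfig works (def-ns-admin presumably only has rights in the default namespace):
kubectl get pods --kubeconfig=/root/def-ns-admin.conf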
# scp -r /opt/kubernetes/ master2:/opt/
# scp /usr/lib/systemd/system/{kube-apiserver,kube-scheduler,kube-controller-manager}.service master2:/usr/lib/systemd/system
# cd /opt/kubernetes/cfg/
# vim kube-apiserver    # change --bind-address and --advertise-address to this master's own IP
# systemctl start kube-apiserver
# systemctl start kube-scheduler
# systemctl start kube-controller-manager
# ps -fe | grep kube
# /opt/kubernetes/bin/kubectl get cs
# /opt/kubernetes/bin/kubectl get nodes
# vim /etc/nginx/nginx.conf
# increase the number of worker processes
worker_processes 4;
# add the following stream block above the http block
stream {
    log_format main "$remote_addr $upstream_addr - $time_local $status";
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 192.168.33.7:6443;
    }
    server {
        listen 0.0.0.0:88;
        proxy_pass k8s-apiserver;
    }
}
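For actual high availability, both apiservers would normally go into the upstream (a sketch, assuming the second master is 192.168.33.6, the address used for the apiserver earlier):
    upstream k8s-apiserver {
        server 192.168.33.7:6443;
        server 192.168.33.6:6443;
    }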
# systemctl restart nginx
# systemctl status nginx
# ps -ef | grep nginx
# yum install -y keepalived
# vim /etc/keepalived/keepalived.conf
global_defs {
    # email recipients
    notification_email {
        [email protected]
        [email protected]
        [email protected]
    }
    # sender address
    notification_email_from [email protected]
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_MASTER
}
vrrp_script check_nginx {
    script "/usr/local/nginx/sbin/check_nginx.sh"
}
vrrp_instance VI_1 {
    state MASTER            # use BACKUP on the standby node
    interface enp0s3        # the NIC that will carry the VIP
    virtual_router_id 51    # VRRP router ID; must be unique per instance
    priority 100            # priority; set to 90 on the standby
    advert_int 1            # VRRP advertisement interval, default 1s
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.33.10/24
    }
    track_script {
        check_nginx
    }
}
# vim /usr/local/nginx/sbin/check_nginx.sh
#!/bin/bash
# if nginx has died, stop keepalived so the VIP fails over
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
# chmod +x /usr/local/nginx/sbin/check_nginx.sh
# systemctl start keepalived
# ip a    # check that the VIP is present
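A quick failover test from the master node:
# systemctl stop nginx    # check_nginx.sh then stops keepalived here
# ip a                    # the VIP is gone locally and should appear on the backup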
# cd /opt/kubernetes/cfg/
# grep 7 *    # find configs that still reference the old master IP (192.168.33.7)
# vim bootstrap.kubeconfig
# vim kubelet.kubeconfig
# vim kube-proxy.kubeconfig
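In all three kubeconfigs the change is the same: point the server line at the VIP through the port-88 nginx listener configured above (a sketch):
server: https://192.168.33.10:88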
# systemctl restart kubelet
# systemctl restart kube-proxy
# ps -ef | grep kube
Back on the master, check the cluster state: kubectl get node
(each component points at its kubeconfig file through the --kubeconfig option in its config)
In YAML, "---" marks the start of a document and separates multiple documents within one file.
# vim nginx-deployment.yaml
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2 # tells deployment to run 2 pods matching the template
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
  selector:
    app: nginx
# kubectl create -f nginx-deployment.yaml
Watch pod creation in real time:
# kubectl get pod -w
# kubectl get svc
List all supported API versions:
# kubectl api-versions
Example 1: export a manifest with the run command
kubectl run nginx --image=nginx --replicas=3 --dry-run -o yaml > my-deployment.yaml
Notes:
--dry-run does not actually create anything; it only validates the request.
-o sets the output format; yaml and json are both supported.
Example 2: export an existing object with the get command
kubectl get deploy/nginx --export -o yaml > my-deploy.yaml
Same notes as Example 1.
If you forget a keyword, look it up with kubectl explain --help.
For example, to see the fields available for containers: kubectl explain pods.spec.containers
This prints the top-level fields. Reading the output: <string> is a string, <Object> is an object, and [] is an array. In the YAML file an object needs indentation, and each array item is introduced with a dash. For objects and object arrays whose fields you don't know yet, keep appending path segments and running explain, or pass --recursive to print every nested field at once.
kubectl api-resources prints all registered API resources.
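For instance, to narrow that list to namespaced resources only:
# kubectl api-resources --namespaced=true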
docker login -p [password] -u [username]
cat .docker/config.json
cat .docker/config.json | base64 -w 0
# vim registry-pull-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: registry-pull-secret
  namespace: blog
data:
  .dockerconfigjson: [the base64 string generated above]
type: kubernetes.io/dockerconfigjson
# kubectl create -f registry-pull-secret.yaml
# kubectl get secret
The secret is configured correctly only if its DATA count in the output is greater than 0.
...
imagePullSecrets:
- name: registry-pull-secret
...
Pod resource limits (official docs)
# vim wordpress.yaml
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: db
    image: mysql
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: "333333"
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m" # half a CPU core
  - name: wp
    image: wordpress
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m"
# kubectl apply -f wordpress.yaml        # start the pod
# kubectl describe pod frontend          # check how the pod was scheduled
# kubectl describe nodes 192.168.33.8    # check the node's resource usage
# kubectl get ns                         # list all namespaces
Restart policy (restartPolicy, at the same level as containers; valid values: Always, OnFailure, Never)
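A minimal sketch of where it sits (Always is the default):
apiVersion: v1
kind: Pod
metadata:
  name: restart-demo    # hypothetical name
spec:
  restartPolicy: OnFailure
  containers:
  - name: main
    image: busybox
    args: ["sh", "-c", "exit 1"]   # exits non-zero, so it is restarted under OnFailure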
Health checks (Probes) (official docs)
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: busybox
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5
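Besides exec, probes also support httpGet and tcpSocket. A hedged httpGet variant (path and port are illustrative):
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
      initialDelaySeconds: 3
      periodSeconds: 3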
Scheduling to a specific node with nodeName (binds the Pod directly, bypassing the scheduler):
apiVersion: v1
kind: Pod
metadata:
  name: pod-example
  labels:
    app: nginx
spec:
  nodeName: 192.168.33.8
  containers:
  - name: nginx
    image: nginx:1.15
Check the scheduling details with kubectl describe pod [pod_name].
# kubectl label nodes 192.168.33.8 team=a
# kubectl label nodes 192.168.33.9 team=b
# kubectl get nodes --show-labels
Scheduling with nodeSelector, matching the node labels added above:
apiVersion: v1
kind: Pod
metadata:
  name: pod-example
spec:
  nodeSelector:
    team: b
  containers:
  - name: nginx
    image: nginx:1.15
Pod troubleshooting
Pod phases: official docs
Value | Description
---|---
Pending | The Pod has been submitted to Kubernetes but cannot be created yet, e.g. the image download is slow or scheduling failed. Check the first Events entries in kubectl describe pod [pod_name].
Running | The Pod is bound to a node and all of its containers have been created. At least one container is running, starting, or restarting.
Succeeded | All containers in the Pod terminated successfully and will not be restarted.
Failed | All containers have terminated and at least one terminated in failure, i.e. exited non-zero or was killed by the system. Inspect with kubectl logs [pod_name].
Unknown | The apiserver could not obtain the Pod's state, usually because the master failed to talk to the kubelet on the Pod's host.
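A few handy triage commands (mypod is a hypothetical pod name):
# kubectl describe pod mypod
# kubectl logs mypod --previous    # logs of the previous, crashed container
# kubectl get events --sort-by=.metadata.creationTimestamp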
# vim my-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
  namespace: default
spec:
  clusterIP: 10.0.0.123
  selector:
    app: nginx
  ports:
  - protocol: TCP
    name: http
    port: 80          # service port
    targetPort: 8080  # container port
# kubectl apply -f my-service.yaml
# kubectl get svc                  # list all services
# kubectl get ep                   # list the backend ENDPOINTS
# kubectl describe svc my-service  # show the service in detail
apiVersion: v1
kind: Service
metadata:
  name: my-service2
spec:
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 8080
    nodePort: 48300
  type: NodePort
ss -antpu | grep 48300           # the NodePort is listening on the node
ipvsadm -ln                      # list the ipvs virtual servers and their backends
iptables-save | grep 10.0.0.123  # look for rules referencing the service's clusterIP
IPVS load balancing is implemented by the kernel's scheduling module. (Alibaba Cloud SLB builds its layer-4 load balancing on LVS in the same way.)
Deploy CoreDNS from its YAML file; one line needs changing:
proxy . /etc/resolv.conf
Run kubectl apply -f coredns.yaml to deploy.
Run kubectl get pods -n kube-system to check the pod is running normally.
Test that name resolution works:
# kubectl run -it --image=busybox:1.28.4 --rm --restart=Never sh
/ # nslookup kubernetes
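Services in other namespaces resolve as <service>.<namespace>; for example, with my-service from earlier:
/ # nslookup my-service.default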
Ingress is associated with pods through a service; the Ingress Controller performs the actual load balancing across the pods.
Notes:
• Change the image to a domestic mirror: lizhenliang/nginx-ingress-controller:0.20.0
• It uses the host network: hostNetwork: true
• Make sure ports 80/443 on the nodes are not already in use
• kube-proxy on all nodes must be configured for ipvs with the same scheduling algorithm; check with ipvsadm -ln
Example 1: HTTP forwarding with Ingress
ingress_test.yaml (only the bottom two fields need changing):
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: simple-fanout-example
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx-service
          servicePort: 80
# kubectl apply -f ingress_test.yaml
ingress.extensions/simple-fanout-example created
# kubectl get ingress
NAME HOSTS ADDRESS PORTS AGE
simple-fanout-example foo.bar.com 80 35s
Add a hosts entry on your workstation: 192.168.33.8 foo.bar.com
Then browse to http://foo.bar.com/ to reach the corresponding service.
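Or test without a hosts entry by setting the Host header (node IP as above):
# curl -H "Host: foo.bar.com" http://192.168.33.8/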
How it works
Enter the Ingress controller pod:
# kubectl get pods -n ingress-nginx
NAME READY STATUS RESTARTS AGE
nginx-ingress-controller-7dcb4bbb8d-jtfvr 1/1 Running 0 15h
# kubectl exec -it nginx-ingress-controller-7dcb4bbb8d-jtfvr bash -n ingress-nginx
www-data@Fone8:/etc/nginx$ ps -ef | grep nginx
...
www-data 7 6 1 May21 ? 00:10:19 /nginx-ingress-controller --configmap=ingress-nginx/nginx-configuration --publish-service=ingress-nginx/ingress-nginx --annotations-prefix=nginx.ingress.kubernetes.io
...
The process above watches the apiserver for changes to all services in real time; whenever anything changes, it immediately regenerates the nginx config file /etc/nginx/nginx.conf.
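You can confirm this from inside the pod: after applying the Ingress above, the host shows up in the generated config.
www-data@Fone8:/etc/nginx$ grep foo.bar.com /etc/nginx/nginx.conf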
Example 2: HTTPS forwarding with Ingress
# vim certs.sh
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cat > sslexample.foo.com-csr.json <<EOF
{
  "CN": "sslexample.foo.com",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes sslexample.foo.com-csr.json | cfssljson -bare sslexample.foo.com
kubectl create secret tls sslexample.foo.com --cert=sslexample.foo.com.pem --key=sslexample.foo.com-key.pem
# sh certs.sh
# ls sslexample.*.pem
sslexample.foo.com-key.pem sslexample.foo.com.pem
# create the TLS secret
# kubectl create secret tls sslexample-foo-com --cert=sslexample.foo.com.pem --key=sslexample.foo.com-key.pem
# kubectl get secret
NAME TYPE DATA AGE
...
sslexample-foo-com kubernetes.io/tls 2 19s
...
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tls-example-ingress
spec:
  tls:
  - hosts:
    - sslexample.foo.com
    secretName: sslexample-foo-com
  rules:
  - host: sslexample.foo.com
    http:
      paths:
      - path: /
        backend:
          serviceName: nginx
          servicePort: 88
# kubectl apply -f ingress_https.yaml
ingress.extensions/tls-example-ingress created
# kubectl get ingress
NAME HOSTS ADDRESS PORTS AGE
simple-fanout-example foo.bar.com 80 48m
tls-example-ingress sslexample.foo.com 80, 443 21s
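Test it without touching the hosts file (assuming the controller node is 192.168.33.8 as in Example 1; -k accepts the self-signed certificate):
# curl -k --resolve sslexample.foo.com:443:192.168.33.8 https://sslexample.foo.com/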
Volumes in Kubernetes provide the ability to mount external storage into containers.
A Pod can use a volume only after both the volume source (spec.volumes) and the mount point (spec.containers.volumeMounts) have been set.
emptyDir
apiVersion: v1
kind: Pod
metadata:
  name: my-pod
spec:
  containers:
  - name: write
    image: centos
    command: ["bash","-c","for i in {1..100};do echo $i >> /data/hello;sleep 1;done"]
    volumeMounts:
    - name: data
      mountPath: /data
  - name: read
    image: centos
    command: ["bash","-c","tail -f /data/hello"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    emptyDir: {}
Create it with kubectl apply -f emptyDir.yaml and check the status with kubectl get pods.
Watch the two containers share the volume:
kubectl logs my-pod -c write
kubectl logs my-pod -c read -f
hostPath
apiVersion: v1
kind: Pod
metadata:
  name: my-pod2
spec:
  containers:
  - name: busybox
    image: busybox
    args:
    - /bin/sh
    - -c
    - sleep 36000
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    hostPath:
      path: /tmp
      type: Directory
Create it with kubectl apply -f hostPath.yaml, then check the status and which Node it landed on with kubectl get pods -o wide.
kubectl exec -it my-pod2 sh
Persistent storage: NFS
yum install -y nfs-utils
# vim /etc/exports
/data/nfs *(rw,no_root_squash)
systemctl start nfs
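Verify the export is visible from a node (nfs-utils provides showmount; the server here is 192.168.33.9):
# showmount -e 192.168.33.9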
Example
NFS_test.yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: nfs-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        volumeMounts:
        - name: wwwroot
          mountPath: /usr/share/nginx/html
        ports:
        - containerPort: 80
      volumes:
      - name: wwwroot
        nfs:
          server: 192.168.33.9
          path: /data/nfs
Start the instances:
# kubectl apply -f NFS_test.yaml
# kubectl get pods
# kubectl get svc -o wide    # find the port for the matching SELECTOR
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
kubernetes ClusterIP 10.0.0.1 <none> 443/TCP 4d1h <none>
my-service ClusterIP 10.0.0.123 <none> 80/TCP 47h app=nginx
my-service2 NodePort 10.0.0.210 <none> 80:30008/TCP 45h app=nginx
nginx NodePort 10.0.0.186 <none> 88:37361/TCP 3d22h run=nginx
nginx-service NodePort 10.0.0.235 <none> 80:49236/TCP 2d16h app=nginx
On the NFS server, write Hello World!!! into /data/nfs/index.html.
Enter a pod and check that it synced:
# kubectl exec -it nfs-deployment-6b86fcf776-7kzmv bash
root@nfs-deployment-6b86fcf776-7kzmv:/# ls /usr/share/nginx/html/
root@nfs-deployment-6b86fcf776-7kzmv:/# ls /usr/share/nginx/html/
index.html
Browse to http://192.168.33.8:49236/
Delete and recreate the pods; the browser still serves the same index.html content.
Deployment steps
(Unofficial manifests for the surrounding components are available as YAML.)
Set hostname in harbor.yml to the machine's IP, run ./prepare to pre-generate the configuration, then run the install script ./install.sh.
docker-compose up -d
docker-compose ps
Add the registry as a trusted host in /etc/docker/daemon.json (this must be done on every Node) and restart docker:
# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["http://bc437cce.m.daocloud.io"],
  "insecure-registries": ["192.168.33.9"]
}
# systemctl restart docker
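Confirm docker picked up the setting:
# docker info | grep -A 1 'Insecure Registries'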
/usr/local/src/apache-maven-3.6.3/bin/mvn clean package
docker build -t 192.168.33.9/project/java-demo:lastest .
Successfully built 2de0871198e3
Successfully tagged 192.168.33.9/project/java-demo:lastest
# docker login 192.168.33.9
# log in with the harbor credentials admin:Harbor12345
# docker push 192.168.33.9/project/java-demo:lastest
Apply each manifest with kubectl create -f xxx.yaml, and make sure the previous one succeeded before running the next (check with kubectl get pod -n test).
namespace.yaml:
apiVersion: v1
kind: Namespace
metadata:
  name: test
Create the secret:
# kubectl create secret docker-registry registry-pull-secret --docker-username=admin --docker-password=Harbor12345 --docker-email=[email protected] --docker-server=192.168.33.9 -n test
# kubectl get secret -n test    # check that it was created
deployment.yaml:
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: tomcat-java-demo
  namespace: test
spec:
  replicas: 3
  selector:
    matchLabels:
      project: www
      app: java-demo
  template:
    metadata:
      labels:
        project: www
        app: java-demo
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - name: tomcat
        image: 192.168.33.9/project/java-demo:lastest
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          requests:
            cpu: 0.5
            memory: 1Gi
          limits:
            cpu: 1
            memory: 2Gi
        livenessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 20
        readinessProbe:
          httpGet:
            path: /
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 20
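Once applied, wait for the rollout to finish before moving on:
# kubectl rollout status deployment/tomcat-java-demo -n test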
service.yaml:
apiVersion: v1
kind: Service
metadata:
  name: tomcat-java-demo
  namespace: test
spec:
  selector:
    project: www
    app: java-demo
  ports:
  - name: web
    port: 80
    targetPort: 8080
ingress.yaml:
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tomcat-java-demo
  namespace: test
spec:
  rules:
  - host: java.ctnrs.com
    http:
      paths:
      - path: /
        backend:
          serviceName: tomcat-java-demo
          servicePort: 80
mysql.yaml:
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
    name: mysql
  clusterIP: None
  selector:
    app: mysql-public
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: db
spec:
  serviceName: "mysql"
  template:
    metadata:
      labels:
        app: mysql-public
    spec:
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123456"
        - name: MYSQL_DATABASE
          value: test
        ports:
        - containerPort: 3306
        volumeMounts:
        - mountPath: "/var/lib/mysql"
          name: mysql-data
  volumeClaimTemplates:
  - metadata:
      name: mysql-data
    spec:
      accessModes: ["ReadWriteMany"]
      storageClassName: "managed-nfs-storage"
      resources:
        requests:
          storage: 2Gi
Import the data into the database:
[root@Fone8 tomcat-java-demo]# scp db/tables_ly_tomcat.sql master:/root
[root@Fone7 java-demo]# kubectl cp /root/tables_ly_tomcat.sql db-0:/
[root@Fone7 java-demo]# kubectl exec -it db-0 bash
root@db-0:/# mysql -uroot -p123456
mysql> source /tables_ly_tomcat.sql;
[root@Fone7 java-demo]# kubectl describe pod db-0 # find the pod IP
[root@Fone8 tomcat-java-demo]# vim src/main/resources/application.yml # point the app at the backend database IP
...
url: jdbc:mysql://172.17.87.10:3306/test?characterEncoding=utf-8
...
[root@Fone8 tomcat-java-demo]# /usr/local/src/apache-maven-3.6.3/bin/mvn clean package # rebuild
[root@Fone8 tomcat-java-demo]# docker build -t 192.168.33.9/project/java-demo:lastest . # rebuild the image
Browse to java.ctnrs.com.
Next, open kubelet's read-only metrics port:
# vim /opt/kubernetes/cfg/kubelet.config
...
readOnlyPort: 10255
...
# systemctl restart kubelet
# curl 192.168.33.8:10255/metrics
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-influxdb
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: influxdb
    spec:
      containers:
      - name: influxdb
        image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-influxdb-amd64:v1.1.1
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-influxdb
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - port: 8086
    targetPort: 8086
  selector:
    k8s-app: influxdb
heapster.yaml:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-amd64:v1.4.2
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:https://10.0.0.1
        - --sink=influxdb:http://10.0.0.188:8086
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
grafana.yaml:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: monitoring-grafana
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: grafana
    spec:
      containers:
      - name: grafana
        image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-grafana-amd64:v4.4.1
        ports:
        - containerPort: 3000
          protocol: TCP
        volumeMounts:
        - mountPath: /var
          name: grafana-storage
        env:
        - name: INFLUXDB_HOST
          value: monitoring-influxdb
        - name: GF_AUTH_BASIC_ENABLED
          value: "false"
        - name: GF_AUTH_ANONYMOUS_ENABLED
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        - name: GF_SERVER_ROOT_URL
          value: /
      volumes:
      - name: grafana-storage
        emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 3000
  selector:
    k8s-app: grafana
Check the pods with kubectl get pods -n kube-system, then open masterIP:NodePort in a browser to reach Grafana.
Container logs on the nodes live under /var/log and /var/lib/docker/containers/.
Kibana settings (typically /etc/kibana/kibana.yml):
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://localhost:9200"]
Create /etc/logstash/conf.d/logstash-to-es.conf:
input {
  beats {
    port => 5044
  }
}
filter {
}
output {
  elasticsearch {
    hosts => ["http://127.0.0.1:9200"]
    index => "k8s-log-%{+YYYY.MM.dd}"
  }
  stdout { codec => rubydebug }
}
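Restart logstash and, once filebeat ships data, confirm the index exists (ports as configured above):
# systemctl restart logstash
# curl -s http://127.0.0.1:9200/_cat/indices | grep k8s-log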
apiVersion: v1
kind: ConfigMap
metadata:
  name: k8s-logs-filebeat-config
  namespace: kube-system
data:
  filebeat.yml: |
    filebeat.inputs:
    - type: log
      paths:
        - /var/log/messages
      fields:
        app: k8s
        type: module
      fields_under_root: true
    setup.ilm.enabled: false
    setup.template.name: "k8s-module"
    setup.template.pattern: "k8s-module-*"
    # the logstash beats input above listens on 5044, so use the logstash output
    # (output.elasticsearch against port 5044 would fail; point it at 9200 to bypass logstash)
    output.logstash:
      hosts: ['192.168.33.8:5044']
      index: "k8s-module-%{+yyyy.MM.dd}"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: k8s-logs
  namespace: kube-system
spec:
  selector:
    matchLabels:
      project: k8s
      app: filebeat
  template:
    metadata:
      labels:
        project: k8s
        app: filebeat
    spec:
      containers:
      - name: filebeat
        image: elastic/filebeat:7.7.0
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 500m
            memory: 500Mi
        securityContext:
          runAsUser: 0
        volumeMounts:
        - name: filebeat-config
          mountPath: /etc/filebeat.yml
          subPath: filebeat.yml
        - name: k8s-logs
          mountPath: /var/log/messages
      volumes:
      - name: k8s-logs
        hostPath:
          path: /var/log/messages
      - name: filebeat-config
        configMap:
          name: k8s-logs-filebeat-config
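Verify the DaemonSet landed on every node and is shipping logs (using the app=filebeat label from the template above):
# kubectl get pods -n kube-system -l app=filebeat -o wide
# kubectl logs -n kube-system -l app=filebeat --tail=20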