Deploying a Highly Available k8s Cluster with RKE + Highly Available Rancher

Deployment layout:
Host         IP             Roles / services
nginx        172.16.1.109   etcd, controlplane, worker, rancher
k8s-node01   172.16.1.110   etcd, controlplane, worker, rancher
k8s-node02   172.16.1.111   etcd, controlplane, worker, rancher
# Note: the hostname "nginx" has no special meaning; the host was simply misnamed.
Environment preparation:
# The following steps must be executed on all 3 hosts
# Environment initialization
1. Disable the firewall (see the sketch below)
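# A minimal sketch, assuming CentOS 7 with firewalld; adapt to your distribution:
systemctl stop firewalld
systemctl disable firewalld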
2. Tune kernel parameters
cat << END >> /etc/sysctl.conf
net.ipv4.ip_forward = 1
vm.swappiness = 10
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.neigh.default.gc_thresh1 = 4096
net.ipv4.neigh.default.gc_thresh2 = 6144
net.ipv4.neigh.default.gc_thresh3 = 8192
END
sysctl -p
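# If sysctl -p complains that net.bridge.bridge-nf-call-iptables does not exist, the br_netfilter
# module is probably not loaded yet; a small sketch to load it now and on every boot:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf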
3. Set the hostnames (a short sketch follows)
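# A minimal sketch using hostnamectl; the names match the deployment table above, run the matching command on each host:
hostnamectl set-hostname nginx        # on 172.16.1.109
hostnamectl set-hostname k8s-node01   # on 172.16.1.110
hostnamectl set-hostname k8s-node02   # on 172.16.1.111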
4. Configure /etc/hosts name resolution
cat << END>>/etc/hosts
172.16.1.109 nginx
172.16.1.110 k8s-node01
172.16.1.111 k8s-node02
END
5. Create the rke deployment user ops
useradd ops
passwd ops
6. Set up passwordless SSH key login between the 3 hosts (as the ops user)
su - ops
ssh-keygen
ssh-copy-id [email protected]
ssh-copy-id [email protected]
ssh-copy-id [email protected]
7. Install Docker (installing as root is fine; see the sketch below). RKE connects over SSH as the ops user and talks to the Docker socket, so ops must also be able to use Docker.
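# A minimal sketch, assuming CentOS 7 and the Aliyun docker-ce mirror; the exact package string for 17.03.2-ce may differ:
yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce        # or pin the version listed in the notes below, e.g. docker-ce-17.03.2.ce
systemctl start docker
systemctl enable docker
# RKE requires the SSH user to have access to the Docker socket, so add ops to the docker group
usermod -aG docker ops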

Additional notes:
# Tools and versions used in this article:
docker      17.03.2-ce
rke         1.3.19
helm        3.5.0
k8s         1.20.15          # the k8s network plugin is flannel
ingress     nginx-1.2.1-rancher1
rancher     2.5.12
Deployment (rke):
# Installing a k8s cluster with rke is straightforward
# The following is executed on the nginx host:
wget https://github.com/rancher/rke/releases/download/v1.3.19/rke_linux-amd64
chmod +x rke_linux-amd64
mv rke_linux-amd64 /usr/local/bin/rke
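# Optional: confirm the binary is installed and on PATH
rke --version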
# Generate the deployment configuration file (prompt walkthrough shown below for reference):
su - ops
mkdir /home/ops/rke # create the rke configuration directory
cd /home/ops/rke
rke config --name cluster.yml # generate the config interactively
 
[+] Cluster Level SSH Private Key Path [~/.ssh/id_rsa]: # path where the SSH private key is stored
[+] Number of Hosts [1]: # number of hosts
[+] SSH Address of host (1) [none]: # SSH address of the first host
[+] SSH Port of host (1) [22]: # SSH port of the first host
[+] SSH Private Key Path of host (139.196.77.13) [none]: /home/rke/.ssh/id_rsa # SSH private key path of the first host
[+] SSH User of host (139.196.77.13) [ubuntu]: rke # SSH user of the first host (the user we created above)
[+] Is host (139.196.77.13) a Control Plane host (y/n)? [y]: y # see the Rancher official docs
[+] Is host (139.196.77.13) a Worker host (y/n)? [n]: y # see the Rancher official docs
[+] Is host (139.196.77.13) an etcd host (y/n)? [n]: y # see the Rancher official docs
[+] Override Hostname of host (139.196.77.13) [none]: joker-master-1 # hostname of the first host
[+] Internal IP of host (139.196.77.13) [none]: 172.27.31.149 # internal IP of the first host
[+] Docker socket path on host (139.196.77.13) [/var/run/docker.sock]: 
[+] SSH Address of host (2) [none]: # SSH address of the second host
[+] SSH Port of host (2) [22]: 
[+] SSH Private Key Path of host (106.14.156.233) [none]: /home/rke/.ssh/id_rsa
[+] SSH User of host (106.14.156.233) [ubuntu]: rke
[+] Is host (106.14.156.233) a Control Plane host (y/n)? [y]: n
[+] Is host (106.14.156.233) a Worker host (y/n)? [n]: y
[+] Is host (106.14.156.233) an etcd host (y/n)? [n]: y
[+] Override Hostname of host (106.14.156.233) [none]: joker-node-1
[+] Internal IP of host (106.14.156.233) [none]: 172.27.31.148
[+] Docker socket path on host (106.14.156.233) [/var/run/docker.sock]: 
[+] SSH Address of host (3) [none]: # SSH address of the third host
[+] SSH Port of host (3) [22]: 
[+] SSH Private Key Path of host (106.14.199.170) [none]: /home/rke/.ssh/id_rsa
[+] SSH User of host (106.14.199.170) [ubuntu]: rke
[+] Is host (106.14.199.170) a Control Plane host (y/n)? [y]: n
[+] Is host (106.14.199.170) a Worker host (y/n)? [n]: y
[+] Is host (106.14.199.170) an etcd host (y/n)? [n]: y
[+] Override Hostname of host (106.14.199.170) [none]: joker-node-2
[+] Internal IP of host (106.14.199.170) [none]: 172.27.31.147
[+] Docker socket path on host (106.14.199.170) [/var/run/docker.sock]: 
[+] Network Plugin Type (flannel, calico, weave, canal, aci) [canal]: flannel # network plugin
[+] Authentication Strategy [x509]: 
[+] Authorization Mode (rbac, none) [rbac]: 
[+] Kubernetes Docker image [rancher/hyperkube:v1.22.6-rancher1]: rancher/hyperkube:v1.20.15-rancher1 # k8s image version, see the RKE official docs
[+] Cluster domain [cluster.local]:
[+] Service Cluster IP Range [10.43.0.0/16]: 
[+] Enable PodSecurityPolicy [n]: 
[+] Cluster Network CIDR [10.42.0.0/16]: 
[+] Cluster DNS Service IP [10.43.0.10]: 
[+] Add addon manifest URLs or YAML files [no]:
# Below is my configuration file (just change the IPs and install it as-is: easy)
# Note: I manually changed the ingress image in this file; a newer version caused the ingress service to fail, so the nginx-1.2.1-rancher1 version is used instead.
cat cluster.yml
# If you intended to deploy Kubernetes in an air-gapped environment,
# please consult the documentation on how to configure custom RKE images.
nodes:
- address: 172.16.1.109
  port: "22"
  internal_address: 172.16.1.109
  role:
  - controlplane
  - worker
  - etcd
  hostname_override: nginx
  user: ops
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 172.16.1.110
  port: "22"
  internal_address: 172.16.1.110
  role:
  - controlplane
  - worker
  - etcd
  hostname_override: k8s-node01
  user: ops
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 172.16.1.111
  port: "22"
  internal_address: 172.16.1.111
  role:
  - controlplane
  - worker
  - etcd
  hostname_override: k8s-node02
  user: ops
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
services:
  etcd:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    uid: 0
    gid: 0
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_policy: false
    always_pull_images: false
    secrets_encryption_config: null
    audit_log: null
    admission_configuration: null
    event_rate_limit: null
  kube-controller:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
  kubelet:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    generate_serving_certificate: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_args_array: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_args_array: {}
    win_extra_binds: []
    win_extra_env: []
network:
#  plugin: calico
  plugin: flannel
  options: {}
  mtu: 0
  node_selector: {}
  update_strategy: null
  tolerations: []
authentication:
  strategy: x509
  sans: []
  webhook: null
addons: ""
addons_include: []
system_images:
  etcd: rancher/mirrored-coreos-etcd:v3.5.4
  alpine: rancher/rke-tools:v0.1.88
  nginx_proxy: rancher/rke-tools:v0.1.88
  cert_downloader: rancher/rke-tools:v0.1.88
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.88
  kubedns: rancher/mirrored-k8s-dns-kube-dns:1.21.1
  dnsmasq: rancher/mirrored-k8s-dns-dnsmasq-nanny:1.21.1
  kubedns_sidecar: rancher/mirrored-k8s-dns-sidecar:1.21.1
  kubedns_autoscaler: rancher/mirrored-cluster-proportional-autoscaler:1.8.5
  coredns: rancher/mirrored-coredns-coredns:1.9.3
  coredns_autoscaler: rancher/mirrored-cluster-proportional-autoscaler:1.8.5
  nodelocal: rancher/mirrored-k8s-dns-node-cache:1.21.1
  #kubernetes: rancher/hyperkube:v1.20.15-rancher2-2
  kubernetes: rancher/hyperkube:v1.20.15-rancher1
  flannel: rancher/mirrored-coreos-flannel:v0.15.1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher6
  calico_node: rancher/mirrored-calico-node:v3.22.5
  calico_cni: rancher/calico-cni:v3.22.5-rancher1
  calico_controllers: rancher/mirrored-calico-kube-controllers:v3.22.5
  calico_ctl: rancher/mirrored-calico-ctl:v3.22.5
  calico_flexvol: rancher/mirrored-calico-pod2daemon-flexvol:v3.22.5
  canal_node: rancher/mirrored-calico-node:v3.22.5
  canal_cni: rancher/calico-cni:v3.22.5-rancher1
  canal_controllers: rancher/mirrored-calico-kube-controllers:v3.22.5
  canal_flannel: rancher/mirrored-flannelcni-flannel:v0.17.0
  canal_flexvol: rancher/mirrored-calico-pod2daemon-flexvol:v3.22.5
  weave_node: weaveworks/weave-kube:2.8.1
  weave_cni: weaveworks/weave-npc:2.8.1
  pod_infra_container: rancher/mirrored-pause:3.6
  ingress: rancher/nginx-ingress-controller:nginx-1.2.1-rancher1
  ingress_backend: rancher/mirrored-nginx-ingress-controller-defaultbackend:1.5-rancher1
  ingress_webhook: rancher/mirrored-ingress-nginx-kube-webhook-certgen:v1.1.1
  metrics_server: rancher/mirrored-metrics-server:v0.6.2
  windows_pod_infra_container: rancher/mirrored-pause:3.6
  aci_cni_deploy_container: noiro/cnideploy:5.2.3.5.1d150da
  aci_host_container: noiro/aci-containers-host:5.2.3.5.1d150da
  aci_opflex_container: noiro/opflex:5.2.3.5.1d150da
  aci_mcast_container: noiro/opflex:5.2.3.5.1d150da
  aci_ovs_container: noiro/openvswitch:5.2.3.5.1d150da
  aci_controller_container: noiro/aci-containers-controller:5.2.3.5.1d150da
  aci_gbp_server_container: noiro/gbp-server:5.2.3.5.1d150da
  aci_opflex_server_container: noiro/opflex-server:5.2.3.5.1d150da
ssh_key_path: ~/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: null
enable_cri_dockerd: null
kubernetes_version: ""
private_registries: []
ingress:
  provider: ""
  options: {}
  node_selector: {}
  extra_args: {}
  dns_policy: ""
  extra_envs: []
  extra_volumes: []
  extra_volume_mounts: []
  update_strategy: null
  http_port: 0
  https_port: 0
  network_mode: ""
  tolerations: []
  default_backend: null
  default_http_backend_priority_class_name: ""
  nginx_ingress_controller_priority_class_name: ""
  default_ingress_class: null
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
win_prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
  ignore_proxy_env_vars: false
monitoring:
  provider: ""
  options: {}
  node_selector: {}
  update_strategy: null
  replicas: null
  tolerations: []
  metrics_server_priority_class_name: ""
restore:
  restore: false
  snapshot_name: ""
rotate_encryption_key: false
dns: null
# Install the cluster with rke:
rke up --config cluster.yml
# On success, the output ends with a success message, as shown below:

[screenshot: rke up finishing with a success message]

# Verification: run kubectl get pods -A and check whether any pods failed to start
# First, install the kubectl command
 
# 配置阿里云 Kubernetes yum 软件源
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
 
yum install -y kubectl-1.20.15
# Then copy the kubeconfig generated by rke to /root/.kube/config. The kubeconfig "kube_config_cluster.yml" is created in the rke configuration directory once rke has finished installing the cluster.
su - root
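# If /root/.kube does not exist yet, create it first:
mkdir -p /root/.kube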
cp -a /home/ops/rke/kube_config_cluster.yml /root/.kube/config
# Check the cluster node status:
kubectl get nodes

NAME         STATUS   ROLES                      AGE    VERSION
k8s-node01   Ready    controlplane,etcd,worker   3d7h   v1.20.15
k8s-node02   Ready    controlplane,etcd,worker   3d7h   v1.20.15
nginx        Ready    controlplane,etcd,worker   3d7h   v1.20.15
# Check the cluster pod status:
kubectl get pods -A

NAMESPACE                 NAME                                      READY   STATUS    RESTARTS   AGE
fleet-system              fleet-controller-7c66bfddd4-6jmww         1/1     Running   0          29h
fleet-system              gitjob-5db86b74dc-kgf4z                   1/1     Running   0          29h
ingress-nginx             nginx-ingress-controller-cqdrj            1/1     Running   0          30h
ingress-nginx             nginx-ingress-controller-dcrrq            1/1     Running   1          30h
ingress-nginx             nginx-ingress-controller-j86tt            1/1     Running   0          30h
kube-system               coredns-64f6858fc6-2c9bz                  1/1     Running   1          3d7h
kube-system               coredns-64f6858fc6-bgr4r                  1/1     Running   0          3d7h
kube-system               coredns-autoscaler-649998cd9c-lprfh       1/1     Running   1          3d7h
kube-system               kube-flannel-4kmlx                        2/2     Running   2          3d7h
kube-system               kube-flannel-78fq2                        2/2     Running   0          3d7h
kube-system               kube-flannel-csn5g                        2/2     Running   0          3d7h
kube-system               metrics-server-94b894f8c-mhcxr            1/1     Running   0          3d7h
rancher-operator-system   rancher-operator-7b49d8789b-cgj99         1/1     Running   0          23h
At this point the RKE deployment of the highly available k8s cluster is complete.
Deployment (rancher):
# The following is run as the root user
# Install a multi-replica Rancher on the highly available k8s cluster deployed by rke
# The installation process is also fairly simple; it uses the helm tool.
# Download the helm binary package
wget https://get.helm.sh/helm-v3.5.0-linux-amd64.tar.gz
# Extract it
tar -zxvf helm-v3.5.0-linux-amd64.tar.gz
# This step must be done as root, otherwise you may run into permission problems
mv linux-amd64/helm /usr/bin
# Create a namespace for rancher:
kubectl create namespace cattle-system
# If you installed the CRDs manually instead of adding the `--set installCRDs=true` option to the Helm install command, you should upgrade the CRD resources before upgrading the Helm chart.
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.1/cert-manager.crds.yaml
# Add the Helm repositories
helm repo add jetstack https://charts.jetstack.io
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
# Update the local Helm chart repository cache
helm repo update
# Install the cert-manager Helm chart
helm install cert-manager jetstack/cert-manager --namespace cert-manager --version v1.5.1

# Check whether the installation succeeded
kubectl get pods --namespace cert-manager
NAME                                      READY   STATUS    RESTARTS   AGE
cert-manager-56b686b465-bdldq             1/1     Running   0          29h
cert-manager-cainjector-75c94654d-f8vzm   1/1     Running   0          29h
cert-manager-webhook-d4fd4f479-pw4sb      1/1     Running   0          29h
# Install the rancher service via helm:
# Note: in the command below, rancher.lx.com is the domain name configured on the ingress; after a successful installation Rancher is accessed through this domain.
helm install rancher rancher-stable/rancher --namespace cattle-system --set hostname=rancher.lx.com --set replicas=3 --version 2.5.12
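# Optional (a sketch): wait for the rancher Deployment rollout to complete before checking the pods
kubectl -n cattle-system rollout status deploy/rancher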
# Check the rancher running status:
kubectl get pods -n cattle-system
NAME                              READY   STATUS    RESTARTS   AGE
rancher-5b8554484f-hjsxj          1/1     Running   0          29h
rancher-5b8554484f-jm4xk          1/1     Running   0          29h
rancher-5b8554484f-xztxs          1/1     Running   0          24h
rancher-webhook-8cf66cf88-59flb   1/1     Running   0          29h
Testing (rancher):
# Add an entry to your local machine's hosts file
172.16.1.109    rancher.lx.com
# Access it in a browser:
https://rancher.lx.com/
# On first login you will be asked to set a password for the default admin user
# After logging in you will see the local cluster; by default Rancher adds the k8s cluster just created by rke to the cluster list. (Nice.)
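# Optional connectivity check from the workstation (a sketch; assumes the hosts entry above is in place): Rancher should answer on its /ping endpoint
curl -k https://rancher.lx.com/ping   # expected output: pong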

[screenshot: Rancher UI after login, showing the local cluster]

This concludes the installation.
