kubernetes(K8S)集群搭建部署

K8S集群架构图解

kubernetes 安装

仓库初始化
1、购买云主机
主机名称 IP地址 最低配置
registry 192.168.1.100 1CPU,1G内存
2、安装仓库服务
[root@registry ~]# yum makecache
[root@registry ~]# yum install -y docker-distribution
[root@registry ~]# systemctl enable --now docker-distribution
3、使用脚本初始化仓库

新建myos目录,将之前打包导出的myos镜像文件myos.tar.gz放到myos目录下

[root@registry ~]# mkdir myos
[root@registry ~]# cd myos
[root@registry myos]# vim init-img.sh
#!/bin/bash
# init-img.sh — initialise the private image registry.
# Installs docker on this host, loads the exported base image (myos.tar.gz,
# expected in the current directory), builds httpd/php-fpm/nginx variants
# and pushes all four tags to the private registry.
set -euo pipefail

# Registry address used for every tag/push; keep in sync with daemon.json below.
registry="192.168.1.100:5000"

yum install -y docker-ce
mkdir -p /etc/docker
cat >/etc/docker/daemon.json <<'EOF'
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://hub-mirror.c.163.com"],
    "insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
EOF
systemctl enable --now docker.service
systemctl restart docker.service

# Import the base image that was exported earlier.
docker load -i myos.tar.gz

# init apache images
cat >Dockerfile <<'EOF'
FROM myos:latest
ENV  LANG=C
WORKDIR /var/www/html/
EXPOSE 80
CMD ["/usr/sbin/httpd", "-DFOREGROUND"]
EOF
docker build -t "${registry}/myos:httpd" .

# init php-fpm images
cat >Dockerfile <<'EOF'
FROM myos:latest
EXPOSE 9000
WORKDIR /usr/local/nginx/html
CMD ["/usr/sbin/php-fpm", "--nodaemonize"]
EOF
docker build -t "${registry}/myos:php-fpm" .

# init nginx images
cat >Dockerfile <<'EOF'
FROM myos:latest
EXPOSE 80
WORKDIR /usr/local/nginx/html
CMD  ["/usr/local/nginx/sbin/nginx", "-g", "daemon off;"]
EOF
docker build -t "${registry}/myos:nginx" .

# upload images
rm -f Dockerfile
docker tag myos:latest "${registry}/myos:v1804"
for tag in v1804 httpd php-fpm nginx; do
    docker push "${registry}/myos:${tag}"
done

[root@registry myos]# chmod 755 init-img.sh
[root@registry myos]# ./init-img.sh
[root@registry ~]# curl http://192.168.1.100:5000/v2/myos/tags/list
{"name":"myos","tags":["nginx","php-fpm","v1804","httpd"]}

kube-master安装

按照如下配置准备云主机

主机名 IP地址 最低配置
master 192.168.1.21 2CPU,2G内存
node-0001 192.168.1.31 2CPU,2G内存
node-0002 192.168.1.32 2CPU,2G内存
node-0003 192.168.1.33 2CPU,2G内存
registry 192.168.1.100 1CPU,1G内存
1、防火墙相关配置

参考前面知识点完成禁用 selinux,禁用 swap,卸载 firewalld-*

2、配置yum仓库(跳板机)
[root@ecs-proxy ~]# cp -a v1.17.6/k8s-install  /var/ftp/localrepo/
[root@ecs-proxy ~]# cd /var/ftp/localrepo/
[root@ecs-proxy localrepo]# createrepo --update .
3、安装软件包(master)

安装kubeadm、kubectl、kubelet、docker-ce

[root@master ~]# yum makecache
[root@master ~]# yum install -y kubeadm kubelet kubectl docker-ce
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# vim /etc/docker/daemon.json 
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://hub-mirror.c.163.com"],
    "insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
[root@master ~]# systemctl enable --now docker kubelet
[root@master ~]# docker info |grep Cgroup
Cgroup Driver: systemd
[root@master ~]# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl --system
4、镜像导入私有仓库
# 把云盘 kubernetes/v1.17.6/base-images 中的镜像拷贝到 master
[root@master ~]# cd base-images/
[root@master base-images]# for i in *.tar.gz;do docker load -i ${i};done
[root@master base-images]# docker images
[root@master base-images]# docker images |awk '$2!="TAG"{print $1,$2}'|while read _f _v;do
    docker tag ${_f}:${_v} 192.168.1.100:5000/${_f##*/}:${_v}; 
    docker push 192.168.1.100:5000/${_f##*/}:${_v}; 
    docker rmi ${_f}:${_v}; 
done
# 查看验证
[root@master base-images]# curl http://192.168.1.100:5000/v2/_catalog
5、Tab键设置
[root@master ~]# kubectl completion bash >/etc/bash_completion.d/kubectl
[root@master ~]# kubeadm completion bash >/etc/bash_completion.d/kubeadm
[root@master ~]# exit
6、安装IPVS代理软件包
[root@master ~]# yum install -y ipvsadm ipset
7、配置主机名
[root@master ~]# vim /etc/hosts
192.168.1.21    master
192.168.1.31    node-0001
192.168.1.32    node-0002
192.168.1.33    node-0003
192.168.1.100   registry
8、使用kubeadm部署

编辑应答文件:

[root@master ~]# mkdir init;cd init

[root@master init]# vim kubeadm-init.yaml
# --- Document 1: InitConfiguration — bootstrap token and local endpoint ---
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  # initial join token (24h lifetime); recreate later with "kubeadm token create"
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # this master's address and the API server port
  advertiseAddress: 192.168.1.21
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master
  # taint keeps ordinary workloads off the control-plane node
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
# --- Document 2: ClusterConfiguration — cluster-wide settings ---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
# pull control-plane images from the private registry instead of k8s.gcr.io
imageRepository: 192.168.1.100:5000
kind: ClusterConfiguration
kubernetesVersion: v1.17.6
networking:
  dnsDomain: cluster.local
  # podSubnet must match the "Network" value configured in kube-flannel.yml
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.254.0.0/16
scheduler: {}
---
# --- Document 3: KubeProxyConfiguration — run kube-proxy in IPVS mode ---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
[root@master init]# kubeadm init --config=kubeadm-init.yaml |tee master-init.log
# 根据提示执行命令
[root@master init]# mkdir -p $HOME/.kube
[root@master init]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master init]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
9、验证安装结果
[root@master ~]# kubectl version
[root@master ~]# kubectl get componentstatuses
NAME                        STATUS          MESSAGE                 ERROR
controller-manager          Healthy         ok
scheduler                   Healthy         ok
etcd-0                      Healthy         {"health":"true"}

计算节点安装

1、获取token
# 创建token
[root@master ~]# kubeadm token create --ttl=0 --print-join-command
[root@master ~]# kubeadm token list
# 获取token_hash
[root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt |openssl rsa -pubin -outform der |openssl dgst -sha256 -hex
2、node安装(编写ansible playbook安装)
[root@ecs-proxy ~]# mkdir node-install
[root@ecs-proxy ~]# cd node-install/
[root@ecs-proxy node-install]# vim ansible.cfg
[defaults]
# use hostlist.yaml as the default inventory; skip interactive ssh host-key checks
inventory               = hostlist.yaml
host_key_checking       = False
[root@ecs-proxy node-install]# vim hostlist.yaml
# Ansible inventory: the three worker nodes that will join the cluster.
all:
  children:
    nodes:
      hosts:
        192.168.1.31: {}
        192.168.1.32: {}
        192.168.1.33: {}
    ungrouped: {}
[root@ecs-proxy node-install]# vim files/hosts
::1             localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1       localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.1.21    master
192.168.1.31    node-0001
192.168.1.32    node-0002
192.168.1.33    node-0003
192.168.1.100   registry
[root@ecs-proxy node-install]# vim files/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["https://hub-mirror.c.163.com"],
    "insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
[root@ecs-proxy node-install]# vim files/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@ecs-proxy node-install]# vim node_install.yaml
---
# Prepare worker nodes and join them to the kubernetes cluster.
# Prereqs: yum repo with kubeadm/kubelet/docker-ce, and files/{hosts,daemon.json,k8s.conf}
- name: kubernetes node install
  hosts:
  - nodes
  vars:
    # API server endpoint and join credentials; refresh the token with
    # "kubeadm token create --print-join-command" on the master when it expires.
    master: '192.168.1.21:6443'
    token: 'fm6kui.mp8rr3akn74a3nyn'
    token_hash: 'sha256:f46dd7ee29faa3c096cad189b0f9aedf59421d8a881f7623a543065fa6b0088c'
  tasks:
  # remove swap entries from fstab so swap stays off across reboots
  - name: disable swap
    lineinfile:
      path: /etc/fstab
      regexp: 'swap'
      state: absent
    notify: disable swap
  - name: Ensure SELinux is set to disabled mode
    lineinfile:
      path: /etc/selinux/config
      regexp: '^SELINUX='
      line: SELINUX=disabled
    notify: disable selinux
  - name: remove the firewalld
    yum:
      name:
      - firewalld
      - firewalld-filesystem
      state: absent
  - name: install k8s node tools
    yum:
      name:
      - kubeadm
      - kubelet
      - docker-ce
      - ipvsadm
      - ipset
      state: present
      update_cache: yes
  - name: Create a directory if it does not exist
    file:
      path: /etc/docker
      state: directory
      mode: '0755'
  - name: Copy file with /etc/hosts
    copy:
      src: files/hosts
      dest: /etc/hosts
      owner: root
      group: root
      mode: '0644'
  - name: Copy file with /etc/docker/daemon.json
    copy:
      src: files/daemon.json
      dest: /etc/docker/daemon.json
      owner: root
      group: root
      mode: '0644'
  - name: Copy file with /etc/sysctl.d/k8s.conf
    copy:
      src: files/k8s.conf
      dest: /etc/sysctl.d/k8s.conf
      owner: root
      group: root
      mode: '0644'
    notify: enable sysctl args
  - name: enable k8s node service
    service:
      name: "{{ item }}"
      state: started
      enabled: yes
    # "loop" is the current replacement for the deprecated with_items
    loop:
    - docker
    - kubelet
  # kubelet.conf exists only after a successful join; use it as the marker
  - name: check node state
    stat:
      path: /etc/kubernetes/kubelet.conf
    register: result
  - name: node join
    shell: kubeadm join '{{ master }}' --token '{{ token }}' --discovery-token-ca-cert-hash '{{ token_hash }}'
    # idempotent: skip nodes that have already joined the cluster
    when: not result.stat.exists
  handlers:
  - name: disable swap
    shell: swapoff -a
  - name: disable selinux
    shell: setenforce 0
  - name: enable sysctl args
    shell: sysctl --system
[root@ecs-proxy node-install]# ansible-playbook node_install.yaml
3、验证安装
[root@master ~]# kubectl get nodes
NAME        STATUS     ROLES    AGE     VERSION
master      NotReady   master   130m    v1.17.6
node-0001   NotReady   <none>   2m14s   v1.17.6
node-0002   NotReady   <none>   2m15s   v1.17.6
node-0003   NotReady   <none>   2m9s    v1.17.6

网络插件安装配置

将准备好的flannel.tar.gz 和kube-flannel.yml文件拷贝到master上

1、上传镜像到私有仓库
[root@master ~]# cd flannel
[root@master flannel]# docker load -i flannel.tar.gz
[root@master flannel]# docker tag quay.io/coreos/flannel:v0.12.0-amd64 192.168.1.100:5000/flannel:v0.12.0-amd64
[root@master flannel]# docker push 192.168.1.100:5000/flannel:v0.12.0-amd64
2、修改配置文件并安装
[root@master flannel]# vim kube-flannel.yml
128: "Network": "10.244.0.0/16",
172: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
186: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
227-结尾: 删除
[root@master flannel]# kubectl apply -f kube-flannel.yml
3、验证结果
[root@master flannel]# kubectl get nodes
NAME        STATUS  ROLES   AGE     VERSION
master      Ready   master  26h     v1.17.6
node-0001   Ready   <none>  151m    v1.17.6
node-0002   Ready   <none>  152m    v1.17.6
node-0003   Ready   <none>  153m    v1.17.6

你可能感兴趣的:(kubernetes(K8S)集群搭建部署)