[Troubleshooting] Setting up a single-master K8S cluster on CentOS with kubeadm

Installation steps

The installation itself follows another blogger's guide; below are the errors I hit along the way and how I fixed them.

---------

[ningan@k8s-master pv]$ kubectl get pod
The connection to the server localhost:8080 was refused - did you specify the right host or port?

Switching to the root user fixes it.
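Alternatively, instead of switching to root, you can give the regular user a kubeconfig; these are the same steps kubeadm prints after a successful init (they also appear later in this post):

# Run as the regular user so kubectl works without root
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config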

-----------

Error

[ERROR FileContent--proc-sys-net-ipv4-ip_forward]: /proc/sys/net/ipv4/ip_forward contents are not set to 1

Solution

[root@k8s-node1 ningan]# echo "1" > /proc/sys/net/ipv4/ip_forward
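The echo only changes the running kernel; to make the setting survive a reboot, one common approach (my addition, not part of the original steps) is a sysctl drop-in:

# Persist net.ipv4.ip_forward=1 across reboots (the file name is arbitrary)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
EOF
sysctl --system    # reload sysctl settings from all configuration files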

Error

[root@k8s-master ningan]# kubeadm init --apiserver-advertise-address=192.168.11.147 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.18.0 --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16
this version of kubeadm only supports deploying clusters with the control plane version >= 1.19.0. Current version: v1.18.0
To see the stack trace of this error execute with --v=5 or higher

Solution

List the images required by this kubeadm version:

[root@k8s-master ningan]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.20.1
k8s.gcr.io/kube-controller-manager:v1.20.1
k8s.gcr.io/kube-scheduler:v1.20.1
k8s.gcr.io/kube-proxy:v1.20.1
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0

Pull them from a different registry (the Aliyun mirror) first, as shown below:

[root@k8s-master ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.1
[root@k8s-master ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.1
[root@k8s-master ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.1
[root@k8s-master ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.1
[root@k8s-master ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
[root@k8s-master ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0
[root@k8s-master ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0

Run docker images to check the local images; you will see they are all prefixed with registry.cn-hangzhou.aliyuncs.com/google_containers/, which does not match the names that kubeadm config images list expects. We need to rename the images, i.e. give each one a new tag:

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.1 k8s.gcr.io/kube-apiserver:v1.20.1

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.1 k8s.gcr.io/kube-controller-manager:v1.20.1

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.1 k8s.gcr.io/kube-scheduler:v1.20.1

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.1 k8s.gcr.io/kube-proxy:v1.20.1

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0
## Note: this block is from my second master deployment, when the version had moved to 1.20.2; recorded here for future reference
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.20.2 k8s.gcr.io/kube-apiserver:v1.20.2

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.20.2 k8s.gcr.io/kube-controller-manager:v1.20.2

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.20.2 k8s.gcr.io/kube-scheduler:v1.20.2

docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.2 k8s.gcr.io/kube-proxy:v1.20.2
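Pulling and re-tagging every image by hand gets tedious; a small loop like the one below automates it (a sketch, assuming the same Aliyun mirror; adjust the image list to match the output of kubeadm config images list for your version):

#!/bin/bash
# Pull each required image from the mirror and re-tag it as k8s.gcr.io/<image>
MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
for img in kube-apiserver:v1.20.1 kube-controller-manager:v1.20.1 \
           kube-scheduler:v1.20.1 kube-proxy:v1.20.1 \
           pause:3.2 etcd:3.4.13-0 coredns:1.7.0; do
  docker pull "${MIRROR}/${img}"
  docker tag  "${MIRROR}/${img}" "k8s.gcr.io/${img}"
done

Note that when kubeadm init is run with --image-repository registry.aliyuncs.com/google_containers (as in the very first init command above), kubeadm pulls the control-plane images from that mirror itself and no re-tagging should be needed.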

After that I ran the initialization again and still got an error, so on to the next fix.

Error

[root@k8s-master ningan]# kubeadm reset && kubeadm init --kubernetes-version=v1.20.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0111 13:07:21.865192   75781 reset.go:99] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: Get "https://192.168.11.147:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s": dial tcp 192.168.11.147:6443: connect: connection refused
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
[preflight] Running pre-flight checks
W0111 13:07:23.838209   75781 removeetcdmember.go:79] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni]

The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually by using the "iptables" command.

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
[init] Using Kubernetes version: v1.20.1
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.11.147]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.11.147 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.11.147 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.

	Unfortunately, an error has occurred:
		timed out waiting for the condition

	This error is likely caused by:
		- The kubelet is not running
		- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)

	If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
		- 'systemctl status kubelet'
		- 'journalctl -xeu kubelet'

	Additionally, a control plane component may have crashed or exited when started by the container runtime.
	To troubleshoot, list all containers using your preferred container runtimes CLI.

	Here is one example how you may list all Kubernetes containers running in docker:
		- 'docker ps -a | grep kube | grep -v pause'
		Once you have found the failing container, you can inspect its logs with:
		- 'docker logs CONTAINERID'

error execution phase wait-control-plane: couldn't initialize a Kubernetes cluster
To see the stack trace of this error execute with --v=5 or higher
[root@k8s-master ningan]# 

Solution

I first tried the approach from one referenced link, which did not help.
Following another referenced link finally solved it. The fix is simply to copy /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf over /etc/systemd/system/kubelet.service.d/10-kubeadm.conf.

The file at /etc/systemd/system/kubelet.service.d/10-kubeadm.conf looks like this:
(screenshot)
The file at /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf looks like this:
(screenshot)
Why this fixes the problem is still not clear to me; if any reader knows, please share.
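To see what actually differs between the two files before overwriting, a quick diff helps (for systemd drop-ins, the file under /etc/systemd/system takes precedence over the same-named file under /usr/lib/systemd/system):

# Compare the packaged drop-in with the one currently in effect
diff /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf \
     /etc/systemd/system/kubelet.service.d/10-kubeadm.conf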

The exact steps:

[root@k8s-master ningan]# cd /etc/systemd/system/kubelet.service.d/
[root@k8s-master kubelet.service.d]# ls
10-kubeadm.conf

# Back up the existing file first, in case this does not work and we need to restore it
[root@k8s-master kubelet.service.d]# cp 10-kubeadm.conf 10-kubeadm.conf_tmp
[root@k8s-master kubelet.service.d]# 
[root@k8s-master kubelet.service.d]# cd /usr/lib/systemd/system/kubelet.service.d

[root@k8s-master kubelet.service.d]# 
[root@k8s-master kubelet.service.d]# cp 10-kubeadm.conf /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
cp: overwrite '/etc/systemd/system/kubelet.service.d/10-kubeadm.conf'? yes
[root@k8s-master kubelet.service.d]# 
[root@k8s-master kubelet.service.d]# systemctl daemon-reload

Run kubeadm reset && kubeadm init --kubernetes-version=v1.20.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap again, and this time it succeeds!

[root@k8s-master kubelet.service.d]# kubeadm reset && kubeadm init --kubernetes-version=v1.20.1 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0111 13:27:44.374803   80499 reset.go:99] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: Get "https://192.168.11.147:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config?timeout=10s": dial tcp 192.168.11.147:6443: connect: connection refused
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
[preflight] Running pre-flight checks
W0111 13:27:45.962138   80499 removeetcdmember.go:79] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni]

The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually by using the "iptables" command.

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
[init] Using Kubernetes version: v1.20.1
[preflight] Running pre-flight checks
	[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.11.147]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.11.147 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.11.147 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 17.504521 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: mosffs.9c4krp4qbo7ox0fa
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.11.147:6443 --token mosffs.9c4krp4qbo7ox0fa \
    --discovery-token-ca-cert-hash sha256:6643599caaa15b516d08d8fa7ec7508e3d9a5224a478651f1380d5d12bbe6416 
[root@k8s-master kubelet.service.d]# 

So happy, it finally finished successfully!

Error: deploying the CNI network plugin

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
This URL is not reachable from an ordinary network connection here, so the manifest cannot be downloaded directly.

Solution

Download the file yourself in advance, upload it to the server as kube-flannel.yml, and apply that local file directly:

[root@k8s-master ningan]# kubectl apply -f kube-flannel.yml

The full file is included at the end of this post for anyone who needs it.
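For reference, one way to stage the file (a sketch; the user, host, and destination path are placeholders) is to fetch it on a machine that can reach GitHub and copy it over:

# On a machine with access to raw.githubusercontent.com
curl -fLo kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Copy it to the master node (user/host are placeholders)
scp kube-flannel.yml root@k8s-master:/root/
# On the master
kubectl apply -f kube-flannel.yml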

Error: node1 and node2 NotReady

(screenshot: kubectl get nodes and kubectl get pods output)

It took a long time, but I finally solved it!

The reasoning:
As the output above shows, the master is already Ready while node1 and node2 are still NotReady, and the pods stuck in Init:0/1 and ContainerCreating are all on node1 and node2, so the problem has to be there.

First delete both nodes, then re-add just one to try:
(screenshot)
The problematic pod is kube-proxy-njlgg. Run the following command (the --namespace kube-system flag must be included, otherwise you will not see the details):
(The key diagnostic step: kubectl describe pod kube-proxy-njlgg --namespace kube-system)

[root@k8s-master ningan]# kubectl describe pod kube-proxy-njlgg --namespace kube-system

(screenshot)
As the screenshot below shows, these images cannot be pulled:
(screenshot)

Solution

After puzzling over it for a while, I figured node1 must be missing these images; the master already had them downloaded, so I downloaded them on node1 as well:
(The key fix: pull the images on node1, from a domestic mirror first, then re-tag them.)

[root@k8s-node1 ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
[root@k8s-node1 ningan]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
[root@k8s-node1 ningan]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.1
[root@k8s-node1 ningan]# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.20.1 k8s.gcr.io/kube-proxy:v1.20.1

Then check again from the master. If the node still does not come up, just retry: delete and re-apply kube-flannel.yml, and delete node1 and join it again (see the sketch below); it takes a little time, so after a while it becomes Ready.
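A sketch of that retry sequence, assuming the node name k8s-node1 and a token/hash taken from your own kubeadm init output:

# On the master: re-apply the flannel manifest and remove the stuck node
kubectl delete -f kube-flannel.yml
kubectl apply -f kube-flannel.yml
kubectl delete node k8s-node1
# On node1: reset and join the cluster again
kubeadm reset
kubeadm join 192.168.11.147:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>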

(screenshot)
Following the same steps on node2 also succeeds!

(screenshot)

Deploy nginx to test that the cluster works
(screenshot)
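For reference, a typical smoke test looks like this (a sketch; the deployment name and NodePort exposure are conventional choices, not taken from the screenshot):

# Create an nginx deployment and expose it through a NodePort service
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pods,svc -o wide
# Then open http://<any-node-ip>:<node-port> to confirm nginx answers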

Error

error execution phase preflight: couldn't validate the identity of the API Server: Get "https://192.168.11.152:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s": x509: certificate has expired or is not yet valid: current time 2021-01-14T15:21:21+08:00 is before 2021-01-14T08:52:57Z To see the stack trace of this error execute with --v=5 or higher


Solution

Synchronize the clocks on the master and the nodes; run the following on both:

# Time synchronization
yum install ntpdate -y
ntpdate time.windows.com
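ntpdate is only a one-shot sync; to keep the clocks aligned permanently (my own suggestion, not from the original post), the chrony service can be enabled if it is installed (it is the default time service on recent CentOS releases):

# Optional: keep time in sync continuously via chronyd
systemctl enable --now chronyd
chronyc sources    # verify that time sources are reachable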

Question: what to do when the token has expired?

# The previous join command
kubeadm join 192.168.11.152:6443 --token 2ee4bk.dmlglcduipwk7wyg \
    --discovery-token-ca-cert-hash sha256:7c205406380f65a0dcb365b5fcb51a510488e833a4b5441052180133acde1e8b

# Regenerate the token and recompute the CA cert hash
[root@k8s-master ningan]# kubeadm token create
33g27f.2g8khuprb54p8fdb
[root@k8s-master ningan]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
7c205406380f65a0dcb365b5fcb51a510488e833a4b5441052180133acde1e8b

# The new join command; it is best to prefix it with kubeadm reset
kubeadm reset && kubeadm join 192.168.11.152:6443 --token 33g27f.2g8khuprb54p8fdb \
    --discovery-token-ca-cert-hash sha256:7c205406380f65a0dcb365b5fcb51a510488e833a4b5441052180133acde1e8b
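A handy shortcut (a standard kubeadm subcommand, not used in the original post) generates a fresh token and prints the complete join command in one go:

# Prints a ready-to-use "kubeadm join ... --token ... --discovery-token-ca-cert-hash ..." line
kubeadm token create --print-join-command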

Question: The connection to the server localhost:8080 was refused - did you specify the right host or port?

Switch to the root user and run it again; that fixes it.


-----------

Question: what if the IP addresses change? Reinstalling

As root, edit the hosts file and update the IP addresses:

[ningan@k8s-master ~]$ su
[root@k8s-master ningan]# vim /etc/hosts
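For reference, the entries being updated look roughly like this (the master IP is the new 192.168.11.155 used below; the node IPs here are placeholders, use your own):

# /etc/hosts
192.168.11.155  k8s-master
192.168.11.156  k8s-node1    # placeholder IP
192.168.11.157  k8s-node2    # placeholder IP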

Deploy the Kubernetes master node

[root@k8s-master ningan]# kubeadm init --apiserver-advertise-address=192.168.11.155 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.20.1 --service-cidr=10.96.0.0/12  --pod-network-cidr=10.244.0.0/16

Errors appear:
[ERROR Port-6443]: Port 6443 is in use
[ERROR Port-10259]: Port 10259 is in use
[ERROR Port-10257]: Port 10257 is in use
[ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: /etc/kubernetes/manifests/kube-apiserver.yaml already exists
[ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: /etc/kubernetes/manifests/kube-controller-manager.yaml already exists
[ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: /etc/kubernetes/manifests/kube-scheduler.yaml already exists
[ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: /etc/kubernetes/manifests/etcd.yaml already exists
[ERROR Port-10250]: Port 10250 is in use
[ERROR Port-2379]: Port 2379 is in use
[ERROR Port-2380]: Port 2380 is in use
[ERROR DirAvailable--var-lib-etcd]: /var/lib/etcd is not empty
Prefix the command with kubeadm reset and redeploy:

[root@k8s-master ningan]# kubeadm reset && kubeadm init --apiserver-advertise-address=192.168.11.155 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.20.1 --service-cidr=10.96.0.0/12  --pod-network-cidr=10.244.0.0/16

The master node deploys successfully!

Run the following on the master node:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

The master node is now in Ready state.

Run the following on node1 (node1 and node2 can do this in parallel, since their roles are identical):

[root@k8s-node1 ningan]# kubeadm join 192.168.11.155:6443 --token cwxtdy.9u4jljdiy6raxlkw \
>     --discovery-token-ca-cert-hash sha256:bdd3952d858d19f46e6d2a281d3596c7f4d6b0850e02c92499735d03968e8bb5

An error appears:
(screenshot of the error: "already exists" preflight failures)

Tip: whenever you see these "already exists" errors, prefix the command with kubeadm reset.

With kubeadm reset added, run it again on node1:

[root@k8s-node1 ningan]# kubeadm reset && kubeadm join 192.168.11.155:6443 --token cwxtdy.9u4jljdiy6raxlkw     --discovery-token-ca-cert-hash sha256:bdd3952d858d19f46e6d2a281d3596c7f4d6b0850e02c92499735d03968e8bb5

You will find that it hangs for a very long time at [WARNING Hostname]: hostname "k8s-node1": lookup k8s-node1 on 192.168.11.2:53: no such host, and after quite a while (I am not sure how long) it reports: error execution phase preflight: couldn't validate the identity of the API Server: Get "https://192.168.11.155:6443/api/v1/namespaces/kube-public/configmaps/cluster-info?timeout=10s": x509: certificate has expired or is not yet valid: current time 2021-01-15T05:22:28+08:00 is before 2021-01-16T01:46:52Z
To see the stack trace of this error execute with --v=5 or higher

This is caused by the clocks being out of sync; run a time sync on the master, node1, and node2:

# Time synchronization
yum install ntpdate -y   # optional, skip if ntpdate is already installed
ntpdate time.windows.com

Then run it again on node1:

[root@k8s-node1 ningan]# kubeadm reset && kubeadm join 192.168.11.155:6443 --token cwxtdy.9u4jljdiy6raxlkw     --discovery-token-ca-cert-hash sha256:bdd3952d858d19f46e6d2a281d3596c7f4d6b0850e02c92499735d03968e8bb5

It succeeds!

Back on the master, everything shows as deployed successfully. Done!

-----------

kube-flannel.yml

The contents of https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml are as follows:

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
