Deploying a Highly Available Kubernetes Cluster from Binaries

Kubernetes high-availability cluster deployment

Cluster environment preparation

Host planning

Host IP Hostname Spec Role Software
192.168.220.20 master01 2C2G master kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, containerd, runc
192.168.220.21 master02 2C2G master kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, containerd, runc
192.168.220.22 node01 2C2G worker kubelet, kube-proxy, containerd, runc
192.168.220.20 master01 2C2G LB haproxy, keepalived
192.168.220.21 master02 2C2G LB haproxy, keepalived
192.168.220.100 / / VIP

Software versions

Software Version Notes
CentOS 7 kernel 5.17
kubernetes v1.21.10
etcd v3.5.2 latest release at the time of writing
calico v3.19.4
coredns v1.8.4
containerd 1.6.1
runc 1.1.0
haproxy 1.5.18 default from the CentOS 7 YUM repo
keepalived 1.3.5 default from the CentOS 7 YUM repo

Network allocation

Network Subnet Notes
Node network 192.168.220.0/24
Service network 10.96.0.0/16
Pod network 10.244.0.0/16

Cluster deployment

Host preparation

Set the hostnames

# Set each hostname according to the host plan, e.g.:
[root@base_01 ~]# hostnamectl set-hostname master01

Configure the hosts file

cat >> /etc/hosts << EOF
192.168.220.20 master01
192.168.220.21 master02
192.168.220.22 node01
EOF

Disable the firewall

[root@base_01 ~]# systemctl stop firewalld
[root@base_01 ~]# systemctl disable firewalld
[root@base_01 ~]# firewall-cmd --state

Disable SELinux

# Disable SELinux temporarily
[root@base_01 ~]# setenforce 0

# Edit /etc/selinux/config to disable SELinux permanently; takes effect after a reboot
[root@base_01 ~]# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

# Check SELinux status
[root@base_01 ~]# sestatus

Disable the swap partition

# Disable swap temporarily
[root@base_01 ~]# swapoff -a

# Comment out the swap entry in /etc/fstab so swap stays off after a reboot
[root@base_01 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab

# Add the kernel parameter vm.swappiness=0 so the kernel avoids swapping; applies without a reboot
[root@base_01 ~]# echo "vm.swappiness=0" >> /etc/sysctl.conf
[root@base_01 ~]# sysctl -p

Host time synchronization

# Install the ntpdate package
[root@base_01 ~]# yum -y install ntpdate

# Create a cron job for periodic time synchronization
[root@base_01 ~]# crontab -e
0 */1 * * * ntpdate time1.aliyun.com

ulimit tuning

# Temporary limits for the current shell
[root@base_01 ~]# ulimit -SHn 65535

# Persist the limits
[root@base_01 ~]# cat >> /etc/security/limits.conf << EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

Install IPVS management tools and load kernel modules

Install on the cluster nodes; not required on pure load-balancer nodes

[root@base_01 ~]# yum -y install ipvsadm ipset sysstat conntrack libseccomp

# Load the modules temporarily. Configure the IPVS modules on all nodes; on kernel 4.19+ nf_conntrack_ipv4 has been renamed to nf_conntrack, on 4.18 and below use nf_conntrack_ipv4:
[root@base_01 ~]# modprobe -- ip_vs 
[root@base_01 ~]# modprobe -- ip_vs_rr
[root@base_01 ~]# modprobe -- ip_vs_wrr 
[root@base_01 ~]# modprobe -- ip_vs_sh
[root@base_01 ~]# modprobe -- nf_conntrack 

# Create /etc/modules-load.d/ipvs.conf with the following content
[root@base_01 ~]# cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs 
ip_vs_lc 
ip_vs_wlc 
ip_vs_rr 
ip_vs_wrr 
ip_vs_lblc 
ip_vs_lblcr 
ip_vs_dh 
ip_vs_sh 
ip_vs_fo 
ip_vs_nq 
ip_vs_sed 
ip_vs_ftp 
nf_conntrack 
ip_tables 
ip_set 
xt_set 
ipt_set 
ipt_rpfilter 
ipt_REJECT 
ipip 
EOF

Load kernel modules required by containerd

# Load the modules temporarily
[root@base_01 ~]# modprobe overlay
[root@base_01 ~]# modprobe br_netfilter

# Load the modules persistently
[root@base_01 ~]# cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF

# Enable module loading at boot
[root@base_01 ~]# systemctl enable --now systemd-modules-load.service

Upgrade the Linux kernel

Perform on all nodes; the operating system kernel needs to be replaced

# Install perl
[root@base_01 ~]# yum -y install perl

# Import the ELRepo GPG key
[root@base_01 ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

# Install the ELRepo YUM repository
[root@base_01 ~]# yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm

# Install the mainline kernel
[root@base_01 ~]#  yum  --enablerepo="elrepo-kernel"  -y install kernel-ml.x86_64

# Set the new kernel as the default boot entry and regenerate the GRUB config
[root@base_01 ~]# grub2-set-default 0

[root@base_01 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg

Tune kernel parameters

# Kernel tuning
[root@base_01 ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1

vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 131072
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# Apply the settings
[root@base_01 ~]# sysctl --system

# After configuring the kernel on all nodes, reboot to make sure the modules still load after restart
[root@base_01 ~]# reboot

# After the reboot, check that the IPVS modules are loaded:
[root@master01 ~]# lsmod | grep --color=auto -e ip_vs -e nf_conntrack

# After the reboot, check that the containerd-related modules are loaded:
[root@master01 ~]# lsmod | egrep 'br_netfilter|overlay'

Prepare the load balancers

Install haproxy and keepalived

[root@master01 ~]# yum -y install haproxy keepalived

HAProxy configuration

cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
 maxconn 2000
 ulimit-n 16384
 log 127.0.0.1 local0 err
 stats timeout 30s

defaults
 log global
 mode http
 option httplog
 timeout connect 5000
 timeout client 50000
 timeout server 50000
 timeout http-request 15s
 timeout http-keep-alive 15s

frontend monitor-in
 bind *:33305
 mode http
 option httplog
 monitor-uri /monitor

frontend k8s-master
 bind 0.0.0.0:6535
 bind 127.0.0.1:6535
 mode tcp
 option tcplog
 tcp-request inspect-delay 5s
 default_backend k8s-master

backend k8s-master
 mode tcp
 option tcplog
 option tcp-check
 balance roundrobin
 default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 server  master01  192.168.220.20:6443 check
 server  master02  192.168.220.21:6443 check
EOF

Keepalived configuration

Note that the MASTER and BACKUP configurations differ

# MASTER:
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2 
rise 1
}
vrrp_instance VI_1 {
   state MASTER
   interface ens32
   mcast_src_ip 192.168.220.20
   virtual_router_id 51
   priority 100
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.220.100
   }
   track_script {
      chk_apiserver
   }
}
EOF

# BACKUP:
cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
script_user root
   enable_script_security
}
vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
  interval 5
   weight -5
   fall 2 
rise 1
}
vrrp_instance VI_1 {
   state BACKUP
   interface ens32
   mcast_src_ip 192.168.220.21
   virtual_router_id 51
   priority 99
   advert_int 2
   authentication {
       auth_type PASS
       auth_pass K8SHA_KA_AUTH
   }
   virtual_ipaddress {
       192.168.220.100
   }
   track_script {
      chk_apiserver
   }
}
EOF

Health check script

cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
   check_code=$(pgrep haproxy)
   if [[ $check_code == "" ]]; then
       err=$(expr $err + 1)
       sleep 1
       continue
   else
       err=0
       break
   fi
done

if [[ $err != "0" ]]; then
   echo "systemctl stop keepalived"
   /usr/bin/systemctl stop keepalived
   exit 1
else
   exit 0
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh

Start the services and verify

# Start the services
[root@master01 ~]# systemctl enable --now haproxy keepalived

# Verify that the VIP is bound on the MASTER node
[root@master01 ~]# ip a s
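As an extra sanity check (a suggestion beyond the original steps, using the VIP and the monitor-in frontend defined in the HAProxy configuration above), the VIP and the HAProxy health endpoint can be probed directly:

# The VIP should be listed on the current MASTER node
[root@master01 ~]# ip a s | grep 192.168.220.100
# The HAProxy monitor frontend (port 33305, monitor-uri /monitor) should answer with HTTP 200
[root@master01 ~]# curl http://192.168.220.100:33305/monitor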

Configure passwordless SSH

Perform on master01

# Generate the key pair
[root@master01 ~]# ssh-keygen

# Copy the public key to each node
[root@master01 ~]# ssh-copy-id master01
[root@master01 ~]# ssh-copy-id master02
[root@master01 ~]# ssh-copy-id node01

Deploy the etcd cluster

The following preparation only needs to be done on master01

Create a working directory

[root@master01 ~]# mkdir /opt/packages

# Download the cfssl tools
[root@master01 ~]# cd /opt/packages/
[root@master01 packages]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@master01 packages]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@master01 packages]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

# cfssl is a PKI/TLS toolkit written in Go and open-sourced by CloudFlare. The main programs are:
# - cfssl, the CFSSL command-line tool
# - cfssljson, which takes the JSON output of cfssl and writes certificates, keys, CSRs and bundles to files

# Make them executable
[root@master01 packages]# chmod +x cf*

# Copy the cfssl tools to /usr/local/bin
[root@master01 packages]# cp ./cfssl_linux-amd64 /usr/local/bin/cfssl
[root@master01 packages]# cp ./cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@master01 packages]# cp ./cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

# Check the cfssl version
[root@master01 packages]# cfssl version

Create the CA certificate

# Configure the CA certificate signing request file
[root@master01 cert]# cat > ca-csr.json <<"EOF"
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ],
  "ca": {
          "expiry": "87600h"
  }
}
EOF

# Generate the CA certificate
[root@master01 cert]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@master01 cert]# ls
ca.csr  ca-csr.json  ca-key.pem  ca.pem

# Configure the CA signing policy
# cfssl print-defaults config > ca-config.json generates a default policy
[root@master01 cert]# cat > ca-config.json <<"EOF"
{
  "signing": {
      "default": {
          "expiry": "87600h"
        },
      "profiles": {
          "kubernetes": {
              "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ],
              "expiry": "87600h"
          }
      }
  }
}
EOF

# server auth means a client can use this CA to verify certificates presented by servers
# client auth means a server can use this CA to verify certificates presented by clients

Create the etcd certificates

# Configure the etcd certificate signing request file
[root@master01 cert]# cat > etcd-csr.json <<"EOF"
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.220.20",
    "192.168.220.21"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Beijing",
    "L": "Beijing",
    "O": "kubemsb",
    "OU": "CN"
  }]
}
EOF

# Generate the etcd certificate
[root@master01 cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd

Configure on all master nodes

Download etcd

The etcd package can also be downloaded from GitHub


# Download the etcd package
[root@master01 packages]# wget https://github.com/etcd-io/etcd/releases/download/v3.5.2/etcd-v3.5.2-linux-amd64.tar.gz

# Extract the package and copy the etcd binaries to /usr/local/bin
[root@master01 softwares]# tar xf etcd-v3.5.2-linux-amd64.tar.gz
[root@master01 softwares]# cp ./etcd-v3.5.2-linux-amd64/etcd* /usr/local/bin/

# Check the etcd version
[root@master01 softwares]# etcdctl version
etcdctl version: 3.5.2
API version: 3.5

# Distribute the etcd binaries to the other master nodes
[root@master01 softwares]# scp ./etcd-v3.5.2-linux-amd64/etcd* root@master02:/usr/local/bin/

Configure etcd

# Create the etcd configuration and data directories
[root@master01 ~]# mkdir /etc/etcd
[root@master01 ~]# mkdir -p /opt/data/etcd/default.etcd

# master01 configuration file
[root@master01 ~]# cat >  /etc/etcd/etcd.conf <<"EOF"
# Member information
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/opt/data/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.220.20:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.220.20:2379,http://127.0.0.1:2379"

# Cluster information
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.220.20:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.220.20:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.220.20:2380,etcd2=https://192.168.220.21:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# master02 configuration file
[root@master02 ~]# cat >  /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/opt/data/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.220.21:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.220.21:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.220.21:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.220.21:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.220.20:2380,etcd2=https://192.168.220.21:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# Configuration notes
# ETCD_NAME: node name, unique within the cluster
# ETCD_DATA_DIR: data directory
# ETCD_LISTEN_PEER_URLS: peer (cluster) listen address
# ETCD_LISTEN_CLIENT_URLS: client listen address
# ETCD_INITIAL_ADVERTISE_PEER_URLS: peer advertise address
# ETCD_ADVERTISE_CLIENT_URLS: client advertise address
# ETCD_INITIAL_CLUSTER: addresses of the cluster members
# ETCD_INITIAL_CLUSTER_TOKEN: cluster token
# ETCD_INITIAL_CLUSTER_STATE: initial cluster state; new for a new cluster, existing when joining an existing cluster

Create the etcd service unit

# Create the etcd certificate directory
[root@master01 ~]# mkdir -p /etc/etcd/ssl

# Copy the certificates into place
[root@master01 cert]# cp ./ca*.pem /etc/etcd/ssl/
[root@master01 cert]# cp ./etcd*.pem /etc/etcd/ssl/

# Sync the certificates to the other master nodes
[root@master01 ~]# scp -r /etc/etcd/ssl/ master02:/etc/etcd/ssl/

# Write the systemd service unit
[root@master01 ~]# cat > /usr/lib/systemd/system/etcd.service <<"EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/opt/data/etcd/
ExecStart=/usr/local/bin/etcd \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Notes (keep comments out of the unit file itself; systemd does not support trailing comments on continued lines):
# EnvironmentFile: the etcd configuration file; WorkingDirectory: the etcd working directory
# --cert-file / --key-file: the etcd server certificate and key
# --trusted-ca-file: the trusted CA certificate
# --peer-cert-file / --peer-key-file: the certificate and key used for peer connections
# --peer-trusted-ca-file: the trusted CA for peer connections
# --peer-client-cert-auth / --client-cert-auth: require client certificate authentication for peers and clients

Start the etcd cluster

# Start the etcd service
[root@master01 ~]# systemctl daemon-reload
[root@master01 ~]# systemctl  enable --now etcd.service

# Check the service status
[root@master01 ssl]# systemctl status etcd

# Verify the cluster state
# Check endpoint health; ETCDCTL_API selects the API version and may be omitted
[root@master01 ssl]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.220.20:2379,https://192.168.220.21:2379 endpoint health

# List the etcd members
[root@master01 ~]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.220.20:2379,https://192.168.220.21:2379 member list
+------------------+---------+-------+-----------------------------+-----------------------------+------------+
|        ID        | STATUS  | NAME  |         PEER ADDRS          |        CLIENT ADDRS         | IS LEARNER |
+------------------+---------+-------+-----------------------------+-----------------------------+------------+
| 32278b01af1acefd | started | etcd1 | https://192.168.220.20:2380 | https://192.168.220.20:2379 |      false |
| 598347348868c61c | started | etcd2 | https://192.168.220.21:2380 | https://192.168.220.21:2379 |      false |
+------------------+---------+-------+-----------------------------+-----------------------------+------------+

# Show the etcd cluster status
[root@master01 ~]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.220.20:2379,https://192.168.220.21:2379 endpoint status
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.220.20:2379 | 32278b01af1acefd |   3.5.2 |   20 kB |      true |      false |         2 |         11 |                 11 |        |
| https://192.168.220.21:2379 | 598347348868c61c |   3.5.2 |   20 kB |     false |      false |         2 |         11 |                 11 |        |
+-----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

Kubernetes cluster deployment

Download and install the Kubernetes binaries

# Download the package (it can also be fetched from the official site)
[root@master01 softwares]# wget https://dl.k8s.io/v1.21.10/kubernetes-server-linux-amd64.tar.gz

# Extract the package
[root@master01 softwares]# tar -xf kubernetes-server-linux-amd64.tar.gz

# Copy kube-apiserver, kube-controller-manager, kube-scheduler and kubectl to the master nodes
[root@master01 bin]# cp kube-apiserver kube-controller-manager kubectl kube-scheduler /usr/local/bin/

# Copy kubelet and kube-proxy to the worker nodes (they may also be installed on the masters)
[root@master01 bin]# scp kubelet kube-proxy node01:/usr/local/bin/

# Create directories on all cluster nodes
[root@master01 bin]# mkdir -p /etc/kubernetes/ 
[root@master01 bin]# mkdir -p /etc/kubernetes/ssl
[root@master01 bin]# mkdir -p /opt/log/kubernetes
[root@master01 bin]# mkdir -p /opt/log/api-server

Deploy kube-apiserver

# Create the apiserver certificate signing request file
[root@master01 cert]# cat > kube-apiserver-csr.json << "EOF"
{
"CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.220.20",
    "192.168.220.21",
    "192.168.220.22",
    "192.168.220.100",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF

# Notes:
# If the hosts field is not empty, it must list every IP (including the VIP) or domain name that is
# authorized to use this certificate. Since the whole cluster uses this certificate, include all node
# IPs, and add a few spare IPs to make future expansion easier.
# Also include the first IP of the service network (usually the first IP of the
# service-cluster-ip-range passed to kube-apiserver, e.g. 10.96.0.1).

# Generate the apiserver certificate and the token file
[root@master01 cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
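Optionally (not part of the original procedure), the SANs baked into the generated certificate can be inspected with openssl to confirm every node IP and the VIP are present:

# List the Subject Alternative Names of the apiserver certificate
[root@master01 cert]# openssl x509 -in kube-apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'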

# Create token.csv, used later to dynamically issue certificates to worker nodes
[root@master01 cert]# cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# Notes:
# Create the token needed for TLS bootstrapping.
# TLS bootstrapping: once the apiserver enables TLS authentication, kubelet and kube-proxy on the
# nodes must use valid CA-signed certificates to talk to kube-apiserver. Issuing client certificates
# by hand is a lot of work when there are many nodes and complicates scaling the cluster. To simplify
# this, Kubernetes provides TLS bootstrapping: kubelet requests a certificate from the apiserver as a
# low-privileged user, and the apiserver signs it dynamically. This approach is strongly recommended
# on the nodes; it is currently used for kubelet only, while kube-proxy still uses a certificate we
# issue ourselves.
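For reference, kubelet bootstrap requests show up as CSR objects. In this setup they end up Approved,Issued automatically thanks to the bindings created for the kubelet-bootstrap user in the kubelet section, but they can also be inspected and approved by hand with standard kubectl commands (the CSR name below is a placeholder):

# List certificate signing requests
kubectl get csr
# Approve one manually if needed
kubectl certificate approve <csr-name>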

Create the apiserver service configuration file

# master01 configuration file
# Flag notes: --bind-address and --advertise-address are this node's own IP; --secure-port is the
# secure port; --insecure-port=0 disables the insecure port (set it to 8080 to enable it);
# --authorization-mode lists the authorization modes; --service-cluster-ip-range is the service IP
# range; --service-node-port-range is the NodePort range. Keep comments out of the file itself:
# trailing comments after the line-continuation backslashes would break it.
[root@master01 ~]# cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.220.20 \
  --secure-port=6443 \
  --advertise-address=192.168.220.20 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.96.0.0/16 \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
  --service-account-issuer=api \
  --etcd-cafile=/etc/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.220.20:2379,https://192.168.220.21:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/log/api-server/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/opt/log/api-server \
  --v=4"
EOF

# master02 configuration file
[root@master02 ~]# cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.220.21 \
  --secure-port=6443 \
  --advertise-address=192.168.220.21 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.96.0.0/16 \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
  --service-account-issuer=api \
  --etcd-cafile=/etc/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.220.20:2379,https://192.168.220.21:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/opt/log/api-server/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/opt/log/api-server \
  --v=4"
EOF

Create the apiserver systemd service unit

[root@master01 ~]# cat > /usr/lib/systemd/system/kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Sync the files to the other master nodes
[root@master01 cert]# cp ca*.pem /etc/kubernetes/ssl/
[root@master01 cert]# cp kube-apiserver*.pem /etc/kubernetes/ssl/
[root@master01 cert]# cp token.csv /etc/kubernetes/
[root@master01 cert]# scp ca*.pem kube-apiserver*.pem master02:/etc/kubernetes/ssl/
[root@master01 cert]# scp token.csv master02:/etc/kubernetes/

Start the apiserver service

[root@master01 cert]# systemctl daemon-reload
[root@master01 cert]# systemctl enable --now kube-apiserver

# Test: an HTTP 401 Unauthorized response means the apiserver is up and answering over TLS
[root@master01 cert]# curl --insecure https://192.168.220.20:6535
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {
    
  },
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
[root@master01 cert]# curl --insecure https://192.168.220.21:6535
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {
    
  },
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
[root@master01 cert]# curl --insecure https://192.168.220.100:6535
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {
    
  },
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}

Deploy kubectl

Create the kubectl (admin) certificate signing request file

[root@master01 cert]# cat > admin-csr.json << "EOF"
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",             
      "OU": "system"
    }
  ]
}
EOF

# Notes:
# kube-apiserver later uses RBAC to authorize client requests (kubelet, kube-proxy, Pods);
# it predefines some RBAC RoleBindings, e.g. cluster-admin binds the Group system:masters to the
# cluster-admin Role, which grants permission to call every kube-apiserver API;
# O sets the certificate's Group to system:masters. When this certificate is used against
# kube-apiserver, authentication succeeds because it is signed by the CA, and because the group
# system:masters is pre-authorized, it is granted access to all APIs;
# Note:
# This admin certificate is used later to generate the administrator kubeconfig. RBAC is the
# recommended way to control roles and permissions in Kubernetes, which takes the certificate's CN
# field as the User and the O field as the Group;
# "O": "system:masters" must be exactly system:masters, otherwise the later
# kubectl create clusterrolebinding will fail.

# Generate the certificate
[root@master01 cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

# Copy the files into place
[root@master01 cert]# cp admin*pem /etc/kubernetes/ssl/
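Optionally, the User (CN) and Group (O) that the apiserver will derive from this certificate can be double-checked with openssl (field order in the output may vary):

# Should show O=system:masters and CN=admin
[root@master01 cert]# openssl x509 -in admin.pem -noout -subject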

Generate the kubeconfig file

kube.config is the kubeconfig file for kubectl; it contains everything needed to access the apiserver, such as the apiserver address, the CA certificate, and the client certificate used by kubectl.

# Configure the cluster to manage, its CA certificate and the access endpoint
[root@master01 ssl]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.220.100:6535 --kubeconfig=/root/.kube/kube.config

# Configure the admin credentials
[root@master01 ssl]# kubectl config set-credentials admin --client-certificate=/etc/kubernetes/ssl/admin.pem --client-key=/etc/kubernetes/ssl/admin-key.pem --embed-certs=true --kubeconfig=/root/.kube/kube.config

# Set the context
[root@master01 ssl]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=/root/.kube/kube.config

# Use the context
[root@master01 ssl]# kubectl config use-context kubernetes --kubeconfig=/root/.kube/kube.config

Create the role binding

[root@master01 ssl]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/kube.config

Check cluster status

# Point kubectl at the kubeconfig via an environment variable; ideally also add it to .bashrc, .bash_profile or /etc/profile
[root@master01 ~]# export KUBECONFIG=$HOME/.kube/kube.config
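To make this persistent across logins, the export can be appended to root's shell profile, for example (assuming bash):

[root@master01 ~]# echo 'export KUBECONFIG=$HOME/.kube/kube.config' >> $HOME/.bashrc
[root@master01 ~]# source $HOME/.bashrc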

# View cluster info
[root@master01 ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.220.100:6535

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

# Check component status
[root@master01 ~]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
etcd-0               Healthy     {"health":"true","reason":""}                                                                 
etcd-1               Healthy     {"health":"true","reason":""}

# controller-manager and scheduler are not yet deployed, so they show Unhealthy

# List resources in all namespaces
[root@master01 ~]# kubectl get all --all-namespaces
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   25h

Sync the kubectl configuration to the other master nodes

# Sync the certificate files
[root@master01 ~]# scp /etc/kubernetes/ssl/admin* master02:/etc/kubernetes/ssl/

# Sync the kubeconfig
[root@master01 ~]# scp -r /root/.kube/ master02:/root/

# Verify on the other node
[root@master02 ~]# export KUBECONFIG=$HOME/.kube/kube.config

[root@master02 ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.220.100:6535

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

[root@master02 ~]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
etcd-0               Healthy     {"health":"true","reason":""}                                                                 
etcd-1               Healthy     {"health":"true","reason":""} 

[root@master02 ~]# kubectl get all --all-namespaces
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   25h

Configure kubectl command completion

[root@master02 ~]# yum install -y bash-completion

[root@master02 ~]# source /usr/share/bash-completion/bash_completion
[root@master02 ~]# source <(kubectl completion bash)
[root@master02 ~]# kubectl completion bash > ~/.kube/completion.bash.inc
[root@master02 ~]# echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bash_profile
[root@master02 ~]# source $HOME/.bash_profile

Deploy kube-controller-manager

Create the kube-controller-manager certificate signing request file and kubeconfig

[root@master01 cert]# cat > kube-controller-manager-csr.json << "EOF"
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.220.20",
      "192.168.220.21",
      "192.168.220.22"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "Beijing",
        "L": "Beijing",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
EOF

# Notes:
# The hosts list contains the IPs of all kube-controller-manager nodes;
# CN is system:kube-controller-manager;
# O is system:kube-controller-manager; the built-in ClusterRoleBinding
# system:kube-controller-manager grants the permissions kube-controller-manager needs to do its work

# Generate the kube-controller-manager certificate
[root@master01 cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

# Copy the kube-controller-manager certificates into the certificate directory
[root@master01 cert]# cp kube-controller-manager-key.pem kube-controller-manager.pem /etc/kubernetes/ssl/

# Create kube-controller-manager.kubeconfig and configure the cluster
# Configure the cluster, its CA certificate and the access endpoint
[root@master01 cert]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.220.100:6535 --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig

# Set the client credentials
[root@master01 cert]# kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig

# Set the context
[root@master01 cert]# kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig

# Use the context
[root@master01 cert]# kubectl config use-context system:kube-controller-manager --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig

Create the kube-controller-manager configuration file

# Create the kube-controller-manager log directory
[root@master01 ~]# mkdir -p /opt/log/control-manager

# Create the kube-controller-manager configuration file
# Flag notes: --port is the insecure port (10252) and --secure-port the secure port (10257);
# --bind-address binds to localhost; --kubeconfig points at kube-controller-manager.kubeconfig;
# --service-cluster-ip-range is the service IP range; --cluster-name the cluster name;
# --cluster-signing-cert-file / --cluster-signing-key-file are the cluster CA used for signing;
# --cluster-cidr is the Pod network CIDR. Keep comments out of the file itself.
[root@master01 cert]# cat > kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
  --secure-port=10257 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.96.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.244.0.0/16 \
  --experimental-cluster-signing-duration=87600h \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/opt/log/control-manager \
  --v=2"
EOF

Create the service unit file

# Copy the configuration file into place
[root@master01 cert]# cp kube-controller-manager.conf /etc/kubernetes/

# Write the systemd service unit
[root@master01 cert]# cat > /usr/lib/systemd/system/kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Sync the files to the other master nodes

# Copy the certificates, configuration and unit files to the other master nodes
[root@master01 kubernetes]# scp kube-controller-manager.* master02:/etc/kubernetes/

[root@master01 ssl]# scp kube-controller-manager* master02:/etc/kubernetes/ssl/

[root@master01 system]# scp kube-controller-manager.service master02:/usr/lib/systemd/system

Start the service

# Reload systemd
[root@master01 system]# systemctl daemon-reload

# Enable and start kube-controller-manager
[root@master01 system]# systemctl enable --now kube-controller-manager

# Check the service status
[root@master01 system]# systemctl status kube-controller-manager

# Verify
[root@master01 system]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
controller-manager   Healthy     ok                                                                                            
etcd-1               Healthy     {"health":"true","reason":""}                                                                 
etcd-0               Healthy     {"health":"true","reason":""} 
# controller-manager is now Healthy; repeat the same steps on the other master nodes

Deploy kube-scheduler

Create the kube-scheduler certificate signing request file

# Certificate signing request file
[root@master01 cert]# cat > kube-scheduler-csr.json << "EOF"
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.220.20",
      "192.168.220.21"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "Beijing",
        "L": "Beijing",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}
EOF

# Generate the kube-scheduler certificate
[root@master01 cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

# Copy the generated kube-scheduler certificates to /etc/kubernetes/ssl
[root@master01 cert]# cp kube-scheduler.pem kube-scheduler-key.pem /etc/kubernetes/ssl/

Create the kube-scheduler kubeconfig

# Configure the cluster, its CA certificate and the access endpoint
[root@master01 ssl]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.220.100:6535 --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig

# Set the client credentials
[root@master01 kubernetes]# kubectl config set-credentials system:kube-scheduler --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig

# Set the context
[root@master01 kubernetes]# kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig

# Use the context
[root@master01 kubernetes]# kubectl config use-context system:kube-scheduler --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig

Create the service configuration and unit files

# Create the kube-scheduler log directory
[root@master01 ~]# mkdir -p /opt/logs/scheduler

# Create the service configuration file
[root@master01 kubernetes]# cat > /etc/kubernetes/kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/logs/scheduler \
--v=2"
EOF

# Create the service unit file
[root@master01 kubernetes]# cat > /usr/lib/systemd/system/kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Sync the files to the other master nodes

# Sync the certificates
[root@master01 ~]# scp /etc/kubernetes/ssl/kube-scheduler* master02:/etc/kubernetes/ssl/

# Sync the configuration files
[root@master01 ~]# scp /etc/kubernetes/kube-scheduler.* master02:/etc/kubernetes/

# Sync the service unit file
[root@master01 ~]# scp /usr/lib/systemd/system/kube-scheduler.service master02:/usr/lib/systemd/system/

Start the scheduler service and verify

# Reload systemd
[root@master01 ~]# systemctl daemon-reload

# Enable and start kube-scheduler
[root@master01 ~]# systemctl enable --now kube-scheduler

# Check the service status
[root@master01 ~]# systemctl status kube-scheduler

# Check component status
[root@master01 ~]#  kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
controller-manager   Healthy   ok                              
scheduler            Healthy   ok                              
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-1               Healthy   {"health":"true","reason":""}
# All control-plane components and etcd endpoints now report Healthy

Install and configure containerd

Get the package

It can be downloaded from the GitHub releases page


Or download it directly onto the target server with wget

[root@worker1 ~]# wget https://github.com/containerd/containerd/releases/download/v1.6.1/cri-containerd-cni-1.6.1-linux-amd64.tar.gz

# Extract the containerd package into /
[root@worker1 ~]# tar -xf cri-containerd-cni-1.6.1-linux-amd64.tar.gz -C /
# Extraction produces the following top-level directories:
# etc
# opt
# usr
# Extracting directly into / saves copying the files afterwards

Generate and modify the containerd configuration

# Create the configuration directory
[root@worker1 ~]# mkdir /etc/containerd

# Generate the default configuration template
[root@worker1 ~]# containerd config default >/etc/containerd/config.toml

# Modify the configuration file
[root@worker1 ~]# cat > /etc/containerd/config.toml << EOF
root = "/var/lib/containerd"			# containerd root (data) directory
state = "/run/containerd"				# state directory
oom_score = -999

[grpc]
  address = "/run/containerd/containerd.sock"	# containerd socket path
  uid = 0
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[debug]
  address = ""
  uid = 0
  gid = 0
  level = ""

[metrics]
  address = ""
  grpc_histogram = false

[cgroup]
  path = ""

[plugins]
  [plugins.cgroups]
    no_prometheus = false
  [plugins.cri]
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    enable_selinux = false
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"		# sandbox (pause) image
    stats_collect_period = 10
    systemd_cgroup = true													# use the systemd cgroup driver
    enable_tls_streaming = false
    max_container_log_line_size = 16384
    [plugins.cri.containerd]
      snapshotter = "overlayfs"
      no_pivot = false
      [plugins.cri.containerd.default_runtime]
        runtime_type = "io.containerd.runtime.v1.linux"
        runtime_engine = ""
        runtime_root = ""
      [plugins.cri.containerd.untrusted_workload_runtime]
        runtime_type = ""
        runtime_engine = ""
        runtime_root = ""
    [plugins.cri.cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = "/etc/cni/net.d/10-default.conf"
    [plugins.cri.registry]
      [plugins.cri.registry.mirrors]
        [plugins.cri.registry.mirrors."docker.io"]
          endpoint = [
            "https://docker.mirrors.ustc.edu.cn",
            "http://hub-mirror.c.163.com"
          ]
        [plugins.cri.registry.mirrors."gcr.io"]
          endpoint = [
            "https://gcr.mirrors.ustc.edu.cn"
          ]
        [plugins.cri.registry.mirrors."k8s.gcr.io"]
          endpoint = [
            "https://gcr.mirrors.ustc.edu.cn/google-containers/"
          ]
        [plugins.cri.registry.mirrors."quay.io"]
          endpoint = [
            "https://quay.mirrors.ustc.edu.cn"
          ]
        [plugins.cri.registry.mirrors."harbor.kubemsb.com"]
          endpoint = [
            "http://harbor.kubemsb.com"
          ]
    [plugins.cri.x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""
  [plugins.diff-service]
    default = ["walking"]
  [plugins.linux]
    shim = "containerd-shim"
    runtime = "runc"
    runtime_root = ""
    no_shim = false
    shim_debug = false
  [plugins.opt]
    path = "/opt/containerd"
  [plugins.restart]
    interval = "10s"
  [plugins.scheduler]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"
EOF

Install runc

The runc bundled in the package above pulls in too many system dependencies, so it is recommended to download and install it separately.

The bundled runc fails by default with: runc: symbol lookup error: runc: undefined symbol: seccomp_notify_respond

Download from GitHub


# Download directly with wget
[root@worker1 ~]#  wget https://github.com/opencontainers/runc/releases/download/v1.1.0/runc.amd64

# Make it executable and replace the runc shipped with the package
[root@worker1 ~]# chmod +x runc.amd64
[root@worker1 ~]# mv runc.amd64 /usr/local/sbin/runc

# Check the runc version
[root@worker1 ~]# runc -v
runc version 1.1.0
commit: v1.1.0-0-g067aaf85
spec: 1.0.2-dev
go: go1.17.6
libseccomp: 2.5.3

Start the containerd service

# Enable the service and start it now
[root@worker1 ~]#  systemctl enable --now containerd

# Check the service status
[root@worker1 ~]#  systemctl status containerd
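The cri-containerd-cni bundle also ships a crictl binary. Assuming it was extracted to /usr/local/bin along with the rest, a quick CRI-level check that containerd is answering looks like this:

# Query the runtime over its CRI socket
[root@worker1 ~]# crictl --runtime-endpoint unix:///run/containerd/containerd.sock info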

Note: choose either Docker or containerd as the container runtime; only one of the two is needed

Install and configure Docker

Download docker-compose from the official releases page: https://github.com/docker/compose/releases

Download the static Docker binaries from https://download.docker.com/linux/static/stable/x86_64, or install with yum:

yum -y install docker-ce-19.03.*

Deploy and install Docker

# Extract the Docker tarball (a newer Docker can be downloaded from the official site)
[root@localhost ~]# tar xf docker-18.06.3-ce.tgz 

# Copy the binaries from the docker directory to /usr/local/bin
[root@localhost ~]# cp docker/* /usr/local/bin/

# Copy the docker-compose binary to /usr/local/bin
[root@localhost ~]# chmod +x docker-compose-linux-x86_64 && cp docker-compose-linux-x86_64 /usr/local/bin/docker-compose

# Configure the docker service unit
[root@localhost ~]# cat > /usr/lib/systemd/system/docker.service << "EOF"
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/local/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
 
[Install]
WantedBy=multi-user.target
EOF

# Configure docker daemon.json (insecure-registries must match your private Harbor registry address; JSON does not allow inline comments)
[root@localhost ~]# mkdir -p /etc/docker
[root@localhost ~]# cat > /etc/docker/daemon.json << "EOF"
{
  "registry-mirrors": [
    "https://651cp807.mirror.aliyuncs.com",
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com"
  ],
  "insecure-registries": ["https://harbor.images.io:9181"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
    },
  "data-root": "/opt/data/docker"
}
EOF

# Start the Docker service
[root@localhost ~]# systemctl daemon-reload && systemctl enable --now docker

# Check the Docker version
[root@localhost ~]# docker version
Client:
 Version:           18.06.3-ce
 API version:       1.38
 Go version:        go1.10.4
 Git commit:        d7080c1
 Built:             Wed Feb 20 02:24:22 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server:
 Engine:
  Version:          18.06.3-ce
  API version:      1.38 (minimum version 1.12)
  Go version:       go1.10.3
  Git commit:       d7080c1
  Built:            Wed Feb 20 02:25:33 2019
  OS/Arch:          linux/amd64
  Experimental:     false

Deploy kubelet

Perform on master01

Create kubelet-bootstrap.kubeconfig

# Read the token stored in token.csv
[root@master01 cert]# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

# Configure the cluster, its CA certificate and the access endpoint
[root@master01 ssl]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.220.100:6535 --kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

# Set the bootstrap token credentials
[root@master01 ~]# kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

# Set the context
[root@master01 ~]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

# Use the context
[root@master01 ~]# kubectl config use-context default --kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

# Create the cluster-system-anonymous cluster role binding
[root@master01 ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap

# Create the kubelet-bootstrap cluster role binding
[root@master01 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig

# Verify
[root@master01 kubernetes]# kubectl describe clusterrolebinding cluster-system-anonymous
Name:         cluster-system-anonymous
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  cluster-admin
Subjects:
  Kind  Name               Namespace
  ----  ----               ---------
  User  kubelet-bootstrap  
  
[root@master01 kubernetes]# kubectl describe clusterrolebinding kubelet-bootstrap
Name:         kubelet-bootstrap
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  system:node-bootstrapper
Subjects:
  Kind  Name               Namespace
  ----  ----               ---------
  User  kubelet-bootstrap  

Containerd runtime

Create the kubelet configuration file

[root@master01 ~]# cat > /etc/kubernetes/kubelet.json << "EOF"
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.220.20",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.96.0.2"]
}
EOF

# Notes (JSON does not allow comments, so keep them out of the file):
# clientCAFile: the cluster CA certificate
# webhook.enabled: enable webhook authentication; anonymous.enabled: disable anonymous access
# authorization.mode: the authorization mode
# address: the IP address of the current node

Create the kubelet service unit file

[root@master01 kubelet]# cat > /usr/lib/systemd/system/kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet.json \
  --cni-bin-dir=/opt/cni/bin \
  --cni-conf-dir=/etc/cni/net.d \
  --container-runtime=remote \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --network-plugin=cni \
  --rotate-certificates \
  --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
  --root-dir=/etc/cni/net.d \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/opt/log/kubernetes/kubelet \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Sync the configuration files to the cluster nodes

# Copy the files to the other nodes
[root@master01 ~]# for i in master02 node01;do
scp /etc/kubernetes/kubelet* $i:/etc/kubernetes
scp /etc/kubernetes/ssl/ca.pem $i:/etc/kubernetes/ssl
scp /usr/lib/systemd/system/kubelet.service $i:/usr/lib/systemd/system/
done

# Note: the address field in kubelet.json must be changed to each node's own IP
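For example, a hypothetical one-liner for node01 (adjust the addresses to your own plan):

[root@node01 ~]# sed -i 's/192.168.220.20/192.168.220.22/' /etc/kubernetes/kubelet.json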

# After adjusting the configuration, create the working and log directories and start the service
[root@master01 ~]# mkdir -p /var/lib/kubelet
[root@master01 ~]# mkdir -p /opt/log/kubernetes/kubelet
[root@master01 ~]# systemctl daemon-reload
[root@master01 ~]# systemctl enable --now kubelet

# Check node status
[root@master01 kubernetes]# kubectl get node
NAME       STATUS   ROLES    AGE   VERSION
master01   Ready    <none>   29m   v1.21.10
master02   Ready    <none>   29m   v1.21.10
node01     Ready    <none>   29m   v1.21.10

Docker runtime

Create the kubelet configuration file

# Adjust the IP and hostname below for each node
[root@master01 ~]# cat > /etc/kubernetes/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/logs/kubelet \
--hostname-override=master01 \
--network-plugin=cni \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
--config=/etc/kubernetes/kubelet-config.yml \
--cert-dir=/etc/kubernetes/ssl \
--pod-infra-container-image=harbor.images.io:9181/library/pause/pause:3.2"
EOF

# --hostname-override: display name, unique within the cluster
# --network-plugin: enable CNI
# --kubeconfig: empty path, generated automatically; used later to connect to the apiserver
# --bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
# --config: configuration parameter file
# --cert-dir: directory where kubelet certificates are generated
# --pod-infra-container-image: image of the pause container that manages the Pod network

Create the parameter file

[root@master01 ~]# cat > /etc/kubernetes/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.220.20
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.96.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF

Create the service unit file

[root@master01 ~]# cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Start the service
[root@master01 ~]# systemctl daemon-reload && systemctl enable --now kubelet
[root@master01 ~]# systemctl status kubelet

Note the backslash before $KUBELET_OPTS: without it the shell would expand the (still empty) KUBELET_OPTS variable while writing the heredoc, leaving the value blank. The escape keeps the literal $KUBELET_OPTS in the unit file, where systemd expands it from the EnvironmentFile at start time.
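A minimal illustration of the difference, runnable in any shell:

cat << EOF
unescaped: $KUBELET_OPTS
escaped:   \$KUBELET_OPTS
EOF
# prints:
# unescaped:
# escaped:   $KUBELET_OPTS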

Node status

[root@master01 cert]# kubectl get csr
NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-DARl9unGiqaggF1Se3EXa9LjMgiGB-pMbFvgA4xHIxA   10m   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
node-csr-YwyUtC-JMlbxToWTnonpvLVs3wQPhJZfkOhfoakBUDI   67s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
node-csr-zxgKbAQRZeVDtHvrlPh8HPFhD9wWzTr6d67V56e1dA0   84s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued


[root@master01 cert]# kubectl get node
NAME       STATUS     ROLES    AGE     VERSION
master01   NotReady   <none>   8m18s   v1.21.10
master02   NotReady   <none>   108s    v1.21.10
node01     NotReady   <none>   36s     v1.21.10

Note: the nodes are NotReady because the network plugin has not been deployed yet

Deploy kube-proxy

Create the kube-proxy certificate signing request file

[root@master01 cert]# cat > kube-proxy-csr.json << "EOF"
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF

# Generate the kube-proxy certificate and copy it into place
[root@master01 cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
[root@master01 cert]# cp kube-proxy*.pem /etc/kubernetes/ssl/

Create the kubeconfig file

# Configure the cluster and the access endpoint
[root@master01 cert]# kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.220.100:6535 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

# Set the client credentials
[root@master01 cert]# kubectl config set-credentials kube-proxy --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

# Set the context
[root@master01 cert]# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

# Use the context
[root@master01 cert]# kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

Create the service configuration file

When syncing to other nodes, change the IP addresses accordingly

[root@master01 cert]# cat > /etc/kubernetes/kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.220.20					# bind to this node's IP
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16							# Pod network CIDR
healthzBindAddress: 192.168.220.20:10256			# IP and port of the health check server
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.220.20:10249			# IP and port of the metrics server
mode: "ipvs"										# proxy mode
EOF

Create the service unit file

[root@master01 ~]# cat >  /usr/lib/systemd/system/kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
WorkingDirectory=/opt/data/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/opt/logs/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Create the log and data directories
[root@master01 ~]# mkdir -p /opt/logs/kubernetes
[root@master01 ~]# mkdir -p /opt/data/kube-proxy

Sync the configuration files to the other nodes and start the service

# Sync the configuration files
[root@master01 ~]# for i in master02 node01
do                    
scp /etc/kubernetes/kube-proxy.* $i:/etc/kubernetes/
scp /etc/kubernetes/ssl/kube-proxy* $i:/etc/kubernetes/ssl/
scp /usr/lib/systemd/system/kube-proxy.service $i:/usr/lib/systemd/system
done

# Reload systemd and start the service
[root@master01 ~]# systemctl daemon-reload && systemctl enable --now kube-proxy

# Check the service status
[root@node01 ~]# systemctl status kube-proxy.service
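Since mode is set to "ipvs", the kubernetes Service should now be visible as an IPVS virtual server. The entry below is only an example of what to expect; the exact output will differ:

[root@master01 ~]# ipvsadm -Ln
# Expect something like:
# TCP  10.96.0.1:443 rr
#   -> 192.168.220.20:6443   Masq
#   -> 192.168.220.21:6443   Masq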

Network plugin

Deploy Calico from YAML

# Download the calico manifest
[root@master01 packages]# wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml --no-check-certificate

# Edit the manifest and set CALICO_IPV4POOL_CIDR to the Pod network CIDR (around line 3683 of calico.yaml):
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"			# Pod network CIDR

# Apply the manifest
[root@master01 packages]# kubectl apply -f calico.yaml

# Verify
[root@master01 packages]# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-7cc8dd57d9-9t6c5   1/1     Running   0          16m
kube-system   calico-node-8gt8z                          1/1     Running   0          55s
kube-system   calico-node-9bf65                          1/1     Running   0          65s
kube-system   calico-node-trl6m                          1/1     Running   0          75s

Note: if Calico pods report "BIRD is not ready: Error querying BIRD: unable to connect to BIRDv4", check the host NIC name, edit the calico manifest, search for CLUSTER_TYPE and add the following below it:

        - name: IP_AUTODETECTION_METHOD
          value: "interface=ens.*"	# fill in the actual NIC name pattern

Deploy Flannel from binaries

Flannel provides a virtual network for containers by assigning a subnet to each host. It is based on Linux TUN/TAP, builds an overlay network by encapsulating IP packets, and relies on etcd to track subnet allocation.

# Download the flannel binary release
[root@master01 packages]#  wget https://github.com/coreos/flannel/releases/download/v0.12.0/flannel-v0.12.0-linux-amd64.tar.gz

# Extract the flannel package and install the binaries
[root@master01 others]# tar -xf flannel-v0.12.0-linux-amd64.tar.gz
[root@master01 others]# cp flanneld /usr/local/bin/
[root@master01 others]# cp mk-docker-opts.sh /etc/kubernetes/

# Sync to the other nodes
[root@master01 others]# scp {flanneld,mk-docker-opts.sh} master02:~
[root@master02 ~]# cp flanneld /usr/local/bin/
[root@master02 ~]# cp mk-docker-opts.sh /etc/kubernetes/

Create the flanneld certificate and private key

# Create the certificate signing request
[root@master01 cert]# cat > flanneld-csr.json << "EOF"
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "NanJing",
      "L": "NanJing",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF

# Generate the certificate and private key
[root@master01 cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld

# Distribute to the other nodes
[root@master01 cert]# cp flanneld*.pem /etc/kubernetes/ssl/
[root@master01 cert]# scp flanneld*.pem master02:/etc/kubernetes/ssl/

# Write the cluster Pod subnet information into etcd
[root@master01 ~]# export FLANNEL_ETCD_PREFIX="/etc/kubernetes/network"
[root@master01 ~]# export ETCD_ENDPOINTS="https://192.168.220.20:2379,https://192.168.220.21:2379"
[root@master01 others]# ETCDCTL_API=2 etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  --key-file=/etc/kubernetes/ssl/flanneld-key.pem \
  mk /etc/kubernetes/network/config '{"Network":"172.17.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
  
{"Network":"172.17.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}

Note: flanneld v0.12 can only talk to the etcd v2 API. When etcd v3 is in use, add --enable-v2 to the etcd startup options and restart etcd before writing the flannel network configuration.
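A reference sketch of enabling the v2 API, assuming etcd is managed by a systemd unit at /usr/lib/systemd/system/etcd.service as set up earlier (keep your existing flags and adjust the path to your actual deployment):

# Append --enable-v2 to the existing etcd startup options (keep all original parameters)
[root@master01 ~]# vim /usr/lib/systemd/system/etcd.service
ExecStart=/usr/local/bin/etcd \
  ... \
  --enable-v2=true

# Reload systemd and restart etcd on every etcd node
[root@master01 ~]# systemctl daemon-reload && systemctl restart etcd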

Create the flanneld service unit file

[root@master01 others]# cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \
  -etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  -etcd-certfile=/etc/kubernetes/ssl/flanneld.pem \
  -etcd-keyfile=/etc/kubernetes/ssl/flanneld-key.pem \
  -etcd-endpoints=https://192.168.220.30:2379,https://192.168.220.31:2379 \
  -etcd-prefix=/etc/kubernetes/network \
  -ip-masq
ExecStartPost=/etc/kubernetes/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

# 启动 flanneld 服务
[root@master02 ~]# systemctl daemon-reload && systemctl enable --now flanneld
# 查看服务启动状态
[root@master02 ~]# systemctl status flanneld.service 
  • mk-docker-opts.sh writes the Pod subnet assigned to flanneld into /run/flannel/docker (the -d option sets the output file, the -k option sets the variable name, here DOCKER_NETWORK_OPTIONS). Docker reads this environment file at startup to configure the docker0 bridge.
  • flanneld talks to other nodes through the interface of the default system route; on hosts with multiple interfaces (e.g. internal and public), use -iface to pick the interface explicitly.
  • -ip-masq makes flanneld create SNAT rules for traffic leaving the Pod network, and sets the --ip-masq value passed to Docker (in /run/flannel/docker) to false so Docker does not add its own SNAT rules. Docker's --ip-masq=true rule is coarse: it SNATs every request from local Pods to any non-docker0 destination, so traffic to Pods on other nodes appears to come from the flannel.1 address and the destination Pod cannot see the real source Pod IP. flanneld's rule is gentler and only SNATs traffic that actually leaves the Pod network.

检查

# 检查分配给各 flanneld 的 Pod 网段信息
[root@master01 others]# ETCDCTL_API=2 etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  --key-file=/etc/kubernetes/ssl/flanneld-key.pem \
  get /etc/kubernetes/network/config
  
# 输出结果:{"Network":"172.17.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}

# 查看已分配的 Pod 子网段列表
[root@master01 ~]# ETCDCTL_API=2 etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/flanneld.pem \
  --key-file=/etc/kubernetes/ssl/flanneld-key.pem \
  ls /etc/kubernetes/network/subnets
  
/etc/kubernetes/network/subnets/172.17.54.0-24
/etc/kubernetes/network/subnets/172.17.66.0-24

# 检查节点 flannel 网络信息
[root@master01 network]# ifconfig flannel.1
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.66.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::186f:12ff:fe79:f54e  prefixlen 64  scopeid 0x20<link>
        ether 1a:6f:12:79:f5:4e  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 15 overruns 0  carrier 0  collisions 0

# 查看路由信息
[root@master01 network]# route
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
default         gateway         0.0.0.0         UG    100    0        0 ens32
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
172.17.54.0     172.17.54.0     255.255.255.0   UG    0      0        0 flannel.1
192.168.220.0   0.0.0.0         255.255.255.0   U     100    0        0 ens32

# 查看 flannel 生成文件信息
[root@master01 network]# cat /run/flannel/subnet.env
FLANNEL_NETWORK=172.17.0.0/16
FLANNEL_SUBNET=172.17.66.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true

[root@master01 network]# cat /run/flannel/docker
DOCKER_OPT_BIP="--bip=172.17.66.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS=" --bip=172.17.66.1/24 --ip-masq=false --mtu=1450"
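How Docker consumes this file depends on how Docker itself is managed. One possible sketch is a systemd drop-in that loads the environment file and appends DOCKER_NETWORK_OPTIONS to the daemon command line; the dockerd path and the -H fd:// flag below are assumptions and should mirror the ExecStart of your installed docker.service:

# Drop-in so that dockerd picks up the flannel-generated bridge options
[root@master01 ~]# mkdir -p /etc/systemd/system/docker.service.d
[root@master01 ~]# cat > /etc/systemd/system/docker.service.d/flannel.conf << "EOF"
[Service]
EnvironmentFile=-/run/flannel/docker
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// $DOCKER_NETWORK_OPTIONS
EOF
[root@master01 ~]# systemctl daemon-reload && systemctl restart docker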

部署 CoreDNS

# coredns 配置文件
[root@master01 packages]# cat >  coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local  in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF

# 引用 coredns.yaml
[root@master01 packages]# kubectl apply -f coredns.yaml

# 查看dns pod 状态
[root@master01 packages]# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-7cc8dd57d9-9t6c5   1/1     Running   0          26m
kube-system   calico-node-8gt8z                          1/1     Running   0          11m
kube-system   calico-node-9bf65                          1/1     Running   0          11m
kube-system   calico-node-trl6m                          1/1     Running   0          11m
kube-system   coredns-675db8b7cc-4lvmc                   1/1     Running   0          43s
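With CoreDNS up, cluster DNS resolution can be verified from a throw-away Pod; a minimal sketch (busybox:1.28 is chosen only because its nslookup behaves well, any similar image works):

# Resolve the kubernetes Service through the cluster DNS (10.96.0.2)
[root@master01 packages]# kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default.svc.cluster.local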

部署应用验证

# 编写yaml文件
[root@master01 packages]# cat >  nginx.yaml  << "EOF"
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-web
spec:
  replicas: 2
  selector:
    name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.19.6
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service-nodeport
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30001
      protocol: TCP
  type: NodePort
  selector:
    name: nginx
EOF

# 引用 nginx.yaml
[root@master01 packages]# kubectl apply -f nginx.yaml

# 查看 pod 状态
[root@master01 packages]# kubectl get all 
NAME                  READY   STATUS    RESTARTS   AGE
pod/nginx-web-26dmn   1/1     Running   0          78s
pod/nginx-web-phnl5   1/1     Running   0          4m7s
pod/nginx-web-sclxd   1/1     Running   0          4m7s

NAME                              DESIRED   CURRENT   READY   AGE
replicationcontroller/nginx-web   3         3         3       4m7s

NAME                             TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
service/kubernetes               ClusterIP   10.96.0.1      <none>        443/TCP        17h
service/nginx-service-nodeport   NodePort    10.96.120.18   <none>        80:30001/TCP   4m7s

# 进行访问

二进制部署k8s高可用集群_第12张图片
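Besides the browser, the NodePort can be checked from the command line against any node IP (the address below follows this document's host plan, adjust to your actual node IP):

# Access the nginx Service through NodePort 30001
[root@master01 packages]# curl -I http://192.168.220.20:30001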

设置集群角色

# 将k8s-master-1设置为master角色
[root@k8s-master-2 ~]# kubectl label nodes k8s-master-1 node-role.kubernetes.io/master=
node/k8s-master-1 labeled

# 将k8s-node-1设置为node角色
[root@k8s-master-2 ~]# kubectl label nodes k8s-node-1 node-role.kubernetes.io/node=
node/k8s-node-1 labeled

# Taint k8s-master-1 as a master node so that it normally does not accept workloads
[root@k8s-master-2 ~]# kubectl taint nodes k8s-master-1 node-role.kubernetes.io/master=true:NoSchedule
# Remove the taint so that k8s-master-1 can run Pods again
[root@k8s-master-2 ~]# kubectl taint nodes k8s-master-1 node-role.kubernetes.io/master-
# Re-add the taint so that k8s-master-1 does not run ordinary Pods
[root@k8s-master-2 ~]# kubectl taint nodes k8s-master-1 node-role.kubernetes.io/master=:NoSchedule
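The resulting roles and taints can be confirmed with:

# Check node roles and the taints on the master node
[root@k8s-master-2 ~]# kubectl get nodes
[root@k8s-master-2 ~]# kubectl describe node k8s-master-1 | grep -i taint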

部署 Ingress-nginx

helm 方式安装部署 Ingress-nginx

# 下载二进制文件
[root@master01 ~]# wget https://get.helm.sh/helm-v3.9.3-linux-amd64.tar.gz
[root@master01 ~]# tar -xf helm-v3.9.3-linux-amd64.tar.gz 
[root@master01 linux-amd64]# cp helm /usr/local/bin/

# 添加 ingress-nginx 源
[root@master01 ~]# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx

# 查看源
[root@master01 ~]# helm repo list
NAME         	URL                                       
ingress-nginx	https://kubernetes.github.io/ingress-nginx

[root@master01 ~]# helm search repo ingress-nginx
NAME                       	CHART VERSION	APP VERSION	DESCRIPTION                                       
ingress-nginx/ingress-nginx	4.2.3        	1.3.0      	Ingress controller for Kubernetes using NGINX a...

# helm 下载 Ingress-nginx 包,helm pull 仓库/包
[root@master01 ~]# helm pull ingress-nginx/ingress-nginx
[root@master01 ~]# tar -xf ingress-nginx-4.2.3.tgz 

# 修改配置文件
[root@master01 ~]# vim ingress-nginx/values.yaml
# Under controller:
  image:
    repository: registry.cn-beijing.aliyuncs.com/dotbalo/controller
    # digest: sha256:46ba23c3fbaafd9e5bd01ea85b2f921d9f2217be082580edc22e6c704a83f02f   # comment out / delete the image digest
  dnsPolicy: ClusterFirstWithHostNet    # change the DNS policy
  hostNetwork: true                     # enable hostNetwork
  kind: DaemonSet                       # change the deployment kind to DaemonSet
  nodeSelector:                         # schedule only to nodes labelled ingress=true
    kubernetes.io/os: linux
    ingress: "true"
  service:
    type: ClusterIP                     # change LoadBalancer to ClusterIP
# Under defaultBackend:
  image:
    repository: registry.cn-beijing.aliyuncs.com/dotbalo/defaultbackend-amd64
# 使用helm进行安装
# 为需要安装 ingress-nginx 的节点设置标签
[root@master01 ingress-nginx]# kubectl label node master02 ingress=true

# Run the installation (from inside the extracted chart directory; --create-namespace creates the ingress-nginx namespace if it does not exist)
[root@master01 ingress-nginx]# helm install ingress-nginx -n ingress-nginx --create-namespace .

# Check which node the nginx-controller Pod landed on
[root@master01 ingress-nginx]# kubectl get pod -n ingress-nginx -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP               NODE       NOMINATED NODE   READINESS GATES
ingress-nginx-controller-792mp   1/1     Running   1          15h   192.168.220.31   master02   <none>           <none>
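Because the controller runs with hostNetwork, ports 80/443 should be listening directly on the labelled node; one way to confirm this on master02:

# The ingress controller should be bound to the host's 80/443 ports
[root@master02 ~]# ss -lntp | grep -E ":(80|443) "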

编写测试用例

# 查看本地是否有svc
[root@master01 ingress-nginx]# kubectl get svc
NAME                     TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes               ClusterIP   10.96.0.1      <none>        443/TCP        16d
nginx-service-nodeport   NodePort    10.96.81.131   <none>        80:30001/TCP   7d14h

# ingress.yaml
[root@master01 ingress-nginx]# vi ingress.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx # 标注类
  name: test-ingress
spec:
  rules:
    - host: ingress.test.com # 自定义域名
      http:
        paths:
          - backend:
              serviceName: nginx-service-nodeport # name of the nginx Service created earlier
              servicePort: 80 # port exposed by that Service
            path: /

# 创建 ingress
[root@master01 ~]# kubectl apply -f ingress.yaml 

# 修改 hosts 文件,只配置安装 nginx-controller 的节点
[root@master01 ~]# vim /etc/hosts
192.168.220.31 master02 ingress.test.com

# 进行访问
[root@master01 ~]# curl ingress.test.com
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
Deploying Harbor

Download the Harbor offline installer from the official releases page: https://github.com/goharbor/harbor/releases

Deploying Harbor over HTTPS

Configure the hosts file

[root@localhost harbor]# vim /etc/hosts
192.168.220.32 harbor.images.io
Generate the CA certificate

# Create the harbor working directory and the certificate directory
[root@localhost data]# mkdir -p /opt/data/harbor/cert && cd /opt/data/harbor/cert

# Generate the CA private key
[root@localhost cert]# openssl genrsa -out ca.key 4096

# Generate the CA certificate; replace harbor.images.io with your own harbor registry domain
[root@localhost cert]# openssl req -x509 -new -nodes -sha512 -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=harbor.images.io" -key ca.key -out ca.crt
Generate the server certificate

# Generate the private key; mind the domain name
[root@localhost cert]# openssl genrsa -out harbor.images.io.key 4096

# Generate the certificate signing request (CSR)
[root@localhost cert]# openssl req -sha512 -new -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=harbor.images.io" -key harbor.images.io.key -out harbor.images.io.csr

# Generate the x509 v3 extension file; mind the domain names
[root@localhost cert]# cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names

[alt_names]
DNS.1=harbor.images.io
DNS.2=harbor.images
DNS.3=harbor
EOF

# Use the v3.ext file to generate the certificate for the Harbor host
[root@localhost cert]# openssl x509 -req -sha512 -days 9999 -extfile v3.ext -CA ca.crt -CAkey ca.key -CAcreateserial -in harbor.images.io.csr -out harbor.images.io.crt

# Provide the certificate to Docker
# Convert harbor.images.io.crt to harbor.images.io.cert for Docker: the Docker daemon interprets .crt files as CA certificates and .cert files as client certificates
[root@localhost cert]# openssl x509 -inform PEM -in harbor.images.io.crt -out harbor.images.io.cert

# Copy the server certificate, key and CA file into the Docker certificates folder on the Harbor host
# If the default nginx port 443 is mapped to a different port, create the folder /etc/docker/certs.d/yourdomain.com:port or /etc/docker/certs.d/harbor_IP:port
[root@localhost cert]# mkdir -p /etc/docker/certs.d/harbor.images.io:9181
[root@localhost cert]# cp harbor.images.io.cert harbor.images.io.key ca.crt /etc/docker/certs.d/harbor.images.io\:9181/

# Restart docker
[root@localhost cert]# systemctl restart docker
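Before wiring the certificate into Harbor, it can be worth checking that the SANs made it into the signed certificate; for example:

# Inspect the issued certificate and confirm the Subject Alternative Names
[root@localhost cert]# openssl x509 -in harbor.images.io.crt -noout -text | grep -A1 "Subject Alternative Name"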
Deploy Harbor

# Extract the harbor package into /usr/local/
[root@localhost ~]# tar -xf harbor-offline-installer-v2.4.3.tgz -C /usr/local/

# Copy the harbor yml configuration file
[root@localhost ~]# cd /usr/local/harbor/
[root@localhost harbor]# cp harbor.yml.tmpl harbor.yml

# Edit harbor.yml
[root@localhost harbor]# vim harbor.yml
hostname: harbor.images.io          # set the access domain
# http:                             # comment out the http section
#  port: 80
https:                              # set the https listening port
  port: 9181
  certificate: /opt/data/harbor/cert/harbor.images.io.crt   # crt certificate
  private_key: /opt/data/harbor/cert/harbor.images.io.key   # private key
harbor_admin_password: dream@0324   # web login password, the default user is admin
database:
  password: admin                   # database password
data_volume: /opt/data/harbor       # default working directory
log:
    location: /opt/logs/harbor      # log directory

# Run the installation script
[root@localhost harbor]# ./install.sh
.....
[Step 5]: starting Harbor ...
[+] Running 10/10
 ⠿ Network harbor_harbor        Created    0.1s
 ⠿ Container harbor-log         Started    0.7s
 ⠿ Container registry           Started    2.2s
 ⠿ Container harbor-portal      Started    2.1s
 ⠿ Container harbor-db          Started    2.0s
 ⠿ Container registryctl        Started    2.0s
 ⠿ Container redis              Started    2.2s
 ⠿ Container harbor-core        Started    2.7s
 ⠿ Container nginx              Started    3.7s
 ⠿ Container harbor-jobservice  Started    3.6s
✔ ----Harbor has been installed and started successfully.----

# Harbor is now up and running

# Manage harbor with systemd
[root@localhost harbor]# cat > /usr/lib/systemd/system/harbor.service <<-EOF
[Unit]
Description=Harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
Documentation=http://github.com/vmware/harbor

[Service]
Type=simple
Restart=on-failure
RestartSec=5
ExecStart=/usr/local/bin/docker-compose -f /usr/local/harbor/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -f /usr/local/harbor/docker-compose.yml down

[Install]
WantedBy=multi-user.target
EOF

# Enable harbor at boot and restart it
[root@localhost harbor]# systemctl daemon-reload && systemctl restart harbor && systemctl enable harbor
Since no DNS record is configured for the domain, the browser relies on the hosts entry added above; open https://harbor.images.io:9181 to reach the Harbor web UI.

二进制部署k8s高可用集群_第13张图片
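Finally, the registry can be verified from a Docker client that trusts the CA generated above (the `library` project is Harbor's default public project; the image name is only an example):

# Log in to the registry and push a test image
[root@localhost ~]# docker login harbor.images.io:9181 -u admin -p dream@0324
[root@localhost ~]# docker pull nginx:1.19.6
[root@localhost ~]# docker tag nginx:1.19.6 harbor.images.io:9181/library/nginx:1.19.6
[root@localhost ~]# docker push harbor.images.io:9181/library/nginx:1.19.6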
 
 

package com.ljn.filter.custom;

import java.util.ArrayList;</div>
                                </li>
                                <li><a href="/article/2190.htm"
                                       title="修改jboss端口" target="_blank">修改jboss端口</a>
                                    <span class="text-muted">chicony</span>
<a class="tag" taget="_blank" href="/search/jboss/1.htm">jboss</a>
                                    <div>修改jboss端口 
  
%JBOSS_HOME%\server\{服务实例名}\conf\bindingservice.beans\META-INF\bindings-jboss-beans.xml 
  
中找到 
  
   <!-- The ports-default bindings are obtained by taking the base bindin</div>
                                </li>
                                <li><a href="/article/2317.htm"
                                       title="c++ 用类模版实现数组类" target="_blank">c++ 用类模版实现数组类</a>
                                    <span class="text-muted">CrazyMizzz</span>
<a class="tag" taget="_blank" href="/search/C%2B%2B/1.htm">C++</a>
                                    <div>最近c++学到数组类,写了代码将他实现,基本具有vector类的功能 
 
 

#include<iostream>
#include<string>
#include<cassert>
using namespace std;
template<class T>
class Array
{
public:
	//构造函数 
	</div>
                                </li>
                                <li><a href="/article/2444.htm"
                                       title="hadoop dfs.datanode.du.reserved 预留空间配置方法" target="_blank">hadoop dfs.datanode.du.reserved 预留空间配置方法</a>
                                    <span class="text-muted">daizj</span>
<a class="tag" taget="_blank" href="/search/hadoop/1.htm">hadoop</a><a class="tag" taget="_blank" href="/search/%E9%A2%84%E7%95%99%E7%A9%BA%E9%97%B4/1.htm">预留空间</a>
                                    <div>对于datanode配置预留空间的方法 为:在hdfs-site.xml添加如下配置 
 <property> 
    <name>dfs.datanode.du.reserved</name> 
    <value>10737418240</value> 
  
 </div>
                                </li>
                                <li><a href="/article/2571.htm"
                                       title="mysql远程访问的设置" target="_blank">mysql远程访问的设置</a>
                                    <span class="text-muted">dcj3sjt126com</span>
<a class="tag" taget="_blank" href="/search/mysql/1.htm">mysql</a><a class="tag" taget="_blank" href="/search/%E9%98%B2%E7%81%AB%E5%A2%99/1.htm">防火墙</a>
                                    <div>第一步: 激活网络设置 你需要编辑mysql配置文件my.cnf. 通常状况,my.cnf放置于在以下目录: /etc/mysql/my.cnf (Debian linux) /etc/my.cnf (Red Hat Linux/Fedora Linux) /var/db/mysql/my.cnf (FreeBSD) 然后用vi编辑my.cnf,修改内容从以下行: [mysqld] 你所需要: 1</div>
                                </li>
                                <li><a href="/article/2698.htm"
                                       title="ios 使用特定的popToViewController返回到相应的Controller" target="_blank">ios 使用特定的popToViewController返回到相应的Controller</a>
                                    <span class="text-muted">dcj3sjt126com</span>
<a class="tag" taget="_blank" href="/search/controller/1.htm">controller</a>
                                    <div>1、取navigationCtroller中的Controllers
NSArray * ctrlArray = self.navigationController.viewControllers;
2、取出后,执行,
[self.navigationController popToViewController:[ctrlArray objectAtIndex:0] animated:YES</div>
                                </li>
                                <li><a href="/article/2825.htm"
                                       title="Linux正则表达式和通配符的区别" target="_blank">Linux正则表达式和通配符的区别</a>
                                    <span class="text-muted">eksliang</span>
<a class="tag" taget="_blank" href="/search/%E6%AD%A3%E5%88%99%E8%A1%A8%E8%BE%BE%E5%BC%8F/1.htm">正则表达式</a><a class="tag" taget="_blank" href="/search/%E9%80%9A%E9%85%8D%E7%AC%A6%E5%92%8C%E6%AD%A3%E5%88%99%E8%A1%A8%E8%BE%BE%E5%BC%8F%E7%9A%84%E5%8C%BA%E5%88%AB/1.htm">通配符和正则表达式的区别</a><a class="tag" taget="_blank" href="/search/%E9%80%9A%E9%85%8D%E7%AC%A6/1.htm">通配符</a>
                                    <div>转载请出自出处:http://eksliang.iteye.com/blog/1976579 
 
首先得明白二者是截然不同的 
通配符只能用在shell命令中,用来处理字符串的的匹配。 
判断一个命令是否为bash shell(linux 默认的shell)的内置命令 
 type -t commad  
返回结果含义 
 file  表示为外部命令 
 alias  表示该</div>
                                </li>
                                <li><a href="/article/2952.htm"
                                       title="Ubuntu Mysql Install and CONF" target="_blank">Ubuntu Mysql Install and CONF</a>
                                    <span class="text-muted">gengzg</span>
<a class="tag" taget="_blank" href="/search/Install/1.htm">Install</a>
                                    <div>http://www.navicat.com.cn/download/navicat-for-mysql
    Step1: 下载Navicat ,网址:http://www.navicat.com/en/download/download.html  
      
    Step2:进入下载目录,解压压缩包:tar -zxvf  navicat11_mysql_en.tar.gz  </div>
                                </li>
                                <li><a href="/article/3079.htm"
                                       title="批处理,删除文件bat" target="_blank">批处理,删除文件bat</a>
                                    <span class="text-muted">huqiji</span>
<a class="tag" taget="_blank" href="/search/windows/1.htm">windows</a><a class="tag" taget="_blank" href="/search/dos/1.htm">dos</a>
                                    <div>@echo off
::演示:删除指定路径下指定天数之前(以文件名中包含的日期字符串为准)的文件。
::如果演示结果无误,把del前面的echo去掉,即可实现真正删除。
::本例假设文件名中包含的日期字符串(比如:bak-2009-12-25.log)
rem 指定待删除文件的存放路径
set SrcDir=C:/Test/BatHome
rem 指定天数
set DaysAgo=1</div>
                                </li>
                                <li><a href="/article/3206.htm"
                                       title="跨浏览器兼容的HTML5视频音频播放器" target="_blank">跨浏览器兼容的HTML5视频音频播放器</a>
                                    <span class="text-muted">天梯梦</span>
<a class="tag" taget="_blank" href="/search/html5/1.htm">html5</a>
                                    <div>HTML5的video和audio标签是用来在网页中加入视频和音频的标签,在支持html5的浏览器中不需要预先加载Adobe Flash浏览器插件就能轻松快速的播放视频和音频文件。而html5media.js可以在不支持html5的浏览器上使video和audio标签生效。    How to enable <video> and <audio> tags in </div>
                                </li>
                                <li><a href="/article/3333.htm"
                                       title="Bundle自定义数据传递" target="_blank">Bundle自定义数据传递</a>
                                    <span class="text-muted">hm4123660</span>
<a class="tag" taget="_blank" href="/search/android/1.htm">android</a><a class="tag" taget="_blank" href="/search/Serializable/1.htm">Serializable</a><a class="tag" taget="_blank" href="/search/%E8%87%AA%E5%AE%9A%E4%B9%89%E6%95%B0%E6%8D%AE%E4%BC%A0%E9%80%92/1.htm">自定义数据传递</a><a class="tag" taget="_blank" href="/search/Bundle/1.htm">Bundle</a><a class="tag" taget="_blank" href="/search/Parcelable/1.htm">Parcelable</a>
                                    <div>      我们都知道Bundle可能过put****()方法添加各种基本类型的数据,Intent也可以通过putExtras(Bundle)将数据添加进去,然后通过startActivity()跳到下一下Activity的时候就把数据也传到下一个Activity了。如传递一个字符串到下一个Activity 
  
把数据放到Intent</div>
                                </li>
                                <li><a href="/article/3460.htm"
                                       title="C#:异步编程和线程的使用(.NET 4.5 )" target="_blank">C#:异步编程和线程的使用(.NET 4.5 )</a>
                                    <span class="text-muted">powertoolsteam</span>
<a class="tag" taget="_blank" href="/search/.net/1.htm">.net</a><a class="tag" taget="_blank" href="/search/%E7%BA%BF%E7%A8%8B/1.htm">线程</a><a class="tag" taget="_blank" href="/search/C%23/1.htm">C#</a><a class="tag" taget="_blank" href="/search/%E5%BC%82%E6%AD%A5%E7%BC%96%E7%A8%8B/1.htm">异步编程</a>
                                    <div>异步编程和线程处理是并发或并行编程非常重要的功能特征。为了实现异步编程,可使用线程也可以不用。将异步与线程同时讲,将有助于我们更好的理解它们的特征。 
本文中涉及关键知识点 
1. 异步编程 
2. 线程的使用 
3. 基于任务的异步模式 
4. 并行编程 
5. 总结 
 
  异步编程  
 
什么是异步操作?异步操作是指某些操作能够独立运行,不依赖主流程或主其他处理流程。通常情况下,C#程序</div>
                                </li>
                                <li><a href="/article/3587.htm"
                                       title="spark 查看 job history 日志" target="_blank">spark 查看 job history 日志</a>
                                    <span class="text-muted">Stark_Summer</span>
<a class="tag" taget="_blank" href="/search/%E6%97%A5%E5%BF%97/1.htm">日志</a><a class="tag" taget="_blank" href="/search/spark/1.htm">spark</a><a class="tag" taget="_blank" href="/search/history/1.htm">history</a><a class="tag" taget="_blank" href="/search/job/1.htm">job</a>
                                    <div>SPARK_HOME/conf 下:  
spark-defaults.conf 增加如下内容 
spark.eventLog.enabled true spark.eventLog.dir hdfs://master:8020/var/log/spark spark.eventLog.compress true  
spark-env.sh 增加如下内容 
export SP</div>
                                </li>
                                <li><a href="/article/3714.htm"
                                       title="SSH框架搭建" target="_blank">SSH框架搭建</a>
                                    <span class="text-muted">wangxiukai2015eye</span>
<a class="tag" taget="_blank" href="/search/spring/1.htm">spring</a><a class="tag" taget="_blank" href="/search/Hibernate/1.htm">Hibernate</a><a class="tag" taget="_blank" href="/search/struts/1.htm">struts</a>
                                    <div>MyEclipse搭建SSH框架 Struts Spring Hibernate 
1、new一个web project。 
2、右键项目,为项目添加Struts支持。 
   选择Struts2 Core Libraries -<MyEclipes-Library> 
     点击Finish。src目录下多了struts</div>
                                </li>
                </ul>
            </div>
        </div>
    </div>

<div>
    <div class="container">
        <div class="indexes">
            <strong>按字母分类:</strong>
            <a href="/tags/A/1.htm" target="_blank">A</a><a href="/tags/B/1.htm" target="_blank">B</a><a href="/tags/C/1.htm" target="_blank">C</a><a
                href="/tags/D/1.htm" target="_blank">D</a><a href="/tags/E/1.htm" target="_blank">E</a><a href="/tags/F/1.htm" target="_blank">F</a><a
                href="/tags/G/1.htm" target="_blank">G</a><a href="/tags/H/1.htm" target="_blank">H</a><a href="/tags/I/1.htm" target="_blank">I</a><a
                href="/tags/J/1.htm" target="_blank">J</a><a href="/tags/K/1.htm" target="_blank">K</a><a href="/tags/L/1.htm" target="_blank">L</a><a
                href="/tags/M/1.htm" target="_blank">M</a><a href="/tags/N/1.htm" target="_blank">N</a><a href="/tags/O/1.htm" target="_blank">O</a><a
                href="/tags/P/1.htm" target="_blank">P</a><a href="/tags/Q/1.htm" target="_blank">Q</a><a href="/tags/R/1.htm" target="_blank">R</a><a
                href="/tags/S/1.htm" target="_blank">S</a><a href="/tags/T/1.htm" target="_blank">T</a><a href="/tags/U/1.htm" target="_blank">U</a><a
                href="/tags/V/1.htm" target="_blank">V</a><a href="/tags/W/1.htm" target="_blank">W</a><a href="/tags/X/1.htm" target="_blank">X</a><a
                href="/tags/Y/1.htm" target="_blank">Y</a><a href="/tags/Z/1.htm" target="_blank">Z</a><a href="/tags/0/1.htm" target="_blank">其他</a>
        </div>
    </div>
</div>
<footer id="footer" class="mb30 mt30">
    <div class="container">
        <div class="footBglm">
            <a target="_blank" href="/">首页</a> -
            <a target="_blank" href="/custom/about.htm">关于我们</a> -
            <a target="_blank" href="/search/Java/1.htm">站内搜索</a> -
            <a target="_blank" href="/sitemap.txt">Sitemap</a> -
            <a target="_blank" href="/custom/delete.htm">侵权投诉</a>
        </div>
        <div class="copyright">版权所有 IT知识库 CopyRight © 2000-2050 E-COM-NET.COM , All Rights Reserved.
<!--            <a href="https://beian.miit.gov.cn/" rel="nofollow" target="_blank">京ICP备09083238号</a><br>-->
        </div>
    </div>
</footer>
<!-- 代码高亮 -->
<script type="text/javascript" src="/static/syntaxhighlighter/scripts/shCore.js"></script>
<script type="text/javascript" src="/static/syntaxhighlighter/scripts/shLegacy.js"></script>
<script type="text/javascript" src="/static/syntaxhighlighter/scripts/shAutoloader.js"></script>
<link type="text/css" rel="stylesheet" href="/static/syntaxhighlighter/styles/shCoreDefault.css"/>
<script type="text/javascript" src="/static/syntaxhighlighter/src/my_start_1.js"></script>





</body>

</html>