[HA] Highly available MariaDB on CentOS 7.0 with Corosync + Pacemaker + pcs + DRBD

Author: 吴业亮
Blog: https://wuyeliang.blog.csdn.net/

1. Operating system configuration
1.1 Preparation
Both nodes, ha-node1 and ha-node2, run CentOS 7.0. Each node has two disks: one for the root partition and one for DRBD.
192.168.8.51 ha-node1
192.168.8.52 ha-node2
Set the hostnames:
Node 1

# hostnamectl set-hostname ha-node1
# su -l

Node 2

# hostnamectl set-hostname ha-node2
# su -l

1.2 Disk layout

[root@ha-node2 corosync]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   20G  0 disk 
├─sda1            8:1    0  500M  0 part /boot
└─sda2            8:2    0 19.5G  0 part 
  ├─centos-swap 253:0    0    2G  0 lvm  [SWAP]
  └─centos-root 253:1    0 17.5G  0 lvm  /
sdb               8:16   0   20G  0 disk 
sr0              11:0    1 1024M  0 rom  
[root@ha-node1 corosync]# lsblk 
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda               8:0    0   20G  0 disk 
├─sda1            8:1    0  500M  0 part /boot
└─sda2            8:2    0 19.5G  0 part 
  ├─centos-swap 253:0    0    2G  0 lvm  [SWAP]
  └─centos-root 253:1    0 17.5G  0 lvm  /
sdb               8:16   0   20G  0 disk 
sr0              11:0    1 1024M  0 rom 

1.3 Create the LVM volume (run on every node)

# pvcreate /dev/sdb
# vgcreate data /dev/sdb
# lvcreate --size 2G --name mysql data
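
To confirm the volume was created, the standard LVM reporting commands can be used (sizes will match your own disks):

# pvs          ## /dev/sdb should be listed as a physical volume
# vgs data     ## volume group "data" should show roughly 20G
# lvs data     ## logical volume "mysql" should show 2G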

1.4 Disable SELinux and the firewall (run on every node)

setenforce 0
sed -i.bak "s/SELINUX=enforcing/SELINUX=permissive/g" /etc/selinux/config
systemctl disable firewalld.service
systemctl stop firewalld.service
iptables --flush

1.5 Configure the hosts file

echo '192.168.8.51 ha-node1' >> /etc/hosts
echo '192.168.8.52 ha-node2' >> /etc/hosts
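
A quick check that name resolution works on both nodes:

# getent hosts ha-node1 ha-node2   ## both names should resolve to their 192.168.8.5x addresses
# ping -c 1 ha-node2               ## from ha-node1, and the other way round from ha-node2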

1.6 Configure NTP (10.239.41.128 is the NTP server; run on every node)

# chkconfig chronyd off
# chkconfig ntpd on  
# sed -i "/^server\ 3.centos.pool/a server\ 10.239.41.128 " /etc/ntp.conf 
# service ntpd start
# ntpq -p 
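
Note that CentOS 7 ships chrony rather than ntpd by default; if the ntp package is not installed yet, install it first (an extra step not listed in the original recipe):

# yum install -y ntp    ## only needed if ntpd is not already present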

1.7 Configure passwordless SSH between the nodes (run on every node)

# ssh-keygen -t dsa -f ~/.ssh/id_dsa -N ""
# ssh-copy-id ha-node1
# ssh-copy-id ha-node2
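
To verify the trust works in both directions:

# ssh ha-node2 hostname   ## run on ha-node1; should print ha-node2 without asking for a password
# ssh ha-node1 hostname   ## run on ha-node2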

2. Install DRBD
2.1 Install the DRBD packages (on every node)

# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
# yum install -y kmod-drbd84 drbd84-utils

2.2 Configuration files
/etc/drbd.conf                     # main configuration file
/etc/drbd.d/global_common.conf     # global configuration file
a. /etc/drbd.conf
The main configuration file simply includes the global configuration file and every file ending in .res under the drbd.d/ directory:

# You can find an example in  /usr/share/doc/drbd.../drbd.conf.example
include "drbd.d/global_common.conf";
include "drbd.d/*.res"; 

b. /etc/drbd.d/global_common.conf

global {
    usage-count no;  # whether to take part in DRBD usage statistics (default yes); LINBIT uses it to count installations
    # minor-count dialog-refresh disable-ip-verification
}
common {
    #protocol C;      # DRBD replication protocol
    handlers {
        # These are EXAMPLE handlers only.
        # They may have severe implications,
        # like hard resetting the node under certain circumstances.
        # Be careful when chosing your poison.
        pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        # split-brain "/usr/lib/drbd/notify-split-brain.sh root";
        # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";

        # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
        # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
    }
    startup {
        # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
    }
    options {
        # cpu-mask on-no-data-accessible
    }
    disk {
        on-io-error detach; # on an I/O error, detach the backing device (run diskless)
        # size max-bio-bvecs on-io-error fencing disk-barrier disk-flushes
        # disk-drain md-flushes resync-rate resync-after al-extents
        # c-plan-ahead c-delay-target c-fill-target c-max-rate
 	    # c-min-rate disk-timeout
    }
    net {

        # protocol timeout max-epoch-size max-buffers unplug-watermark
        # connect-int ping-int sndbuf-size rcvbuf-size ko-count
        # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri
        # after-sb-1pri after-sb-2pri always-asbp rr-conflict
        # ping-timeout data-integrity-alg tcp-cork on-congestion
        # congestion-fill congestion-extents csums-alg verify-alg
        # use-rle
    }
}

Note: on-io-error can be set to one of the following policies:
detach: the default and recommended option; if a lower-level I/O error occurs on a node, DRBD detaches the backing device and runs in diskless mode on that node
pass_on: DRBD reports the I/O error to the upper layer; on the primary node it is passed to the mounted file system, while on the secondary node it is simply ignored (there is no upper layer there to report to)
local-io-error: invokes the command defined by the local-io-error handler; this requires a matching local-io-error handler in the resource configuration and lets the administrator handle I/O errors with any command or script of their choice
c. Define a resource
Create /etc/drbd.d/mysql.res with the following content:

resource mysql {                      # resource name
    protocol C;                       # replication protocol
    meta-disk internal;
    device /dev/drbd1;                # DRBD device name
    syncer {
        verify-alg sha1;              # checksum algorithm used by online verification
    }
    net {
        allow-two-primaries;
    }
    on ha-node1 {
        disk /dev/data/mysql;         # backing device: the "mysql" LV in VG "data"
        address 192.168.8.51:7789;    # DRBD listen address and port
    }
    on ha-node2 {
        disk /dev/data/mysql;
        address 192.168.8.52:7789;
    }
}
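
Before going further, the resource definition can be syntax-checked with drbdadm; any parse error points at a problem in the file:

# drbdadm dump mysql    ## prints the parsed resource definition if the configuration is valid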

2.3 Copy the configuration files to ha-node2

# scp -rp  /etc/drbd.d/* ha-node2:/etc/drbd.d/

2.4 Bring up DRBD (on ha-node1)

# drbdadm create-md mysql
# modprobe drbd
# drbdadm up mysql
# drbdadm -- --force primary mysql

Check the status:

# cat /proc/drbd 

2.5 Configure the peer node

# ssh ha-node2 "drbdadm create-md mysql"
# ssh ha-node2 "modprobe drbd"
# ssh ha-node2 "drbdadm up mysql"
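
The initial synchronization from ha-node1 to ha-node2 takes a while; wait until both sides report UpToDate before formatting the device:

# watch cat /proc/drbd                  ## wait for ds:UpToDate/UpToDate
# ssh ha-node2 "drbdadm dstate mysql"   ## should eventually report UpToDate/UpToDate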

2.6 Format the device and mount it (on ha-node1)

# mkfs.xfs /dev/drbd1
meta-data=/dev/drbd1             isize=256    agcount=4, agsize=65532 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0        finobt=0
data     =                       bsize=4096   blocks=262127, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=853, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

Mount the device:

# mount /dev/drbd1 /var/lib/mysql
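
If the mount fails because /var/lib/mysql does not exist yet (mariadb-server is only installed in section 4), create the directory first and then verify the mount:

# mkdir -p /var/lib/mysql
# mount /dev/drbd1 /var/lib/mysql
# df -h /var/lib/mysql      ## should show /dev/drbd1 mounted on /var/lib/mysql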

3. Install the cluster software
3.1 Install the required packages (on every node)

# yum install -y pacemaker pcs psmisc policycoreutils-python

Start the pcsd service and enable it at boot:

# systemctl start pcsd.service
# systemctl enable pcsd.service

3.2 Set the password for the hacluster user

# ssh ha-node2 -- 'echo redhat1 | passwd --stdin hacluster'
# echo redhat1 | passwd --stdin hacluster
Note: redhat1 is the password chosen for the hacluster user.

4. Install MariaDB (on every node)
4.1 Install the packages

# yum install epel* -y
# yum install mariadb mariadb-server MySQL-python

4.2 Prevent MariaDB from starting at boot (the cluster will manage it)

# chkconfig  mariadb off
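
On CentOS 7 the chkconfig call is forwarded to systemd; the native equivalent plus a quick check:

# systemctl disable mariadb
# systemctl is-enabled mariadb    ## should print "disabled"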

5. Configure Corosync
5.1 Authenticate the nodes and create the cluster (note: unset any HTTP proxy before running these)

# pcs cluster auth ha-node1 ha-node2
# pcs cluster setup --name mycluster ha-node1 ha-node2
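
pcs cluster auth prompts for a user name and password; to run it non-interactively, the hacluster credentials set in section 3.2 can be passed on the command line:

# pcs cluster auth ha-node1 ha-node2 -u hacluster -p redhat1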

5.2 Start the cluster

[root@ha-node1 ~]# pcs cluster start --all
ha-node1: Starting Cluster...
ha-node2: Starting Cluster...


5.3 Verify the Corosync installation

[root@ha-node1 ~]# corosync-cfgtool -s
Printing ring status.
Local node ID 1
RING ID 0
id = 192.168.8.51
status = ring 0 active with no faults

5.4 Check the cluster members

# corosync-cmapctl | grep members
runtime.totem.pg.mrp.srp.members.1.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.1.ip (str) = r(0) ip(192.168.8.51)
runtime.totem.pg.mrp.srp.members.1.join_count (u32) = 1
runtime.totem.pg.mrp.srp.members.1.status (str) = joined
runtime.totem.pg.mrp.srp.members.2.config_version (u64) = 0
runtime.totem.pg.mrp.srp.members.2.ip (str) = r(0) ip(192.168.8.52)
runtime.totem.pg.mrp.srp.members.2.join_count (u32) = 2
runtime.totem.pg.mrp.srp.members.2.status (str) = joined

5.5 Check the Corosync status

# pcs status corosync
Membership information
--------------------------
Nodeid Votes Name
1 1 ha-node1 (local)
2 1 ha-node2

5.6 Verify the Pacemaker processes

# ps axf
PID TTY STAT TIME COMMAND
2 ? S 0:00 [kthreadd]
...lots of processes...
1362 ? Ssl 0:35 corosync
1379 ? Ss 0:00 /usr/sbin/pacemakerd -f
1380 ? Ss 0:00 \_ /usr/libexec/pacemaker/cib
1381 ? Ss 0:00 \_ /usr/libexec/pacemaker/stonithd
1382 ? Ss 0:00 \_ /usr/libexec/pacemaker/lrmd
1383 ? Ss 0:00 \_ /usr/libexec/pacemaker/attrd
1384 ? Ss 0:00 \_ /usr/libexec/pacemaker/pengine
1385 ? Ss 0:00 \_ /usr/libexec/pacemaker/crmd

5.7 Check pcs status

[root@ha-node1 ~]# pcs status
Cluster name: mycluster
WARNING: no stonith devices and stonith-enabled is not false
Last updated: Tue Dec 16 16:15:29 2014
Last change: Tue Dec 16 15:49:47 2014
Stack: corosync
Current DC: ha-node2 (2) - partition with quorum
Version: 1.1.12-a14efad
2 Nodes configured
0 Resources configured
Online: [ ha-node1 ha-node2 ]
Full list of resources:
PCSD Status:
ha-node1: Online
ha-node2: Online
Daemon Status:
corosync: active/disabled
pacemaker: active/disabled
pcsd: active/enabled

5.8 Check the system logs for errors (stonith errors can be ignored for now)

# journalctl | grep -i error
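
Since fencing is not configured yet, stonith-related messages can be filtered out to reduce the noise:

# journalctl | grep -i error | grep -iv stonith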

6. Configure the cluster (run on any one node)
6.1 Cluster properties
Quorum policy (with only two nodes, loss of quorum has to be ignored):

# pcs property set no-quorum-policy=ignore

Migrate services away after the first failure:

# pcs resource defaults migration-threshold=1

Since the two nodes have no STONITH device, disable fencing:

# pcs property set stonith-enabled=false

Set resource stickiness so that after ha-node1 recovers, resources do not migrate back from ha-node2 (moving back and forth disrupts the service):

# pcs resource defaults resource-stickiness=100
# pcs resource defaults

Set the default resource operation timeout:

# pcs resource op defaults timeout=90s
# pcs resource op defaults

Validate the configuration; no output means there are no errors:

# crm_verify -L -V

6.2 Configure the floating IP

# pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.8.53 cidr_netmask=24 op monitor interval=30s

Here vip is a user-chosen name for the cluster IP resource, and the monitor interval is 30s.
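
To confirm the address is active, check the resource state and, on the node currently hosting it, the interface:

# pcs status resources               ## vip should be Started on one of the nodes
# ip addr show | grep 192.168.8.53   ## run on the node that hosts vip
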
6.3 Make DRBD highly available

# umount /dev/drbd1    ## on every node where it is still mounted

Create a working copy of the CIB:

# pcs cluster cib drbd_cfg

Create the mysqlData resource:

# pcs -f drbd_cfg resource create mysqlData ocf:linbit:drbd \
    drbd_resource=mysql op monitor interval=60s

Create a master/slave clone of the resource:

# pcs -f drbd_cfg resource master mysqlDataClone mysqlData \
master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 \
notify=true

Review the configuration:

[root@ha-node1 ~]# pcs -f drbd_cfg resource show
ClusterIP (ocf::heartbeat:IPaddr2): Started
mysqlSite (ocf::heartbeat:apache): Started
Master/Slave Set: mysqlDataClone [mysqlData]
Stopped: [ ha-node1 ha-node2 ]

Push the configuration to the live cluster:

# pcs cluster cib-push drbd_cfg

Check the status:

[root@ha-node1 ~]# pcs status
Cluster name: mycluster
Last updated: Fri Aug 14 09:29:41 2015
Last change: Fri Aug 14 09:29:25 2015
Stack: corosync
Current DC: ha-node1 (1) - partition with quorum
Version: 1.1.12-a14efad
2 Nodes configured
4 Resources configured
Online: [ ha-node1 ha-node2 ]
Full list of resources:
ClusterIP (ocf::heartbeat:IPaddr2): Started ha-node1
mysqlSite (ocf::heartbeat:apache): Started ha-node1
Master/Slave Set: mysqlDataClone [mysqlData]
Masters: [ ha-node1 ]
Slaves: [ ha-node2 ]
PCSD Status:
ha-node1: Online
ha-node2: Online
Daemon Status:
corosync: active/disabled
pacemaker: active/disabled
pcsd: active/enabled

Make sure the drbd kernel module is loaded at boot:

# echo drbd >/etc/modules-load.d/drbd.conf
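
To confirm the module is loaded right now (the file above only takes effect on the next boot):

# lsmod | grep drbd    ## the drbd module should already be listed (it was loaded in section 2.4)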

6.4 Make the file system highly available
Create a working copy of the CIB:

# pcs cluster cib fs_cfg

Create the dbFS resource:

# pcs -f fs_cfg resource create dbFS Filesystem \
device="/dev/drbd1" directory="/var/lib/mysql" fstype="xfs"

Colocate dbFS with the master of mysqlDataClone:

# pcs -f fs_cfg constraint colocation add dbFS with mysqlDataClone INFINITY with-rsc-role=Master

Set the start order:

# pcs -f fs_cfg constraint order promote mysqlDataClone then start dbFS

Push the configuration to the live cluster:

# pcs cluster cib-push fs_cfg

6.5 Make MariaDB highly available

The resource is named mariadb so the constraints below can refer to it. Note that the systemd:mariadb agent takes no instance attributes (binary, config, datadir, pid and socket belong to the ocf:heartbeat:mysql agent), so only the operation timeouts are set:

# pcs resource create mariadb systemd:mariadb \
op start timeout=180s op stop timeout=180s \
op monitor interval=20s timeout=60s

Configure the colocation constraints:

# pcs constraint colocation add mariadb with dbFS INFINITY
# pcs constraint colocation add mariadb with vip INFINITY

Set the start order:

# pcs constraint order dbFS then mariadb
# pcs constraint order vip then mysqlDataClone

Review the resource constraints:

# pcs constraint --full

Check the cluster status:

# pcs status
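
An optional failover test (not part of the original steps): put the active node into standby, watch the resources move to the other node, then bring it back:

# pcs cluster standby ha-node1     ## resources should move to ha-node2
# pcs status                       ## wait until everything is Started on ha-node2
# pcs cluster unstandby ha-node1   ## bring ha-node1 back; stickiness keeps the resources on ha-node2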

7. Cluster operation commands
7.1 Verify the cluster installation

 # pacemakerd -F ## show the Pacemaker components (alternatively: ps axf | grep pacemaker)
 # corosync-cfgtool -s ## show the Corosync ring status
 # corosync-cmapctl | grep members ## corosync 2.3.x
 # corosync-objctl | grep members ## corosync 1.4.x

7.2 View cluster resources

 # pcs resource standards ## list supported resource standards
 # pcs resource providers ## list resource providers
 # pcs resource agents ## list all resource agents
 # pcs resource list ## list available resources
 # pcs stonith list ## list available fence (STONITH) agents
 # pcs property list --all ## show all cluster properties, including defaults
 # crm_simulate -sL ## show resource score values

7.3 Work with a saved configuration file (offline CIB)

 # pcs cluster cib ra_cfg ## save the cluster configuration to the specified file
 # pcs -f ra_cfg resource create ## create a resource in the specified file (instead of in the live configuration)
 # pcs -f ra_cfg resource show ## show the configuration in the file; check it before pushing
 # pcs cluster cib-push ra_cfg ## push the file into the live configuration

7.4 STONITH device operations

# stonith_admin -I ## list registered fence agents
# stonith_admin -M -a agent_name ## show a fence agent's metadata, e.g. stonith_admin -M -a fence_vmware_soap
# stonith_admin --reboot nodename ## test a STONITH device

7.5 View the cluster configuration

 # crm_verify -L -V ## check the configuration for errors
 # pcs property ## show cluster properties
 # pcs stonith ## show the STONITH configuration
 # pcs constraint ## show resource constraints
 # pcs config ## show the full cluster configuration
 # pcs cluster cib ## show the cluster configuration as XML

7.6 Manage the cluster

 # pcs status ## show the cluster status
 # pcs status cluster
 # pcs status corosync
 # pcs cluster stop [ha-node1] ## stop the cluster on one node
 # pcs cluster start --all ## start the cluster on all nodes
 # pcs cluster standby ha-node1 ## put a node into standby; pcs cluster unstandby ha-node1 brings it back
 # pcs cluster destroy [--all] ## destroy the cluster configuration; with --all, on every node (corosync.conf is removed)
 # pcs resource cleanup ClusterIP ## clear the status and failure count of the specified resource
 # pcs stonith cleanup vmware-fencing ## clear the status and failure count of a fence resource

References:
http://clusterlabs.org/doc/
http://www.linux-ha.org/doc/
https://access.redhat.com/documentation
