Ambari安装监控失败导致server启动不了,Ambari集群完全卸载重装(含脚本)

# ambari-server restart

报错内容:

DB configs consistency check found warnings. See /var/log/ambari-server/ambari-server-check-database.log for more details.
ERROR: Exiting with exit code -1. 
REASON: Ambari Server java process has stopped. Please check the logs for more information.

查看日志:tail -200 /var/log/ambari-server/ambari-server.log 

ERROR [main] AlertDefinitionFactory:136 - Could not read the alert definitions

原因:经定位,翻译中文/var/lib/ambari-server/resources/common-services/HIVE/0.12.0.2.0/alerts.json时,格式有误,修改为正确的json格式即可,可在线json验证


重装ambar-server流程:采用编写好的脚本完全卸载ambari集群

下载cleanAmbariNew.sh脚本内容如下:

#!/bin/bash
#!/usr/bin/expect
# uninstall ambari automatic

if [ $# -ne 1 ]; then
    echo "Usage:"
    echo "$0  hostsFile"
    exit 1
fi
 

#取得集群的所有主机名,这里需要注意:/etc/hosts配置的IP和主机名只能用一个空格分割
 
hostList=$1

#定义ambari组件所在目录对应的变量
yumReposDir=/etc/yum.repos.d/
alterNativesDir=/etc/alternatives/
hdpDir=/usr/hdp/
usrBinDir=/usr/bin/
etcDir=/etc/
varLogDir=/var/log/
varRunDir=/var/run/


#ping主机的次数
pingCount=5

#Log日志开头
logPre=UNINSTALL

#输入ambari主机名称
read -p "Please input your master hostname: " master
master=${master:-"master"}
ssh $master "ambari-server stop"
ssh $master "yum remove -y ambari-server"

for host in `cat  $hostList`
do
    yum clean all
	yum makecache
	
	
	#运行python脚本
	python /usr/lib/python2.6/site-packages/ambari_agent/HostCleanup.py --silent --skip=users
	
	#关闭agent
	ssh $host "ambari-agent stop"
	ssh $host "yum remove -y ambari-agent"

    echo "$logPre======>$host deleting... \n"
	
    #删除HDP相关的安装包
	ssh $host "yum remove -y  hive\*"
	ssh $host "yum remove -y  oozie\*"
	ssh $host "yum remove -y  pig\*"
	ssh $host "yum remove -y  zookeeper\*"
	ssh $host "yum remove -y  tez\*"
	ssh $host "yum remove -y  hbase\*"
	ssh $host "yum remove -y  ranger\*"
	ssh $host "yum remove -y  knox\*"
	ssh $host "yum remove -y  storm\*"
	ssh $host "yum remove -y  accumulo\*"
	
    ssh $host "yum remove -y  *sqoop*"
	ssh $host "yum remove -y  *spark*"
	ssh $host "yum remove -y *slider*"
    ssh $host "yum remove -y  *lzo.x86_64" 
    ssh $host "yum remove -y  *snappy-devel.x86_64"
    ssh $host "yum remove -y  *hcatalog.noarch"
    ssh $host "yum remove -y  *snappy.x86_64"
    ssh $host "yum remove -y  *extjs.noarch"
    ssh $host "yum remove -y  *hadoop*"
    ssh $host "yum remove -y *bigtop-jsvc.x86_64"
	ssh $host "yum remove -y hdp-select"
    
    #删除快捷方式
    ssh $host "cd $alterNativesDir"
    ssh $host "rm -rf hadoop-etc" 
    ssh $host "rm -rf zookeeper-conf"
    ssh $host "rm -rf hbase-conf" 
    ssh $host "rm -rf hadoop-log" 
    ssh $host "rm -rf hadoop-lib"
    ssh $host "rm -rf hadoop-default" 
    ssh $host "rm -rf oozie-conf"
    ssh $host "rm -rf hcatalog-conf" 
    ssh $host "rm -rf hive-conf"
    ssh $host "rm -rf hadoop-man"
    ssh $host "rm -rf sqoop-conf"
    ssh $host "rm -rf hadoop-confone"
	
	ssh $host "cd /usr/bin"
	ssh $host "rm -rf accumulo"
	ssh $host "rm -rf atlas-start"
	ssh $host "rm -rf atlas-stop"
	ssh $host "rm -rf beeline"
	ssh $host "rm -rf falcon"
	ssh $host "rm -rf flume-ng"
	ssh $host "rm -rf hbase"
	ssh $host "rm -rf hcat"
	ssh $host "rm -rf hdfs"
	ssh $host "rm -rf hive"
	ssh $host "rm -rf hiveserver2"
	ssh $host "rm -rf kafka"
	ssh $host "rm -rf mahout"
	ssh $host "rm -rf mapred"
	ssh $host "rm -rf oozie"
	ssh $host "rm -rf oozied.sh"
	ssh $host "rm -rf phoenix-psql"
	ssh $host "rm -rf phoenix-queryserver"
	ssh $host "rm -rf phoenix-sqlline"
	ssh $host "rm -rf phoenix-sqlline-thin"
	ssh $host "rm -rf pig"
	ssh $host "rm -rf python-wrap"
	ssh $host "rm -rf ranger-admin"
	ssh $host "rm -rf ranger-admin-start"
	ssh $host "rm -rf ranger-admin-stop"
	ssh $host "rm -rf ranger-kms"
	ssh $host "rm -rf ranger-usersync"
	ssh $host "rm -rf ranger-usersync-start"
	ssh $host "rm -rf ranger-usersync-stop"
	ssh $host "rm -rf slider"
	ssh $host "rm -rf sqoop"
	ssh $host "rm -rf sqoop-codegen"
	ssh $host "rm -rf sqoop-create-hive-table"
	ssh $host "rm -rf sqoop-eval"
	ssh $host "rm -rf sqoop-export"
	ssh $host "rm -rf sqoop-help"
	ssh $host "rm -rf sqoop-import"
	ssh $host "rm -rf sqoop-import-all-tables"
	ssh $host "rm -rf sqoop-job"
	ssh $host "rm -rf sqoop-list-databases"
	ssh $host "rm -rf sqoop-list-tables"
	ssh $host "rm -rf sqoop-merge"
	ssh $host "rm -rf sqoop-metastore"
	ssh $host "rm -rf sqoop-version"
	ssh $host "rm -rf storm"
	ssh $host "rm -rf storm-slider"
	ssh $host "rm -rf worker-lanucher"
	ssh $host "rm -rf yarn"
	ssh $host "rm -rf zookeeper-client"
	ssh $host "rm -rf zookeeper-server"
	ssh $host "rm -rf zookeeper-server-cleanup"

    #删除用户
	ssh $host "userdel -rf accumulo"
    ssh $host "userdel -rf nagios"
    ssh $host "userdel -rf hive"
    ssh $host "userdel -rf ambari-qa"
    ssh $host "userdel -rf hbase"
    ssh $host "userdel -rf oozie"
    ssh $host "userdel -rf hcat"
    ssh $host "userdel -rf mapred"
    ssh $host "userdel -rf hdfs"
    ssh $host "userdel -rf rrdcached"
    ssh $host "userdel -rf zookeeper"
    ssh $host "userdel -rf sqoop"
    ssh $host "userdel -rf puppet"
    ssh $host "userdel -rf flume"
    ssh $host "userdel -rf tez"
    ssh $host "userdel -rf yarn"
	
	ssh $host "userdel -rf knox"
    ssh $host "userdel -rf storm"
    ssh $host "userdel -rf spark"
    ssh $host "userdel -rf kafka"
    ssh $host "userdel -rf ams"
    ssh $host "userdel -rf falcon"
    ssh $host "userdel -rf kms"
    ssh $host "userdel -rf ranger"
	ssh $host "userdel -r zeppelin"

    #删除文件夹
    ssh $host "rm -rf /hadoop"
    ssh $host "rm -rf /etc/hadoop" 
    ssh $host "rm -rf /etc/hbase"
    ssh $host "rm -rf /etc/hcatalog" 
    ssh $host "rm -rf /etc/hive"
    ssh $host "rm -rf /etc/ganglia" 
    ssh $host "rm -rf /etc/nagios"
    ssh $host "rm -rf /etc/oozie"
    ssh $host "rm -rf /etc/sqoop"
    ssh $host "rm -rf /etc/zookeeper" 
	ssh $host "rm -rf /etc/hive2"
	ssh $host "rm -rf /etc/hive-hcatalog"
	ssh $host "rm -rf /etc/hive-webhcat"
	ssh $host "rm -rf /etc/knox"
	ssh $host "rm -rf /etc/livy"
	ssh $host "rm -rf /etc/livy2"
	ssh $host "rm -rf /etc/mahout"
	ssh $host "rm -rf /etc/phoenix"
	ssh $host "rm -rf /etc/pig"
	ssh $host "rm -rf /etc/smartsense-activity"
	ssh $host "rm -rf /etc/spark2"
	ssh $host "rm -rf /etc/storm*"
	ssh $host "rm -rf /etc/tez"
	ssh $host "rm -rf /etc/tez_hive2"
	ssh $host "rm -rf /etc/falcon"
	ssh $host "rm -rf /etc/slider"
	ssh $host "rm -rf /etc/ranger-admin"
	ssh $host "rm -rf /etc/ranger-usersync"
	ssh $host "rm -rf /etc/ranger-tagsync"
	ssh $host "rm -rf /etc/ams-hbase"
	ssh $host "rm -rf /var/lib/hdfs"
	ssh $host "rm -rf /var/lib/knox"
	ssh $host "rm -rf /var/lib/slider"
	ssh $host "rm -rf /var/lib/ranger"
	ssh $host "rm -rf /var/lib/hdfs"
	ssh $host "rm -rf /var/lib/hadoop*" 
    ssh $host "rm -rf /var/run/hadoop" 
    ssh $host "rm -rf /var/run/hbase"
    ssh $host "rm -rf /var/run/hive"
    ssh $host "rm -rf /var/run/ganglia" 
    ssh $host "rm -rf /var/run/nagios"
    ssh $host "rm -rf /var/run/oozie"
    ssh $host "rm -rf /var/run/zookeeper"
    ssh $host "rm -rf /var/run/falcon" 
    ssh $host "rm -rf /var/run/ambari-agent"
    ssh $host "rm -rf /var/run/ambari-infra-solr"
    ssh $host "rm -rf /var/run/ambari-metrics-grafana"	
	ssh $host "rm -rf /var/run/ambari-server"	
	ssh $host "rm -rf /var/run/hadoop-mapreduce"	
	ssh $host "rm -rf /var/run/hadoop-yarn"	
	ssh $host "rm -rf /var/run/hive2"	
	ssh $host "rm -rf /var/run/storm"	
	ssh $host "rm -rf /var/log/storm"	
	ssh $host "rm -rf /etc/flume"	
	ssh $host "rm -rf /var/run/flume"	
	ssh $host "rm -rf /var/run/hive2"	
	
	ssh $host "rm -rf /var/lib/flume"	
	ssh $host "rm -rf /etc/kafka"	
	ssh $host "rm -rf /var/run/kafka"	
	ssh $host "rm -rf /var/log/kafka"	
	ssh $host "rm -rf /kafka-logs"	 
	
	ssh $host "rm -rf /var/run/hive-hcatalog"
	ssh $host "rm -rf /var/run/knox"
	ssh $host "rm -rf /var/run/ranger"
	ssh $host "rm -rf /var/run/ranger_kms"
	ssh $host "rm -rf /var/run/smartsense-activity-analyzer"
	ssh $host "rm -rf /var/run/smartsense-activity-explorer"
	ssh $host "rm -rf /var/run/spark"
	ssh $host "rm -rf /var/run/spark2"
	ssh $host "rm -rf /var/run/sqoop"
	ssh $host "rm -rf /var/run/webhcat"
    ssh $host "rm -rf /var/log/hadoop"
    ssh $host "rm -rf /var/log/hbase"
    ssh $host "rm -rf /var/log/hive"
    ssh $host "rm -rf /var/log/nagios" 
    ssh $host "rm -rf /var/log/oozie"
    ssh $host "rm -rf /var/log/zookeeper" 
    ssh $host "rm -rf /var/log/falcon" 
    ssh $host "rm -rf /var/log/ambari-agent"
    ssh $host "rm -rf /var/log/ambari-infra-solr"
    ssh $host "rm -rf /var/log/ambari-metrics-grafana"	
	ssh $host "rm -rf /var/log/ambari-server"	
	ssh $host "rm -rf /var/log/hadoop-mapreduce"	
	ssh $host "rm -rf /var/log/hadoop-yarn"	
	ssh $host "rm -rf /var/log/hive2"	
	ssh $host "rm -rf /var/log/hive-hcatalog"
	ssh $host "rm -rf /var/log/knox"
	ssh $host "rm -rf /var/log/ranger"
	ssh $host "rm -rf /var/log/ranger_kms"
	ssh $host "rm -rf /var/log/smartsense-activity-analyzer"
	ssh $host "rm -rf /var/log/smartsense-activity-explorer"
	ssh $host "rm -rf /var/log/spark"
	ssh $host "rm -rf /var/log/spark2"
	ssh $host "rm -rf /var/log/sqoop"
	ssh $host "rm -rf /var/log/webhcat"
	ssh $host "rm -rf /var/log/flume"		
    ssh $host "rm -rf /usr/lib/hadoop-hdfs"
	ssh $host "rm -rf /usr/lib/hadoop-mapreduce"
	ssh $host "rm -rf /usr/lib/hadoop-yarn"
    ssh $host "rm -rf /usr/lib/hbase"
    ssh $host "rm -rf /usr/lib/hcatalog" 
    ssh $host "rm -rf /usr/lib/hive"
	ssh $host "rm -rf /usr/lib/hive2"
    ssh $host "rm -rf /usr/lib/oozie"
    ssh $host "rm -rf /usr/lib/sqoop"
    ssh $host "rm -rf /usr/lib/zookeeper" 
    ssh $host "rm -rf /var/lib/hive"
    ssh $host "rm -rf /var/lib/ganglia" 
    ssh $host "rm -rf /var/lib/oozie"
    ssh $host "rm -rf /var/lib/zookeeper" 
    ssh $host "rm -rf /usr/lib/ambari-*"
	ssh $host "rm -rf /usr/lib/ams-hbase"
    ssh $host "rm -rf /usr/lib/flume"
    ssh $host "rm -rf /usr/lib/storm"  
    ssh $host "rm -rf /var/tmp/oozie"
    ssh $host "rm -rf /tmp/hadoop-hdfs"	
	ssh $host "rm -rf /var/tmp/sqoop" 
    ssh $host "rm -rf /tmp/hive"
    ssh $host "rm -rf /tmp/nagios" 
    ssh $host "rm -rf /tmp/ambari-qa" 
    ssh $host "rm -rf /tmp/sqoop-ambari-qa"
    ssh $host "rm -rf /var/nagios"
    ssh $host "rm -rf /hadoop/oozie"
    ssh $host "rm -rf /hadoop/zookeeper"
    ssh $host "rm -rf /hadoop/mapred"
    ssh $host "rm -rf /hadoop/hdfs"
    ssh $host "rm -rf /tmp/hadoop-hive" 
    ssh $host "rm -rf /tmp/hadoop-nagios" 
    ssh $host "rm -rf /tmp/hadoop-hcat"
    ssh $host "rm -rf /tmp/hadoop-ambari-qa" 
    ssh $host "rm -rf /tmp/hsperfdata_hbase"
    ssh $host "rm -rf /tmp/hsperfdata_hive"
    ssh $host "rm -rf /tmp/hsperfdata_nagios"
    ssh $host "rm -rf /tmp/hsperfdata_oozie"
    ssh $host "rm -rf /tmp/hsperfdata_zookeeper"
    ssh $host "rm -rf /tmp/hsperfdata_mapred"
    ssh $host "rm -rf /tmp/hsperfdata_hdfs"
    ssh $host "rm -rf /tmp/hsperfdata_hcat"
    ssh $host "rm -rf /tmp/hsperfdata_ambari-qa"


    #删除ambari相关包
    ssh $host "yum remove -y ambari-*"
    ssh $host "yum remove -y postgresql"
	ssh $host "yum remove -y mysql mysql-server"
    ssh $host "rm -rf /var/lib/ambari*"
    ssh $host "rm -rf /var/log/ambari*"
    ssh $host "rm -rf /etc/ambari*"
	
	ssh $host "rm -rf /var/lib/mysql"
    ssh $host "rm -rf /var/lib/pgsql"
	ssh $host "rm -rf /usr/bin/hadoop"
    ssh $host "rm -rf /usr/hdp"
    ssh $host "rm -rf /usr/lib/python2.6/site-packages"
	    #1.)删除hdp.repo、HDP.repo、HDP-UTILS.repo和ambari.repo
    #ssh $host "cd $yumReposDir"
    #ssh $host "rm -rf $yumReposDir/hdp.repo"
    #ssh $host "rm -rf $yumReposDir/HDP*" 

    echo "$logPre======>$host is done! \n"
done



执行命令:

sh cleanAmbariNew.sh hostfile

其中,hostfile文件内容(机器地址):


可以reboot重启下,防止启用组件端口会被占用

注意:脚本中删除Postgres数据库(重装会造成数据丢失)




你可能感兴趣的:(Ambari)