用python抓取yarn和jstorm界面资源信息在grafana展示

背景:大数据系统分布式计算和存储的强大功能推动了科技的进步,而在大数据运维过程中,计算资源是时刻要关注的指标。为了能够快速了解集群资源使用率,写了个python脚本将数据写入MySQL,再通过grafana在界面进行展示;当然也可以把数据落到时序数据库:tdengine或opentsdb上。本文只用于交流和学习。

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#抓取yarn集群的队列资源使用信息和jstorm集群资源信息
#@author zjh
#@date:2022-05-13
#小数点保留1位
import json
import requests
import pymysql
import time
from bs4 import BeautifulSoup

#Resourcemanager资源统计
class Resourcinfo:
    """Collect per-queue resource usage from YARN ResourceManagers and
    persist one row per monitored queue into MySQL (table
    flink_cluster_resourceinfo, built by cnnMysql.create_sql)."""

    def getresource(self, url_dict):
        """Poll each cluster in url_dict ({cluster_name: resourcemanager
        base URL}) via the fair-scheduler REST API and insert the stats.

        Only the queues root.compute1 / root.compute2 are reported.
        """
        for clustername, urlinfo in url_dict.items():
            print("集群名称:", clustername)
            url = urlinfo + '/ws/v1/cluster/scheduler'
            req = requests.get(url)
            result_json = json.loads(req.text)
            # Direct children of the root queue of the fair scheduler.
            result = result_json["scheduler"]["schedulerInfo"]["rootQueue"]["childQueues"]
            for allqueue in result:
                queuename = allqueue["queueName"]
                allresource = allqueue["maxResources"]
                usedresource = allqueue["usedResources"]

                # Only the two compute queues are monitored.
                if queuename not in ("root.compute1", "root.compute2"):
                    continue
                # Guard: a queue with zero max resources would otherwise
                # raise ZeroDivisionError in the percentage computations.
                if allresource["memory"] == 0 or allresource["vCores"] == 0:
                    continue

                tmplist = []
                # 1. used memory in TB (the REST API reports MB)
                tmplist.append(usedresource["memory"] / (1024 * 1024))
                # 2. total memory in TB
                tmplist.append(allresource["memory"] / (1024 * 1024))
                # 3. total CU derived from memory (CU : memory = 1 : 4 GB)
                mem_cu = allresource["memory"] / (1024 * 4)
                tmplist.append(mem_cu)
                # 4. memory usage percentage
                memusedpercent = usedresource["memory"] / allresource["memory"] * 100
                tmplist.append(memusedpercent)
                # 5. used vcores
                tmplist.append(usedresource["vCores"])
                # 6. total CU counted as vcores
                vcore_cu = allresource["vCores"]
                tmplist.append(vcore_cu)
                # 7. min of the memory-derived CU and the vcore CU.
                # NOTE: the original code computed min(vCores, vCores),
                # which always returned vCores; per the surrounding
                # comments, the memory-derived CU was the intended operand.
                minallcu = min(mem_cu, vcore_cu)
                tmplist.append(minallcu)
                # 8. vcore usage percentage
                cuusedpercent = usedresource["vCores"] / allresource["vCores"] * 100
                tmplist.append(cuusedpercent)
                # 9. worst-case usage percentage of the two dimensions
                maxused = max(memusedpercent, cuusedpercent)
                tmplist.append(maxused)
                # 10./11. CU headroom left before reaching 70% / 80% usage
                tmplist.append((70 - maxused) * minallcu / 100)
                tmplist.append((80 - maxused) * minallcu / 100)

                # All entries are numeric: keep one decimal place.
                insertdata = [round(dt, 1) for dt in tmplist]
                # Record the collection time (string + unix timestamp).
                now = time.localtime()
                ctime = time.strftime('%Y-%m-%d %H:%M:%S', now)
                insertdata.append(ctime)
                insertdata.append(int(time.mktime(now)))
                # Prepend cluster name, node counts and queue name so the
                # final order matches the flink_cluster_resourceinfo
                # column list in cnnMysql.create_sql.
                insertdata.insert(0, clustername)
                insertdata.insert(1, queuename)
                totalnodes, activenodes = self.getnodestattus(urlinfo)
                insertdata.insert(1, totalnodes)
                insertdata.insert(2, activenodes)

                SQL = cnnMysql().create_sql(insertdata)
                print(SQL)
                try:
                    cnnMysql().insertdata2mysql(SQL)
                except pymysql.MySQLError:
                    # pymysql raises MySQLError subclasses, not ValueError
                    # as the original code assumed.
                    print('SQL不正确,请检查SQL!!!')

    # NOTE: name kept as-is ("stattus" typo) for backward compatibility.
    def getnodestattus(self, yarn_url):
        """Return (totalNodes, activeNodes) from the cluster metrics API.

        Other node states (lost/unhealthy/decommissioned/rebooted) are
        available in the same payload but are not currently reported.
        """
        url = yarn_url + '/ws/v1/cluster/metrics'
        req = requests.get(url)
        result = json.loads(req.text)["clusterMetrics"]
        return result["totalNodes"], result["activeNodes"]

#Jstorm集群资源统计
class JstormRS:
    """Scrape JStorm web-UI cluster summaries and persist them into MySQL
    (table jstorm_cluster_resourceinfon, built by cnnMysql.create_sql)."""

    def getJstormResource(self, url_dict):
        """Fetch the cluster page for each entry in url_dict
        ({cluster_name: web UI base URL}), parse the summary table and
        insert one row per cluster.
        """
        for clustername, urlinfo in url_dict.items():
            jstormUI_url = urlinfo + '/cluster?name=xx-jstorm'
            req = requests.get(url=jstormUI_url)
            req.encoding = "utf-8"
            soup = BeautifulSoup(req.text, features="html.parser")
            tables = soup.find_all(
                "table",
                class_="table table-bordered table-hover table-striped center")

            infolist = None
            for table in tables:
                # Flatten the table text into non-empty cell strings.
                cells = [c for c in table.text.strip().split('\n') if len(c) > 0]
                # Only the summary table contains the "Cluster Name" header.
                if 'Cluster Name' not in cells:
                    continue
                supervisors = cells[7]
                ports_usage = cells[8]          # formatted "used/total"
                used_ports, all_ports = ports_usage.split("/")[0], ports_usage.split("/")[1]
                topologies = cells[9]
                # Each worker port accounts for 4 GB of memory.
                all_mem = int(all_ports) * 4
                used_mem = int(used_ports) * 4
                used_percent = round(int(used_ports) / int(all_ports) * 100, 1)
                # Ports still available before hitting 70% / 80% usage
                # (computed from the rounded percentage, as before).
                s_reserve = (70 - used_percent) * int(all_ports) / 100
                e_reserve = (80 - used_percent) * int(all_ports) / 100
                infolist = [clustername, supervisors, all_ports, used_ports,
                            used_percent, all_mem, used_mem, topologies,
                            round(s_reserve, 1), round(e_reserve, 1)]

            # Original code raised NameError when the summary table was
            # missing; skip such clusters instead.
            if infolist is None:
                continue

            SQL = cnnMysql().create_sql(infolist)
            print(SQL)
            try:
                cnnMysql().insertdata2mysql(SQL)
            except pymysql.MySQLError:
                # pymysql raises MySQLError subclasses, not ValueError
                # as the original code assumed.
                print('SQL不正确,请检查SQL!!!')

#连接MySQL入数据
class cnnMysql:
    """MySQL persistence helpers for the scraped resource statistics."""

    def _connect(self):
        """Open a new connection with the shared settings (private helper)."""
        return pymysql.connect(host='192.168.0.1', user='test',
                               passwd='#Q@123456', db='test', charset='utf8')

    def create_sql(self, data):
        """Build the INSERT statement for one row.

        A row with fewer than 15 fields is a JStorm summary; otherwise it
        is a YARN/flink queue row. NOTE(review): values are interpolated
        directly into the SQL string — acceptable here because all inputs
        come from our own cluster APIs, but parameterized queries would be
        safer if inputs ever become untrusted.
        """
        if len(data) < 15:
            # 'resourceinfon' spelling kept as-is: it must match the
            # existing table name in MySQL.
            sql = ("insert into jstorm_cluster_resourceinfon(cluster_name,all_sv,all_workers,use_workers,used_percent,all_mem,used_mem,"
                   "topologies,seventy_percent_reserve,eighty_percent_reserve) "
                   f"values('{data[0]}',{data[1]},{data[2]},{data[3]},"
                   f"'{data[4]}%',{data[5]},{data[6]},{data[7]},{data[8]},{data[9]});")
        else:
            sql = ("insert into flink_cluster_resourceinfo(cluster_name,totalNodes,activeNodes,cluster_queuename,mem_used,"
                   "mem_total,cu_scale_mem_total,mem_used_percent,cu_used,total_cu,min_cu,cu_used_percent,"
                   "cu_max_percent,seventy_percent_reserve,eighty_percent_reserve) "
                   f"values('{data[0]}',{data[1]},{data[2]},'{data[3]}','{data[4]}T',"
                   f"'{data[5]}T',{data[6]},'{data[7]}%',{data[8]},{data[9]},{data[10]},"
                   f"'{data[11]}%','{data[12]}%',{data[13]},{data[14]});")
        return sql

    def insertdata2mysql(self, sql):
        """Execute one INSERT and commit; always closes the connection
        (the original leaked connections)."""
        connent = self._connect()
        try:
            with connent.cursor() as cursor:
                cursor.execute(sql)
            # Commit so the row is actually persisted.
            connent.commit()
        finally:
            connent.close()

    def checkdata_mysql(self, table):
        """Return the row count of *table*.

        The original ignored the parameter and always queried
        flink_cluster_resourceinfo; it now queries the requested table
        (table names cannot be bound as SQL parameters).
        """
        connent = self._connect()
        try:
            with connent.cursor() as cursor:
                return cursor.execute("select * from %s;" % table)
        finally:
            connent.close()

if __name__ == '__main__':
    # YARN clusters to poll: display name -> ResourceManager base URL.
    yarn_clusters = {
         '集群1': 'http://192.168.10.8:8088',
         '集群2': 'http://192.168.12.1:8088',
         '集群3': 'http://192.168.10.8:8088',
    }
    Resourcinfo().getresource(yarn_clusters)

    # JStorm clusters to poll: display name -> web UI base URL.
    jstorm_clusters = {
      'jstorm集群99': 'http://192.168.10.100:8080',
      'jstorm集群100': 'http://192.168.1.12:8080',
    }
    JstormRS().getJstormResource(jstorm_clusters)

获取Yarn jmx信息:
curl -i http://xxx:8088/jmx

Hadoop:service=ResourceManager,name=FSOpDurations
Hadoop:service=ResourceManager,name=JvmMetrics
Hadoop:service=ResourceManager,name=ClusterMetrics
Hadoop:service=ResourceManager,name=RpcActivityForPort8033
Hadoop:service=ResourceManager,name=MetricsSystem,sub=Stats
Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=user02
Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=A,q2=C
Hadoop:service=ResourceManager,name=QueueMetrics,q0=root
Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=A
Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=A,q2=B
Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=default
Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=user01

#QueueMetrics
running_0
running_60
running_300
running_1440
FairShareMB Fair share of memory in MB
FairShareVCores Fair share of CPU in vcores
SteadyFairShareMB Steady fair share of memory in MB
SteadyFairShareVCores Steady fair share of CPU in vcores
MinShareMB Minimum share of memory in MB
MinShareVCores Minimum share of CPU in vcores
MaxShareMB Maximum share of memory in MB
MaxShareVCores Maximum share of CPU in vcores
AppsSubmitted application提交个数
AppsRunning 正在运行的application个数
AppsPending 挂起的application个数
AppsCompleted 完成的application个数
AppsKilled 被杀死的application个数
AppsFailed 失败的application个数
AllocatedMB 已分配的内存量
AllocatedVCores 分配给正在运行的应用的虚拟core
AllocatedContainers 已分配容器数
AggregateContainersAllocated Aggregate # of allocated containers
AggregateContainersReleased Aggregate # of released containers
AvailableMB 可用内存量
AvailableVCores 可用虚拟Core数
PendingMB Pending memory allocation in MB
PendingVCores Pending CPU allocation in virtual cores
PendingContainers # of pending containers
ReservedMB 预留内存量
ReservedVCores 预留虚拟Core数
ReservedContainers 预留容器数
ActiveUsers 激活用户数
ActiveApplications 激活的应用数
AppAttemptFirstContainerAllocationDelayNumOps 为应用分配容器延迟数
AppAttemptFirstContainerAllocationDelayAvgTime 为应用分配容器延迟时间
#FSOpDurations
ContinuousSchedulingRunNumOps Duration for a continuous scheduling run
ContinuousSchedulingRunAvgTime
ContinuousSchedulingRunStdevTime
ContinuousSchedulingRunIMinTime
ContinuousSchedulingRunIMaxTime
ContinuousSchedulingRunMinTime
ContinuousSchedulingRunMaxTime
NodeUpdateCallNumOps Duration to handle a node update
NodeUpdateCallAvgTime
NodeUpdateCallStdevTime
NodeUpdateCallIMinTime
NodeUpdateCallIMaxTime
NodeUpdateCallMinTime
NodeUpdateCallMaxTime
UpdateThreadRunNumOps Duration for a update thread run
UpdateThreadRunAvgTime
UpdateThreadRunStdevTime
UpdateThreadRunIMinTime
UpdateThreadRunIMaxTime
UpdateThreadRunMinTime
UpdateThreadRunMaxTime
UpdateCallNumOps Duration for an update call
UpdateCallAvgTime
UpdateCallStdevTime
UpdateCallIMinTime
UpdateCallIMaxTime
UpdateCallMinTime
UpdateCallMaxTime
PreemptCallNumOps Duration for a preempt call
PreemptCallAvgTime
PreemptCallStdevTime
PreemptCallIMinTime
PreemptCallIMaxTime
PreemptCallMinTime
PreemptCallMaxTime
#ClusterMetrics
NumActiveNMs active NMs
NumDecommissionedNMs decommissioned NMs
NumLostNMs lost NMs
NumUnhealthyNMs unhealthy NMs
NumRebootedNMs Rebooted NMs
AMLaunchDelayNumOps AM container launch delay
AMLaunchDelayAvgTime
AMRegisterDelayNumOps AM register delay
AMRegisterDelayAvgTime
#RpcActivityForPort8033
ReceivedBytes Total number of received bytes
SentBytes Total number of sent bytes
RpcQueueTimeNumOps Total number of RPC calls
RpcQueueTimeAvgTime Average queue time in milliseconds
RpcProcessingTimeNumOps Total number of RPC calls (same to RpcQueueTimeNumOps)
RpcProcessingTimeAvgTime Average Processing time in milliseconds
RpcAuthenticationFailures Total number of authentication failures
RpcAuthenticationSuccesses Total number of authentication successes
RpcAuthorizationFailures Total number of authorization failures
RpcAuthorizationSuccesses Total number of authorization successes
RpcClientBackoff
NumOpenConnections NumOpenConnections
CallQueueLength Current length of the call queue

你可能感兴趣的:(大数据,Python,python,grafana,开发语言)