Flink SQL 1.10: ingesting data from Kafka in real time and writing it to HBase

Flink SQL reads data from Kafka and writes it to HBase (the same approach works for MySQL via the JDBC connector included in the pom below).

1. Import the dependencies (pom.xml)



<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.lw</groupId>
    <artifactId>flin2k</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-scala_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-scala_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
        <!-- Kafka connector matching the 0.11 broker protocol -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka-0.11_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-common</artifactId>
            <version>1.10.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-scala-bridge_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
        <!-- CSV format used by the Kafka source table -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-csv</artifactId>
            <version>1.10.0</version>
        </dependency>
        <!-- JDBC connector and MySQL driver (only needed for the MySQL variant) -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-jdbc_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.47</version>
        </dependency>
        <!-- HBase connector and Hadoop client classes it needs -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-hbase_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.1</version>
        </dependency>
        <!-- Legacy planner (used by the code below) and blink planner -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_2.11</artifactId>
            <version>1.10.0</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <!-- Compiles the Scala sources -->
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.4.6</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <!-- Builds a fat jar with all dependencies -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>3.0.0</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
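Note that the pom pulls in both the legacy planner (flink-table-planner) and the blink planner (flink-table-planner-blink). The job in section 2 creates its table environment with useOldPlanner(); if you want to try the blink planner that is already on the classpath, only the EnvironmentSettings line changes. A minimal sketch (the object name BlinkEnvSketch is just for illustration, not part of the original post):

package com.lw.table

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.scala._

// Sketch only: the same job skeleton, but backed by the blink planner
// that the pom already puts on the classpath.
object BlinkEnvSketch {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    val settings: EnvironmentSettings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()   // instead of useOldPlanner() used in section 2
      .inStreamingMode()
      .build()
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, settings)
    // ...register the Kafka source and the HBase sink exactly as in section 2...
  }
}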

2. Code

package com.lw.table

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{DataTypes, EnvironmentSettings, Table}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.descriptors.{Csv, Kafka, Schema}
import org.apache.flink.types.Row
import org.apache.kafka.clients.consumer.ConsumerConfig


object printToHbase {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    val settings: EnvironmentSettings = EnvironmentSettings.newInstance().useOldPlanner().inStreamingMode().build()
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, settings)

    // Register the Kafka source table: space-delimited CSV records with the schema (name, age).
    tableEnv.connect(new Kafka().version("0.11").topic("test2")
        .property(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092")
        .property("zookeeper.connect", "hadoop102:2181")
        .property(ConsumerConfig.GROUP_ID_CONFIG, "ae"))
      .withFormat(new Csv().fieldDelimiter(' '))
      .withSchema(new Schema().field("name", DataTypes.STRING())
        .field("age", DataTypes.INT()))
      .createTemporaryTable("kafkaInputTable")

    // When defining the HBase sink table, the first column must be the rowkey;
    // it maps to the first column of the query you insert into the table.
    // Every other field is a ROW type named after an HBase column family.
    // With a single column family, the sink table has just two columns: the rowkey and the "info" ROW.
    val hbaseDDL: String =
      """
        |CREATE TABLE myHbaseTable (
        |rowkey STRING,
        |info ROW(name VARCHAR, age INT)  -- "info" must match the HBase column family name
        |) WITH (
        |    'connector.type' = 'hbase',
        |    'connector.version' = '1.4.3',                    -- currently only HBase 1.4.3 is supported
        |    'connector.table-name' = 'user',                  -- HBase table name
        |    'connector.zookeeper.quorum' = 'hadoop102:2181',  -- ZooKeeper quorum
        |    'connector.zookeeper.znode.parent' = '/hbase',    -- HBase znode in ZooKeeper
        |    'connector.write.buffer-flush.max-size' = '10mb', -- max flush size
        |    'connector.write.buffer-flush.max-rows' = '1000', -- max flush rows
        |    'connector.write.buffer-flush.interval' = '2s'    -- max flush interval
        |)
      """.stripMargin

    // Execute the DDL to register the HBase sink table.
    tableEnv.sqlUpdate(hbaseDDL)

    // Use name as the rowkey and wrap (name, age) into the "info" column family.
    val resultTable: Table = tableEnv.sqlQuery("select name as rowkey, ROW(name, age) as info from kafkaInputTable")

    // Print to the console for debugging, and write the same rows to HBase.
    resultTable.toAppendStream[Row].print()

    resultTable.insertInto("myHbaseTable")

    tableEnv.execute("kafka2hbase")
  }
}
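The Kafka source table expects space-delimited CSV records matching the schema (name STRING, age INT), i.e. lines like "zhangsan 20". Below is a minimal sketch of a test producer written against the plain Kafka client that flink-connector-kafka already brings in; the object name KafkaTestProducer and the sample record are illustrative, not part of the original job:

package com.lw.table

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

// Sketch only: pushes one space-delimited record ("name age") into the topic
// the job reads from, so the (name STRING, age INT) schema can be verified.
object KafkaTestProducer {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer].getName)

    val producer = new KafkaProducer[String, String](props)
    producer.send(new ProducerRecord[String, String]("test2", "zhangsan 20"))  // sample record, value only
    producer.flush()
    producer.close()
  }
}

With the job running, each record sent this way should show up both in the console output from toAppendStream.print() and as a new row in the HBase "user" table under the "info" column family.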

