Kafka - Producer & Consumer Demo

Contents

1. Producer demo

2. Consumer demo
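
Both demos assume a topic named topic1 already exists on a local three-broker cluster (localhost:9092 through localhost:9094). If it does not, it can be created programmatically. The sketch below is an assumption about your setup, not part of the original demos: the class name CreateTopicDemo is mine, and the partition and replication counts are illustrative values sized for a three-broker cluster.

package com.mzs.KafkaDemo;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

import java.util.Collections;
import java.util.Properties;

public class CreateTopicDemo {

    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
        try (AdminClient admin = AdminClient.create(properties)) {
            // 3 partitions, replication factor 3: illustrative values for a 3-broker cluster
            NewTopic topic = new NewTopic("topic1", 3, (short) 3);
            admin.createTopics(Collections.singleton(topic)).all().get();
        }
    }
}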


1. Producer demo

package com.mzs.KafkaDemo;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class KafkaProducerDemo2 {

    public static void main(String[] args) throws InterruptedException {
        // Send asynchronously unless the first argument is "sync"
        boolean isAsync = args.length == 0 || !args[0].trim().equalsIgnoreCase("sync");
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092, localhost:9093, localhost:9094");
        properties.put("client.id", "KafkaProducer");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        String topic = "topic1";
        int messageNo = 1;
        while (true) {
            Thread.sleep(1000);
            String messageStr = "Message_" + messageNo;
            long startTime = System.currentTimeMillis();
            if (isAsync) {
                // Asynchronous send: DemoCallback2 fires once the broker acknowledges the record
                producer.send(new ProducerRecord<>(topic, messageNo + "", messageStr),
                        new DemoCallback2(startTime));
            } else {
                // Synchronous send: block on the returned Future until the broker responds
                try {
                    producer.send(new ProducerRecord<>(topic, messageNo + "", messageStr)).get();
                } catch (InterruptedException | ExecutionException e) {
                    e.printStackTrace();
                }
            }
            messageNo++;
        }
    }
}

class DemoCallback2 implements Callback {

    private long startTime;

    public DemoCallback2(long startTime) {
        this.startTime = startTime;
    }

    /**
     * Invoked when the producer receives the ack response from the broker.
     *
     * @param metadata  metadata for the record that was sent
     * @param exception the exception raised on timeout or interruption, or null on success
     */
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        long elapsedTime = System.currentTimeMillis() - startTime;
        if (metadata != null) {
            System.out.println("topic:" + metadata.topic() + ", offset:" + metadata.offset()
                    + ", partition:" + metadata.partition() + ", serializedKeySize:" + metadata.serializedKeySize()
                    + ", serializedValueSize:" + metadata.serializedValueSize());
            System.out.println("elapsedTime:" + elapsedTime + "ms");
        } else if (exception != null) {
            // The send failed: the record timed out or was interrupted
            exception.printStackTrace();
        }
    }
}
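
The demo uses only the minimum required producer settings. For anything beyond a local test, delivery guarantees are usually tightened explicitly. Below is a minimal sketch of the reliability-related settings, assuming the same local cluster as above; the values shown are illustrative, not recommendations:

Properties properties = new Properties();
properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
// Wait for all in-sync replicas to acknowledge each record before treating it as sent
properties.put("acks", "all");
// Retry transient broker errors instead of failing the send immediately
properties.put("retries", 3);
// Let the producer batch records for up to 5 ms, trading a little latency for throughput
properties.put("linger.ms", 5);
KafkaProducer<String, String> producer = new KafkaProducer<>(properties);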

2. Consumer demo

package com.mzs.KafkaDemo;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;

public class KafkaConsumerDemo2 {

    private static Map<TopicPartition, OffsetAndMetadata> map = new HashMap<>(); // latest processed offset per partition
    private static KafkaConsumer<String, String> consumer; // the Kafka consumer
    private static TopicPartition topicPartition; // the most recently processed topic partition
    private static long offset = 0; // the most recently committed offset
    private static int count = 0;   // record counter

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092, localhost:9093, localhost:9094");
        properties.put("group.id", "consumer-group1");
        properties.put("client.id", "KafkaConsumer1");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Collections.singletonList("topic1"), new DemoConsumerRebalanceListener());
        try {
            while (true) {
                // Poll for records, blocking for up to 100 ms
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    // Track the partition the record came from
                    topicPartition = new TopicPartition(record.topic(), record.partition());
                    // Stage the offset to commit: the position of the next record to read
                    map.put(topicPartition, new OffsetAndMetadata(record.offset() + 1, "metadata"));
                    // Commit once every ten records
                    if (count % 10 == 0) {
                        // Commit the staged offsets asynchronously, with a callback
                        consumer.commitAsync(map, new DemoOffsetCommitCallback());
                        // Remember the latest committed position
                        offset = map.get(topicPartition).offset();
                    }
                    count++;
                }
            }
        } finally {
            try {
                // One final synchronous commit of the tracked offsets before closing
                consumer.commitSync(map);
            } finally {
                consumer.close();
            }
        }
    }

    /**
     * Rebalance listener
     */
    static class DemoConsumerRebalanceListener implements ConsumerRebalanceListener {

        /**
         * Called after the consumer stops fetching and before the rebalance begins.
         * @param partitions the partitions being revoked
         */
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            System.out.println("Lost partitions in rebalance. Commit current offsets:" + map);
            consumer.commitSync(map);
        }

        /**
         * Called after the rebalance completes and before the consumer resumes fetching.
         * @param partitions the partitions newly assigned
         */
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // Resume from the last committed position; skip if nothing has been processed yet
            if (topicPartition != null) {
                consumer.seek(topicPartition, offset);
            }
        }
    }
}

/**
 * Callback invoked once the broker responds to an offset commit.
 */
class DemoOffsetCommitCallback implements OffsetCommitCallback {

    /**
     * @param offsets   the offsets that were successfully committed
     * @param exception any exception raised during the commit, or null on success
     */
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        if (exception != null) {
            exception.printStackTrace();
        }
        for (Map.Entry<TopicPartition, OffsetAndMetadata> mapEntries : offsets.entrySet()) {
            System.out.println(mapEntries.getKey() + "=" + mapEntries.getValue());
        }
    }
}
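
Note the division of labor in the demo: commitAsync keeps the poll loop fast, while commitSync is reserved for the moments where losing a commit would hurt (a rebalance, shutdown). Because the loop runs forever, though, the finally block is normally never reached. The standard way to exit cleanly is KafkaConsumer.wakeup(), which makes a blocking poll() throw WakeupException. Below is a minimal sketch of that pattern; the shutdown hook is my addition, not part of the original demo:

final Thread mainThread = Thread.currentThread();
// On JVM shutdown, abort the consumer's blocking poll() and wait for the loop to finish
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    consumer.wakeup();
    try {
        mainThread.join(); // let the poll loop commit offsets and close the consumer
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}));

try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        // ... process records and commit as in the demo ...
    }
} catch (org.apache.kafka.common.errors.WakeupException e) {
    // Expected on shutdown; fall through to commit and close
} finally {
    try {
        consumer.commitSync(map);
    } finally {
        consumer.close();
    }
}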

