Golang kafka build by docker-compose

  • 使用 docker-compose 拉起 kafka,docker-compose.yaml 内容如下:
# Single-node Kafka stack for local development: ZooKeeper + Kafka + kafka-manager UI.
version: '3.2'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    restart: always
  kafka:
    image: wurstmeister/kafka
    container_name: kafka
    links: 
      - zookeeper
    ports:
      - "9092:9092"
    environment:
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
      # NOTE(review): advertised as 127.0.0.1, so brokers are only reachable
      # from the docker host itself — confirm before using from other machines.
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092
      - KAFKA_LISTENERS=PLAINTEXT://:9092
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    restart: always
    depends_on: 
      - zookeeper
  # Web UI on http://localhost:9000 for inspecting topics/consumers.
  kafka-manager:
    image: sheepkiller/kafka-manager:latest
    ports:
      - "9000:9000"
    links:
      - zookeeper
      - kafka
    environment:
      ZK_HOSTS: zookeeper:2181
      APPLICATION_SECRET: letmein
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    depends_on: 
      - kafka
      - zookeeper

  • 启动 kafka 服务:在 docker-compose.yaml 文件所在的同级目录执行 docker-compose up(不想看日志的:docker-compose up -d)

  • 使用 go mod init my_producer 初始化一个项目, main.go

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "time"

    "github.com/Shopify/sarama"
)

// main runs the producer loop; startProduce never returns.
func main() {
    startProduce()
}

// Package-level producer state, shared by init() and produceMsg().
var (
    producer sarama.SyncProducer          // created in init(); program panics if creation fails
    brokers  = []string{"127.0.0.1:9092"} // broker list matching the docker-compose advertised listener
    topic    = "test_topic_1"             // destination topic for all produced messages
)

// init builds the synchronous producer before main runs, panicking on failure
// (the program is useless without a working producer).
//
// Fixes over the original: the no-op self-shadow `brokers := brokers` is
// removed, and the `else` after a terminating panic is flattened.
func init() {
    config := sarama.NewConfig()
    config.Producer.RequiredAcks = sarama.WaitForLocal // ack after the local broker writes
    config.Producer.Retry.Max = 5
    config.Producer.Return.Successes = true // mandatory for SyncProducer

    var err error
    producer, err = sarama.NewSyncProducer(brokers, config)
    if err != nil {
        fmt.Printf("init producer failed -> %v \n", err)
        panic(err)
    }
    fmt.Println("producer init success")
}

// produceMsg sends one string payload to the package-level topic through the
// shared SyncProducer, logging either the resulting partition/offset or the
// send error.
func produceMsg(msg string) {
    record := &sarama.ProducerMessage{
        Topic: topic,
        Value: sarama.StringEncoder(msg),
    }
    fmt.Printf("SendMsg -> %v\n", dumpString(record))

    partition, offset, err := producer.SendMessage(record)
    if err != nil {
        fmt.Printf("send msg error:%s \n", err)
        return
    }
    fmt.Printf("msg send success, message is stored in topic(%s)/partition(%d)/offset(%d)\n", topic, partition, offset)
}

// startProduce emits one JSON-ish payload every 2 seconds, forever.
//
// Fixes over the original: the extra 5-second time.Sleep inside the loop
// made every iteration take at least 5s against a 2s tick (ticks were
// delayed and dropped), and the single-case select was redundant. A Ticker
// consumed with a plain range loop expresses the intent directly.
func startProduce() {
    ticker := time.NewTicker(2 * time.Second)
    defer ticker.Stop() // correct form, though the loop never exits in practice
    for range ticker.C {
        // Unix()*1000 keeps the original payload semantics: a millisecond
        // field with second precision.
        t := time.Now().Unix() * 1000
        msg := fmt.Sprintf("{\"timestamp\":%d}--cass0", t)
        produceMsg(msg)
    }
}

//解析为json字符串
func dumpString(v interface{}) (str string) {

    bs, err := json.Marshal(v)
    b := bytes.Buffer{}
    if err != nil {
        b.WriteString("{err:\"json format error.")
        b.WriteString(err.Error())
        b.WriteString("\"}")
    } else {
        b.Write(bs)
    }
    str = b.String()
    return str
}

  • 运行生产者

  • 使用 go mod init my_consumer 初始化一个项目, main.go

package main

import (
    "fmt"
    "time"

    "github.com/Shopify/sarama"
    cluster "github.com/bsm/sarama-cluster"
)

// Package-level consumer state, shared by init() and main().
var (
    kafkaConsumer *cluster.Consumer          // created in init(); program panics if creation fails
    kafkaBrokers  = []string{"127.0.0.1:9092"} // broker list matching the docker-compose advertised listener
    kafkaTopic    = "test_topic_1"             // topic produced to by the companion producer program
    groupId       = "csdn_test_1"              // consumer-group id used for offset tracking
)

// init builds the cluster consumer before main runs, panicking on failure.
//
// Fixes over the original: config.Group.Return.Notifications was assigned
// twice (once is enough), and the magic number -2 is replaced by its named
// constant sarama.OffsetOldest (same value, self-documenting).
func init() {
    config := cluster.NewConfig()
    config.Consumer.Return.Errors = true
    config.Group.Return.Notifications = true
    config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
    // Start from the oldest available offset when the group has no
    // committed offset yet.
    config.Consumer.Offsets.Initial = sarama.OffsetOldest
    config.Consumer.Offsets.CommitInterval = 1 * time.Second

    var err error
    kafkaConsumer, err = cluster.NewConsumer(kafkaBrokers, groupId, []string{kafkaTopic}, config)
    if err != nil {
        panic(err.Error())
    }
    if kafkaConsumer == nil {
        panic(fmt.Sprintf("consumer is nil. kafka info -> {brokers:%v, topic: %v, group: %v}", kafkaBrokers, kafkaTopic, groupId))
    }
    fmt.Printf("kafka init success, consumer -> %v, topic -> %v, ", kafkaConsumer, kafkaTopic)
}

// main consumes messages, errors, and rebalance notifications from the
// cluster consumer until its Messages channel closes.
//
// Fix over the original: when Messages() was closed (ok == false) the loop
// kept spinning and printing the failure message forever; it now logs once
// and returns.
func main() {
    for {
        select {
        case msg, ok := <-kafkaConsumer.Messages():
            if !ok {
                fmt.Printf("kafka 监听服务失败")
                return
            }
            fmt.Printf("kafka msg: %s \n", msg.Value)
            // Mark the offset as processed; committed in the background per
            // the configured CommitInterval.
            kafkaConsumer.MarkOffset(msg, "")
        case err, ok := <-kafkaConsumer.Errors():
            if ok {
                fmt.Printf("consumer error: %v", err)
            }
        case ntf, ok := <-kafkaConsumer.Notifications():
            if ok {
                fmt.Printf("consumer notification: %v", ntf)
            }
        }
    }
}

  • 运行消费者
最后
  1. 这是一个基础的使用,需要得到最佳实践还需要根据自己的项目来配置,去kafka看看
  2. 借鉴1 借鉴2

你可能感兴趣的:(Golang kafka build by docker-compose)