Kafka Cluster Deployment

I. Environment Preparation

Hostname   IP                Services
kafka1 192.168.171.131 kafka+zookeeper
kafka2 192.168.171.134 kafka+zookeeper
kafka3 192.168.171.135 kafka+zookeeper
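The original steps never show a JDK being installed, but ZooKeeper 3.4.9 and Kafka 2.2.1 both need Java to run. A minimal sketch, assuming CentOS-style hosts with yum available (run on all three nodes):

#Install a JDK on every node (not shown in the original steps)
[root@kafka1 ~]# yum install -y java-1.8.0-openjdk
[root@kafka1 ~]# java -version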

II. Deploying the ZooKeeper Service

Source packages (extraction code: 6q58)
1. Configuration on kafka1

#Deploy ZooKeeper
[root@kafka1 ~]# tar zxf zookeeper-3.4.9.tar.gz 
[root@kafka1 ~]# mv zookeeper-3.4.9 /usr/local/zookeeper
[root@kafka1 ~]# cd /usr/local/zookeeper/conf/
[root@kafka1 conf]# cp zoo_sample.cfg zoo.cfg
[root@kafka1 conf]# sed -i 's|dataDir=/tmp/zookeeper|dataDir=/usr/local/zookeeper/data|g' zoo.cfg
#Declare the cluster members; ports 2888 and 3888 are used for internal cluster communication
[root@kafka1 conf]# echo "server.1=192.168.171.131:2888:3888" >> zoo.cfg
[root@kafka1 conf]# echo "server.2=192.168.171.134:2888:3888" >> zoo.cfg
[root@kafka1 conf]# echo "server.3=192.168.171.135:2888:3888" >> zoo.cfg
#The resulting zoo.cfg contains:
tickTime=2000              #heartbeat interval between nodes, in milliseconds
initLimit=10               #maximum number of ticks a follower may take to connect and sync with the leader
syncLimit=5                #maximum number of ticks a follower may fall out of sync before it is dropped
dataDir=/usr/local/zookeeper/data           #directory for ZooKeeper data (snapshots and the myid file)
clientPort=2181            #port that clients connect to
#Cluster member declarations
server.1=192.168.171.131:2888:3888
server.2=192.168.171.134:2888:3888
server.3=192.168.171.135:2888:3888
#Create the required data directory and set this node's ID
[root@kafka1 conf]# mkdir  /usr/local/zookeeper/data
[root@kafka1 conf]# echo 1 > /usr/local/zookeeper/data/myid
#Copy the configured ZooKeeper directory to the other cluster nodes
[root@kafka1 conf]# scp -r /usr/local/zookeeper/ root@192.168.171.134:/usr/local/
[root@kafka1 conf]# scp -r /usr/local/zookeeper/ root@192.168.171.135:/usr/local/
#Start the ZooKeeper service
[root@kafka1 conf]# /usr/local/zookeeper/bin/zkServer.sh start 
[root@kafka1 conf]# netstat -antp | egrep '2181|2888|3888'
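As an extra sanity check that is not part of the original steps, ZooKeeper 3.4.x answers the four-letter-word commands on the client port, so a quick probe with nc (assuming the nc utility is installed) should print the server's statistics:

#Optional health check using a four-letter command
[root@kafka1 conf]# echo stat | nc 192.168.171.131 2181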

2. Configuration on kafka2

#Set the node ID to 2
[root@kafka2 ~]# echo 2 > /usr/local/zookeeper/data/myid 
[root@kafka2 ~]# /usr/local/zookeeper/bin/zkServer.sh start 

3. Configuration on kafka3

#Set the node ID to 3
[root@kafka3 ~]# echo 3 > /usr/local/zookeeper/data/myid 
[root@kafka3 ~]# /usr/local/zookeeper/bin/zkServer.sh start 

4. Check the role of each node in the ZooKeeper cluster

#kafka1
[root@kafka1 conf]# /usr/local/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: follower                  #this node is a follower
#kafka2
[root@kafka2 ~]# /usr/local/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: leader                   #this node is the leader
#kafka3
[root@kafka3 ~]# /usr/local/zookeeper/bin/zkServer.sh status 
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: follower                #this node is a follower
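The cluster can also be exercised end to end with the bundled zkCli.sh client. A minimal sketch; on a fresh cluster the root is expected to contain only the zookeeper znode:

#Optional: connect with the ZooKeeper CLI and list the root znodes
[root@kafka1 conf]# /usr/local/zookeeper/bin/zkCli.sh -server 192.168.171.131:2181
[zk: 192.168.171.131:2181(CONNECTED) 0] ls /
[zookeeper]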

III. Deploying the Kafka Cluster

1. Configuration on kafka1

[root@kafka1 ~]# wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.11-2.2.1.tgz
[root@kafka1 ~]# tar zxf kafka_2.11-2.2.1.tgz 
[root@kafka1 ~]# mv kafka_2.11-2.2.1/ /usr/local/kafka
#Edit the configuration file
[root@kafka1 ~]# cd /usr/local/kafka/config/
[root@kafka1 config]# sed -i 's/broker.id=0/broker.id=1/g' server.properties
[root@kafka1 config]# sed -i 's|#listeners=PLAINTEXT://:9092|listeners=PLAINTEXT://192.168.171.131:9092|g' server.properties
[root@kafka1 config]# sed -i 's|#advertised.listeners=PLAINTEXT://your.host.name:9092|advertised.listeners=PLAINTEXT://192.168.171.131:9092|g' server.properties
[root@kafka1 config]# sed -i 's|log.dirs=/tmp/kafka-logs|log.dirs=/usr/local/zookeeper/data|g' server.properties
[root@kafka1 config]# sed -i 's/zookeeper.connect=localhost:2181/zookeeper.connect=192.168.171.131:2181,192.168.171.134:2181,192.168.171.135:2181/g' server.properties
[root@kafka1 config]# sed -i 's/zookeeper.connection.timeout.ms=6000/zookeeper.connection.timeout.ms=600000/g' server.properties
[root@kafka1 config]# egrep -v '^$|^#' server.properties
broker.id=1                 #broker ID: 1 here, 2 and 3 on the other nodes
listeners=PLAINTEXT://192.168.171.131:9092            #listener address; use each node's own IP
advertised.listeners=PLAINTEXT://192.168.171.131:9092            #address advertised to clients and other brokers; use each node's own IP
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/usr/local/zookeeper/data
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=192.168.171.131:2181,192.168.171.134:2181,192.168.171.135:2181                      #connection string listing all ZooKeeper nodes
zookeeper.connection.timeout.ms=600000         #raised timeout in milliseconds to avoid ZooKeeper connection timeouts
group.initial.rebalance.delay.ms=0
[root@kafka1 config]# scp -r /usr/local/kafka/ root@192.168.171.134:/usr/local/
[root@kafka1 config]# scp -r /usr/local/kafka/ root@192.168.171.135:/usr/local/
#Start Kafka
[root@kafka1 config]# cd ../bin/
[root@kafka1 bin]# ./kafka-server-start.sh ../config/server.properties &
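Starting the broker with a trailing & ties it to the current shell. As an alternative (not what the original uses), kafka-server-start.sh accepts a -daemon flag to detach the process; the log path below assumes the default log4j settings:

#Alternative: start Kafka detached and follow its server log
[root@kafka1 bin]# ./kafka-server-start.sh -daemon ../config/server.properties
[root@kafka1 bin]# tail -f /usr/local/kafka/logs/server.log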

2. Configuration on kafka2:

#Change the settings that conflict with kafka1
[root@kafka2 ~]# cd /usr/local/kafka/
[root@kafka2 kafka]# sed -i 's/192.168.171.131/192.168.171.134/g' config/server.properties
[root@kafka2 kafka]# sed -i 's/broker.id=1/broker.id=2/g' config/server.properties
#Start the Kafka service
[root@kafka2 kafka]# cd bin/
[root@kafka2 bin]# ./kafka-server-start.sh ../config/server.properties &
[root@kafka2 bin]# netstat -anupt | grep 9092

3. Configuration on kafka3:

[root@kafka3 ~]# cd /usr/local/kafka/
[root@kafka3 kafka]# sed -i 's/192.168.171.131/192.168.171.135/g' config/server.properties
[root@kafka3 kafka]# sed -i 's/broker.id=1/broker.id=3/g' config/server.properties
#Start the Kafka service
[root@kafka3 kafka]# cd bin/
[root@kafka3 bin]# ./kafka-server-start.sh ../config/server.properties &
[root@kafka3 bin]# netstat -anupt | grep 9092
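Once all three brokers are up, their registration in ZooKeeper can be confirmed; this is an extra check not in the original steps, and the output assumes all three brokers registered successfully:

#Optional: confirm that the three brokers registered with ZooKeeper
[root@kafka1 bin]# /usr/local/zookeeper/bin/zkCli.sh -server 192.168.171.131:2181
[zk: 192.168.171.131:2181(CONNECTED) 0] ls /brokers/ids
[1, 2, 3]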

4. Test publishing and consuming messages

#Create a topic named my-replicated-topic
[root@kafka1 bin]# ./kafka-topics.sh --create --bootstrap-server 192.168.171.131:9092 --replication-factor 3 --partitions 1 --topic my-replicated-topic
#Check the topic's status and its leader
[root@kafka1 bin]# ./kafka-topics.sh --describe --bootstrap-server 192.168.171.131:9092 --topic my-replicated-topic
Topic:my-replicated-topic   PartitionCount:1    ReplicationFactor:3 Configs:segment.bytes=1073741824
    Topic: my-replicated-topic  Partition: 0    Leader: 2   Replicas: 2,3,1 Isr: 2,3,1
#The output shows 1 partition, a replication factor of 3, and a segment size of 1073741824 bytes
#for the topic "my-replicated-topic"; the broker with ID 2 is the leader
#Publish a few messages from kafka1
[root@kafka1 bin]# ./kafka-console-producer.sh --broker-list 192.168.171.131:9092 --topic my-replicated-topic
>abc
>123

#Consume the messages from another node
[root@kafka2 bin]# ./kafka-console-consumer.sh --bootstrap-server 192.168.171.134:9092 --from-beginning --topic my-replicated-topic
abc
123
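Because the console consumer joins a consumer group automatically, its offsets can be inspected with the bundled kafka-consumer-groups.sh tool. The group name console-consumer-XXXXX below is a placeholder; the real name is generated and will differ:

#Optional: list consumer groups and describe the console consumer's group
[root@kafka2 bin]# ./kafka-consumer-groups.sh --bootstrap-server 192.168.171.134:9092 --list
[root@kafka2 bin]# ./kafka-consumer-groups.sh --bootstrap-server 192.168.171.134:9092 --describe --group console-consumer-XXXXX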

5. Simulate a leader failure and check the topic's status and new leader

#The current leader is the broker with ID 2
[root@kafka1 bin]# ./kafka-topics.sh --describe --bootstrap-server 192.168.171.131:9092 --topic my-replicated-topic
Topic:my-replicated-topic   PartitionCount:1    ReplicationFactor:3 Configs:segment.bytes=1073741824
    Topic: my-replicated-topic  Partition: 0    Leader: 2   Replicas: 2,3,1 Isr: 2,3,1
#Stop the Kafka service on kafka2
[root@kafka2 bin]# ./kafka-server-stop.sh 
#Check the leader again: it has switched to the broker with ID 3
[root@kafka1 bin]# ./kafka-topics.sh --describe --bootstrap-server 192.168.171.131:9092 --topic my-replicated-topic
Topic:my-replicated-topic   PartitionCount:1    ReplicationFactor:3 Configs:segment.bytes=1073741824
    Topic: my-replicated-topic  Partition: 0    Leader: 3   Replicas: 2,3,1 Isr: 3,1
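To finish the failover test (not shown in the original), kafka2 can be brought back and the topic described again; once the broker catches up, the Isr list is expected to include ID 2 again:

#Restart the broker on kafka2 and re-check the ISR
[root@kafka2 bin]# ./kafka-server-start.sh -daemon ../config/server.properties
[root@kafka1 bin]# ./kafka-topics.sh --describe --bootstrap-server 192.168.171.131:9092 --topic my-replicated-topic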
