Kafka 2.13-3.4.0 Three-Node Deployment

# 0. Preparation

Prepare three machines:

| OS | Host | Spec | Role |
| --- | --- | --- | --- |
| CentOS 7.9 | 10.20.24.111 | 4-core CPU, 16 GB RAM, 100 GB disk | master |
| CentOS 7.9 | 10.20.24.112 | 4-core CPU, 16 GB RAM, 100 GB disk | slave |
| CentOS 7.9 | 10.20.24.113 | 4-core CPU, 16 GB RAM, 100 GB disk | slave |

Configure NTP on all three machines and disable SELinux beforehand. Ideally, clone them from the same VM template.

Disable the firewall, or open ports 2181, 2888, 3888, and 9092.
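
If you prefer to keep firewalld running instead of disabling it, a minimal sketch for opening only the required ports on CentOS 7.9 looks like this:

```bash
# 2181 = ZooKeeper client, 2888 = quorum, 3888 = leader election, 9092 = Kafka
firewall-cmd --permanent --add-port=2181/tcp
firewall-cmd --permanent --add-port=2888/tcp
firewall-cmd --permanent --add-port=3888/tcp
firewall-cmd --permanent --add-port=9092/tcp
firewall-cmd --reload
```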

# 1. Install JDK 11 (run on all three machines)

Install via yum:

```bash
# install JDK 11
yum install java-11-openjdk.x86_64
# install the devel package for the jps tool
yum install java-11-openjdk-devel.x86_64
```

Configure the environment variables by appending the following to /etc/profile:

```bash
export JAVA_HOME=/usr/lib/jvm/jre-11-openjdk
export PATH=$PATH:$JAVA_HOME/bin
```

Reload the profile so the changes take effect:

```bash
source /etc/profile
```
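
A quick check that the JDK and jps are now on the PATH (the exact version string will vary):

```bash
# both commands should resolve after sourcing /etc/profile
java -version
jps
```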

# 2. Install ZooKeeper 3.8.1 (run on all three machines)

Download apache-zookeeper-3.8.1-bin.tar.gz to /opt/kafka.
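
For reference, the 3.8.1 binary tarball should still be available from the Apache archive (verify the URL if the archive layout has changed):

```bash
cd /opt/kafka
# Apache archive location for the 3.8.1 binary release
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.8.1/apache-zookeeper-3.8.1-bin.tar.gz
```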

```bash
# extract
tar -zxvf apache-zookeeper-3.8.1-bin.tar.gz
# create the data and log directories referenced by zoo.cfg below
mkdir /opt/kafka/apache-zookeeper-3.8.1-bin/data
mkdir /opt/kafka/apache-zookeeper-3.8.1-bin/logs
```

Run the following on each of the three machines, each with its own id:

```bash
# myid must be unique: node 111 gets 1, node 112 gets 2, node 113 gets 3
echo "1" > /opt/kafka/apache-zookeeper-3.8.1-bin/data/myid
```

Edit /opt/kafka/apache-zookeeper-3.8.1-bin/conf/zoo.cfg:

```properties
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/kafka/apache-zookeeper-3.8.1-bin/data
dataLogDir=/opt/kafka/apache-zookeeper-3.8.1-bin/logs
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1

## Metrics Providers
#
# https://prometheus.io Metrics Exporter
#metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
#metricsProvider.httpHost=0.0.0.0
#metricsProvider.httpPort=7000
#metricsProvider.exportJvmInfo=true
4lw.commands.whitelist=*
server.1=10.20.24.111:2888:3888
server.2=10.20.24.112:2888:3888
server.3=10.20.24.113:2888:3888
```
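
zoo.cfg is identical on all three nodes (only myid differs), so one optional shortcut is to edit it once on 10.20.24.111 and copy it to the other nodes, for example:

```bash
# run from 10.20.24.111 after finishing the edits above
scp /opt/kafka/apache-zookeeper-3.8.1-bin/conf/zoo.cfg root@10.20.24.112:/opt/kafka/apache-zookeeper-3.8.1-bin/conf/
scp /opt/kafka/apache-zookeeper-3.8.1-bin/conf/zoo.cfg root@10.20.24.113:/opt/kafka/apache-zookeeper-3.8.1-bin/conf/
```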

Start ZooKeeper:

```bash
# all nodes need to be started at the same time
zkServer.sh start
# check the status
zkServer.sh status
[root@10-20-24-111 bin]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/kafka/apache-zookeeper-3.8.1-bin/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
```
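
Because `4lw.commands.whitelist=*` is enabled in zoo.cfg, you can also query every node remotely with the four-letter-word commands (assuming `nc` is installed); exactly one node should report leader and the other two follower:

```bash
for host in 10.20.24.111 10.20.24.112 10.20.24.113; do
  echo -n "$host: "
  # "srvr" prints server details, including the Mode line
  echo srvr | nc $host 2181 | grep Mode
done
```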

Add the environment variables to /etc/profile:

```bash
export ZOOKEEPER_HOME=/opt/kafka/apache-zookeeper-3.8.1-bin
export PATH=$PATH:$ZOOKEEPER_HOME/bin
```

# 3. Install Kafka

Download the Kafka package to /opt/kafka:

```bash
wget https://archive.apache.org/dist/kafka/3.4.0/kafka_2.13-3.4.0.tgz
tar zxvf kafka_2.13-3.4.0.tgz
# create the kafka-logs directory
mkdir /opt/kafka/kafka_2.13-3.4.0/kafka-logs
```

Edit config/server.properties:

```properties
listeners=PLAINTEXT://0.0.0.0:9092
# set each node's own IP here; this is the externally reachable address handed out to clients.
# When a client contacts Kafka, the broker returns this address; if it is not set, the
# listeners address is used instead.
advertised.listeners=PLAINTEXT://10.20.24.111:9092
# number of threads handling network requests
num.network.threads=32
# number of threads handling I/O
num.io.threads=32
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/opt/kafka/kafka_2.13-3.4.0/kafka-logs
# default number of partitions per topic; set to 3 (the node count) in a cluster
num.partitions=3
num.recovery.threads.per.data.dir=1
# replication factor of the internal __consumer_offsets topic; the default is 1, so set it to 3
# in a cluster, otherwise losing a single node makes the whole cluster unusable
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
# default replication factor for new topics; set to the node count in a cluster
default.replication.factor=3
# allow topics to be deleted directly from the Kafka command line
delete.topic.enable=true
# disable automatic topic creation
auto.create.topics.enable=false
buffer.memory=2684354560
batch.size=524288
max.request.size=5242880
linger.ms=50
log.retention.hours=168
log.retention.check.interval.ms=300000
zookeeper.connect=10.20.24.111:2181,10.20.24.112:2181,10.20.24.113:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
```
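
Only two settings differ between the three nodes: `advertised.listeners` (each node's own IP, as noted above) and `broker.id`, which the stock server.properties sets to 0 and which must be unique per broker. A sketch of the per-node changes for 10.20.24.112 (adjust for 113 accordingly; the id values 1/2/3 are just an example):

```bash
cd /opt/kafka/kafka_2.13-3.4.0/config
# advertise this node's own address to clients
sed -i 's#^advertised.listeners=.*#advertised.listeners=PLAINTEXT://10.20.24.112:9092#' server.properties
# give this broker a unique id (e.g. 1 on .111, 2 on .112, 3 on .113)
sed -i 's#^broker.id=.*#broker.id=2#' server.properties
```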

Start Kafka:

```bash
bin/kafka-server-start.sh -daemon config/server.properties
[root@10-20-24-111 ~]# jps
20418 QuorumPeerMain
22520 Kafka
26510 Jps
```
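
A minimal smoke test from any node, assuming all three brokers are up and the topic name `smoke-test` is unused:

```bash
cd /opt/kafka/kafka_2.13-3.4.0
# create a topic spread across all three brokers
bin/kafka-topics.sh --bootstrap-server 10.20.24.111:9092 --create \
  --topic smoke-test --partitions 3 --replication-factor 3
# every partition should list three replicas and a full ISR
bin/kafka-topics.sh --bootstrap-server 10.20.24.111:9092 --describe --topic smoke-test
# clean up (works because delete.topic.enable=true)
bin/kafka-topics.sh --bootstrap-server 10.20.24.111:9092 --delete --topic smoke-test
```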

Add the environment variables to /etc/profile:

```bash
export KAFKA_HOME=/opt/kafka/kafka_2.13-3.4.0
export PATH=$PATH:$KAFKA_HOME/bin
```