Monitoring Kafka with kube-prometheus

2024-08-13

Helm charts

[root@harbor ~]# helm repo add bitnami https://charts.bitnami.com/bitnami
[root@harbor ~]# helm repo update
[root@harbor ~]# helm search repo zookeeper
NAME                        CHART VERSION   APP VERSION DESCRIPTION                                       
bitnami/zookeeper           13.4.10         3.9.2       Apache ZooKeeper provides a reliable, centraliz...

[root@harbor ~]# helm search repo kafka
NAME                                            CHART VERSION   APP VERSION DESCRIPTION                                       
bitnami/kafka                                   30.0.0          3.8.0       Apache Kafka is a distributed streaming platfor...

Deploying ZooKeeper

[root@harbor ~]# cd monitoring/
[root@harbor monitoring]# helm pull bitnami/zookeeper
[root@harbor monitoring]# tar xf zookeeper-13.4.10.tgz
[root@harbor monitoring]# cd zookeeper
[root@harbor zookeeper]# vim values.yaml 
# line 110
auth:
  client:
    ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5
    ##
    enabled: false # the default is fine

# line 232: add the time zone
extraEnvVars:
  - name: TZ
    value: "Asia/Shanghai"

# line 250
replicaCount: 3 # changed to 3 replicas

# 699行
persistence:
  ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir
  ##
  enabled: true
  ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica)
  ##
  existingClaim: ""
  ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  storageClass: "nfs-client" # changed
  ## @param persistence.accessModes PVC Access modes
  ##
  accessModes:
    - ReadWriteOnce
  ## @param persistence.size PVC Storage Request for ZooKeeper data volume
  ##
  size: 8Gi

# line 839 (under metrics:; note that metrics.enabled: true must also be set so the chart exposes the metrics port)
  serviceMonitor:
    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
    ##
    enabled: true # enable
    ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
    ##
    namespace: "kafka" # changed
    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    interval: "30s" # changed; the value needs a time unit
    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    scrapeTimeout: "20s" # changed; must not exceed the interval
[root@harbor zookeeper]# kubectl create namespace kafka
[root@harbor zookeeper]# helm install zookeeper ./ -f values.yaml -n kafka
[root@harbor zookeeper]# kubectl get pod -n kafka -owide
NAME          READY   STATUS    RESTARTS   AGE     IP              NODE         NOMINATED NODE   READINESS GATES
zookeeper-0   1/1     Running   0          8m51s   10.243.58.233   k8s-node02   <none>           <none>
zookeeper-1   1/1     Running   0          8m51s   10.243.85.208   k8s-node01   <none>           <none>
zookeeper-2   1/1     Running   0          8m51s   10.243.85.206   k8s-node01   <none>           <none>

[root@harbor zookeeper]# kubectl get svc -n kafka
NAME                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
zookeeper            ClusterIP   10.103.30.91   <none>        2181/TCP,2888/TCP,3888/TCP   9m22s
zookeeper-headless   ClusterIP   None           <none>        2181/TCP,2888/TCP,3888/TCP   9m22s

[root@harbor zookeeper]# kubectl get pvc -n kafka
NAME               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data-zookeeper-0   Bound    pvc-e0337902-d0c4-48c5-ae24-dced385d5b3e   8Gi        RWO            nfs-client     9m37s
data-zookeeper-1   Bound    pvc-a763837a-7fec-4de6-8526-60f1d5dbda75   8Gi        RWO            nfs-client     9m37s
data-zookeeper-2   Bound    pvc-f5f61925-09f4-4a97-9373-96da309695a9   8Gi        RWO            nfs-client     9m37s
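
The chart also renders a ServiceMonitor for the metrics endpoint (provided metrics was enabled as noted above); a quick sanity check — resource names can differ slightly between chart versions:

[root@harbor zookeeper]# kubectl get servicemonitor -n kafka
[root@harbor zookeeper]# kubectl get servicemonitor -n kafka -o yaml | grep -B2 -A3 interval
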
[root@k8s-master01 ~]# dig -t a zookeeper-headless.kafka.svc.cluster.local @10.96.0.10

; <<>> DiG 9.16.23-RH <<>> -t a zookeeper-headless.kafka.svc.cluster.local @10.96.0.10
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 23047
;; flags: qr aa rd; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: 52dd6293e6c864bb (echoed)
;; QUESTION SECTION:
;zookeeper-headless.kafka.svc.cluster.local. IN A

;; ANSWER SECTION:
zookeeper-headless.kafka.svc.cluster.local. 5 IN A 10.243.85.206
zookeeper-headless.kafka.svc.cluster.local. 5 IN A 10.243.58.233
zookeeper-headless.kafka.svc.cluster.local. 5 IN A 10.243.85.208

;; Query time: 1 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Sun Aug 11 12:15:30 EDT 2024
;; MSG SIZE  rcvd: 257
[root@harbor zookeeper]# kubectl exec -it zookeeper-0 -n kafka -- bash
I have no name!@zookeeper-0:/$ zkServer.sh status
/opt/bitnami/java/bin/java
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
I have no name!@zookeeper-0:/$ 
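
To check every node's role in one pass, a small loop over the pods works as well (pod names per the StatefulSet above; expect one leader and two followers):

[root@harbor zookeeper]# for i in 0 1 2; do kubectl exec zookeeper-$i -n kafka -- zkServer.sh status 2>&1 | grep Mode; done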

Deploying Kafka

https://artifacthub.io/packages/helm/bitnami/kafka

[root@harbor monitoring]# helm pull bitnami/kafka
[root@harbor monitoring]# tar xf kafka-30.0.0.tgz 
[root@harbor monitoring]# cd kafka
[root@harbor kafka]# vim values.yaml 
# line 162
listeners:
  client:
    containerPort: 9092
    protocol: PLAINTEXT # changed from SASL_PLAINTEXT (see the troubleshooting note at the end)
    name: CLIENT
    sslClientAuth: ""

# line 418: add the time zone
extraEnvVars: 
  - name: TZ
    value: "Asia/Shanghai"

# line 488
controller:
  ## @param controller.replicaCount Number of Kafka controller-eligible nodes
  ## Ignore this section if running in Zookeeper mode.
  ##
  replicaCount: 0 # no KRaft controller nodes, since this cluster runs in ZooKeeper mode

# line 872
  persistence:
    ## @param controller.persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected
    ##  
    enabled: true 
    ## @param controller.persistence.existingClaim A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    ## The value is evaluated as a template
    ##  
    existingClaim: ""
    ## @param controller.persistence.storageClass PVC Storage Class for Kafka data volume
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ## set, choosing the default provisioner.
    ##  
    storageClass: "nfs-client" # set explicitly
    ## @param controller.persistence.accessModes Persistent Volume Access Modes
    ##  
    accessModes:
      - ReadWriteOnce
    ## @param controller.persistence.size PVC Storage Request for Kafka data volume
    ##  
    size: 8Gi # changed

# line 949
broker:
  ## @param broker.replicaCount Number of Kafka broker-only nodes
  ##
  replicaCount: 3 # changed to 3

# line 2373
kraft:
  ## @param kraft.enabled Switch to enable or disable the KRaft mode for Kafka
  ##
  enabled: false # disabled; KRaft would build the cluster without ZooKeeper, but this setup uses an external ZooKeeper

# line 2397
zookeeper:
  ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart. Must be false if you use KRaft mode.
  ##
  enabled: false # disable the bundled ZooKeeper subchart
  replicaCount: 1

# line 2436
## External Zookeeper Configuration
##
externalZookeeper:
  ## @param externalZookeeper.servers List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. Must be empty if you use KRaft mode.
  ##
  servers: zookeeper # use the external ZooKeeper deployed above
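
A bare Service name works here because Kafka is installed into the same namespace; the fully qualified, port-qualified form should be equivalent (a sketch — verify against the chart's values schema):

externalZookeeper:
  servers: zookeeper.kafka.svc.cluster.local:2181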

High-availability settings
Set the default partition count, default replication factor, and log retention time; size these to the number of Kafka brokers.

[root@harbor kafka]# vim values.yaml
# line 539 — the extra broker settings are kept in a separate file here for readability; they ultimately need to live under extraConfigYaml in values.yaml
extraConfigYaml: extra_config.yaml

[root@harbor kafka]# cat extra_config.yaml
# allow topic deletion
deleteTopicEnable: true
# default log retention time: one week
logRetentionHours: 168
# default replication factor for auto-created topics
defaultReplicationFactor: 2
# replication factor for the __consumer_offsets topic
offsetsTopicReplicationFactor: 2
# replication factor for the transaction state topic
transactionStateLogReplicationFactor: 2
# transaction.state.log.min.isr: raised from the default 1 to 2
transactionStateLogMinIsr: 2
numPartitions: 3 # default number of partitions for new topics
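
For reference, these camelCase chart keys map to the standard server.properties names (worth confirming in the rendered broker ConfigMap):

delete.topic.enable=true
log.retention.hours=168
default.replication.factor=2
offsets.topic.replication.factor=2
transaction.state.log.replication.factor=2
transaction.state.log.min.isr=2
num.partitions=3
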
[root@harbor kafka]# helm install kafka ./ -f values.yaml  -n kafka
NAME: kafka
LAST DEPLOYED: Mon Aug 12 12:10:45 2024
NAMESPACE: kafka
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: kafka
CHART VERSION: 30.0.0
APP VERSION: 3.8.0

** Please be patient while the chart is being deployed **

Kafka can be accessed by consumers via port 9092 on the following DNS name from within your cluster:

    kafka.kafka.svc.cluster.local

Each Kafka broker can be accessed by producers via port 9092 on the following DNS name(s) from within your cluster:

    kafka-broker-0.kafka-broker-headless.kafka.svc.cluster.local:9092
    kafka-broker-1.kafka-broker-headless.kafka.svc.cluster.local:9092
    kafka-broker-2.kafka-broker-headless.kafka.svc.cluster.local:9092

To create a pod that you can use as a Kafka client run the following commands:

    kubectl run kafka-client --restart='Never' --image docker.io/bitnami/kafka:3.8.0-debian-12-r0 --namespace kafka --command -- sleep infinity
    kubectl exec --tty -i kafka-client --namespace kafka -- bash

    PRODUCER:
        kafka-console-producer.sh \
            --broker-list kafka-broker-0.kafka-broker-headless.kafka.svc.cluster.local:9092,kafka-broker-1.kafka-broker-headless.kafka.svc.cluster.local:9092,kafka-broker-2.kafka-broker-headless.kafka.svc.cluster.local:9092 \
            --topic test

    CONSUMER:
        kafka-console-consumer.sh \
            --bootstrap-server kafka.kafka.svc.cluster.local:9092 \
            --topic test \
            --from-beginning

WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
  - broker.resources
+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
[root@harbor kafka]# kubectl get pod -n kafka -owide
NAME             READY   STATUS    RESTARTS   AGE    IP              NODE         NOMINATED NODE   READINESS GATES
kafka-broker-0   1/1     Running   0          109s   10.243.58.237   k8s-node02   <none>           <none>
kafka-broker-1   1/1     Running   0          109s   10.243.85.205   k8s-node01   <none>           <none>
kafka-broker-2   1/1     Running   0          109s   10.243.58.238   k8s-node02   <none>           <none>
zookeeper-0      1/1     Running   0          24h    10.243.58.233   k8s-node02   <none>           <none>
zookeeper-1      1/1     Running   0          24h    10.243.85.208   k8s-node01   <none>           <none>
zookeeper-2      1/1     Running   0          24h    10.243.85.206   k8s-node01   <none>           <none>
[root@harbor kafka]# kubectl get svc -n kafka
NAME                    TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
kafka                   ClusterIP   10.102.129.24   <none>        9092/TCP                     118s
kafka-broker-headless   ClusterIP   None            <none>        9094/TCP,9092/TCP            118s
zookeeper               ClusterIP   10.103.30.91    <none>        2181/TCP,2888/TCP,3888/TCP   24h
zookeeper-headless      ClusterIP   None            <none>        2181/TCP,2888/TCP,3888/TCP   24h

Kafka topic operations

# exec into a broker container
[root@harbor kafka]# kubectl exec -it kafka-broker-0 -n kafka -- bash
Defaulted container "kafka" out of: kafka, kafka-init (init)

# create a topic with 3 partitions and a replication factor of 2
I have no name!@kafka-broker-0:/$ kafka-topics.sh --bootstrap-server kafka.kafka.svc.cluster.local:9092 --topic test001 --create --partitions 3 --replication-factor 2
Created topic test001.

# list topics
I have no name!@kafka-broker-0:/$ kafka-topics.sh --list --bootstrap-server kafka.kafka.svc.cluster.local:9092
test001

# describe the topic (Leader/Replicas/Isr list broker IDs; here the three brokers are 100, 101 and 102)
I have no name!@kafka-broker-0:/$ kafka-topics.sh  --bootstrap-server kafka.kafka.svc.cluster.local:9092 --describe --topic test001
[2024-08-13 00:14:01,591] WARN [AdminClient clientId=adminclient-1] The DescribeTopicPartitions API is not supported, using Metadata API to describe topics. (org.apache.kafka.clients.admin.KafkaAdminClient)
Topic: test001  TopicId: kE9Jhs_nSfChKZo81zEnPQ PartitionCount: 3   ReplicationFactor: 2    Configs: 
    Topic: test001  Partition: 0    Leader: 101 Replicas: 101,102   Isr: 101,102    Elr: N/A    LastKnownElr: N/A
    Topic: test001  Partition: 1    Leader: 100 Replicas: 100,101   Isr: 100,101    Elr: N/A    LastKnownElr: N/A
    Topic: test001  Partition: 2    Leader: 102 Replicas: 102,100   Isr: 102,100    Elr: N/A    LastKnownElr: N/A

Producer and consumer

# in one terminal, start a producer and send some messages
[root@harbor kafka]# kubectl exec -it kafka-broker-0 -n kafka -- bash
Defaulted container "kafka" out of: kafka, kafka-init (init)
I have no name!@kafka-broker-0:/$ kafka-console-producer.sh --broker-list kafka.kafka.svc.cluster.local:9092 --topic test001
>hello
>sunday

# in another terminal, read the messages with a consumer
[root@harbor ~]# kubectl exec -it kafka-broker-1 -n kafka -- /bin/bash
Defaulted container "kafka" out of: kafka, kafka-init (init)
I have no name!@kafka-broker-1:/$ kafka-console-consumer.sh --bootstrap-server kafka:9092 --from-beginning --topic test001
hello
sunday
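
To see which partitions the messages landed in, the stock offsets tool can dump per-partition end offsets (a sketch; shipped as kafka-get-offsets.sh in Kafka 3.x):

I have no name!@kafka-broker-1:/$ kafka-get-offsets.sh --bootstrap-server kafka:9092 --topic test001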

Altering and deleting topics

# alter the topic: grow to 4 partitions (the partition count can only be increased, never decreased)
I have no name!@kafka-broker-1:/$ kafka-topics.sh --alter  --bootstrap-server kafka:9092  --partitions 4 --topic test001
[2024-08-13 00:29:13,667] WARN [AdminClient clientId=adminclient-1] The DescribeTopicPartitions API is not supported, using Metadata API to describe topics. (org.apache.kafka.clients.admin.KafkaAdminClient)

# check the result
I have no name!@kafka-broker-1:/$ kafka-topics.sh --describe --bootstrap-server kafka:9092 --topic test001
[2024-08-13 00:29:34,963] WARN [AdminClient clientId=adminclient-1] The DescribeTopicPartitions API is not supported, using Metadata API to describe topics. (org.apache.kafka.clients.admin.KafkaAdminClient)
Topic: test001  TopicId: kE9Jhs_nSfChKZo81zEnPQ PartitionCount: 4   ReplicationFactor: 2    Configs: 
    Topic: test001  Partition: 0    Leader: 101 Replicas: 101,102   Isr: 101,102    Elr: N/A    LastKnownElr: N/A
    Topic: test001  Partition: 1    Leader: 100 Replicas: 100,101   Isr: 100,101    Elr: N/A    LastKnownElr: N/A
    Topic: test001  Partition: 2    Leader: 102 Replicas: 102,100   Isr: 102,100    Elr: N/A    LastKnownElr: N/A
    Topic: test001  Partition: 3    Leader: 101 Replicas: 101,102   Isr: 101,102    Elr: N/A    LastKnownElr: N/A

# delete the topic (delete.topic.enable is true per the extra config above)
I have no name!@kafka-broker-1:/$ kafka-topics.sh --delete --bootstrap-server kafka:9092 --topic test001
I have no name!@kafka-broker-1:/$ kafka-topics.sh --list --bootstrap-server kafka.kafka.svc.cluster.local:9092
__consumer_offsets

Deploying kafka-exporter

[root@harbor ~]# helm search repo kafka-exporter
NAME                                            CHART VERSION   APP VERSION DESCRIPTION                                       
prometheus-community/prometheus-kafka-exporter  2.10.0          v1.7.0      A Helm chart to export the metrics from Kafka i...
[root@harbor ~]# helm pull prometheus-community/prometheus-kafka-exporter
[root@harbor ~]# tar xf prometheus-kafka-exporter-2.10.0.tgz 
[root@harbor ~]# cd prometheus-kafka-exporter
[root@harbor prometheus-kafka-exporter]# vim values.yaml

# line 32
kafkaServer:
 #- kafka-server:9092
 - kafka.kafka.svc.cluster.local:9092 # the Kafka Service DNS name

# line 102
prometheus:
  serviceMonitor:
    enabled: true # enable
    namespace: monitoring
    apiVersion: "monitoring.coreos.com/v1"
    interval: "30s"

# line 152
annotations: 
  prometheus.io/scrape: "true"
  prometheus.io/path: "/metrics"
  prometheus.io/port: "9308"
[root@harbor prometheus-kafka-exporter]# helm install kafka-exporter ./ -f values.yaml -n monitoring
[root@harbor prometheus-kafka-exporter]# kubectl get pod -n monitoring -owide| grep kafka-exporter
kafka-exporter-prometheus-kafka-exporter-d7c46f76-pzpqz   1/1     Running   0          59s   10.243.85.209   k8s-node01     <none>   
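
Before waiting on Prometheus, the exporter endpoint can be hit directly with a one-off curl pod (the Service name here is assumed from the pod name above):

[root@harbor prometheus-kafka-exporter]# kubectl run curl-test --rm -it --restart=Never --image=curlimages/curl -n monitoring -- curl -s http://kafka-exporter-prometheus-kafka-exporter:9308/metrics | grep ^kafka_brokers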

Wait half a minute or so for the first scrape; the kafka-exporter target and its metrics then show up in Prometheus.

To collect richer data (such as consumer-group lag), the consumer must be given a group via --consumer-property, so that there is consumer-group state for the exporter to report.

# create a topic
[root@harbor ~]# kubectl exec -it kafka-broker-0 -n kafka -- bash
Defaulted container "kafka" out of: kafka, kafka-init (init)
I have no name!@kafka-broker-0:/$ kafka-topics.sh --bootstrap-server kafka.kafka.svc.cluster.local:9092 --topic test002 --create --partitions 3 --replication-factor 2
Created topic test002.
# start a producer and send some messages
I have no name!@kafka-broker-0:/$ kafka-console-producer.sh --broker-list kafka:9092 --topic test002
>hello
>sunday
>
# start a consumer that joins a named consumer group
[root@harbor kafka]# kubectl exec -it kafka-broker-2 -n kafka -- bash
Defaulted container "kafka" out of: kafka, kafka-init (init)
I have no name!@kafka-broker-2:/$ kafka-console-consumer.sh --bootstrap-server kafka:9092 --from-beginning --topic test002 --consumer-property group.id=test
hello
sunday
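
With a named group in place, the exporter can report that group's lag; the same numbers can be cross-checked from inside a broker:

I have no name!@kafka-broker-2:/$ kafka-consumer-groups.sh --bootstrap-server kafka:9092 --describe --group test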


Grafana

Import dashboard ID 7589 (Kafka Exporter Overview).
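
The dashboard's panels are built from the exporter's kafka_* series; two representative queries (metric names as published by danielqsj/kafka_exporter):

sum(kafka_topic_partitions) by (topic)
sum(kafka_consumergroup_lag) by (consumergroup, topic)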


Troubleshooting

kafka-topics.sh --bootstrap-server kafka.kafka.svc.cluster.local:9092 --topic test001 --create --partitions 3 --replication-factor 2
Error while executing topic command : Timed out waiting for a node assignment. Call: createTopics

In values.yaml, changing listeners -> client -> protocol from SASL_PLAINTEXT to PLAINTEXT lets the topic be created; with the default SASL_PLAINTEXT the client would need SASL credentials.

https://github.com/bitnami/charts/issues/19128
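
Alternatively, SASL can stay enabled on the client listener if the CLI tools are handed credentials; a sketch of a client.properties (the SCRAM-SHA-256 mechanism and the user1 account follow the chart's defaults, and the password lives in a chart-generated Secret — assumptions worth verifying against your release):

# client.properties
security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-256
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
    username="user1" password="<password from the chart-generated Secret>";

# then, for example:
kafka-topics.sh --bootstrap-server kafka:9092 --command-config client.properties --list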

