0% found this document useful (0 votes)
2 views

lab07

Uploaded by

nhoangvu2306
Copyright
© All Rights Reserved
Available Formats
Download as PDF, TXT or read online on Scribd
0% found this document useful (0 votes)
2 views

lab07

Uploaded by

nhoangvu2306
Copyright
© All Rights Reserved
Available Formats
Download as PDF, TXT or read online on Scribd
You are on page 1/ 4

# Deploy Kafka and ZooKeeper on containers

# Install docker-compose. The release asset name is docker-compose-linux-x86_64
# (it was truncated by line-wrapping in the original notes), and writing into
# /usr/local/bin requires root, hence sudo on the curl as well.
sudo curl -SL https://github.com/docker/compose/releases/download/v2.30.1/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose

# Fetch the lab repository and bump the image tag from 5.2.1 to 7.7.1
git clone https://github.com/tuan-devops/content-kafka-deep-dive.git
cd content-kafka-deep-dive
sed -i 's/5.2.1/7.7.1/g' docker-compose.yml
8
# Create Dockerfile: custom ZooKeeper image whose client port is taken from
# the ZOO_PORT environment variable instead of the hard-coded 2181.
FROM zookeeper:3.4.14
# Replace the literal port 2181 in the entrypoint with the text "$ZOO_PORT".
# The single quotes are deliberate: the variable must land verbatim in the
# script so it expands at container runtime, not at image build time.
RUN sed -i 's/2181/$ZOO_PORT/g' /docker-entrypoint.sh
12
# Update docker-compose.yml — per-service overrides (fragment: merge these
# entries into the existing services section of the lab's compose file).

  zookeep1:
    volumes:
      - ~/data/zookeep1/data:/data
      - ~/data/zookeep1/datalog:/datalog

  zookeep2:
    build: .
    hostname: zookeep2
    volumes:
      - ~/data/zookeep2/data:/data
      - ~/data/zookeep2/datalog:/datalog

  zookeep3:
    build: .
    hostname: zookeep3

  kafka1:
    volumes:
      - ~/data/kafka1/data:/var/lib/kafka/data

  kafka2:
    volumes:
      - ~/data/kafka2/data:/var/lib/kafka/data

  kafka3:
    volumes:
      - ~/data/kafka3/data:/var/lib/kafka/data
43
# Create the host-side data folder structure the compose volumes mount
mkdir -p ~/data/kafka{1,2,3}/data
mkdir -p ~/data/zookeep{1,2,3}/data
mkdir -p ~/data/zookeep{1,2,3}/datalog
# Fix: the directories created above live under ~/data, not ~/kafka/data;
# chown on another user's behalf needs root.
sudo chown -R tuanl:tuanl ~/data
# NOTE(review): 777 is convenient for a lab but world-writable — consider 755.
chmod -R 777 ~/data
50
51 data/
52 ├── kafka1
53 │ └── data
54 ├── kafka2
55 │ └── data
56 ├── kafka3
57 │ └── data
58 ├── zookeep1
59 │ ├── data
60 │ └── datalog
61 ├── zookeep2
62 │ ├── data
63 │ └── datalog
64 └── zookeep3
65 ├── data
66 └── datalog
67
# Deploy the cluster
docker-compose up -d

# Install Java (-y so the transcript can be pasted without an interactive prompt)
sudo apt update && sudo apt install -y openjdk-8-jdk

# Install the Kafka CLI tools on the host
cd ~/
wget https://archive.apache.org/dist/kafka/2.2.0/kafka_2.12-2.2.0.tgz
tar -xvf kafka_2.12-2.2.0.tgz
mv kafka_2.12-2.2.0 kafka
nano .bashrc
# Add this line ($HOME instead of a hard-coded /home/cloud_user, so it works
# for whichever user runs the lab)
export PATH=$PATH:$HOME/kafka/bin
. .bashrc
83
### Example 1

# Topics: list and describe everything known to the ZooKeeper ensemble
kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --list
kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --describe

# Topic 1: single replica; grow partitions 3 -> 6
kafka-topics.sh --zookeeper localhost:2181 --create --topic test1 --replication-factor 1 --partitions 3
kafka-topics.sh --zookeeper localhost:2181 --create --topic test1 --replication-factor 1 --partitions 3 --if-not-exists
kafka-topics.sh --zookeeper localhost:2181 --alter --topic test1 --partitions 6
kafka-topics.sh --zookeeper localhost:2181 --describe --topic test1

# Topic 2: two replicas; grow partitions 3 -> 9
kafka-topics.sh --zookeeper localhost:2181,localhost:2182 --create --topic test2 --replication-factor 2 --partitions 3
kafka-topics.sh --zookeeper localhost:2181,localhost:2182 --alter --topic test2 --partitions 9
kafka-topics.sh --zookeeper localhost:2181,localhost:2182 --describe --topic test2

# Topic 3: three replicas; grow partitions 3 -> 12
kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --create --topic test3 --replication-factor 3 --partitions 3
kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --alter --topic test3 --partitions 12
kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --describe --topic test3

# Delete the three topics in one call
kafka-topics.sh --zookeeper localhost:2181,localhost:2182,localhost:2183 --delete --topic test1,test2,test3
108
# Configuration: inspect current topic- and broker-level overrides
kafka-configs.sh --zookeeper localhost:2181 --describe --entity-type topics
kafka-configs.sh --zookeeper localhost:2181 --describe --entity-type brokers

# For Topic 1: set a 1-day retention (86400000 ms) and min ISR of 3, then remove them
kafka-configs.sh --zookeeper localhost:2181 --alter --entity-type topics --entity-name test1 --add-config retention.ms=86400000,min.insync.replicas=3
kafka-configs.sh --zookeeper localhost:2181 --alter --entity-type topics --entity-name test1 --delete-config retention.ms,min.insync.replicas
kafka-configs.sh --zookeeper localhost:2181 --describe --entity-type topics --entity-name test1

# For Broker 0: add and remove a log-cleaner thread override
kafka-configs.sh --zookeeper localhost:2181 --entity-type brokers --entity-name 0 --alter --add-config log.cleaner.threads=10
kafka-configs.sh --zookeeper localhost:2181 --entity-type brokers --entity-name 0 --alter --delete-config log.cleaner.threads
kafka-configs.sh --zookeeper localhost:2181 --describe --entity-type topics --entity-name test1
kafka-configs.sh --zookeeper localhost:2181 --describe --entity-type brokers --entity-name 0

# Reassign partitions according to a prepared JSON plan
kafka-reassign-partitions.sh --zookeeper localhost:2181 --execute --reassignment-json-file ./replicas.json
126
# Producer: default acks, then explicit acks levels 0 / 1 / all
kafka-console-producer.sh --broker-list localhost:9092 --topic test1
kafka-console-producer.sh --broker-list localhost:9092 --topic test1 --producer-property acks=0
kafka-console-producer.sh --broker-list localhost:9092 --topic test1 --producer-property acks=1
kafka-console-producer.sh --broker-list localhost:9092 --topic test1 --producer-property acks=all

# Consumer: new messages only, then replay from the earliest offset
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test1
kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test1 --from-beginning

# Consumer groups
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group <consumer-group-name>
140
### Example 2

# Kafka commands: create "test" (3 replicas), grow to 6 partitions, inspect
kafka-topics.sh --zookeeper localhost:2181 --create --topic test --replication-factor 3 --partitions 3 --if-not-exists
kafka-topics.sh --zookeeper localhost:2181 --alter --topic test --partitions 6
kafka-topics.sh --zookeeper localhost:2181 --topic test --describe

# Producer
kafka-console-producer.sh --broker-list localhost:9092 --topic test

# Code repository
cd ~/
git clone https://github.com/tuan-devops/content-ccdak-kafka-simple-consumer

# Test code
cd content-ccdak-kafka-simple-consumer/
./gradlew run

# Update configuration
vi build.gradle
161
// Add this dependency block to build.gradle (Kafka Java client)
dependencies {
    implementation 'org.apache.kafka:kafka-clients:2.2.1'
}
166
// Update Main.java
package com.example.kafkaSimpleConsumer;

import org.apache.kafka.clients.consumer.*;
import java.util.Properties;
import java.util.Arrays;
import java.time.Duration;

/**
 * Minimal Kafka consumer: subscribes to the "test" topic and prints each
 * record's offset, key and value, polling forever.
 */
public class Main {

    public static void main(String[] args) {
        // Consumer configuration: local broker, auto-commit every second,
        // String deserialization for both keys and values.
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("group.id", "test-group");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("test"));

        // Poll loop: runs until the process is terminated.
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }
}
195
# Consumer group created by the sample consumer
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list
kafka-consumer-groups.sh --bootstrap-server localhost:9092 --describe --group test-group

### Example 3

# Repository (Kafka Streams WordCount demo) — reference only, not a command:
# https://github.com/tuan-devops/kafka/tree/trunk/streams/examples/src/main/java/org/apache/kafka/streams/examples/wordcount

# Topics: input and output for the WordCount stream
kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 3 --topic streams-plaintext-input --if-not-exists
kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 3 --topic streams-wordcount-output --if-not-exists
kafka-topics.sh --zookeeper localhost:2181 --list
kafka-topics.sh --zookeeper localhost:2181 --topic streams-plaintext-input --describe
kafka-topics.sh --zookeeper localhost:2181 --topic streams-wordcount-output --describe

# Producer: feed sample words into the input topic
kafka-console-producer.sh --broker-list localhost:9092 --topic streams-plaintext-input
# Type these lines into the producer prompt (producer input, not a shell command):
# word1 word2 word1 word3 word3 word1 word2 word3 word2 word4 word1 word3 word2 word2 word3 word1 word2

# Consumer: read the word counts (String keys, Long counts)
kafka-console-consumer.sh --bootstrap-server localhost:9092 \
    --topic streams-wordcount-output \
    --from-beginning \
    --formatter kafka.tools.DefaultMessageFormatter \
    --property print.key=true \
    --property print.value=true \
    --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
    --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer

# Run the streaming WordCount demo
kafka-run-class.sh org.apache.kafka.streams.examples.wordcount.WordCountDemo

You might also like