1、zookeeper集群安裝
參考《日子》.分布式之開篇-Zookeeper集群安裝
zookeeper 三臺
        192.168.0.70
        192.168.0.71
        192.168.0.72
2、kafka集群安裝
    部署機器三臺:
     192.168.0.73
     192.168.0.74
     192.168.0.75
     java 環境安裝:
       jdk包 jdk-7u51-linux-x64.rpm 上傳至目錄 /cluster/install/jdk-7u51-linux-x64.rpm
       [root@localhost #] rpm -ivh /cluster/install/jdk-7u51-linux-x64.rpm
 準備開發包:kafka_2.10-0.8.2.0.tgz 上傳至目錄 /cluster/install/kafka_2.10-0.8.2.0.tgz
      解壓縮包:[root@localhost #] tar zxvf /cluster/install/kafka_2.10-0.8.2.0.tgz
      復制到cluster目錄下 [root@localhost #] cp -r /cluster/install/kafka_2.10-0.8.2.0 /cluster
? ? ?修改server.properties文件
[root@localhost #] vi /cluster/kafka_2.10-0.8.2.0/config/server.properties
    broker.id=73 --分別對應每個機器IP末尾數
host.name=192.168.0.73
? ? zookeeper.connect=192.168.0.70:2181,192.168.0.71:2181,192.168.0.72:2181
復制到其他機器 [root@localhost #] scp -r /cluster root@192.168.0.74:/
                          [root@localhost #] scp -r /cluster root@192.168.0.75:/
    更改相應的 broker.id、host.name
  啟動: [root@localhost bin#] ./kafka-server-start.sh /cluster/kafka_2.10-0.8.2.0/config/server.properties
   忍不住來張圖,剛啟動時74是leader,嘗試了下down掉74,現在轉為73了
192.168.0.73,192.168.0.74,192.168.0.75,開啟端口 9092
[root@localhost #]/sbin/iptables -I INPUT -p tcp --dport 9092 -j ACCEPT
[root@localhost #]/etc/rc.d/init.d/iptables save #將更改進(jìn)行保存
[root@localhost #]/etc/init.d/iptables restart #重啟防火墻以便改動生效
3、代碼示例
最關鍵的部分,所有程序員最心切期待的開始
屬性配置:
/**
 * Shared configuration constants for the Kafka producer/consumer demo.
 *
 * <p>Interface fields are implicitly {@code public static final}, so the
 * modifiers are omitted here; values are identical to the cluster set up
 * in the installation notes above.
 */
public interface KafkaProperties {
    // ZooKeeper ensemble used by the high-level consumer.
    String zkConnect = "192.168.0.70:2181,192.168.0.71:2181,192.168.0.72:2181";
    // Consumer group id and demo topics.
    String groupId = "group1";
    String topic = "topic1";
    String topic2 = "topic2";
    String topic3 = "topic3";
    // Kafka broker hosts and port (firewall opened on 9092 above).
    String kafkaServerURL = "192.168.0.73,192.168.0.74,192.168.0.75";
    int kafkaServerPort = 9092;
    // Client tuning knobs for the SimpleConsumer-style demo.
    int kafkaProducerBufferSize = 64 * 1024;
    int connectionTimeOut = 20000;
    int reconnectInterval = 10000;
    String clientId = "SimpleConsumerDemoClient";
}
消息生產(chǎn)者
KafkaProducer
import java.util.Properties;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
public class KafkaProducer extends Thread
{
private final kafka.javaapi.producer.Producerproducer;
private final String topic;
private final Properties props = new Properties();
public KafkaProducer(String topic)
{
props.put("serializer.class", "kafka.serializer.StringEncoder");
props.put("metadata.broker.list","192.168.0.73:9092,192.168.0.74:9092,192.168.0.75:9092");
producer = new kafka.javaapi.producer.Producer(new ProducerConfig(props));
this.topic = topic;
}
@Override
public void run() {
int messageNo = 1;
Long start=new java.util.Date().getTime();
while (true)
{
String messageStr = new String("Message_" + messageNo);
System.out.println("Send:" + messageStr);
producer.send(new KeyedMessage(topic,messageStr));
messageNo++;
try {
sleep(3000);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
}
消息消費(fèi)者
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import
kafka.javaapi.consumer.ConsumerConnector;
/**
*@author leicui bourne_cui@163.com
*/
public class KafkaConsumer extends Thread
{
private final ConsumerConnector consumer;
private final String topic;
public KafkaConsumer(String topic)
{
consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
createConsumerConfig());
this.topic = topic;
}
private static ConsumerConfig createConsumerConfig()
{
Properties props = new Properties();
props.put("zookeeper.connect", KafkaProperties.zkConnect);
props.put("group.id", KafkaProperties.groupId);
props.put("zookeeper.session.timeout.ms", "40000");
props.put("zookeeper.sync.time.ms", "200");
props.put("auto.commit.interval.ms", "1000");
return new ConsumerConfig(props);
}
@Override
public void run() {
Map topicCountMap = new HashMap();
topicCountMap.put(topic, new Integer(1));
Map<String,List<KafkaSteam<byte[],byte[]>>> consumerMap = ? ? ? ? ? ? ? ? ?consumer.createMessageStreams(topicCountMap);
? ? ? ? ? ? ? ? KafkaStream<byte[],byte[]> stream =consumerMap.get(topic).get(0);
? ? ? ? ? ? ? ? ConsumerIterator<byte[],byte[]> it = stream.iterator();
? ? ? ? ? ? ? ? while (it.hasNext()) {
? ? ? ? ? ? ? ? System.out.println("receive:" + new
? ? ? ? ? ? ? ? String(it.next().message()));
? ? ? ? ? ? ? ?try {
? ? ? ? ? ? ? ? ?sleep(3000);
? ? ? ? ? ? ? ? ?} catch (InterruptedException e) {
? ? ? ? ? ? ? ? ? ?e.printStackTrace();
? ? ? ? ? ? ? ? ? }
? ? ? ? ? ? }
? ? ? ?}
}
運(yùn)行入口:
public class KafkaConsumerProducerDemo {
? ?public static void main(String[] args)
? ?{
? ? ? ?KafkaProducer producerThread = new KafkaProducer(KafkaProperties.topic);
? ? ? ? producerThread.start();
? ? ? ? ?KafkaConsumer consumerThread = new KafkaConsumer(KafkaProperties.topic);
? ? ? ?consumerThread.start();
? ? }
}