Create the Flume configuration file hive_kafka.properties:

vim hive_kafka.properties

agent.sources = kafka_source
agent.channels = mem_channel
agent.sinks = hive_sink

# configure the source
agent.sources.kafka_source.type = org.apache.flume.source.kafka.KafkaSource
agent.sources.kafka_source.channels = mem_channel
agent.sources.kafka_source.batchSize = 5000
agent.sources.kafka_source.kafka.bootstrap.servers = $kafkaIP:9092
agent.sources.kafka_source.kafka.topics = kafka_test

# configure the sink
agent.sinks.hive_sink.channel = mem_channel
agent.sinks.hive_sink.type = hive
agent.sinks.hive_sink.hive.metastore = thrift://172.16.32.51:7004
agent.sinks.hive_sink.hive.database = default
agent.sinks.hive_sink.hive.table = weblogs
agent.sinks.hive_sink.hive.partition = asia,india,%y-%m-%d-%H-%M
agent.sinks.hive_sink.useLocalTimeStamp = true
agent.sinks.hive_sink.round = true
agent.sinks.hive_sink.roundValue = 10
agent.sinks.hive_sink.roundUnit = minute
agent.sinks.hive_sink.serializer = DELIMITED
agent.sinks.hive_sink.serializer.delimiter = ","
agent.sinks.hive_sink.serializer.serdeSeparator = ','
agent.sinks.hive_sink.serializer.fieldnames = id,msg

# configure the channel
agent.channels.mem_channel.type = memory
agent.channels.mem_channel.capacity = 100000
agent.channels.mem_channel.transactionCapacity = 100000
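The source reads from the kafka_test topic. If the topic does not exist yet, it can be created first; a minimal sketch, assuming a ZooKeeper address $zkIP:2181 (a hypothetical placeholder) and an older Kafka release that still manages topics through ZooKeeper, consistent with the --broker-list flag used in the test step below:

./bin/kafka-topics.sh --create --zookeeper $zkIP:2181 --replication-factor 1 --partitions 1 --topic kafka_test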
grep "hive.metastore.uris" -C 2 /usr/local/service/hive/conf/hive-site.xml
<property><name>hive.metastore.uris</name><value>thrift://172.16.32.51:7004</value></property>
Create the target Hive table; the Hive sink requires a bucketed, ORC-stored, transactional table:

create table weblogs (
    id int,
    msg string
)
partitioned by (continent string, country string, time string)
clustered by (id) into 5 buckets
stored as orc
TBLPROPERTIES ('transactional'='true');
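To confirm that the table was created as transactional, you can inspect its properties from the shell; the exact output format varies with the Hive version:

hive -e "DESCRIBE FORMATTED weblogs;" | grep -i transactional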
Add the following configuration items to hive-site.xml:

<property>
    <name>hive.support.concurrency</name>
    <value>true</value>
</property>
<property>
    <name>hive.exec.dynamic.partition.mode</name>
    <value>nonstrict</value>
</property>
<property>
    <name>hive.txn.manager</name>
    <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
</property>
<property>
    <name>hive.compactor.initiator.on</name>
    <value>true</value>
</property>
<property>
    <name>hive.compactor.worker.threads</name>
    <value>1</value>
</property>
<property>
    <name>hive.enforce.bucketing</name>
    <value>true</value>
</property>
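After editing hive-site.xml, a quick sanity check that the new values are picked up can be done with the Hive CLI's set command, which prints the current value of each named property:

hive -e "set hive.support.concurrency; set hive.txn.manager;"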
Restart all services of the hadoop-hive component so the new configuration takes effect. The logs may report that the metastore cannot be connected; ignore this error. It is caused by the process startup order: the metastore must be started before hiveserver2.

Copy hive-hcatalog-streaming-xxx.jar to Flume's lib directory:

cp -ra /usr/local/service/hive/hcatalog/share/hcatalog/hive-hcatalog-streaming-2.3.3.jar /usr/local/service/flume/lib/
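To confirm the jar is now on Flume's classpath (the version suffix may differ on your cluster):

ls /usr/local/service/flume/lib/ | grep hive-hcatalog-streaming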
Start the Flume agent:

./bin/flume-ng agent --conf ./conf/ -f hive_kafka.properties -n agent -Dflume.root.logger=INFO,console
Use the Kafka console producer to send test data:

[hadoop@172 kafka]$ ./bin/kafka-console-producer.sh --broker-list $kafkaIP:9092 --topic kafka_test
1,hello
2,hi
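If the pipeline is working, the records should appear in the weblogs table once the sink commits its transaction batch; a simple check from the Hive CLI:

hive -e "select * from weblogs;"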