Compare commits
43 Commits
v1.2.0.M1...v1.2.1.RELEASE
| Author | SHA1 | Date |
|---|---|---|
|  | ee4f3935ec |  |
|  | 4e0107b4d2 |  |
|  | 8f8a1e8709 |  |
|  | c68ea8c570 |  |
|  | 61e7936978 |  |
|  | 5c70e2df43 |  |
|  | f280edc9ce |  |
|  | 2c9afde8c6 |  |
|  | 6a8c0cd0c6 |  |
|  | ec73f2785d |  |
|  | 8982f896fd |  |
|  | 005ec51d8b |  |
|  | 47aaf29e3e |  |
|  | 88d4b8eef5 |  |
|  | 66eb15a8e2 |  |
|  | 466400cdb7 |  |
|  | bff0a072dc |  |
|  | 7fad2951f7 |  |
|  | e7c5f750da |  |
|  | 1f28adaf4c |  |
|  | 91bdee65ec |  |
|  | 6a31e9c94f |  |
|  | b541fca68f |  |
|  | 9d23e6a8fe |  |
|  | 89249b233f |  |
|  | 1998d5e7e8 |  |
|  | e1906711a8 |  |
|  | 78213c98e8 |  |
|  | c10206d41b |  |
|  | 73eda0ddd0 |  |
|  | 5b51d7cce3 |  |
|  | 2530229fb5 |  |
|  | 70cba0ae03 |  |
|  | 3eb9493cba |  |
|  | 89b75b4734 |  |
|  | dc0bc18f37 |  |
|  | 8fafc2605e |  |
|  | 83eca9b734 |  |
|  | b2d579b5b6 |  |
|  | c389fa3fa4 |  |
|  | 50e0a8b30e |  |
|  | 7daa0f1b72 |  |
|  | f3e38961c5 |  |
pom.xml (25 changes)
@@ -2,27 +2,30 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 	<modelVersion>4.0.0</modelVersion>
 	<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-	<version>1.2.0.M1</version>
+	<version>1.2.1.RELEASE</version>
 	<packaging>pom</packaging>
 	<parent>
 		<groupId>org.springframework.cloud</groupId>
 		<artifactId>spring-cloud-build</artifactId>
-		<version>1.3.1.M1</version>
+		<version>1.3.2.RELEASE</version>
 		<relativePath />
 	</parent>
 	<properties>
 		<java.version>1.7</java.version>
-		<kafka.version>0.9.0.1</kafka.version>
-		<spring-kafka.version>1.0.5.RELEASE</spring-kafka.version>
-		<spring-integration-kafka.version>2.0.1.RELEASE</spring-integration-kafka.version>
-		<spring-cloud-stream.version>1.2.0.M1</spring-cloud-stream.version>
+		<kafka.version>0.10.1.1</kafka.version>
+		<spring-kafka.version>1.1.2.RELEASE</spring-kafka.version>
+		<spring-integration-kafka.version>2.1.0.RELEASE</spring-integration-kafka.version>
+		<spring-cloud-stream.version>1.2.2.RELEASE</spring-cloud-stream.version>
 	</properties>
 	<modules>
 		<module>spring-cloud-stream-binder-kafka</module>
 		<module>spring-cloud-starter-stream-kafka</module>
 		<module>spring-cloud-stream-binder-kafka-docs</module>
-		<module>spring-cloud-stream-binder-kafka-0.10-test</module>
-	</modules>
+		<module>spring-cloud-stream-binder-kafka-0.9-test</module>
+		<module>spring-cloud-stream-binder-kafka-0.10.0-test</module>
+		<module>spring-cloud-stream-binder-kafka-core</module>
+		<module>spring-cloud-stream-binder-kafka-test-support</module>
+	</modules>

 	<dependencyManagement>
 		<dependencies>
@@ -41,6 +44,10 @@
 			<artifactId>kafka_2.11</artifactId>
 			<version>${kafka.version}</version>
 			<exclusions>
+				<exclusion>
+					<groupId>jline</groupId>
+					<artifactId>jline</artifactId>
+				</exclusion>
 				<exclusion>
 					<groupId>org.slf4j</groupId>
 					<artifactId>slf4j-log4j12</artifactId>
@@ -131,7 +138,7 @@
 			<dependency>
 				<groupId>org.springframework.cloud</groupId>
 				<artifactId>spring-cloud-build-tools</artifactId>
-				<version>1.3.1.M1</version>
+				<version>1.3.2.RELEASE</version>
 			</dependency>
 		</dependencies>
 		<executions>
spring-cloud-starter-stream-kafka/pom.xml

@@ -4,7 +4,7 @@
 	<parent>
 		<groupId>org.springframework.cloud</groupId>
 		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-		<version>1.2.0.M1</version>
+		<version>1.2.1.RELEASE</version>
 	</parent>
 	<artifactId>spring-cloud-starter-stream-kafka</artifactId>
 	<description>Spring Cloud Starter Stream Kafka</description>
spring-cloud-stream-binder-kafka-0.10-test/pom.xml (renamed to spring-cloud-stream-binder-kafka-0.10.0-test/pom.xml)

@@ -4,10 +4,10 @@
 	<parent>
 		<groupId>org.springframework.cloud</groupId>
 		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-		<version>1.2.0.M1</version>
+		<version>1.2.1.RELEASE</version>
 	</parent>
-	<artifactId>spring-cloud-stream-binder-kafka-0.10-test</artifactId>
-	<description>Spring Cloud Stream Kafka Binder 0.10 Tests</description>
+	<artifactId>spring-cloud-stream-binder-kafka-0.10.0-test</artifactId>
+	<description>Spring Cloud Stream Kafka Binder 0.10.0 Tests</description>
 	<url>http://projects.spring.io/spring-cloud</url>
 	<organization>
 		<name>Pivotal Software, Inc.</name>
@@ -16,15 +16,20 @@
 	<properties>
 		<main.basedir>${basedir}/../..</main.basedir>
 		<!--
-		Override Kafka dependencies to Kafka 0.10 and supporting Spring Kafka and
+		Override Kafka dependencies to Kafka 0.9.0.1 and supporting Spring Kafka and
 		Spring Integration Kafka versions
 		-->
-		<kafka.version>0.10.0.0</kafka.version>
-		<spring-kafka.version>1.1.1.RELEASE</spring-kafka.version>
+		<kafka.version>0.10.0.1</kafka.version>
+		<spring-kafka.version>1.1.2.RELEASE</spring-kafka.version>
 		<spring-integration-kafka.version>2.1.0.RELEASE</spring-integration-kafka.version>
 	</properties>

 	<dependencies>
+		<dependency>
+			<groupId>org.springframework.cloud</groupId>
+			<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
+			<version>1.2.1.RELEASE</version>
+		</dependency>
 		<dependency>
 			<groupId>org.springframework.cloud</groupId>
 			<artifactId>spring-cloud-stream-binder-kafka</artifactId>
@@ -100,5 +105,4 @@
 		</repository>
 	</repositories>

-
 </project>
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 the original author or authors.
+ * Copyright 2014-2016 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,7 +14,15 @@
  * limitations under the License.
  */

 /**
  * This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
  */
 package org.springframework.cloud.stream.binder.kafka;

 /**
  * Integration tests for the {@link KafkaMessageChannelBinder}.
  *
  * This test specifically tests for the 0.10.0.1 version of Kafka.
  *
  * @author Soby Chacko
  */
 public class Kafka_0_10_0_BinderTests extends Kafka10BinderTests {

 }
spring-cloud-stream-binder-kafka-0.9-test/pom.xml (new file, 78 lines)
@@ -0,0 +1,78 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.springframework.cloud</groupId>
+		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
+		<version>1.2.1.RELEASE</version>
+	</parent>
+	<artifactId>spring-cloud-stream-binder-kafka-0.9</artifactId>
+	<description>Spring Cloud Stream Kafka Binder 0.9 Tests</description>
+	<url>http://projects.spring.io/spring-cloud</url>
+	<organization>
+		<name>Pivotal Software, Inc.</name>
+		<url>http://www.spring.io</url>
+	</organization>
+	<properties>
+		<main.basedir>${basedir}/../..</main.basedir>
+		<!--
+		Override Kafka dependencies to Kafka 0.9.0.1 and supporting Spring Kafka and
+		Spring Integration Kafka versions
+		-->
+		<kafka.version>0.9.0.1</kafka.version>
+		<spring-kafka.version>1.0.5.RELEASE</spring-kafka.version>
+		<spring-integration-kafka.version>2.0.1.RELEASE</spring-integration-kafka.version>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.springframework.cloud</groupId>
+			<artifactId>spring-cloud-stream-binder-kafka</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework.kafka</groupId>
+			<artifactId>spring-kafka</artifactId>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.kafka</groupId>
+			<artifactId>kafka_2.11</artifactId>
+			<scope>test</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>org.slf4j</groupId>
+					<artifactId>slf4j-log4j12</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.kafka</groupId>
+			<artifactId>kafka-clients</artifactId>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework.kafka</groupId>
+			<artifactId>spring-kafka-test</artifactId>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework.integration</groupId>
+			<artifactId>spring-integration-kafka</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework.cloud</groupId>
+			<artifactId>spring-cloud-stream-binder-kafka</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework.cloud</groupId>
+			<artifactId>spring-cloud-stream-binder-test</artifactId>
+			<scope>test</scope>
+		</dependency>
+	</dependencies>
+
+</project>
Kafka09TestBinder.java

@@ -16,8 +16,10 @@

 package org.springframework.cloud.stream.binder.kafka;

+import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
 import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
-import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
+import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
+import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
 import org.springframework.context.support.GenericApplicationContext;
 import org.springframework.kafka.support.LoggingProducerListener;
 import org.springframework.kafka.support.ProducerListener;
@@ -35,14 +37,18 @@ public class Kafka09TestBinder extends AbstractKafkaTestBinder {

 	public Kafka09TestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
 		try {
-			KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
+			AdminUtilsOperation adminUtilsOperation = new Kafka09AdminUtilsOperation();
+			KafkaTopicProvisioner provisioningProvider =
+					new KafkaTopicProvisioner(binderConfiguration, adminUtilsOperation);
+			provisioningProvider.afterPropertiesSet();
+
+			KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration, provisioningProvider);
 			binder.setCodec(getCodec());
 			ProducerListener producerListener = new LoggingProducerListener();
 			binder.setProducerListener(producerListener);
 			GenericApplicationContext context = new GenericApplicationContext();
 			context.refresh();
 			binder.setApplicationContext(context);
-			binder.setAdminUtilsOperation(new Kafka09AdminUtilsOperation());
 			binder.afterPropertiesSet();
 			this.setBinder(binder);
 		}
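The hunk above moves topic administration out of the channel binder and into a dedicated `KafkaTopicProvisioner`. A minimal sketch of the new wiring, using only the constructor and method signatures visible in this diff rather than a published API:

```java
// Sketch of the provisioning wiring introduced in the hunk above; all
// signatures are taken from this diff, not from a documented public API.
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();

// AdminUtilsOperation abstracts the broker-version-specific admin calls.
AdminUtilsOperation adminUtilsOperation = new Kafka09AdminUtilsOperation();

// Topic creation and partition management now live in the provisioner...
KafkaTopicProvisioner provisioningProvider =
		new KafkaTopicProvisioner(binderConfiguration, adminUtilsOperation);
provisioningProvider.afterPropertiesSet();

// ...and the channel binder receives the provisioner at construction time,
// replacing the old setAdminUtilsOperation(...) setter.
KafkaMessageChannelBinder binder =
		new KafkaMessageChannelBinder(binderConfiguration, provisioningProvider);
binder.afterPropertiesSet();
```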
Kafka09BinderTests.java

@@ -34,13 +34,12 @@ import org.junit.ClassRule;
 import org.springframework.cloud.stream.binder.Binder;
 import org.springframework.cloud.stream.binder.Spy;
-import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
-import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
+import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
 import org.springframework.kafka.core.ConsumerFactory;
 import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
 import org.springframework.kafka.support.KafkaHeaders;
 import org.springframework.kafka.test.core.BrokerAddress;
 import org.springframework.kafka.test.rule.KafkaEmbedded;
 import org.springframework.retry.RetryOperations;

 /**
  * Integration tests for the {@link KafkaMessageChannelBinder}.
@@ -50,7 +49,7 @@ import org.springframework.retry.RetryOperations;
  * @author Mark Fisher
  * @author Ilayaperumal Gopinathan
  */
-public class Kafka09BinderTests extends KafkaBinderTests {
+public class Kafka_09_BinderTests extends KafkaBinderTests {

 	private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();

@@ -93,11 +92,6 @@ public class Kafka09BinderTests extends KafkaBinderTests {
 		return consumerFactory().createConsumer().partitionsFor(topic).size();
 	}

-	@Override
-	protected void setMetadataRetryOperations(Binder binder, RetryOperations retryOperations) {
-		((Kafka09TestBinder) binder).getBinder().setMetadataRetryOperations(retryOperations);
-	}
-
 	@Override
 	protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
 		final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
spring-cloud-stream-binder-kafka-core/pom.xml (new file, 46 lines)
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+	<modelVersion>4.0.0</modelVersion>
+	<parent>
+		<groupId>org.springframework.cloud</groupId>
+		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
+		<version>1.2.1.RELEASE</version>
+	</parent>
+	<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
+	<description>Spring Cloud Stream Kafka Binder Core</description>
+	<url>http://projects.spring.io/spring-cloud</url>
+	<organization>
+		<name>Pivotal Software, Inc.</name>
+		<url>http://www.spring.io</url>
+	</organization>
+	<properties>
+	</properties>
+
+	<dependencies>
+		<dependency>
+			<groupId>org.springframework.cloud</groupId>
+			<artifactId>spring-cloud-stream</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.kafka</groupId>
+			<artifactId>kafka-clients</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.kafka</groupId>
+			<artifactId>kafka_2.11</artifactId>
+		</dependency>
+		<dependency>
+			<groupId>org.springframework.integration</groupId>
+			<artifactId>spring-integration-kafka</artifactId>
+			<version>${spring-integration-kafka.version}</version>
+			<exclusions>
+				<exclusion>
+					<groupId>org.apache.avro</groupId>
+					<artifactId>avro-compiler</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+	</dependencies>
+
+</project>
AdminUtilsOperation.java

@@ -24,7 +24,7 @@ import kafka.utils.ZkUtils;
 * API around {@link kafka.admin.AdminUtils} to support
 * various versions of Kafka brokers.
 *
-* Note: Implementations that support Kafka brokers other than 0.9, need to use
+* Note: Implementations that support Kafka brokers other than 0.10, need to use
 * a possible strategy that involves reflection around {@link kafka.admin.AdminUtils}.
 *
 * @author Soby Chacko
Kafka10AdminUtilsOperation.java (renamed to Kafka09AdminUtilsOperation.java)

@@ -18,10 +18,8 @@ package org.springframework.cloud.stream.binder.kafka.admin;

 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
-import java.util.List;
 import java.util.Properties;

-import kafka.api.PartitionMetadata;
 import kafka.utils.ZkUtils;

 import org.springframework.util.ClassUtils;
@@ -30,7 +28,7 @@ import org.springframework.util.ReflectionUtils;
 /**
  * @author Soby Chacko
  */
-public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
+public class Kafka09AdminUtilsOperation implements AdminUtilsOperation {

 	private static Class<?> ADMIN_UTIL_CLASS;

@@ -53,10 +51,9 @@ public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
 					addPartitions = m;
 				}
 			}
-
 			if (addPartitions != null) {
 				addPartitions.invoke(null, zkUtils, topic, numPartitions,
-						replicaAssignmentStr, checkBrokerAvailable, null);
+						replicaAssignmentStr, checkBrokerAvailable);
 			}
 			else {
 				throw new InvocationTargetException(
@@ -69,20 +66,16 @@ public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
 		catch (IllegalAccessException e) {
 			ReflectionUtils.handleReflectionException(e);
 		}
-
 	}

 	public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
 		try {
 			Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
-
 			Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
-			Class<?> topicMetadataClass = ClassUtils.forName("org.apache.kafka.common.requests.MetadataResponse$TopicMetadata", null);
-
-			Method errorCodeMethod = ReflectionUtils.findMethod(topicMetadataClass, "error");
-			Object obj = errorCodeMethod.invoke(result);
-			Method code = ReflectionUtils.findMethod(obj.getClass(), "code");
-
-			return (short) code.invoke(obj);
+			Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);
+			Method errorCodeMethod = ReflectionUtils.findMethod(topicMetadataClass, "errorCode");
+			return (short) errorCodeMethod.invoke(result);
 		}
 		catch (ClassNotFoundException e) {
 			throw new IllegalStateException("AdminUtils class not found", e);
@@ -94,7 +87,6 @@ public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
 			ReflectionUtils.handleReflectionException(e);
 		}
 		return 0;
-
 	}

 	@SuppressWarnings("unchecked")
@@ -102,11 +94,13 @@ public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
 		try {
 			Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
 			Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
-			Class<?> topicMetadataClass = ClassUtils.forName("org.apache.kafka.common.requests.MetadataResponse$TopicMetadata", null);
+			Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);

-			Method partitionsMetadata = ReflectionUtils.findMethod(topicMetadataClass, "partitionMetadata");
-			List<PartitionMetadata> foo = (List<PartitionMetadata>) partitionsMetadata.invoke(result);
-			return foo.size();
+			Method partitionsMetadata = ReflectionUtils.findMethod(topicMetadataClass, "partitionsMetadata");
+			scala.collection.Seq<kafka.api.PartitionMetadata> partitionSize =
+					(scala.collection.Seq<kafka.api.PartitionMetadata>) partitionsMetadata.invoke(result);
+
+			return partitionSize.size();
 		}
 		catch (ClassNotFoundException e) {
 			throw new IllegalStateException("AdminUtils class not found", e);
@@ -118,22 +112,23 @@ public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
 			ReflectionUtils.handleReflectionException(e);
 		}
 		return 0;
-
 	}

 	public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
-			int replicationFactor, Properties topicConfig) {
+									int replicationFactor, Properties topicConfig) {
 		try {
 			Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
 			Method createTopic = null;
 			for (Method m : declaredMethods) {
-				if (m.getName().equals("createTopic") && m.getParameterTypes()[m.getParameterTypes().length - 1].getName().endsWith("RackAwareMode")) {
+				if (m.getName().equals("createTopic")) {
 					createTopic = m;
+					break;
 				}
 			}
 			if (createTopic != null) {
 				createTopic.invoke(null, zkUtils, topic, partitions,
-						replicationFactor, topicConfig, null);
+						replicationFactor, topicConfig);
 			}
 			else {
 				throw new InvocationTargetException(
@@ -147,4 +142,4 @@ public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
 			ReflectionUtils.handleReflectionException(e);
 		}
 	}
 }
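The reflective style above is what lets one binary drive multiple broker versions: the 0.9-era metadata classes such as `kafka.api.TopicMetadata` are looked up by name instead of being referenced at compile time, because the module now compiles against the 0.10 client. A compact illustration of the same pattern, assuming only that `kafka.api.TopicMetadata` with its `errorCode()` method is on the runtime classpath:

```java
import java.lang.reflect.Method;

import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;

// Illustration of the reflection pattern used in the diff above: resolve the
// 0.9-era metadata class and method by name, so the code still compiles when
// only the 0.10 client is present at build time.
public final class ReflectiveErrorCode {

	private ReflectiveErrorCode() {
	}

	public static short errorCode(Object topicMetadata) throws Exception {
		// kafka.api.TopicMetadata exists only in older broker jars; in 0.10 the
		// equivalent data moved to MetadataResponse.TopicMetadata.error().code().
		Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);
		Method errorCodeMethod = ReflectionUtils.findMethod(topicMetadataClass, "errorCode");
		return (short) errorCodeMethod.invoke(topicMetadata);
	}
}
```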
Kafka09AdminUtilsOperation.java (renamed to Kafka10AdminUtilsOperation.java)

@@ -19,33 +19,36 @@ package org.springframework.cloud.stream.binder.kafka.admin;

 import java.util.Properties;

 import kafka.admin.AdminUtils;
-import kafka.api.TopicMetadata;
 import kafka.utils.ZkUtils;
+import org.apache.kafka.common.requests.MetadataResponse;

 /**
  * @author Soby Chacko
  */
-public class Kafka09AdminUtilsOperation implements AdminUtilsOperation {
+public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {

 	public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
 			String replicaAssignmentStr, boolean checkBrokerAvailable) {
-		AdminUtils.addPartitions(zkUtils, topic, numPartitions,
-				replicaAssignmentStr, checkBrokerAvailable);
+		AdminUtils.addPartitions(zkUtils, topic, numPartitions, replicaAssignmentStr, checkBrokerAvailable, null);
 	}

 	public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
-		TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
-		return topicMetadata.errorCode();
+		MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
+		return topicMetadata.error().code();
 	}

 	@SuppressWarnings("unchecked")
 	public int partitionSize(String topic, ZkUtils zkUtils) {
-		TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
-		return topicMetadata.partitionsMetadata().size();
+		MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
+		return topicMetadata.partitionMetadata().size();
 	}

 	public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
-			int replicationFactor, Properties topicConfig) {
+									int replicationFactor, Properties topicConfig) {
-		AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor,
-				topicConfig);
+		AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor, topicConfig, null);
 	}
 }
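By contrast, the renamed `Kafka10AdminUtilsOperation` above calls `AdminUtils` directly; the trailing `null` argument is the 0.10 `RackAwareMode` parameter. A hedged usage sketch, mirroring the `ZkUtils.apply(...)` call that `KafkaTopicProvisioner` makes later in this diff; the connection string, timeouts, and topic name are illustrative placeholders:

```java
import java.util.Properties;

import kafka.utils.ZkUtils;
import org.apache.kafka.common.security.JaasUtils;

import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;

// Illustrative use of the direct-call operations above; connection details
// are placeholders, and the topic name is hypothetical.
public class CreateTopicExample {

	public static void main(String[] args) {
		ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 10000, 10000,
				JaasUtils.isZkSecurityEnabled());
		try {
			AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
			// 4 partitions, replication factor 1, no extra topic config
			adminUtilsOperation.invokeCreateTopic(zkUtils, "my-topic", 4, 1, new Properties());
		}
		finally {
			zkUtils.close();
		}
	}
}
```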
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka.config;
+package org.springframework.cloud.stream.binder.kafka.properties;

 import java.util.HashMap;
 import java.util.Map;
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka.config;
+package org.springframework.cloud.stream.binder.kafka.properties;

 import java.util.HashMap;
 import java.util.Map;
KafkaBinderConfigurationProperties.java

@@ -33,7 +33,7 @@ public class KafkaBinderConfigurationProperties {

 	private String[] zkNodes = new String[] {"localhost"};

-	private Map<String, String> configuration = new HashMap<>();
+	private Map<String, Object> configuration = new HashMap<>();

 	private String defaultZkPort = "2181";

@@ -249,11 +249,11 @@ public class KafkaBinderConfigurationProperties {
 		this.socketBufferSize = socketBufferSize;
 	}

-	public Map<String, String> getConfiguration() {
+	public Map<String, Object> getConfiguration() {
 		return configuration;
 	}

-	public void setConfiguration(Map<String, String> configuration) {
+	public void setConfiguration(Map<String, Object> configuration) {
 		this.configuration = configuration;
 	}
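Widening the binder-level `configuration` map from `Map<String, String>` to `Map<String, Object>` matters because Kafka client properties are not all strings. A short sketch, assuming only the setter shown in the hunk above; the property keys are standard Kafka consumer configs:

```java
import java.util.HashMap;
import java.util.Map;

import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;

// Sketch of why the widened value type is useful: typed (non-String) Kafka
// client settings can now be carried through the binder configuration map.
public class BinderConfigurationExample {

	public static void main(String[] args) {
		KafkaBinderConfigurationProperties binderProperties = new KafkaBinderConfigurationProperties();
		Map<String, Object> configuration = new HashMap<>();
		configuration.put("max.poll.records", 500);       // integer-valued client config
		configuration.put("enable.auto.commit", false);   // boolean-valued client config
		binderProperties.setConfiguration(configuration);
	}
}
```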
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.properties;

 /**
  * @author Marius Bogoevici
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 the original author or authors.
+ * Copyright 2016-2017 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,13 +14,14 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.properties;

 import java.util.HashMap;
 import java.util.Map;

 /**
  * @author Marius Bogoevici
  * @author Ilayaperumal Gopinathan
+ *
+ * <p>Thanks to Laszlo Szabo for providing the initial patch for generic property support.</p>
  */
@@ -38,6 +39,8 @@ public class KafkaConsumerProperties {

 	private boolean enableDlq;

+	private String dlqName;
+
 	private int recoveryInterval = 5000;

 	private Map<String, String> configuration = new HashMap<>();
@@ -119,4 +122,12 @@ public class KafkaConsumerProperties {
 	public void setConfiguration(Map<String, String> configuration) {
 		this.configuration = configuration;
 	}

+	public String getDlqName() {
+		return dlqName;
+	}
+
+	public void setDlqName(String dlqName) {
+		this.dlqName = dlqName;
+	}
 }
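The new `dlqName` property lets a consumer binding override the derived dead-letter topic name. A sketch of the resolution rule, mirroring the fallback logic that `KafkaTopicProvisioner` (later in this diff) applies when `dlqName` is not set; the destination and group values are the example names from the DLQ docs added in this release:

```java
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.util.StringUtils;

// Sketch of the DLQ topic-name resolution introduced in this release;
// "my-custom-dlq" is a hypothetical topic name.
public class DlqNameExample {

	public static void main(String[] args) {
		KafkaConsumerProperties extension = new KafkaConsumerProperties();
		extension.setDlqName("my-custom-dlq");

		String destination = "so8400out";
		String group = "so8400";
		String dlqTopic = StringUtils.hasText(extension.getDlqName())
				? extension.getDlqName()
				: "error." + destination + "." + group;
		System.out.println("Dead-letter topic: " + dlqTopic);
	}
}
```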
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.properties;

 import java.util.HashMap;
 import java.util.Map;
KafkaProducerProperties.java

@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 the original author or authors.
+ * Copyright 2016-2017 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,7 +14,9 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.properties;
+
+import org.springframework.expression.Expression;

 import java.util.HashMap;
 import java.util.Map;
@@ -23,6 +25,7 @@ import javax.validation.constraints.NotNull;

 /**
  * @author Marius Bogoevici
+ * @author Henryk Konsek
  */
 public class KafkaProducerProperties {

@@ -34,6 +37,8 @@ public class KafkaProducerProperties {

 	private int batchTimeout;

+	private Expression messageKeyExpression;
+
 	private Map<String, String> configuration = new HashMap<>();

 	public int getBufferSize() {
@@ -69,6 +74,14 @@ public class KafkaProducerProperties {
 		this.batchTimeout = batchTimeout;
 	}

+	public Expression getMessageKeyExpression() {
+		return messageKeyExpression;
+	}
+
+	public void setMessageKeyExpression(Expression messageKeyExpression) {
+		this.messageKeyExpression = messageKeyExpression;
+	}
+
 	public Map<String, String> getConfiguration() {
 		return this.configuration;
 	}
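The new `messageKeyExpression` field is a full SpEL `Expression` rather than a string, so callers configure it with a parsed expression. A sketch using Spring's standard SpEL parser; `headers.key` is the example expression from the reference docs updated later in this diff:

```java
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.expression.Expression;
import org.springframework.expression.spel.standard.SpelExpressionParser;

// Sketch of populating the new messageKeyExpression property with a parsed
// SpEL expression that is evaluated against each outgoing message.
public class MessageKeyExpressionExample {

	public static void main(String[] args) {
		SpelExpressionParser parser = new SpelExpressionParser();
		Expression keyExpression = parser.parseExpression("headers.key");

		KafkaProducerProperties producerProperties = new KafkaProducerProperties();
		producerProperties.setMessageKeyExpression(keyExpression);
	}
}
```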
KafkaTopicProvisioner.java (new file, 345 lines)

@@ -0,0 +1,345 @@
+/*
+ * Copyright 2014-2017 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.cloud.stream.binder.kafka.provisioning;
+
+import java.util.Collection;
+import java.util.Properties;
+import java.util.concurrent.Callable;
+
+import kafka.common.ErrorMapping;
+import kafka.utils.ZkUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.security.JaasUtils;
+
+import org.springframework.beans.factory.InitializingBean;
+import org.springframework.cloud.stream.binder.BinderException;
+import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
+import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
+import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
+import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
+import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
+import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
+import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
+import org.springframework.cloud.stream.provisioning.ConsumerDestination;
+import org.springframework.cloud.stream.provisioning.ProducerDestination;
+import org.springframework.cloud.stream.provisioning.ProvisioningException;
+import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
+import org.springframework.retry.RetryCallback;
+import org.springframework.retry.RetryContext;
+import org.springframework.retry.RetryOperations;
+import org.springframework.retry.backoff.ExponentialBackOffPolicy;
+import org.springframework.retry.policy.SimpleRetryPolicy;
+import org.springframework.retry.support.RetryTemplate;
+import org.springframework.util.Assert;
+import org.springframework.util.StringUtils;
+
+/**
+ * Kafka implementation for {@link ProvisioningProvider}
+ *
+ * @author Soby Chacko
+ * @author Gary Russell
+ * @author Ilayaperumal Gopinathan
+ * @author Simon Flandergan
+ */
+public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
+		ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {
+
+	private final Log logger = LogFactory.getLog(getClass());
+
+	private final KafkaBinderConfigurationProperties configurationProperties;
+
+	private final AdminUtilsOperation adminUtilsOperation;
+
+	private RetryOperations metadataRetryOperations;
+
+	public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
+			AdminUtilsOperation adminUtilsOperation) {
+		this.configurationProperties = kafkaBinderConfigurationProperties;
+		this.adminUtilsOperation = adminUtilsOperation;
+	}
+
+	/**
+	 * @param metadataRetryOperations the retry configuration
+	 */
+	public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
+		this.metadataRetryOperations = metadataRetryOperations;
+	}
+
+	@Override
+	public void afterPropertiesSet() throws Exception {
+		if (this.metadataRetryOperations == null) {
+			RetryTemplate retryTemplate = new RetryTemplate();
+
+			SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
+			simpleRetryPolicy.setMaxAttempts(10);
+			retryTemplate.setRetryPolicy(simpleRetryPolicy);
+
+			ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
+			backOffPolicy.setInitialInterval(100);
+			backOffPolicy.setMultiplier(2);
+			backOffPolicy.setMaxInterval(1000);
+			retryTemplate.setBackOffPolicy(backOffPolicy);
+			this.metadataRetryOperations = retryTemplate;
+		}
+	}
+
+	@Override
+	public ProducerDestination provisionProducerDestination(final String name, ExtendedProducerProperties<KafkaProducerProperties> properties) {
+		if (this.logger.isInfoEnabled()) {
+			this.logger.info("Using kafka topic for outbound: " + name);
+		}
+		KafkaTopicUtils.validateTopicName(name);
+		createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, properties.getPartitionCount(), false);
+		if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
+			final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
+					this.configurationProperties.getZkSessionTimeout(),
+					this.configurationProperties.getZkConnectionTimeout(),
+					JaasUtils.isZkSecurityEnabled());
+			int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
+			return new KafkaProducerDestination(name, partitions);
+		}
+		else {
+			return new KafkaProducerDestination(name);
+		}
+	}
+
+	@Override
+	public ConsumerDestination provisionConsumerDestination(final String name, final String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
+		KafkaTopicUtils.validateTopicName(name);
+		boolean anonymous = !StringUtils.hasText(group);
+		Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
+				"DLQ support is not available for anonymous subscriptions");
+		if (properties.getInstanceCount() == 0) {
+			throw new IllegalArgumentException("Instance count cannot be zero");
+		}
+		int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
+		createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, partitionCount, properties.getExtension().isAutoRebalanceEnabled());
+		if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
+			final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
+					this.configurationProperties.getZkSessionTimeout(),
+					this.configurationProperties.getZkConnectionTimeout(),
+					JaasUtils.isZkSecurityEnabled());
+			int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
+			if (properties.getExtension().isEnableDlq() && !anonymous) {
+				String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
+						properties.getExtension().getDlqName() : "error." + name + "." + group;
+				createTopicAndPartitions(dlqTopic, partitions, properties.getExtension().isAutoRebalanceEnabled());
+				return new KafkaConsumerDestination(name, partitions, dlqTopic);
+			}
+			return new KafkaConsumerDestination(name, partitions);
+		}
+		return new KafkaConsumerDestination(name);
+	}
+
+	private void createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(final String topicName, final int partitionCount,
+			boolean tolerateLowerPartitionsOnBroker) {
+		if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
+			createTopicAndPartitions(topicName, partitionCount, tolerateLowerPartitionsOnBroker);
+		}
+		else if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation == null) {
+			this.logger.warn("Auto creation of topics is enabled, but Kafka AdminUtils class is not present on the classpath. " +
+					"No topic will be created by the binder");
+		}
+		else if (!this.configurationProperties.isAutoCreateTopics()) {
+			this.logger.info("Auto creation of topics is disabled.");
+		}
+	}
+
+	/**
+	 * Creates a Kafka topic if needed, or try to increase its partition count to the
+	 * desired number.
+	 */
+	private void createTopicAndPartitions(final String topicName, final int partitionCount,
+			boolean tolerateLowerPartitionsOnBroker) {
+
+		final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
+				this.configurationProperties.getZkSessionTimeout(),
+				this.configurationProperties.getZkConnectionTimeout(),
+				JaasUtils.isZkSecurityEnabled());
+		try {
+			short errorCode = adminUtilsOperation.errorCodeFromTopicMetadata(topicName, zkUtils);
+			if (errorCode == ErrorMapping.NoError()) {
+				// only consider minPartitionCount for resizing if autoAddPartitions is true
+				int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
+						? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
+						: partitionCount;
+				int partitionSize = adminUtilsOperation.partitionSize(topicName, zkUtils);
+
+				if (partitionSize < effectivePartitionCount) {
+					if (this.configurationProperties.isAutoAddPartitions()) {
+						adminUtilsOperation.invokeAddPartitions(zkUtils, topicName, effectivePartitionCount, null, false);
+					}
+					else if (tolerateLowerPartitionsOnBroker) {
+						logger.warn("The number of expected partitions was: " + partitionCount + ", but "
+								+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
+								+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
+					}
+					else {
+						throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
+								+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
+								+ "Consider either increasing the partition count of the topic or enabling " +
+								"`autoAddPartitions`");
+					}
+				}
+			}
+			else if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) {
+				// always consider minPartitionCount for topic creation
+				final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
+						partitionCount);
+
+				this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
+
+					@Override
+					public Object doWithRetry(RetryContext context) throws RuntimeException {
+
+						try {
+							adminUtilsOperation.invokeCreateTopic(zkUtils, topicName, effectivePartitionCount,
+									configurationProperties.getReplicationFactor(), new Properties());
+						}
+						catch (Exception e) {
+							String exceptionClass = e.getClass().getName();
+							if (exceptionClass.equals("kafka.common.TopicExistsException")
+									|| exceptionClass.equals("org.apache.kafka.common.errors.TopicExistsException")) {
+								if (logger.isWarnEnabled()) {
+									logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
+								}
+							}
+							else {
+								throw e;
+							}
+						}
+						return null;
+					}
+				});
+			}
+			else {
+				throw new ProvisioningException("Error fetching Kafka topic metadata: ",
+						ErrorMapping.exceptionFor(errorCode));
+			}
+		}
+		finally {
+			zkUtils.close();
+		}
+	}
+
+	public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
+			final boolean tolerateLowerPartitionsOnBroker,
+			final Callable<Collection<PartitionInfo>> callable) {
+		try {
+			return this.metadataRetryOperations
+					.execute(new RetryCallback<Collection<PartitionInfo>, Exception>() {
+
+						@Override
+						public Collection<PartitionInfo> doWithRetry(RetryContext context) throws Exception {
+							Collection<PartitionInfo> partitions = callable.call();
+							// do a sanity check on the partition set
+							int partitionSize = partitions.size();
+							if (partitionSize < partitionCount) {
+								if (tolerateLowerPartitionsOnBroker) {
+									logger.warn("The number of expected partitions was: " + partitionCount + ", but "
+											+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
+											+ "There will be " + (partitionCount - partitionSize) + " idle consumers");
+								}
+								else {
+									throw new IllegalStateException("The number of expected partitions was: "
+											+ partitionCount + ", but " + partitionSize
+											+ (partitionSize > 1 ? " have " : " has ") + "been found instead");
+								}
+							}
+							return partitions;
+						}
+					});
+		}
+		catch (Exception e) {
+			this.logger.error("Cannot initialize Binder", e);
+			throw new BinderException("Cannot initialize binder:", e);
+		}
+	}
+
+	private static final class KafkaProducerDestination implements ProducerDestination {
+
+		private final String producerDestinationName;
+
+		private final int partitions;
+
+		KafkaProducerDestination(String destinationName) {
+			this(destinationName, 0);
+		}
+
+		KafkaProducerDestination(String destinationName, Integer partitions) {
+			this.producerDestinationName = destinationName;
+			this.partitions = partitions;
+		}
+
+		@Override
+		public String getName() {
+			return producerDestinationName;
+		}
+
+		@Override
+		public String getNameForPartition(int partition) {
+			return producerDestinationName;
+		}
+
+		@Override
+		public String toString() {
+			return "KafkaProducerDestination{" +
+					"producerDestinationName='" + producerDestinationName + '\'' +
+					", partitions=" + partitions +
+					'}';
+		}
+	}
+
+	private static final class KafkaConsumerDestination implements ConsumerDestination {
+
+		private final String consumerDestinationName;
+
+		private final int partitions;
+
+		private final String dlqName;
+
+		KafkaConsumerDestination(String consumerDestinationName) {
+			this(consumerDestinationName, 0, null);
+		}
+
+		KafkaConsumerDestination(String consumerDestinationName, int partitions) {
+			this(consumerDestinationName, partitions, null);
+		}
+
+		KafkaConsumerDestination(String consumerDestinationName, Integer partitions, String dlqName) {
+			this.consumerDestinationName = consumerDestinationName;
+			this.partitions = partitions;
+			this.dlqName = dlqName;
+		}
+
+		@Override
+		public String getName() {
+			return this.consumerDestinationName;
+		}
+
+		@Override
+		public String toString() {
+			return "KafkaConsumerDestination{" +
+					"consumerDestinationName='" + consumerDestinationName + '\'' +
+					", partitions=" + partitions +
+					", dlqName='" + dlqName + '\'' +
+					'}';
+		}
+	}
+}
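A brief usage sketch of the provisioner above, based only on the constructor and methods defined in this new class; the destination name is illustrative:

```java
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.provisioning.ProducerDestination;

// Sketch of driving the new provisioner directly, as the test binders in this
// diff do. afterPropertiesSet() installs the default retry template
// (10 attempts, exponential backoff from 100ms up to 1s).
public class ProvisionerExample {

	public static void main(String[] args) throws Exception {
		KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties();
		AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();

		KafkaTopicProvisioner provisioner =
				new KafkaTopicProvisioner(configurationProperties, adminUtilsOperation);
		provisioner.afterPropertiesSet();

		// Provision an outbound topic; partition count comes from the producer properties.
		ExtendedProducerProperties<KafkaProducerProperties> producerProperties =
				new ExtendedProducerProperties<>(new KafkaProducerProperties());
		ProducerDestination destination =
				provisioner.provisionProducerDestination("so8400out", producerProperties);
		System.out.println(destination);
	}
}
```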
KafkaTopicUtils.java

@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.utils;

 import java.io.UnsupportedEncodingException;
spring-cloud-stream-binder-kafka-docs/pom.xml

@@ -5,7 +5,7 @@
 	<parent>
 		<groupId>org.springframework.cloud</groupId>
 		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-		<version>1.2.0.M1</version>
+		<version>1.2.1.RELEASE</version>
 	</parent>

 	<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
spring-cloud-stream-binder-kafka-docs/src/main/asciidoc/dlq.adoc (new file, 109 lines)
@@ -0,0 +1,109 @@
+[[kafka-dlq-processing]]
+== Dead-Letter Topic Processing
+
+Because it can't be anticipated how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
+If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
+However, if the problem is a permanent issue, that could cause an infinite loop.
+The following `spring-boot` application is an example of how to route those messages back to the original topic, but moves them to a third "parking lot" topic after three attempts.
+The application is simply another spring-cloud-stream application that reads from the dead-letter topic.
+It terminates when no messages are received for 5 seconds.
+
+The examples assume the original destination is `so8400out` and the consumer group is `so8400`.
+
+There are several considerations.
+
+- Consider only running the rerouting when the main application is not running.
+Otherwise, the retries for transient errors will be used up very quickly.
+- Alternatively, use a two-stage approach - use this application to route to a third topic, and another to route from there back to the main topic.
+- Since this technique uses a message header to keep track of retries, it won't work with `headerMode=raw`.
+In that case, consider adding some data to the payload (that can be ignored by the main application).
+- `x-retries` has to be added to the `headers` property `spring.cloud.stream.kafka.binder.headers=x-retries` on both this, and the main application so that the header is transported between the applications.
+- Since kafka is publish/subscribe, replayed messages will be sent to each consumer group, even those that successfully processed a message the first time around.
+
+.application.properties
+[source]
+----
+spring.cloud.stream.bindings.input.group=so8400replay
+spring.cloud.stream.bindings.input.destination=error.so8400out.so8400
+
+spring.cloud.stream.bindings.output.destination=so8400out
+spring.cloud.stream.bindings.output.producer.partitioned=true
+
+spring.cloud.stream.bindings.parkingLot.destination=so8400in.parkingLot
+spring.cloud.stream.bindings.parkingLot.producer.partitioned=true
+
+spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=earliest
+
+spring.cloud.stream.kafka.binder.headers=x-retries
+----
+
+.Application
+[source, java]
+----
+@SpringBootApplication
+@EnableBinding(TwoOutputProcessor.class)
+public class ReRouteDlqKApplication implements CommandLineRunner {
+
+	private static final String X_RETRIES_HEADER = "x-retries";
+
+	public static void main(String[] args) {
+		SpringApplication.run(ReRouteDlqKApplication.class, args).close();
+	}
+
+	private final AtomicInteger processed = new AtomicInteger();
+
+	@Autowired
+	private MessageChannel parkingLot;
+
+	@StreamListener(Processor.INPUT)
+	@SendTo(Processor.OUTPUT)
+	public Message<?> reRoute(Message<?> failed) {
+		processed.incrementAndGet();
+		Integer retries = failed.getHeaders().get(X_RETRIES_HEADER, Integer.class);
+		if (retries == null) {
+			System.out.println("First retry for " + failed);
+			return MessageBuilder.fromMessage(failed)
+					.setHeader(X_RETRIES_HEADER, new Integer(1))
+					.setHeader(BinderHeaders.PARTITION_OVERRIDE,
+							failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
+					.build();
+		}
+		else if (retries.intValue() < 3) {
+			System.out.println("Another retry for " + failed);
+			return MessageBuilder.fromMessage(failed)
+					.setHeader(X_RETRIES_HEADER, new Integer(retries.intValue() + 1))
+					.setHeader(BinderHeaders.PARTITION_OVERRIDE,
+							failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
+					.build();
+		}
+		else {
+			System.out.println("Retries exhausted for " + failed);
+			parkingLot.send(MessageBuilder.fromMessage(failed)
+					.setHeader(BinderHeaders.PARTITION_OVERRIDE,
+							failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
+					.build());
+		}
+		return null;
+	}
+
+	@Override
+	public void run(String... args) throws Exception {
+		while (true) {
+			int count = this.processed.get();
+			Thread.sleep(5000);
+			if (count == this.processed.get()) {
+				System.out.println("Idle, terminating");
+				return;
+			}
+		}
+	}
+
+	public interface TwoOutputProcessor extends Processor {
+
+		@Output("parkingLot")
+		MessageChannel parkingLot();
+
+	}
+
+}
----
@@ -23,7 +23,7 @@ Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinat

 = Reference Guide
 include::overview.adoc[]

+include::dlq.adoc[]
+
 = Appendices
 [appendix]
@@ -41,7 +41,7 @@ Partitioning also maps directly to Apache Kafka partitions as well.

 This section contains the configuration options used by the Apache Kafka binder.

-For common configuration options and properties pertaining to binder, refer to the https://github.com/spring-cloud/spring-cloud-stream/blob/master/spring-cloud-stream-docs/src/main/asciidoc/spring-cloud-stream-overview.adoc#configuration-options[core docs].
+For common configuration options and properties pertaining to binder, refer to the <<binding-properties,core documentation>>.

 === Kafka Binder Properties
@@ -153,11 +153,13 @@ Default: `false`.
 startOffset::
 The starting offset for new groups, or when `resetOffsets` is `true`.
 Allowed values: `earliest`, `latest`.
+If the consumer group is set explicitly for the consumer 'binding' (via `spring.cloud.stream.bindings.<channelName>.group`), then 'startOffset' is set to `earliest`; otherwise it is set to `latest` for the `anonymous` consumer group.
 +
 Default: null (equivalent to `earliest`).
 enableDlq::
 When set to true, it will enable DLQ behavior for the consumer.
-Messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
+By default, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
+The DLQ topic name can be configured via the property `dlqName`.
 This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
 +
 Default: `false`.
@@ -165,6 +167,10 @@ configuration::
 Map with a key/value pair containing generic Kafka consumer properties.
 +
 Default: Empty map.
+dlqName::
+The name of the DLQ topic to receive the error messages.
++
+Default: null (If not specified, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`).

 === Kafka Producer Properties
@@ -184,6 +190,11 @@ batchTimeout::
 (Normally the producer does not wait at all, and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
 +
 Default: `0`.
+messageKeyExpression::
+A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
+For example `headers.key` or `payload.myKey`.
++
+Default: `none`.
 configuration::
 Map with a key/value pair containing generic Kafka producer properties.
 +
@@ -328,26 +339,18 @@ In secure environments, we strongly recommend creating topics and managing ACLs

 ==== Using the binder with Apache Kafka 0.10

-The binder also supports connecting to Kafka 0.10 brokers.
-In order to support this, when you create the project that contains your application, include `spring-cloud-starter-stream-kafka` as you normally would do for 0.9 based applications.
-Then add these dependencies at the top of the `<dependencies>` section in the pom.xml file to override the Apache Kafka, Spring Kafka, and Spring Integration Kafka with 0.10-compatible versions as in the following example:
+The default Kafka support in Spring Cloud Stream Kafka binder is for Kafka version 0.10.1.1. The binder also supports connecting to other 0.10 based versions and 0.9 clients.
+In order to do this, when you create the project that contains your application, include `spring-cloud-starter-stream-kafka` as you normally would do for the default binder.
+Then add these dependencies at the top of the `<dependencies>` section in the pom.xml file to override the dependencies.
+
+Here is an example for downgrading your application to 0.10.0.1. Since it is still on the 0.10 line, the default `spring-kafka` and `spring-integration-kafka` versions can be retained.

 [source,xml]
 ----
-<dependency>
-	<groupId>org.springframework.kafka</groupId>
-	<artifactId>spring-kafka</artifactId>
-	<version>1.1.1.RELEASE</version>
-</dependency>
-<dependency>
-	<groupId>org.springframework.integration</groupId>
-	<artifactId>spring-integration-kafka</artifactId>
-	<version>2.1.0.RELEASE</version>
-</dependency>
 <dependency>
 	<groupId>org.apache.kafka</groupId>
 	<artifactId>kafka_2.11</artifactId>
-	<version>0.10.0.0</version>
+	<version>0.10.0.1</version>
 	<exclusions>
 		<exclusion>
 			<groupId>org.slf4j</groupId>
@@ -355,6 +358,44 @@ Then add these dependencies at the top of the `<dependencies>` section in the po
 			<artifactId>slf4j-log4j12</artifactId>
 		</exclusion>
 	</exclusions>
 </dependency>
+<dependency>
+	<groupId>org.apache.kafka</groupId>
+	<artifactId>kafka-clients</artifactId>
+	<version>0.10.0.1</version>
+</dependency>
 ----
+
+Here is another example of using 0.9.0.1 version.
+
+[source,xml]
+----
+<dependency>
+	<groupId>org.springframework.kafka</groupId>
+	<artifactId>spring-kafka</artifactId>
+	<version>1.0.5.RELEASE</version>
+</dependency>
+<dependency>
+	<groupId>org.springframework.integration</groupId>
+	<artifactId>spring-integration-kafka</artifactId>
+	<version>2.0.1.RELEASE</version>
+</dependency>
+<dependency>
+	<groupId>org.apache.kafka</groupId>
+	<artifactId>kafka_2.11</artifactId>
+	<version>0.9.0.1</version>
+	<exclusions>
+		<exclusion>
+			<groupId>org.slf4j</groupId>
+			<artifactId>slf4j-log4j12</artifactId>
+		</exclusion>
+	</exclusions>
+</dependency>
+<dependency>
+	<groupId>org.apache.kafka</groupId>
+	<artifactId>kafka-clients</artifactId>
+	<version>0.9.0.1</version>
+</dependency>
+----
[NOTE]
|
||||
@@ -369,8 +410,8 @@ For best results, we recommend using the most recent 0.10-compatible versions of
|
||||
The Apache Kafka Binder uses the administrative utilities which are part of the Apache Kafka server library to create and reconfigure topics.
|
||||
If the inclusion of the Apache Kafka server library and its dependencies is not necessary at runtime because the application will rely on the topics being configured administratively, the Kafka binder allows for Apache Kafka server dependency to be excluded from the application.
|
||||

If you use a non-default version of the Kafka dependencies, as advised above, all you have to do is not include the Kafka broker dependency.
If you use the default Kafka version, then ensure that you exclude the Kafka broker jar from the `spring-cloud-starter-stream-kafka` dependency, as follows.

[source,xml]
----
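<!-- illustrative sketch; the original snippet was truncated at this point -->
<dependency>
	<groupId>org.springframework.cloud</groupId>
	<artifactId>spring-cloud-starter-stream-kafka</artifactId>
	<exclusions>
		<exclusion>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.11</artifactId>
		</exclusion>
	</exclusions>
</dependency>
----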

spring-cloud-stream-binder-kafka-test-support/pom.xml (new file)
@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>1.2.1.RELEASE</version>
	</parent>
	<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
	<description>Kafka related test classes</description>

	<dependencies>
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<scope>compile</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka-test</artifactId>
			<version>${spring-kafka.version}</version>
			<scope>compile</scope>
		</dependency>
	</dependencies>
</project>
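
The compile-scoped `spring-kafka-test` dependency above provides an embedded Kafka broker for tests. A minimal usage sketch (class name and topic are illustrative; JUnit 4 style, as used in this repository):

[source,java]
----
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.kafka.test.rule.KafkaEmbedded;

public class EmbeddedKafkaExampleTest {

	// one embedded broker, controlled shutdown, with a pre-created topic
	@ClassRule
	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "example.topic");

	@Test
	public void brokerIsAvailable() {
		// the rule exposes the broker list for configuring clients or the binder
		System.out.println(embeddedKafka.getBrokersAsString());
	}
}
----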
@@ -10,10 +10,15 @@
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>1.2.0.M1</version>
		<version>1.2.1.RELEASE</version>
	</parent>

	<dependencies>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
			<version>1.2.1.RELEASE</version>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-configuration-processor</artifactId>
@@ -37,17 +42,6 @@
			<artifactId>spring-cloud-stream-binder-test</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.integration</groupId>
			<artifactId>spring-integration-kafka</artifactId>
			<version>${spring-integration-kafka.version}</version>
			<exclusions>
				<exclusion>
					<groupId>org.apache.avro</groupId>
					<artifactId>avro-compiler</artifactId>
				</exclusion>
			</exclusions>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.11</artifactId>
@@ -82,8 +76,38 @@
			<classifier>test</classifier>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-test</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-schema</artifactId>
			<version>${spring-cloud-stream.version}</version>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>io.confluent</groupId>
			<artifactId>kafka-avro-serializer</artifactId>
			<version>3.1.2</version>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>io.confluent</groupId>
			<artifactId>kafka-schema-registry</artifactId>
			<version>3.1.2</version>
			<scope>test</scope>
		</dependency>
	</dependencies>

	<repositories>
		<repository>
			<id>confluent</id>
			<url>http://packages.confluent.io/maven/</url>
		</repository>
	</repositories>

	<build>
		<plugins>
			<plugin>

@@ -0,0 +1,68 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.env.EnvironmentPostProcessor;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.MapPropertySource;

/**
 * An {@link EnvironmentPostProcessor} that sets some common configuration properties (log config, etc.) for the Kafka
 * binder.
 *
 * @author Ilayaperumal Gopinathan
 */
public class KafkaBinderEnvironmentPostProcessor implements EnvironmentPostProcessor {

	public final static String SPRING_KAFKA = "spring.kafka";

	public final static String SPRING_KAFKA_PRODUCER = SPRING_KAFKA + ".producer";

	public final static String SPRING_KAFKA_CONSUMER = SPRING_KAFKA + ".consumer";

	public final static String SPRING_KAFKA_PRODUCER_KEY_SERIALIZER = SPRING_KAFKA_PRODUCER + "." + "keySerializer";

	public final static String SPRING_KAFKA_PRODUCER_VALUE_SERIALIZER = SPRING_KAFKA_PRODUCER + "." + "valueSerializer";

	public final static String SPRING_KAFKA_CONSUMER_KEY_DESERIALIZER = SPRING_KAFKA_CONSUMER + "." + "keyDeserializer";

	public final static String SPRING_KAFKA_CONSUMER_VALUE_DESERIALIZER = SPRING_KAFKA_CONSUMER + "." + "valueDeserializer";

	private static final String KAFKA_BINDER_DEFAULT_PROPERTIES = "kafkaBinderDefaultProperties";

	@Override
	public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
		if (!environment.getPropertySources().contains(KAFKA_BINDER_DEFAULT_PROPERTIES)) {
			Map<String, Object> kafkaBinderDefaultProperties = new HashMap<>();
			kafkaBinderDefaultProperties.put("logging.level.org.I0Itec.zkclient", "ERROR");
			kafkaBinderDefaultProperties.put("logging.level.kafka.server.KafkaConfig", "ERROR");
			kafkaBinderDefaultProperties.put("logging.level.kafka.admin.AdminClient.AdminConfig", "ERROR");
			kafkaBinderDefaultProperties.put(SPRING_KAFKA_PRODUCER_KEY_SERIALIZER, ByteArraySerializer.class.getName());
			kafkaBinderDefaultProperties.put(SPRING_KAFKA_PRODUCER_VALUE_SERIALIZER, ByteArraySerializer.class.getName());
			kafkaBinderDefaultProperties.put(SPRING_KAFKA_CONSUMER_KEY_DESERIALIZER, ByteArrayDeserializer.class.getName());
			kafkaBinderDefaultProperties.put(SPRING_KAFKA_CONSUMER_VALUE_DESERIALIZER, ByteArrayDeserializer.class.getName());
			// registered last, so these defaults have the lowest precedence
			environment.getPropertySources().addLast(new MapPropertySource(KAFKA_BINDER_DEFAULT_PROPERTIES, kafkaBinderDefaultProperties));
		}
	}
}
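
Because the defaults above are registered with `addLast(...)`, any user-provided `spring.kafka.*` setting wins over them. A minimal sketch of such an override in `application.properties` (the deserializer choice is illustrative):

[source,properties]
----
# user-level setting; takes precedence over the binder default registered by the post-processor
spring.kafka.consumer.valueDeserializer=org.apache.kafka.common.serialization.StringDeserializer
----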
@@ -16,20 +16,16 @@

package org.springframework.cloud.stream.binder.kafka;

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.ConsumerFactory;

/**
 * Health indicator for Kafka.
@@ -41,24 +37,18 @@ public class KafkaBinderHealthIndicator implements HealthIndicator {

	private final KafkaMessageChannelBinder binder;

	private final KafkaBinderConfigurationProperties configurationProperties;
	private final ConsumerFactory<?, ?> consumerFactory;

	public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
			KafkaBinderConfigurationProperties configurationProperties) {
			ConsumerFactory<?, ?> consumerFactory) {
		this.binder = binder;
		this.configurationProperties = configurationProperties;
		this.consumerFactory = consumerFactory;
	}

	@Override
	public Health health() {
		Map<String, String> properties = new HashMap<>();
		properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties
				.getKafkaConnectionString());
		properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
		properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
		KafkaConsumer metadataConsumer = new KafkaConsumer(properties);
		try {
		try (Consumer<?, ?> metadataConsumer = consumerFactory.createConsumer()) {
			Set<String> downMessages = new HashSet<>();
			for (String topic : this.binder.getTopicsInUse().keySet()) {
				List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
@@ -78,8 +68,5 @@ public class KafkaBinderHealthIndicator implements HealthIndicator {
		catch (Exception e) {
			return Health.down(e).build();
		}
		finally {
			metadataConsumer.close();
		}
	}
}

@@ -28,7 +28,7 @@ import org.apache.kafka.common.security.JaasUtils;

import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;

@@ -1,5 +1,5 @@
/*
 * Copyright 2014-2016 the original author or authors.
 * Copyright 2014-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,34 +22,30 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.Callable;

import kafka.common.ErrorMapping;
import kafka.utils.ZkUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.security.JaasUtils;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.utils.Utils;

import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
import org.springframework.cloud.stream.provisioning.ProducerDestination;
import org.springframework.context.Lifecycle;
import org.springframework.expression.common.LiteralExpression;
import org.springframework.expression.spel.standard.SpelExpressionParser;
@@ -65,19 +61,17 @@ import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ErrorHandler;
import org.springframework.kafka.listener.config.ContainerProperties;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TopicPartitionInitialOffset;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

/**
 * A {@link Binder} that uses Kafka as the underlying middleware.
@@ -89,29 +83,24 @@ import org.springframework.util.StringUtils;
 * @author Gary Russell
 * @author Mark Fisher
 * @author Soby Chacko
 * @author Henryk Konsek
 */
public class KafkaMessageChannelBinder extends
		AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
		ExtendedProducerProperties<KafkaProducerProperties>, Collection<PartitionInfo>, String>
		implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties>,
		DisposableBean {
		ExtendedProducerProperties<KafkaProducerProperties>, KafkaTopicProvisioner>
		implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties> {

	private final KafkaBinderConfigurationProperties configurationProperties;

	private RetryOperations metadataRetryOperations;

	private final Map<String, Collection<PartitionInfo>> topicsInUse = new HashMap<>();

	private ProducerListener<byte[], byte[]> producerListener;

	private volatile Producer<byte[], byte[]> dlqProducer;

	private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();

	private AdminUtilsOperation adminUtilsOperation;
	private final Map<String, Collection<PartitionInfo>> topicsInUse = new HashMap<>();

	public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties) {
		super(false, headersToMap(configurationProperties));
	public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
			KafkaTopicProvisioner provisioningProvider) {
		super(false, headersToMap(configurationProperties), provisioningProvider);
		this.configurationProperties = configurationProperties;
	}

@@ -131,50 +120,10 @@ public class KafkaMessageChannelBinder extends
		return headersToMap;
	}

	public void setAdminUtilsOperation(AdminUtilsOperation adminUtilsOperation) {
		this.adminUtilsOperation = adminUtilsOperation;
	}

	/**
	 * Retry configuration for operations such as validating topic creation
	 *
	 * @param metadataRetryOperations the retry configuration
	 */
	public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
		this.metadataRetryOperations = metadataRetryOperations;
	}

	public void setExtendedBindingProperties(KafkaExtendedBindingProperties extendedBindingProperties) {
		this.extendedBindingProperties = extendedBindingProperties;
	}

	@Override
	public void onInit() throws Exception {

		if (this.metadataRetryOperations == null) {
			RetryTemplate retryTemplate = new RetryTemplate();

			SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
			simpleRetryPolicy.setMaxAttempts(10);
			retryTemplate.setRetryPolicy(simpleRetryPolicy);

			ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
			backOffPolicy.setInitialInterval(100);
			backOffPolicy.setMultiplier(2);
			backOffPolicy.setMaxInterval(1000);
			retryTemplate.setBackOffPolicy(backOffPolicy);
			this.metadataRetryOperations = retryTemplate;
		}
	}

	@Override
	public void destroy() throws Exception {
		if (this.dlqProducer != null) {
			this.dlqProducer.close();
			this.dlqProducer = null;
		}
	}

	public void setProducerListener(ProducerListener<byte[], byte[]> producerListener) {
		this.producerListener = producerListener;
	}
@@ -194,68 +143,57 @@ public class KafkaMessageChannelBinder extends
	}

	@Override
	protected MessageHandler createProducerMessageHandler(final String destination,
			ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {

		KafkaTopicUtils.validateTopicName(destination);
		createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(destination, producerProperties.getPartitionCount());
		Collection<PartitionInfo> partitions = getPartitionsForTopic(destination, producerProperties.getPartitionCount());

	protected MessageHandler createProducerMessageHandler(final ProducerDestination destination,
			ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
		final DefaultKafkaProducerFactory<byte[], byte[]> producerFB = getProducerFactory(producerProperties);
		Collection<PartitionInfo> partitions = provisioningProvider.getPartitionsForTopic(producerProperties.getPartitionCount(),
				false,
				new Callable<Collection<PartitionInfo>>() {
					@Override
					public Collection<PartitionInfo> call() throws Exception {
						return producerFB.createProducer().partitionsFor(destination.getName());
					}
				});
		this.topicsInUse.put(destination.getName(), partitions);
		if (producerProperties.getPartitionCount() < partitions.size()) {
			if (this.logger.isInfoEnabled()) {
				this.logger.info("The `partitionCount` of the producer for topic " + destination + " is "
				this.logger.info("The `partitionCount` of the producer for topic " + destination.getName() + " is "
						+ producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
						+ partitions.size() + " of the topic. The larger number will be used instead.");
			}
		}

		this.topicsInUse.put(destination, partitions);

		DefaultKafkaProducerFactory<byte[], byte[]> producerFB = getProducerFactory(producerProperties);
		KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFB);
		if (this.producerListener != null) {
			kafkaTemplate.setProducerListener(this.producerListener);
		}
		return new ProducerConfigurationMessageHandler(kafkaTemplate, destination, producerProperties, producerFB);
	}

	@Override
	protected String createProducerDestinationIfNecessary(String name,
			ExtendedProducerProperties<KafkaProducerProperties> properties) {
		if (this.logger.isInfoEnabled()) {
			this.logger.info("Using kafka topic for outbound: " + name);
		}
		KafkaTopicUtils.validateTopicName(name);
		createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, properties.getPartitionCount());
		Collection<PartitionInfo> partitions = getPartitionsForTopic(name, properties.getPartitionCount());
		if (properties.getPartitionCount() < partitions.size()) {
			if (this.logger.isInfoEnabled()) {
				this.logger.info("The `partitionCount` of the producer for topic " + name + " is "
						+ properties.getPartitionCount() + ", smaller than the actual partition count of "
						+ partitions.size() + " of the topic. The larger number will be used instead.");
			}
		}
		this.topicsInUse.put(name, partitions);
		return name;
		return new ProducerConfigurationMessageHandler(kafkaTemplate, destination.getName(), producerProperties, producerFB);
	}

	private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
			ExtendedProducerProperties<KafkaProducerProperties> producerProperties) {
		Map<String, Object> props = new HashMap<>();
		if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
			props.putAll(configurationProperties.getConfiguration());
		}
		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
		props.put(ProducerConfig.RETRIES_CONFIG, 0);
		props.put(ProducerConfig.BATCH_SIZE_CONFIG, String.valueOf(producerProperties.getExtension().getBufferSize()));
		props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
		props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
		props.put(ProducerConfig.ACKS_CONFIG, String.valueOf(this.configurationProperties.getRequiredAcks()));
		props.put(ProducerConfig.LINGER_MS_CONFIG,
				String.valueOf(producerProperties.getExtension().getBatchTimeout()));
		props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
				producerProperties.getExtension().getCompressionType().toString());
		if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
			props.putAll(configurationProperties.getConfiguration());
		}
		if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
			props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
		}
		if (ObjectUtils.isEmpty(props.get(ProducerConfig.BATCH_SIZE_CONFIG))) {
			props.put(ProducerConfig.BATCH_SIZE_CONFIG, String.valueOf(producerProperties.getExtension().getBufferSize()));
		}
		if (ObjectUtils.isEmpty(props.get(ProducerConfig.LINGER_MS_CONFIG))) {
			props.put(ProducerConfig.LINGER_MS_CONFIG, String.valueOf(producerProperties.getExtension().getBatchTimeout()));
		}
		if (ObjectUtils.isEmpty(props.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))) {
			props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
					producerProperties.getExtension().getCompressionType().toString());
		}
		if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
			props.putAll(producerProperties.getExtension().getConfiguration());
		}
@@ -263,56 +201,50 @@ public class KafkaMessageChannelBinder extends
	}

	@Override
	protected Collection<PartitionInfo> createConsumerDestinationIfNecessary(String name, String group,
			ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
		KafkaTopicUtils.validateTopicName(name);
		if (properties.getInstanceCount() == 0) {
			throw new IllegalArgumentException("Instance count cannot be zero");
		}
		int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
		createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, partitionCount);
		Collection<PartitionInfo> allPartitions = getPartitionsForTopic(name, partitionCount);
	@SuppressWarnings("unchecked")
	protected MessageProducer createConsumerEndpoint(final ConsumerDestination destination, final String group,
			final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties) {

		boolean anonymous = !StringUtils.hasText(group);
		Assert.isTrue(!anonymous || !extendedConsumerProperties.getExtension().isEnableDlq(),
				"DLQ support is not available for anonymous subscriptions");
		String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
		final ConsumerFactory<?, ?> consumerFactory = createKafkaConsumerFactory(anonymous, consumerGroup, extendedConsumerProperties);
		int partitionCount = extendedConsumerProperties.getInstanceCount() * extendedConsumerProperties.getConcurrency();

		Collection<PartitionInfo> allPartitions = provisioningProvider.getPartitionsForTopic(partitionCount,
				extendedConsumerProperties.getExtension().isAutoRebalanceEnabled(),
				new Callable<Collection<PartitionInfo>>() {
					@Override
					public Collection<PartitionInfo> call() throws Exception {
						return consumerFactory.createConsumer().partitionsFor(destination.getName());
					}
				});

		Collection<PartitionInfo> listenedPartitions;

		if (properties.getExtension().isAutoRebalanceEnabled() ||
				properties.getInstanceCount() == 1) {
		if (extendedConsumerProperties.getExtension().isAutoRebalanceEnabled() ||
				extendedConsumerProperties.getInstanceCount() == 1) {
			listenedPartitions = allPartitions;
		}
		else {
			listenedPartitions = new ArrayList<>();
			for (PartitionInfo partition : allPartitions) {
				// divide partitions across modules
				if ((partition.partition() % properties.getInstanceCount()) == properties.getInstanceIndex()) {
				if ((partition.partition() % extendedConsumerProperties.getInstanceCount()) == extendedConsumerProperties.getInstanceIndex()) {
					listenedPartitions.add(partition);
				}
			}
		}
		this.topicsInUse.put(name, listenedPartitions);
		return listenedPartitions;
	}
		this.topicsInUse.put(destination.getName(), listenedPartitions);

	@Override
	@SuppressWarnings("unchecked")
	protected MessageProducer createConsumerEndpoint(String name, String group, Collection<PartitionInfo> destination,
			ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
		boolean anonymous = !StringUtils.hasText(group);
		Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
				"DLQ support is not available for anonymous subscriptions");
		String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
		Map<String, Object> props = getConsumerConfig(anonymous, consumerGroup);
		if (!ObjectUtils.isEmpty(properties.getExtension().getConfiguration())) {
			props.putAll(properties.getExtension().getConfiguration());
		}
		ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
		Collection<PartitionInfo> listenedPartitions = destination;
		Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
		final TopicPartitionInitialOffset[] topicPartitionInitialOffsets = getTopicPartitionInitialOffsets(
				listenedPartitions);
		final ContainerProperties containerProperties =
				anonymous || properties.getExtension().isAutoRebalanceEnabled() ? new ContainerProperties(name)
						: new ContainerProperties(topicPartitionInitialOffsets);
		int concurrency = Math.min(properties.getConcurrency(), listenedPartitions.size());
				anonymous || extendedConsumerProperties.getExtension().isAutoRebalanceEnabled() ?
						new ContainerProperties(destination.getName()) : new ContainerProperties(topicPartitionInitialOffsets);
		int concurrency = Math.min(extendedConsumerProperties.getConcurrency(), listenedPartitions.size());
		final ConcurrentMessageListenerContainer<?, ?> messageListenerContainer =
				new ConcurrentMessageListenerContainer(
						consumerFactory, containerProperties) {
@@ -323,8 +255,8 @@ public class KafkaMessageChannelBinder extends
			}
		};
		messageListenerContainer.setConcurrency(concurrency);
		messageListenerContainer.getContainerProperties().setAckOnError(isAutoCommitOnError(properties));
		if (!properties.getExtension().isAutoCommitOffset()) {
		messageListenerContainer.getContainerProperties().setAckOnError(isAutoCommitOnError(extendedConsumerProperties));
		if (!extendedConsumerProperties.getExtension().isAutoCommitOffset()) {
			messageListenerContainer.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
		}
		if (this.logger.isDebugEnabled()) {
@@ -339,11 +271,11 @@ public class KafkaMessageChannelBinder extends
				new KafkaMessageDrivenChannelAdapter<>(
						messageListenerContainer);
		kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
		final RetryTemplate retryTemplate = buildRetryTemplate(properties);
		final RetryTemplate retryTemplate = buildRetryTemplate(extendedConsumerProperties);
		kafkaMessageDrivenChannelAdapter.setRetryTemplate(retryTemplate);
		if (properties.getExtension().isEnableDlq()) {
			final String dlqTopic = "error." + name + "." + group;
			initDlqProducer();
		if (extendedConsumerProperties.getExtension().isEnableDlq()) {
			DefaultKafkaProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(new ExtendedProducerProperties<>(new KafkaProducerProperties()));
			final KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFactory);
			messageListenerContainer.getContainerProperties().setErrorHandler(new ErrorHandler() {

				@Override
@@ -352,49 +284,56 @@ public class KafkaMessageChannelBinder extends
							: null;
					final byte[] payload = message.value() != null
							? Utils.toArray(ByteBuffer.wrap((byte[]) message.value())) : null;
					KafkaMessageChannelBinder.this.dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload),
							new Callback() {
					String dlqName = StringUtils.hasText(extendedConsumerProperties.getExtension().getDlqName()) ?
							extendedConsumerProperties.getExtension().getDlqName() : "error." + destination.getName() + "." + group;
					ListenableFuture<SendResult<byte[], byte[]>> sentDlq = kafkaTemplate.send(dlqName, message.partition(), key, payload);
					sentDlq.addCallback(new ListenableFutureCallback<SendResult<byte[], byte[]>>() {
						StringBuilder sb = new StringBuilder().append(" a message with key='")
								.append(toDisplayString(ObjectUtils.nullSafeToString(key), 50)).append("'")
								.append(" and payload='")
								.append(toDisplayString(ObjectUtils.nullSafeToString(payload), 50))
								.append("'").append(" received from ")
								.append(message.partition());

						@Override
						public void onCompletion(RecordMetadata metadata, Exception exception) {
							StringBuffer messageLog = new StringBuffer();
							messageLog.append(" a message with key='"
									+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
							messageLog.append(" and payload='"
									+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
							messageLog.append(" received from " + message.partition());
							if (exception != null) {
								KafkaMessageChannelBinder.this.logger.error(
										"Error sending to DLQ" + messageLog.toString(), exception);
							}
							else {
								if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
									KafkaMessageChannelBinder.this.logger.debug(
											"Sent to DLQ " + messageLog.toString());
								}
							}
						}
					});
						@Override
						public void onFailure(Throwable ex) {
							KafkaMessageChannelBinder.this.logger.error(
									"Error sending to DLQ" + sb.toString(), ex);
						}

						@Override
						public void onSuccess(SendResult<byte[], byte[]> result) {
							if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
								KafkaMessageChannelBinder.this.logger.debug(
										"Sent to DLQ " + sb.toString());
							}
						}
					});
				}
			});
		}
		return kafkaMessageDrivenChannelAdapter;
	}

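	// The DLQ branch above is driven by the Kafka consumer binding properties, e.g. (channel name
	// "input" is illustrative):
	//   spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
	//   spring.cloud.stream.kafka.bindings.input.consumer.dlqName=my-custom-dlq
	// When dlqName is not set, the default "error.<destination>.<group>" name is used, as coded above.
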
	private Map<String, Object> getConsumerConfig(boolean anonymous, String consumerGroup) {
	private ConsumerFactory<?, ?> createKafkaConsumerFactory(boolean anonymous, String consumerGroup,
			ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
		Map<String, Object> props = new HashMap<>();
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
		props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100);
		if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
			props.putAll(configurationProperties.getConfiguration());
		}
		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
		if (ObjectUtils.isEmpty(props.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
			props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
		}
		if (!ObjectUtils.isEmpty(consumerProperties.getExtension().getConfiguration())) {
			props.putAll(consumerProperties.getExtension().getConfiguration());
		}
		props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
				anonymous ? "latest" : "earliest");
		props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100);
		return props;
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, anonymous ? "latest" : "earliest");
		return new DefaultKafkaConsumerFactory<>(props);
	}

	private boolean isAutoCommitOnError(ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
@@ -416,132 +355,6 @@ public class KafkaMessageChannelBinder extends
		return topicPartitionInitialOffsets;
	}

	private void createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(final String topicName, final int partitionCount) {
		if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
			createTopicAndPartitions(topicName, partitionCount);
		}
		else if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation == null) {
			this.logger.warn("Auto creation of topics is enabled, but Kafka AdminUtils class is not present on the classpath. " +
					"No topic will be created by the binder");
		}
		else if (!this.configurationProperties.isAutoCreateTopics()) {
			this.logger.info("Auto creation of topics is disabled.");
		}
	}

	/**
	 * Creates a Kafka topic if needed, or try to increase its partition count to the
	 * desired number.
	 */
	private void createTopicAndPartitions(final String topicName, final int partitionCount) {

		final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
				this.configurationProperties.getZkSessionTimeout(),
				this.configurationProperties.getZkConnectionTimeout(),
				JaasUtils.isZkSecurityEnabled());
		try {
			short errorCode = adminUtilsOperation.errorCodeFromTopicMetadata(topicName, zkUtils);
			if (errorCode == ErrorMapping.NoError()) {
				// only consider minPartitionCount for resizing if autoAddPartitions is true
				int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
						? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
						: partitionCount;
				int partitionSize = adminUtilsOperation.partitionSize(topicName, zkUtils);

				if (partitionSize < effectivePartitionCount) {
					if (this.configurationProperties.isAutoAddPartitions()) {
						adminUtilsOperation.invokeAddPartitions(zkUtils, topicName, effectivePartitionCount, null, false);
					}
					else {
						throw new BinderException("The number of expected partitions was: " + partitionCount + ", but "
								+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
								+ "Consider either increasing the partition count of the topic or enabling " +
								"`autoAddPartitions`");
					}
				}
			}
			else if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) {
				// always consider minPartitionCount for topic creation
				final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
						partitionCount);

				this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {

					@Override
					public Object doWithRetry(RetryContext context) throws RuntimeException {

						adminUtilsOperation.invokeCreateTopic(zkUtils, topicName, effectivePartitionCount,
								configurationProperties.getReplicationFactor(), new Properties());
						return null;
					}
				});
			}
			else {
				throw new BinderException("Error fetching Kafka topic metadata: ",
						ErrorMapping.exceptionFor(errorCode));
			}
		}
		finally {
			zkUtils.close();
		}
	}

	private Collection<PartitionInfo> getPartitionsForTopic(final String topicName, final int partitionCount) {
		try {
			return this.metadataRetryOperations
					.execute(new RetryCallback<Collection<PartitionInfo>, Exception>() {

						@Override
						public Collection<PartitionInfo> doWithRetry(RetryContext context) throws Exception {
							Collection<PartitionInfo> partitions =
									getProducerFactory(
											new ExtendedProducerProperties<>(new KafkaProducerProperties()))
											.createProducer().partitionsFor(topicName);

							// do a sanity check on the partition set
							if (partitions.size() < partitionCount) {
								throw new IllegalStateException("The number of expected partitions was: "
										+ partitionCount + ", but " + partitions.size()
										+ (partitions.size() > 1 ? " have " : " has ") + "been found instead");
							}
							return partitions;
						}
					});
		}
		catch (Exception e) {
			this.logger.error("Cannot initialize Binder", e);
			throw new BinderException("Cannot initialize binder:", e);
		}
	}

	private synchronized void initDlqProducer() {
		try {
			if (this.dlqProducer == null) {
				synchronized (this) {
					if (this.dlqProducer == null) {
						// we can use the producer defaults as we do not need to tune
						// performance
						Map<String, Object> props = new HashMap<>();
						props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
								this.configurationProperties.getKafkaConnectionString());
						props.put(ProducerConfig.RETRIES_CONFIG, 0);
						props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
						props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
						props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
						props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
						props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
						DefaultKafkaProducerFactory<byte[], byte[]> defaultKafkaProducerFactory =
								new DefaultKafkaProducerFactory<>(props);
						this.dlqProducer = defaultKafkaProducerFactory.createProducer();
					}
				}
			}
		}
		catch (Exception e) {
			throw new RuntimeException("Cannot initialize DLQ producer:", e);
		}
	}

	private String toDisplayString(String original, int maxCharacters) {
		if (original.length() <= maxCharacters) {
			return original;
@@ -557,14 +370,15 @@ public class KafkaMessageChannelBinder extends
		private final DefaultKafkaProducerFactory<byte[], byte[]> producerFactory;

		private ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate, String topic,
				ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
				DefaultKafkaProducerFactory<byte[], byte[]> producerFactory) {
			super(kafkaTemplate);
			setTopicExpression(new LiteralExpression(topic));
			setMessageKeyExpression(producerProperties.getExtension().getMessageKeyExpression());
			setBeanFactory(KafkaMessageChannelBinder.this.getBeanFactory());
			if (producerProperties.isPartitioned()) {
				SpelExpressionParser parser = new SpelExpressionParser();
				setPartitionIdExpression(parser.parseExpression("headers.partition"));
				setPartitionIdExpression(parser.parseExpression("headers." + BinderHeaders.PARTITION_HEADER));
			}
			if (producerProperties.getExtension().isSync()) {
				setSync(true);

@@ -16,25 +16,36 @@

package org.springframework.cloud.stream.binder.kafka.config;

import javax.annotation.PostConstruct;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.utils.AppInfoParser;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderJaasInitializerListener;
import org.springframework.cloud.stream.binder.kafka.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationListener;
@@ -46,8 +57,11 @@ import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.core.type.AnnotatedTypeMetadata;
import org.springframework.integration.codec.Codec;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.util.ObjectUtils;

/**
 * @author David Turanski
@@ -58,7 +72,7 @@ import org.springframework.kafka.support.ProducerListener;
 */
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class})
@Import({KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class, KafkaBinderConfiguration.KafkaPropertiesConfiguration.class})
@EnableConfigurationProperties({KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class})
public class KafkaBinderConfiguration {

@@ -82,14 +96,18 @@ public class KafkaBinderConfiguration {
	@Autowired (required = false)
	private AdminUtilsOperation adminUtilsOperation;

	@Bean
	KafkaTopicProvisioner provisioningProvider() {
		return new KafkaTopicProvisioner(this.configurationProperties, this.adminUtilsOperation);
	}

	@Bean
	KafkaMessageChannelBinder kafkaMessageChannelBinder() {
		KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
				this.configurationProperties);
				this.configurationProperties, provisioningProvider());
		kafkaMessageChannelBinder.setCodec(this.codec);
		kafkaMessageChannelBinder.setProducerListener(producerListener);
		kafkaMessageChannelBinder.setExtendedBindingProperties(this.kafkaExtendedBindingProperties);
		kafkaMessageChannelBinder.setAdminUtilsOperation(adminUtilsOperation);
		return kafkaMessageChannelBinder;
	}

@@ -101,7 +119,17 @@ public class KafkaBinderConfiguration {

	@Bean
	KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
		return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, this.configurationProperties);
		Map<String, Object> props = new HashMap<>();
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
		if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
			props.putAll(configurationProperties.getConfiguration());
		}
		if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
			props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
		}
		ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
		return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, consumerFactory);
	}

	@Bean(name = "adminUtilsOperation")
@@ -147,4 +175,47 @@ public class KafkaBinderConfiguration {

	private JaasLoginModuleConfiguration zookeeper;
	}

	@ConditionalOnClass(name = "org.springframework.boot.autoconfigure.kafka.KafkaProperties")
	public static class KafkaPropertiesConfiguration {

		// KafkaProperties can still be unavailable if KafkaAutoConfiguration is disabled.
		@Autowired(required = false)
		private KafkaProperties kafkaProperties;

		@Autowired
		private KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties;

		@PostConstruct
		public void init() {
			Map<String, Object> configuration = this.kafkaBinderConfigurationProperties.getConfiguration();
			if (this.kafkaProperties != null) {
				for (Map.Entry<String, String> properties : this.kafkaProperties.getProperties().entrySet()) {
					if (!configuration.containsKey(properties.getKey())) {
						configuration.put(properties.getKey(), properties.getValue());
					}
				}
				for (Map.Entry<String, Object> producerProperties : this.kafkaProperties.buildProducerProperties().entrySet()) {
					if (!configuration.containsKey(producerProperties.getKey())) {
						configuration.put(producerProperties.getKey(), producerProperties.getValue());
					}
				}
				for (Map.Entry<String, Object> consumerProperties : this.kafkaProperties.buildConsumerProperties().entrySet()) {
					if (!configuration.containsKey(consumerProperties.getKey())) {
						configuration.put(consumerProperties.getKey(), consumerProperties.getValue());
					}
				}
				if (ObjectUtils.isEmpty(configuration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
					configuration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBinderConfigurationProperties.getKafkaConnectionString());
				}
				else {
					@SuppressWarnings("unchecked")
					List<String> bootStrapServers = (List<String>) configuration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
					if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
						configuration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBinderConfigurationProperties.getKafkaConnectionString());
					}
				}
			}
		}
	}
}

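The net effect of `KafkaPropertiesConfiguration` above is that Boot's `spring.kafka.*` properties are merged into the binder configuration only for keys that are not already present, so binder-level settings keep precedence. A minimal sketch (broker address and property values are illustrative):

[source,properties]
----
# binder-level setting; kept as-is during the merge
spring.cloud.stream.kafka.binder.brokers=kafka1:9092
# Boot-level setting; copied into the binder configuration only if the key is absent
spring.kafka.properties.max.request.size=2097152
----
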
@@ -0,0 +1,2 @@
org.springframework.boot.env.EnvironmentPostProcessor=\
org.springframework.cloud.stream.binder.kafka.KafkaBinderEnvironmentPostProcessor
@@ -23,6 +23,8 @@ import com.esotericsoftware.kryo.Registration;
import org.springframework.cloud.stream.binder.AbstractTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.codec.kryo.KryoRegistrar;
import org.springframework.integration.codec.kryo.PojoCodec;

@@ -31,6 +31,7 @@ import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.assertj.core.api.Assertions;
import org.eclipse.jetty.server.Server;
import org.junit.Before;
import org.junit.ClassRule;
@@ -42,7 +43,9 @@ import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.kafka.core.ConsumerFactory;
@@ -54,7 +57,6 @@ import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.retry.RetryOperations;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;
@@ -111,12 +113,6 @@ public class Kafka10BinderTests extends KafkaBinderTests {
		return consumerFactory().createConsumer().partitionsFor(topic).size();
	}

	@Override
	@SuppressWarnings("unchecked")
	protected void setMetadataRetryOperations(Binder binder, RetryOperations retryOperations) {
		((Kafka10TestBinder) binder).getBinder().setMetadataRetryOperations(retryOperations);
	}

	@Override
	protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
		final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
@@ -237,8 +233,8 @@ public class Kafka10BinderTests extends KafkaBinderTests {
		assertThat(inbound).isNotNull();
		assertTrue(message.getPayload() instanceof User1);
		User1 receivedUser = (User1) message.getPayload();
		assertThat(receivedUser.getName()).isEqualTo(userName1);
		assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
		Assertions.assertThat(receivedUser.getName()).isEqualTo(userName1);
		Assertions.assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
		producerBinding.unbind();
		consumerBinding.unbind();
	}
@@ -16,8 +16,10 @@

package org.springframework.cloud.stream.binder.kafka;

import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
@@ -34,14 +36,19 @@ public class Kafka10TestBinder extends AbstractKafkaTestBinder {

	public Kafka10TestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
		try {
			KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
			AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
			KafkaTopicProvisioner provisioningProvider =
					new KafkaTopicProvisioner(binderConfiguration, adminUtilsOperation);
			provisioningProvider.afterPropertiesSet();

			KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration, provisioningProvider);

			binder.setCodec(getCodec());
			ProducerListener producerListener = new LoggingProducerListener();
			binder.setProducerListener(producerListener);
			GenericApplicationContext context = new GenericApplicationContext();
			context.refresh();
			binder.setApplicationContext(context);
			binder.setAdminUtilsOperation(new Kafka10AdminUtilsOperation());
			binder.afterPropertiesSet();
			this.setBinder(binder);
		}
@@ -0,0 +1,119 @@
/*
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * @author Ilayaperumal Gopinathan
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(classes = {KafkaBinderAutoConfigurationPropertiesTest.KafkaBinderConfigProperties.class, KafkaBinderConfiguration.class})
@TestPropertySource(locations = "classpath:binder-config-autoconfig.properties")
public class KafkaBinderAutoConfigurationPropertiesTest {

    @Autowired
    private KafkaMessageChannelBinder kafkaMessageChannelBinder;

    @Autowired
    private KafkaBinderHealthIndicator kafkaBinderHealthIndicator;

    @Test
    public void testKafkaBinderConfigurationWithKafkaProperties() throws Exception {
        assertNotNull(this.kafkaMessageChannelBinder);
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(new KafkaProducerProperties());
        Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory", ExtendedProducerProperties.class);
        getProducerFactoryMethod.setAccessible(true);
        DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod.invoke(this.kafkaMessageChannelBinder, producerProperties);
        Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs", Map.class);
        ReflectionUtils.makeAccessible(producerFactoryConfigField);
        Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField, producerFactory);
        assertTrue(producerConfigs.get("batch.size").equals(10));
        assertTrue(producerConfigs.get("key.serializer").equals(LongSerializer.class));
        assertTrue(producerConfigs.get("value.serializer").equals(LongSerializer.class));
        assertTrue(producerConfigs.get("compression.type").equals("snappy"));
        List<String> bootstrapServers = new ArrayList<>();
        bootstrapServers.add("10.98.09.199:9092");
        bootstrapServers.add("10.98.09.196:9092");
        assertTrue((((List<String>) producerConfigs.get("bootstrap.servers")).containsAll(bootstrapServers)));
        Method createKafkaConsumerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("createKafkaConsumerFactory", boolean.class, String.class, ExtendedConsumerProperties.class);
        createKafkaConsumerFactoryMethod.setAccessible(true);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = new ExtendedConsumerProperties<>(new KafkaConsumerProperties());
        DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) createKafkaConsumerFactoryMethod.invoke(this.kafkaMessageChannelBinder, true, "test", consumerProperties);
        Field consumerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaConsumerFactory.class, "configs", Map.class);
        ReflectionUtils.makeAccessible(consumerFactoryConfigField);
        Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField, consumerFactory);
        assertTrue(consumerConfigs.get("key.deserializer").equals(LongDeserializer.class));
        assertTrue(consumerConfigs.get("value.deserializer").equals(LongDeserializer.class));
        assertTrue(consumerConfigs.get("group.id").equals("test"));
        assertTrue(consumerConfigs.get("auto.offset.reset").equals("latest"));
        assertTrue((((List<String>) consumerConfigs.get("bootstrap.servers")).containsAll(bootstrapServers)));
    }

    @Test
    public void testKafkaHealthIndicatorProperties() {
        assertNotNull(this.kafkaBinderHealthIndicator);
        Field consumerFactoryField = ReflectionUtils.findField(KafkaBinderHealthIndicator.class, "consumerFactory",
                ConsumerFactory.class);
        ReflectionUtils.makeAccessible(consumerFactoryField);
        DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) ReflectionUtils.getField(
                consumerFactoryField, this.kafkaBinderHealthIndicator);
        Field configField = ReflectionUtils.findField(DefaultKafkaConsumerFactory.class, "configs", Map.class);
        ReflectionUtils.makeAccessible(configField);
        Map<String, Object> configs = (Map<String, Object>) ReflectionUtils.getField(configField, consumerFactory);
        assertTrue(configs.containsKey("bootstrap.servers"));
        List<String> bootstrapServers = new ArrayList<>();
        bootstrapServers.add("10.98.09.199:9092");
        bootstrapServers.add("10.98.09.196:9092");
        assertTrue(((List<String>) configs.get("bootstrap.servers")).containsAll(bootstrapServers));
    }

    public static class KafkaBinderConfigProperties {

        @Bean
        KafkaProperties kafkaProperties() {
            return new KafkaProperties();
        }
    }
}
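Note: the binder-config-autoconfig.properties file this test loads is not part of the diff, but the assertions above pin down what it must supply through Boot's spring.kafka.* keys (the test wires a plain KafkaProperties bean). A plausible sketch, inferred from the assertions only — the exact file contents are an assumption:

spring.kafka.bootstrap-servers=10.98.09.199:9092,10.98.09.196:9092
spring.kafka.producer.batch-size=10
spring.kafka.producer.compression-type=snappy
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.LongSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.LongSerializer
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.LongDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.LongDeserializer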
@@ -0,0 +1,89 @@
/*
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * @author Ilayaperumal Gopinathan
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(classes = {KafkaBinderConfiguration.class})
@TestPropertySource(locations = "classpath:binder-config.properties")
public class KafkaBinderConfigurationPropertiesTest {

    @Autowired
    private KafkaMessageChannelBinder kafkaMessageChannelBinder;

    @Test
    public void testKafkaBinderConfigurationProperties() throws Exception {
        assertNotNull(this.kafkaMessageChannelBinder);
        KafkaProducerProperties kafkaProducerProperties = new KafkaProducerProperties();
        kafkaProducerProperties.setBufferSize(12345);
        kafkaProducerProperties.setBatchTimeout(100);
        kafkaProducerProperties.setCompressionType(KafkaProducerProperties.CompressionType.gzip);
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(kafkaProducerProperties);
        Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory", ExtendedProducerProperties.class);
        getProducerFactoryMethod.setAccessible(true);
        DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod.invoke(this.kafkaMessageChannelBinder, producerProperties);
        Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs", Map.class);
        ReflectionUtils.makeAccessible(producerFactoryConfigField);
        Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField, producerFactory);
        assertTrue(producerConfigs.get("batch.size").equals("12345"));
        assertTrue(producerConfigs.get("linger.ms").equals("100"));
        assertTrue(producerConfigs.get("key.serializer").equals(ByteArraySerializer.class));
        assertTrue(producerConfigs.get("value.serializer").equals(ByteArraySerializer.class));
        assertTrue(producerConfigs.get("compression.type").equals("gzip"));
        List<String> bootstrapServers = new ArrayList<>();
        bootstrapServers.add("10.98.09.199:9082");
        assertTrue((((String) producerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
        Method createKafkaConsumerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("createKafkaConsumerFactory", boolean.class, String.class, ExtendedConsumerProperties.class);
        createKafkaConsumerFactoryMethod.setAccessible(true);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = new ExtendedConsumerProperties<>(new KafkaConsumerProperties());
        DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) createKafkaConsumerFactoryMethod.invoke(this.kafkaMessageChannelBinder, true, "test", consumerProperties);
        Field consumerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaConsumerFactory.class, "configs", Map.class);
        ReflectionUtils.makeAccessible(consumerFactoryConfigField);
        Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField, consumerFactory);
        assertTrue(consumerConfigs.get("key.deserializer").equals(ByteArrayDeserializer.class));
        assertTrue(consumerConfigs.get("value.deserializer").equals(ByteArrayDeserializer.class));
        assertTrue((((String) consumerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
    }
}
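Note: unlike the previous test, here bootstrap.servers surfaces as a single String assembled by the binder from its own configuration, and the expected port is the non-default 9082. A binder-config.properties consistent with these assertions might look like the following sketch (an assumption; the file itself is not shown in this diff):

spring.cloud.stream.kafka.binder.brokers=10.98.09.199
spring.cloud.stream.kafka.binder.defaultBrokerPort=9082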
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 the original author or authors.
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,8 +15,6 @@
 */
package org.springframework.cloud.stream.binder.kafka;

import static org.junit.Assert.assertNotNull;

import java.lang.reflect.Field;

import org.junit.Test;
@@ -29,6 +27,8 @@ import org.springframework.kafka.support.ProducerListener;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

import static org.junit.Assert.assertNotNull;

/**
 * @author Ilayaperumal Gopinathan
 */

@@ -0,0 +1,87 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.cloud.stream.binder.kafka;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.BDDMockito.given;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.Status;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

/**
 * @author Barry Commins
 */
public class KafkaBinderHealthIndicatorTest {

    private static final String TEST_TOPIC = "test";

    private KafkaBinderHealthIndicator indicator;

    @Mock
    private DefaultKafkaConsumerFactory consumerFactory;

    @Mock
    private KafkaConsumer consumer;

    @Mock
    private KafkaMessageChannelBinder binder;

    private Map<String, Collection<PartitionInfo>> topicsInUse = new HashMap<>();

    @Before
    public void setup() {
        MockitoAnnotations.initMocks(this);
        given(consumerFactory.createConsumer()).willReturn(consumer);
        given(binder.getTopicsInUse()).willReturn(topicsInUse);
        indicator = new KafkaBinderHealthIndicator(binder, consumerFactory);
    }

    @Test
    public void kafkaBinderIsUp() {
        final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
        topicsInUse.put(TEST_TOPIC, partitions);
        given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
        Health health = indicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.UP);
    }

    @Test
    public void kafkaBinderIsDown() {
        final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
        topicsInUse.put(TEST_TOPIC, partitions);
        given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
        Health health = indicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.DOWN);
    }

    private List<PartitionInfo> partitions(Node leader) {
        List<PartitionInfo> partitions = new ArrayList<>();
        partitions.add(new PartitionInfo(TEST_TOPIC, 0, leader, null, null));
        return partitions;
    }
}
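Note: these two tests fix the contract of the health check — every partition of every topic the binder uses must report a live leader (node id >= 0). A minimal sketch of indicator logic that would satisfy them; this is hypothetical, inferred from the tests rather than copied from the binder, and the class name is invented:

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;

import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.kafka.core.ConsumerFactory;

public class SketchedKafkaBinderHealthIndicator implements HealthIndicator {

    private final KafkaMessageChannelBinder binder;

    private final ConsumerFactory<?, ?> consumerFactory;

    public SketchedKafkaBinderHealthIndicator(KafkaMessageChannelBinder binder, ConsumerFactory<?, ?> consumerFactory) {
        this.binder = binder;
        this.consumerFactory = consumerFactory;
    }

    @Override
    public Health health() {
        // one metadata-only consumer per probe; closed before returning
        Consumer<?, ?> metadataConsumer = this.consumerFactory.createConsumer();
        try {
            for (Map.Entry<String, Collection<PartitionInfo>> topic : this.binder.getTopicsInUse().entrySet()) {
                for (PartitionInfo partition : metadataConsumer.partitionsFor(topic.getKey())) {
                    // a leader id of -1 means the partition currently has no live leader
                    if (partition.leader() == null || partition.leader().id() < 0) {
                        return Health.down().withDetail("topic", topic.getKey()).build();
                    }
                }
            }
            return Health.up().build();
        }
        finally {
            metadataConsumer.close();
        }
    }
}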
@@ -19,6 +19,7 @@ package org.springframework.cloud.stream.binder.kafka;
import javax.security.auth.login.AppConfigurationEntry;

import com.sun.security.auth.login.ConfigFile;

import org.apache.kafka.common.security.JaasUtils;
import org.junit.Test;

@@ -1,5 +1,5 @@
/*
 * Copyright 2016 the original author or authors.
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -32,22 +32,29 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.assertj.core.api.Condition;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import org.springframework.beans.DirectFieldAccessor;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.DefaultBinding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.PartitionCapableBinderTests;
import org.springframework.cloud.stream.binder.PartitionTestSupport;
import org.springframework.cloud.stream.binder.TestUtils;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.provisioning.ProvisioningException;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
@@ -66,7 +73,6 @@ import org.springframework.messaging.MessagingException;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
@@ -78,9 +84,13 @@ import static org.junit.Assert.assertTrue;
/**
 * @author Soby Chacko
 * @author Ilayaperumal Gopinathan
 * @author Henryk Konsek
 */
public abstract class KafkaBinderTests extends PartitionCapableBinderTests<AbstractKafkaTestBinder, ExtendedConsumerProperties<KafkaConsumerProperties>,
        ExtendedProducerProperties<KafkaProducerProperties>> {
        ExtendedProducerProperties<KafkaProducerProperties>> {

    @Rule
    public ExpectedException expectedProvisioningException = ExpectedException.none();

    @Override
    protected ExtendedConsumerProperties<KafkaConsumerProperties> createConsumerProperties() {
@@ -107,15 +117,13 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr

    protected abstract int partitionSize(String topic);

    protected abstract void setMetadataRetryOperations(Binder binder, RetryOperations retryOperations);

    protected abstract ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties);

    protected abstract void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
            int replicationFactor, Properties topicConfig);
            int replicationFactor, Properties topicConfig);

    protected abstract int invokePartitionSize(String topic,
            ZkUtils zkUtils);
            ZkUtils zkUtils);

    @Test
    @SuppressWarnings("unchecked")
@@ -273,6 +281,68 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        producerBinding.unbind();
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testConfigurableDlqName() throws Exception {
        Binder binder = getBinder();
        DirectChannel moduleOutputChannel = new DirectChannel();
        DirectChannel moduleInputChannel = new DirectChannel();
        FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
        moduleInputChannel.subscribe(handler);
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.setPartitionCount(10);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.setMaxAttempts(3);
        consumerProperties.setBackOffInitialInterval(100);
        consumerProperties.setBackOffMaxInterval(150);
        consumerProperties.getExtension().setEnableDlq(true);
        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
        String dlqName = "dlqTest";
        consumerProperties.getExtension().setDlqName(dlqName);
        long uniqueBindingId = System.currentTimeMillis();
        Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
                moduleOutputChannel, producerProperties);
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
                "testGroup", moduleInputChannel, consumerProperties);
        ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
        dlqConsumerProperties.setMaxAttempts(1);
        QueueChannel dlqChannel = new QueueChannel();
        Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(dlqName, null, dlqChannel, dlqConsumerProperties);

        String testMessagePayload = "test." + UUID.randomUUID().toString();
        Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
        moduleOutputChannel.send(testMessage);

        Message<?> dlqMessage = receive(dlqChannel, 3);
        assertThat(dlqMessage).isNotNull();
        assertThat(dlqMessage.getPayload()).isEqualTo(testMessagePayload);

        // first attempt fails
        assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
        Message<?> handledMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
        assertThat(handledMessage).isNotNull();
        assertThat(handledMessage.getPayload()).isEqualTo(testMessagePayload);
        assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
        binderBindUnbindLatency();
        dlqConsumerBinding.unbind();
        consumerBinding.unbind();

        // on the second attempt the message is not redelivered because the DLQ is set
        QueueChannel successfulInputChannel = new QueueChannel();
        consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
                successfulInputChannel, consumerProperties);
        String testMessage2Payload = "test." + UUID.randomUUID().toString();
        Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
        moduleOutputChannel.send(testMessage2);

        Message<?> receivedMessage = receive(successfulInputChannel);
        assertThat(receivedMessage.getPayload()).isEqualTo(testMessage2Payload);

        binderBindUnbindLatency();
        consumerBinding.unbind();
        producerBinding.unbind();
    }
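Note: the programmatic settings in this test map one-to-one onto binding properties; a roughly equivalent declarative configuration for an application consuming from the same destination would be (the binding name "input" is hypothetical):

spring.cloud.stream.bindings.input.group=testGroup
spring.cloud.stream.bindings.input.consumer.maxAttempts=3
spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
spring.cloud.stream.kafka.bindings.input.consumer.dlqName=dlqTest

Without dlqName, failed messages would go to the default dead-letter topic derived from the destination and group (error.<destination>.<group>).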
|
    @Test
    @SuppressWarnings("unchecked")
    public void testAutoCreateTopicsEnabledSucceeds() throws Exception {
@@ -284,7 +354,6 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(1000);
        metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
        setMetadataRetryOperations(binder, metatadataRetrievalRetryOperations);
        DirectChannel output = new DirectChannel();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        String testTopicName = "nonexisting" + System.currentTimeMillis();
@@ -312,10 +381,10 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.getExtension().setCompressionType(
                KafkaProducerProperties.CompressionType.valueOf(codec.toString()));
        Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
        Binding<MessageChannel> producerBinding = binder.bindProducer("testCompression", moduleOutputChannel,
                producerProperties);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("testCompression", "test", moduleInputChannel,
                consumerProperties);
        Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
                .build();
@@ -402,6 +471,35 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        consumerBinding.unbind();
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testDynamicKeyExpression() throws Exception {
        Binder binder = getBinder(createConfigurationProperties());
        QueueChannel moduleInputChannel = new QueueChannel();
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.getExtension().getConfiguration().put("key.serializer", StringSerializer.class.getName());
        producerProperties.getExtension().setMessageKeyExpression(spelExpressionParser.parseExpression("headers.key"));
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        String uniqueBindingId = UUID.randomUUID().toString();
        DirectChannel moduleOutputChannel = createBindableChannel("output",
                createProducerBindingProperties(producerProperties));
        Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
                moduleOutputChannel, producerProperties);
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
                moduleInputChannel, consumerProperties);
        Thread.sleep(1000);
        Message<?> message = MessageBuilder.withPayload("somePayload").setHeader("key", "myDynamicKey").build();
        // Let the consumer actually bind to the producer before sending a msg
        binderBindUnbindLatency();
        moduleOutputChannel.send(message);
        Message<?> inbound = receive(moduleInputChannel);
        assertThat(inbound).isNotNull();
        String receivedKey = new String(inbound.getHeaders().get(KafkaHeaders.RECEIVED_MESSAGE_KEY, byte[].class));
        assertThat(receivedKey).isEqualTo("myDynamicKey");
        producerBinding.unbind();
        consumerBinding.unbind();
    }
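Note: the dynamic message key exercised here can also be configured declaratively; a roughly equivalent producer binding would be (the binding name "output" is hypothetical):

spring.cloud.stream.kafka.bindings.output.producer.messageKeyExpression=headers.key
spring.cloud.stream.kafka.bindings.output.producer.configuration.key.serializer=org.apache.kafka.common.serialization.StringSerializer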
|
    @Test
    @SuppressWarnings("unchecked")
    public void testCustomPartitionCountOverridesPartitioningIfLarger() throws Exception {
@@ -670,13 +768,13 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
                createProducerBindingProperties(createProducerProperties()));
        QueueChannel moduleInputChannel = new QueueChannel();

        Binding<MessageChannel> producerBinding = binder.bindProducer("foo.x", moduleOutputChannel,
        Binding<MessageChannel> producerBinding = binder.bindProducer("testManualAckSucceedsWhenAutoCommitOffsetIsTurnedOff", moduleOutputChannel,
                createProducerProperties());

        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.getExtension().setAutoCommitOffset(false);

        Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.x", "test", moduleInputChannel,
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("testManualAckSucceedsWhenAutoCommitOffsetIsTurnedOff", "test", moduleInputChannel,
                consumerProperties);

        String testPayload1 = "foo" + UUID.randomUUID().toString();
@@ -712,12 +810,12 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
                createProducerBindingProperties(createProducerProperties()));
        QueueChannel moduleInputChannel = new QueueChannel();

        Binding<MessageChannel> producerBinding = binder.bindProducer("foo.x", moduleOutputChannel,
        Binding<MessageChannel> producerBinding = binder.bindProducer("testManualAckIsNotPossibleWhenAutoCommitOffsetIsEnabledOnTheBinder", moduleOutputChannel,
                createProducerProperties());

        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();

        Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.x", "test", moduleInputChannel,
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("testManualAckIsNotPossibleWhenAutoCommitOffsetIsEnabledOnTheBinder", "test", moduleInputChannel,
                consumerProperties);

        String testPayload1 = "foo" + UUID.randomUUID().toString();
@@ -1013,7 +1111,6 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(1000);
        metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
        setMetadataRetryOperations(binder, metatadataRetrievalRetryOperations);
        DirectChannel output = new DirectChannel();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        String testTopicName = "createdByBroker-" + System.currentTimeMillis();
@@ -1069,7 +1166,6 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(1000);
        metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
        setMetadataRetryOperations(binder, metatadataRetrievalRetryOperations);
        DirectChannel output = new DirectChannel();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        Binding<?> binding = binder.bindConsumer(testTopicName, "test", output, consumerProperties);
@@ -1079,7 +1175,7 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr

    @Test
    @SuppressWarnings("unchecked")
    public void testAutoAddPartitionsDisabledFailsIfTopicUnderpartitioned() throws Exception {
    public void testAutoAddPartitionsDisabledSucceedsIfTopicUnderPartitionedAndAutoRebalanceEnabled() throws Exception {
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();

        final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
@@ -1094,26 +1190,46 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        Binder binder = getBinder(configurationProperties);
        GenericApplicationContext context = new GenericApplicationContext();
        context.refresh();
        // binder.setApplicationContext(context);
        // binder.afterPropertiesSet();

        DirectChannel output = new DirectChannel();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        // this consumer must consume from partition 2
        consumerProperties.setInstanceCount(3);
        consumerProperties.setInstanceIndex(2);
        Binding binding = null;
        try {
            binding = binder.bindConsumer(testTopicName, "test", output, consumerProperties);
        }
        catch (Exception e) {
            assertThat(e).isInstanceOf(BinderException.class);
            assertThat(e)
                    .hasMessageContaining("The number of expected partitions was: 3, but 1 has been found instead");
        }
        finally {
            if (binding != null) {
                binding.unbind();
            }
        Binding binding = binder.bindConsumer(testTopicName, "test", output, consumerProperties);
        binding.unbind();
        assertThat(invokePartitionSize(testTopicName, zkUtils)).isEqualTo(1);
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testAutoAddPartitionsDisabledFailsIfTopicUnderPartitionedAndAutoRebalanceDisabled() throws Exception {
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();

        final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
                configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
                ZKStringSerializer$.MODULE$);

        final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);

        String testTopicName = "existing" + System.currentTimeMillis();
        invokeCreateTopic(zkUtils, testTopicName, 1, 1, new Properties());
        configurationProperties.setAutoAddPartitions(false);
        Binder binder = getBinder(configurationProperties);
        GenericApplicationContext context = new GenericApplicationContext();
        context.refresh();

        DirectChannel output = new DirectChannel();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        // this consumer must consume from partition 2
        consumerProperties.setInstanceCount(3);
        consumerProperties.setInstanceIndex(2);
        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
        expectedProvisioningException.expect(ProvisioningException.class);
        expectedProvisioningException.expectMessage("The number of expected partitions was: 3, but 1 has been found instead");
        Binding binding = binder.bindConsumer(testTopicName, "test", output, consumerProperties);
        if (binding != null) {
            binding.unbind();
        }
    }

@@ -1139,7 +1255,6 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(1000);
        metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
        setMetadataRetryOperations(binder, metatadataRetrievalRetryOperations);
        DirectChannel output = new DirectChannel();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        // this consumer must consume from partition 2
@@ -1188,7 +1303,6 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(1000);
        metatadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
        setMetadataRetryOperations(binder, metatadataRetrievalRetryOperations);
        DirectChannel output = new DirectChannel();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        Binding<?> binding = binder.bindConsumer(testTopicName, "test", output, consumerProperties);
@@ -1228,7 +1342,7 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        Binding<?> binding = null;
        try {
            KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
            Map<String, String> propertiesToOverride = configurationProperties.getConfiguration();
            Map<String, Object> propertiesToOverride = configurationProperties.getConfiguration();
            propertiesToOverride.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            propertiesToOverride.put("value.deserializer", "org.apache.kafka.common.serialization.LongDeserializer");
            configurationProperties.setConfiguration(propertiesToOverride);
@@ -1252,11 +1366,11 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
    }

    private KafkaConsumer getKafkaConsumer(Binding binding) {
        DirectFieldAccessor bindingAccessor = new DirectFieldAccessor((DefaultBinding)binding);
        DirectFieldAccessor bindingAccessor = new DirectFieldAccessor((DefaultBinding) binding);
        KafkaMessageDrivenChannelAdapter adapter = (KafkaMessageDrivenChannelAdapter) bindingAccessor.getPropertyValue("lifecycle");
        DirectFieldAccessor adapterAccessor = new DirectFieldAccessor(adapter);
        ConcurrentMessageListenerContainer messageListenerContainer = (ConcurrentMessageListenerContainer) adapterAccessor.getPropertyValue("messageListenerContainer");
        DirectFieldAccessor containerAccessor = new DirectFieldAccessor((ConcurrentMessageListenerContainer)messageListenerContainer);
        DirectFieldAccessor containerAccessor = new DirectFieldAccessor((ConcurrentMessageListenerContainer) messageListenerContainer);
        DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) containerAccessor.getPropertyValue("consumerFactory");
        return (KafkaConsumer) consumerFactory.createConsumer();
    }
@@ -1350,6 +1464,246 @@ public abstract class KafkaBinderTests extends PartitionCapableBinderTests<Abstr
        }
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testPartitionedModuleJavaWithRawMode() throws Exception {
        Binder binder = getBinder();
        ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
        properties.setHeaderMode(HeaderMode.raw);
        properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
        properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
        properties.setPartitionCount(6);

        DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
        output.setBeanName("test.output");
        Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.raw.0", output, properties);

        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.setConcurrency(2);
        consumerProperties.setInstanceCount(3);
        consumerProperties.setInstanceIndex(0);
        consumerProperties.setPartitioned(true);
        consumerProperties.setHeaderMode(HeaderMode.raw);
        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
        QueueChannel input0 = new QueueChannel();
        input0.setBeanName("test.input0J");
        Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.raw.0", "test", input0, consumerProperties);
        consumerProperties.setInstanceIndex(1);
        QueueChannel input1 = new QueueChannel();
        input1.setBeanName("test.input1J");
        Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.raw.0", "test", input1, consumerProperties);
        consumerProperties.setInstanceIndex(2);
        QueueChannel input2 = new QueueChannel();
        input2.setBeanName("test.input2J");
        Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.raw.0", "test", input2, consumerProperties);

        output.send(new GenericMessage<>(new byte[]{(byte) 0}));
        output.send(new GenericMessage<>(new byte[]{(byte) 1}));
        output.send(new GenericMessage<>(new byte[]{(byte) 2}));

        Message<?> receive0 = receive(input0);
        assertThat(receive0).isNotNull();
        Message<?> receive1 = receive(input1);
        assertThat(receive1).isNotNull();
        Message<?> receive2 = receive(input2);
        assertThat(receive2).isNotNull();

        assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
                ((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);

        input0Binding.unbind();
        input1Binding.unbind();
        input2Binding.unbind();
        outputBinding.unbind();
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testPartitionedModuleSpELWithRawMode() throws Exception {
        Binder binder = getBinder();
        ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
        properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
        properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
        properties.setPartitionCount(6);
        properties.setHeaderMode(HeaderMode.raw);

        DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
        output.setBeanName("test.output");
        Binding<MessageChannel> outputBinding = binder.bindProducer("part.raw.0", output, properties);
        try {
            Object endpoint = extractEndpoint(outputBinding);
            assertThat(getEndpointRouting(endpoint))
                    .contains(getExpectedRoutingBaseDestination("part.raw.0", "test") + "-' + headers['partition']");
        }
        catch (UnsupportedOperationException ignored) {
        }

        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.setConcurrency(2);
        consumerProperties.setInstanceIndex(0);
        consumerProperties.setInstanceCount(3);
        consumerProperties.setPartitioned(true);
        consumerProperties.setHeaderMode(HeaderMode.raw);
        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
        QueueChannel input0 = new QueueChannel();
        input0.setBeanName("test.input0S");
        Binding<MessageChannel> input0Binding = binder.bindConsumer("part.raw.0", "test", input0, consumerProperties);
        consumerProperties.setInstanceIndex(1);
        QueueChannel input1 = new QueueChannel();
        input1.setBeanName("test.input1S");
        Binding<MessageChannel> input1Binding = binder.bindConsumer("part.raw.0", "test", input1, consumerProperties);
        consumerProperties.setInstanceIndex(2);
        QueueChannel input2 = new QueueChannel();
        input2.setBeanName("test.input2S");
        Binding<MessageChannel> input2Binding = binder.bindConsumer("part.raw.0", "test", input2, consumerProperties);

        Message<byte[]> message2 = org.springframework.integration.support.MessageBuilder.withPayload(new byte[]{2})
                .setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "kafkaBinderTestCommonsDelegate")
                .setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
                .setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
        output.send(message2);
        output.send(new GenericMessage<>(new byte[]{1}));
        output.send(new GenericMessage<>(new byte[]{0}));
        Message<?> receive0 = receive(input0);
        assertThat(receive0).isNotNull();
        Message<?> receive1 = receive(input1);
        assertThat(receive1).isNotNull();
        Message<?> receive2 = receive(input2);
        assertThat(receive2).isNotNull();
        assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
                ((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
        input0Binding.unbind();
        input1Binding.unbind();
        input2Binding.unbind();
        outputBinding.unbind();
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testSendAndReceiveWithRawMode() throws Exception {
        Binder binder = getBinder();
        DirectChannel moduleOutputChannel = new DirectChannel();
        QueueChannel moduleInputChannel = new QueueChannel();
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.setHeaderMode(HeaderMode.raw);
        Binding<MessageChannel> producerBinding = binder.bindProducer("raw.0", moduleOutputChannel,
                producerProperties);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.setHeaderMode(HeaderMode.raw);
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("raw.0", "test", moduleInputChannel,
                consumerProperties);
        Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload("testSendAndReceiveWithRawMode".getBytes()).build();
        // Let the consumer actually bind to the producer before sending a msg
        binderBindUnbindLatency();
        moduleOutputChannel.send(message);
        Message<?> inbound = receive(moduleInputChannel);
        assertThat(inbound).isNotNull();
        assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("testSendAndReceiveWithRawMode");
        producerBinding.unbind();
        consumerBinding.unbind();
    }
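Note: raw header mode sends the payload bytes as-is, with no binder header embedding, which is why this test can round-trip a bare byte[]. The equivalent binding settings would be (binding names hypothetical):

spring.cloud.stream.bindings.output.producer.headerMode=raw
spring.cloud.stream.bindings.input.consumer.headerMode=raw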
|
    @Test
    @SuppressWarnings("unchecked")
    public void testSendAndReceiveWithRawModeAndStringPayload() throws Exception {
        Binder binder = getBinder();
        DirectChannel moduleOutputChannel = new DirectChannel();
        QueueChannel moduleInputChannel = new QueueChannel();
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.setHeaderMode(HeaderMode.raw);
        Binding<MessageChannel> producerBinding = binder.bindProducer("raw.string.0", moduleOutputChannel,
                producerProperties);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.setHeaderMode(HeaderMode.raw);
        Binding<MessageChannel> consumerBinding = binder.bindConsumer("raw.string.0", "test", moduleInputChannel,
                consumerProperties);
        Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload("testSendAndReceiveWithRawModeAndStringPayload").build();
        // Let the consumer actually bind to the producer before sending a msg
        binderBindUnbindLatency();
        moduleOutputChannel.send(message);
        Message<?> inbound = receive(moduleInputChannel);
        assertThat(inbound).isNotNull();
        assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("testSendAndReceiveWithRawModeAndStringPayload");
        producerBinding.unbind();
        consumerBinding.unbind();
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testSendAndReceiveWithExplicitConsumerGroupWithRawMode() throws Exception {
        Binder binder = getBinder();
        DirectChannel moduleOutputChannel = new DirectChannel();
        // Test pub/sub by emulating how StreamPlugin handles taps
        QueueChannel module1InputChannel = new QueueChannel();
        QueueChannel module2InputChannel = new QueueChannel();
        QueueChannel module3InputChannel = new QueueChannel();
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.setHeaderMode(HeaderMode.raw);
        Binding<MessageChannel> producerBinding = binder.bindProducer("baz.raw.0", moduleOutputChannel,
                producerProperties);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.setHeaderMode(HeaderMode.raw);
        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
        Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.raw.0", "test", module1InputChannel,
                consumerProperties);
        // A new module is using the tap as an input channel
        String fooTapName = "baz.raw.0";
        Binding<MessageChannel> input2Binding = binder.bindConsumer(fooTapName, "tap1", module2InputChannel,
                consumerProperties);
        // Another new module is using the tap as an input channel
        String barTapName = "baz.raw.0";
        Binding<MessageChannel> input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel,
                consumerProperties);

        Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload("testSendAndReceiveWithExplicitConsumerGroupWithRawMode".getBytes()).build();
        boolean success = false;
        boolean retried = false;
        while (!success) {
            moduleOutputChannel.send(message);
            Message<?> inbound = receive(module1InputChannel);
            assertThat(inbound).isNotNull();
            assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("testSendAndReceiveWithExplicitConsumerGroupWithRawMode");

            Message<?> tapped1 = receive(module2InputChannel);
            Message<?> tapped2 = receive(module3InputChannel);
            if (tapped1 == null || tapped2 == null) {
                // listener may not have started
                assertThat(retried).isFalse().withFailMessage("Failed to receive tap after retry");
                retried = true;
                continue;
            }
            success = true;
            assertThat(new String((byte[]) tapped1.getPayload())).isEqualTo("testSendAndReceiveWithExplicitConsumerGroupWithRawMode");
            assertThat(new String((byte[]) tapped2.getPayload())).isEqualTo("testSendAndReceiveWithExplicitConsumerGroupWithRawMode");
        }
        // delete one tap; its stream is deleted
        input3Binding.unbind();
        Message<?> message2 = org.springframework.integration.support.MessageBuilder.withPayload("bar".getBytes()).build();
        moduleOutputChannel.send(message2);

        // other tap still receives messages
        Message<?> tapped = receive(module2InputChannel);
        assertThat(tapped).isNotNull();

        // removed tap does not
        assertThat(receive(module3InputChannel)).isNull();

        // re-subscribed tap does receive the message
        input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel, createConsumerProperties());
        assertThat(receive(module3InputChannel)).isNotNull();

        // clean up
        input1Binding.unbind();
        input2Binding.unbind();
        input3Binding.unbind();
        producerBinding.unbind();
        assertThat(extractEndpoint(input1Binding).isRunning()).isFalse();
        assertThat(extractEndpoint(input2Binding).isRunning()).isFalse();
        assertThat(extractEndpoint(input3Binding).isRunning()).isFalse();
        assertThat(extractEndpoint(producerBinding).isRunning()).isFalse();
    }

    @Override
    protected void binderBindUnbindLatency() throws InterruptedException {
        Thread.sleep(500);

@@ -1,258 +0,0 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka;

import java.util.Arrays;

import org.junit.Test;

import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.support.GenericMessage;

import static org.assertj.core.api.Assertions.assertThat;

/**
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Mark Fisher
*/
public class RawModeKafka09BinderTests extends Kafka09BinderTests {

@Test
@Override
public void testPartitionedModuleJava() throws Exception {
Kafka09TestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setHeaderMode(HeaderMode.raw);
properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionCount(6);

DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, properties);

ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(0);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0J");
Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1J");
Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2J");
Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);

output.send(new GenericMessage<>(new byte[] {(byte) 0}));
output.send(new GenericMessage<>(new byte[] {(byte) 1}));
output.send(new GenericMessage<>(new byte[] {(byte) 2}));

Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();

assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);

input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}

@Test
@Override
public void testPartitionedModuleSpEL() throws Exception {
Kafka09TestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
properties.setPartitionCount(6);
properties.setHeaderMode(HeaderMode.raw);

DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, properties);
try {
Object endpoint = extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint))
.contains(getExpectedRoutingBaseDestination("part.0", "test") + "-' + headers['partition']");
}
catch (UnsupportedOperationException ignored) {
}

ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceIndex(0);
consumerProperties.setInstanceCount(3);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0S");
Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1S");
Binding<MessageChannel> input1Binding = binder.bindConsumer("part.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2S");
Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);

Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] {2})
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
output.send(message2);
output.send(new GenericMessage<>(new byte[] {1}));
output.send(new GenericMessage<>(new byte[] {0}));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
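A reading aid for the assertions above, assuming the 1.x partitioning conventions: Byte.hashCode() is just the byte's value, so with partitionCount = 6 the selector places payloads 0, 1 and 2 on partitions 0, 1 and 2 (hash modulo partition count); and with autoRebalanceEnabled = false and instanceCount = 3, each consumer instance is assigned the partitions p for which p % 3 == instanceIndex, which is why each of the three input channels receives exactly one of the three messages.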

@Test
@Override
public void testSendAndReceive() throws Exception {
Kafka09TestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
consumerProperties);
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
// Let the consumer actually bind to the producer before sending a message
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
producerBinding.unbind();
consumerBinding.unbind();
}

@Test
public void testSendAndReceiveWithExplicitConsumerGroup() {
Kafka09TestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
// Test pub/sub by emulating how StreamPlugin handles taps
QueueChannel module1InputChannel = new QueueChannel();
QueueChannel module2InputChannel = new QueueChannel();
QueueChannel module3InputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel,
producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.0", "test", module1InputChannel,
consumerProperties);
// A new module is using the tap as an input channel
String fooTapName = "baz.0";
Binding<MessageChannel> input2Binding = binder.bindConsumer(fooTapName, "tap1", module2InputChannel,
consumerProperties);
// Another new module is using the tap as an input channel
String barTapName = "baz.0";
Binding<MessageChannel> input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel,
consumerProperties);

Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
boolean success = false;
boolean retried = false;
while (!success) {
moduleOutputChannel.send(message);
Message<?> inbound = receive(module1InputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");

Message<?> tapped1 = receive(module2InputChannel);
Message<?> tapped2 = receive(module3InputChannel);
if (tapped1 == null || tapped2 == null) {
// listener may not have started
assertThat(retried).withFailMessage("Failed to receive tap after retry").isFalse();
retried = true;
continue;
}
success = true;
assertThat(new String((byte[]) tapped1.getPayload())).isEqualTo("foo");
assertThat(new String((byte[]) tapped2.getPayload())).isEqualTo("foo");
}
// delete one tap; it should stop receiving while the other continues
input3Binding.unbind();
Message<?> message2 = MessageBuilder.withPayload("bar".getBytes()).build();
moduleOutputChannel.send(message2);

// other tap still receives messages
Message<?> tapped = receive(module2InputChannel);
assertThat(tapped).isNotNull();

// removed tap does not
assertThat(receive(module3InputChannel)).isNull();

// re-subscribed tap does receive the message
input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel, createConsumerProperties());
assertThat(receive(module3InputChannel)).isNotNull();

// clean up
input1Binding.unbind();
input2Binding.unbind();
input3Binding.unbind();
producerBinding.unbind();
assertThat(extractEndpoint(input1Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input2Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input3Binding).isRunning()).isFalse();
assertThat(extractEndpoint(producerBinding).isRunning()).isFalse();
}
}
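Both group-based tests above hinge on consumer-group semantics: bindings with distinct group names each receive every record published to the destination (pub/sub), while bindings that share a group compete for its partitions. A minimal sketch in the same test vocabulary (the channel and group names are illustrative, not taken from this changeset):

QueueChannel channelA = new QueueChannel();
QueueChannel channelB = new QueueChannel();
// distinct groups: each channel gets its own copy of every record on baz.0
Binding<MessageChannel> bindingA = binder.bindConsumer("baz.0", "groupA", channelA, createConsumerProperties());
Binding<MessageChannel> bindingB = binder.bindConsumer("baz.0", "groupB", channelB, createConsumerProperties());
moduleOutputChannel.send(new GenericMessage<>("foo".getBytes()));
assertThat(receive(channelA)).isNotNull();
assertThat(receive(channelB)).isNotNull();
bindingA.unbind();
bindingB.unbind();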
@@ -0,0 +1,47 @@
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.bootstrap;

import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.test.rule.KafkaEmbedded;

/**
* @author Marius Bogoevici
*/
public class KafkaBinderBootstrapTest {

// one embedded broker, controlled shutdown, 10 partitions per auto-created topic
@ClassRule
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);

@Test
public void testKafkaBinderConfiguration() throws Exception {
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
.web(false)
.run("--spring.cloud.stream.kafka.binder.brokers=" + embeddedKafka.getBrokersAsString(),
"--spring.cloud.stream.kafka.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
applicationContext.close();
}

@SpringBootApplication
static class SimpleApplication {
}

}
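A note on the design: this is deliberately a smoke test. It verifies only that the binder auto-configuration can bootstrap against the embedded broker and that the context shuts down cleanly; no channels are bound, so a failure here points at binder bean wiring rather than messaging behavior.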
@@ -0,0 +1,10 @@
spring.kafka.producer.keySerializer=org.apache.kafka.common.serialization.LongSerializer
spring.kafka.producer.valueSerializer=org.apache.kafka.common.serialization.LongSerializer
spring.kafka.consumer.keyDeserializer=org.apache.kafka.common.serialization.LongDeserializer
spring.kafka.consumer.valueDeserializer=org.apache.kafka.common.serialization.LongDeserializer
spring.kafka.producer.batchSize=10
spring.kafka.bootstrapServers=10.98.09.199:9092,10.98.09.196:9092
spring.kafka.producer.compressionType=snappy
# Test consumer properties
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.group-id=testEmbeddedKafkaApplication
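For orientation, Boot's relaxed binding maps camelCase keys such as spring.kafka.producer.keySerializer onto the native client setting key.serializer (and likewise for the rest). A rough programmatic equivalent of the producer side of this file, sketched with standard kafka-clients/spring-kafka types (an illustration, not part of this changeset):

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.LongSerializer;

import org.springframework.kafka.core.DefaultKafkaProducerFactory;

// values copied from the properties above
Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.98.09.199:9092,10.98.09.196:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 10);
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
DefaultKafkaProducerFactory<Long, Long> producerFactory = new DefaultKafkaProducerFactory<>(props);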
@@ -0,0 +1 @@
spring.cloud.stream.kafka.binder.brokers=10.98.09.199:9082