Compare commits
47 commits: v2.0.0.RC2...v2.0.1.RELEASE
| SHA1 |
|---|
| d67c98334f |
| 3a4f047e9c |
| 725d2a0de2 |
| c7dc56e7d2 |
| 5c594816bd |
| c941e2d735 |
| 8a1c2c504d |
| dd48bf1540 |
| 3450b4b360 |
| 78a8baf81f |
| 1ea69a10a4 |
| 8f61919069 |
| 369c46ce77 |
| 64431426aa |
| f77dc50de9 |
| d141ad3647 |
| 75dd5f202a |
| acb8eef43b |
| b3f8cf41ef |
| cbf693f14e |
| 39dd048ee5 |
| 0689e87489 |
| 2c3787faa1 |
| 84f0fb28ae |
| 710ff2c292 |
| 11a275a299 |
| 3526a298c8 |
| e152d0c073 |
| 976b903352 |
| 9861c80355 |
| 7057e225df |
| b8267ea81e |
| 2b595b004f |
| de45edc962 |
| 2406fe5237 |
| b5a0013e1e |
| c814ad5595 |
| def2c3d0ed |
| 10a44d1e44 |
| 0de078ca48 |
| 8035e25359 |
| bcf15ed3be |
| d37ef750ad |
| d8baca5a66 |
| b9d7f1f537 |
| ad819ece92 |
| e254968eaf |
pom.xml (10 changed lines)
@@ -2,20 +2,20 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 	<modelVersion>4.0.0</modelVersion>
 	<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-	<version>2.0.0.RC2</version>
+	<version>2.0.1.RELEASE</version>
 	<packaging>pom</packaging>
 	<parent>
 		<groupId>org.springframework.cloud</groupId>
 		<artifactId>spring-cloud-build</artifactId>
-		<version>2.0.0.RC2</version>
+		<version>2.0.2.RELEASE</version>
 		<relativePath />
 	</parent>
 	<properties>
 		<java.version>1.8</java.version>
-		<spring-kafka.version>2.1.4.RELEASE</spring-kafka.version>
+		<spring-kafka.version>2.1.7.RELEASE</spring-kafka.version>
 		<spring-integration-kafka.version>3.0.3.RELEASE</spring-integration-kafka.version>
-		<kafka.version>1.0.0</kafka.version>
-		<spring-cloud-stream.version>2.0.0.RC2</spring-cloud-stream.version>
+		<kafka.version>1.0.1</kafka.version>
+		<spring-cloud-stream.version>2.0.1.RELEASE</spring-cloud-stream.version>
 	</properties>
 	<modules>
 		<module>spring-cloud-stream-binder-kafka</module>
spring-cloud-starter-stream-kafka/pom.xml

@@ -4,7 +4,7 @@
 <parent>
 	<groupId>org.springframework.cloud</groupId>
 	<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-	<version>2.0.0.RC2</version>
+	<version>2.0.1.RELEASE</version>
 </parent>
 <artifactId>spring-cloud-starter-stream-kafka</artifactId>
 <description>Spring Cloud Starter Stream Kafka</description>
spring-cloud-stream-binder-kafka-core/pom.xml

@@ -5,7 +5,7 @@
 <parent>
 	<groupId>org.springframework.cloud</groupId>
 	<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-	<version>2.0.0.RC2</version>
+	<version>2.0.1.RELEASE</version>
 </parent>
 <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
 <description>Spring Cloud Stream Kafka Binder Core</description>
KafkaAdminProperties.java (new file)

@@ -0,0 +1,62 @@
+/*
+ * Copyright 2018 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.cloud.stream.binder.kafka.properties;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Properties for configuring topics.
+ *
+ * @author Gary Russell
+ * @since 2.0
+ *
+ */
+public class KafkaAdminProperties {
+
+	private Short replicationFactor;
+
+	private Map<Integer, List<Integer>> replicasAssignments = new HashMap<>();
+
+	private Map<String, String> configuration = new HashMap<>();
+
+	public Short getReplicationFactor() {
+		return this.replicationFactor;
+	}
+
+	public void setReplicationFactor(Short replicationFactor) {
+		this.replicationFactor = replicationFactor;
+	}
+
+	public Map<Integer, List<Integer>> getReplicasAssignments() {
+		return this.replicasAssignments;
+	}
+
+	public void setReplicasAssignments(Map<Integer, List<Integer>> replicasAssignments) {
+		this.replicasAssignments = replicasAssignments;
+	}
+
+	public Map<String, String> getConfiguration() {
+		return this.configuration;
+	}
+
+	public void setConfiguration(Map<String, String> configuration) {
+		this.configuration = configuration;
+	}
+
+}
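The new `KafkaAdminProperties` are bound under the `admin` prefix of each binding's extended consumer and producer properties. A minimal sketch (the binding name `output` and the values here are illustrative assumptions):

[source]
----
spring.cloud.stream.kafka.bindings.output.producer.admin.replication-factor=2
spring.cloud.stream.kafka.bindings.output.producer.admin.configuration.cleanup.policy=compact
----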
KafkaBinderConfigurationProperties.java

@@ -1,5 +1,5 @@
 /*
- * Copyright 2015-2017 the original author or authors.
+ * Copyright 2015-2018 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -27,6 +27,7 @@ import org.apache.kafka.clients.producer.ProducerConfig;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
 import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
 import org.springframework.util.ObjectUtils;
 import org.springframework.util.StringUtils;
 
@@ -45,7 +46,7 @@ public class KafkaBinderConfigurationProperties {
 
 	private final Transaction transaction = new Transaction();
 
-	@Autowired(required = false)
+	@Autowired
 	private KafkaProperties kafkaProperties;
 
 	private String[] zkNodes = new String[] { "localhost" };
@@ -86,7 +87,7 @@ public class KafkaBinderConfigurationProperties {
 
 	private String requiredAcks = "1";
 
-	private int replicationFactor = 1;
+	private short replicationFactor = 1;
 
 	private int fetchSize = 1024 * 1024;
 
@@ -110,6 +111,13 @@ public class KafkaBinderConfigurationProperties {
 		return this.transaction;
 	}
 
+	/**
+	 * No longer used.
+	 * @return the connection String
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
+	@Deprecated
 	public String getZkConnectionString() {
 		return toConnectionString(this.zkNodes, this.defaultZkPort);
 	}
@@ -126,26 +134,68 @@ public class KafkaBinderConfigurationProperties {
 		return this.headers;
 	}
 
+	/**
+	 * No longer used.
+	 * @return the window.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public int getOffsetUpdateTimeWindow() {
 		return this.offsetUpdateTimeWindow;
 	}
 
+	/**
+	 * No longer used.
+	 * @return the count.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public int getOffsetUpdateCount() {
 		return this.offsetUpdateCount;
 	}
 
+	/**
+	 * No longer used.
+	 * @return the timeout.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public int getOffsetUpdateShutdownTimeout() {
 		return this.offsetUpdateShutdownTimeout;
 	}
 
+	/**
+	 * Zookeeper nodes.
+	 * @return the nodes.
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
 	public String[] getZkNodes() {
 		return this.zkNodes;
 	}
 
+	/**
+	 * Zookeeper nodes.
+	 * @param zkNodes the nodes.
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
 	public void setZkNodes(String... zkNodes) {
 		this.zkNodes = zkNodes;
 	}
 
+	/**
+	 * Zookeeper port.
+	 * @param defaultZkPort the port.
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
 	public void setDefaultZkPort(String defaultZkPort) {
 		this.defaultZkPort = defaultZkPort;
 	}
@@ -166,30 +216,79 @@ public class KafkaBinderConfigurationProperties {
 		this.headers = headers;
 	}
 
+	/**
+	 * No longer used.
+	 * @param offsetUpdateTimeWindow the window.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public void setOffsetUpdateTimeWindow(int offsetUpdateTimeWindow) {
 		this.offsetUpdateTimeWindow = offsetUpdateTimeWindow;
 	}
 
+	/**
+	 * No longer used.
+	 * @param offsetUpdateCount the count.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public void setOffsetUpdateCount(int offsetUpdateCount) {
 		this.offsetUpdateCount = offsetUpdateCount;
 	}
 
+	/**
+	 * No longer used.
+	 * @param offsetUpdateShutdownTimeout the timeout.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public void setOffsetUpdateShutdownTimeout(int offsetUpdateShutdownTimeout) {
 		this.offsetUpdateShutdownTimeout = offsetUpdateShutdownTimeout;
 	}
 
+	/**
+	 * Zookeeper session timeout.
+	 * @return the timeout.
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
 	public int getZkSessionTimeout() {
 		return this.zkSessionTimeout;
 	}
 
+	/**
+	 * Zookeeper session timeout.
+	 * @param zkSessionTimeout the timeout
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
 	public void setZkSessionTimeout(int zkSessionTimeout) {
 		this.zkSessionTimeout = zkSessionTimeout;
 	}
 
+	/**
+	 * Zookeeper connection timeout.
+	 * @return the timeout.
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
 	public int getZkConnectionTimeout() {
 		return this.zkConnectionTimeout;
 	}
 
+	/**
+	 * Zookeeper connection timeout.
+	 * @param zkConnectionTimeout the timeout.
+	 * @deprecated connection to zookeeper is no longer necessary
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
 	public void setZkConnectionTimeout(int zkConnectionTimeout) {
 		this.zkConnectionTimeout = zkConnectionTimeout;
 	}
@@ -212,10 +311,24 @@ public class KafkaBinderConfigurationProperties {
 		return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
 	}
 
+	/**
+	 * No longer used.
+	 * @return the wait.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public int getMaxWait() {
 		return this.maxWait;
 	}
 
+	/**
+	 * No longer used.
+	 * @param maxWait the wait.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public void setMaxWait(int maxWait) {
 		this.maxWait = maxWait;
 	}
@@ -232,18 +345,32 @@ public class KafkaBinderConfigurationProperties {
 		this.requiredAcks = requiredAcks;
 	}
 
-	public int getReplicationFactor() {
+	public short getReplicationFactor() {
 		return this.replicationFactor;
 	}
 
-	public void setReplicationFactor(int replicationFactor) {
+	public void setReplicationFactor(short replicationFactor) {
 		this.replicationFactor = replicationFactor;
 	}
 
+	/**
+	 * No longer used.
+	 * @return the size.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public int getFetchSize() {
 		return this.fetchSize;
 	}
 
+	/**
+	 * No longer used.
+	 * @param fetchSize the size.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public void setFetchSize(int fetchSize) {
 		this.fetchSize = fetchSize;
 	}
@@ -264,10 +391,24 @@ public class KafkaBinderConfigurationProperties {
 		this.healthTimeout = healthTimeout;
 	}
 
+	/**
+	 * No longer used.
+	 * @return the queue size.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public int getQueueSize() {
 		return this.queueSize;
 	}
 
+	/**
+	 * No longer used.
+	 * @param queueSize the queue size.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0")
 	public void setQueueSize(int queueSize) {
 		this.queueSize = queueSize;
 	}
@@ -288,10 +429,26 @@ public class KafkaBinderConfigurationProperties {
 		this.autoAddPartitions = autoAddPartitions;
 	}
 
+	/**
+	 * No longer used; set properties such as this via {@link #getConfiguration()
+	 * configuration}.
+	 * @return the size.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
 	public int getSocketBufferSize() {
 		return this.socketBufferSize;
 	}
 
+	/**
+	 * No longer used; set properties such as this via {@link #getConfiguration()
+	 * configuration}.
+	 * @param socketBufferSize the size.
+	 * @deprecated
+	 */
+	@Deprecated
+	@DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
 	public void setSocketBufferSize(int socketBufferSize) {
 		this.socketBufferSize = socketBufferSize;
 	}
KafkaConsumerProperties.java

@@ -1,5 +1,5 @@
 /*
- * Copyright 2016-2017 the original author or authors.
+ * Copyright 2016-2018 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -53,6 +53,8 @@ public class KafkaConsumerProperties {
 		both
 	}
 
+	private boolean ackEachRecord;
+
 	private boolean autoRebalanceEnabled = true;
 
 	private boolean autoCommitOffset = true;
@@ -61,6 +63,8 @@ public class KafkaConsumerProperties {
 
 	private StartOffset startOffset;
 
+	private boolean resetOffsets;
+
 	private boolean enableDlq;
 
 	private String dlqName;
@@ -79,6 +83,16 @@ public class KafkaConsumerProperties {
 
 	private Map<String, String> configuration = new HashMap<>();
 
+	private KafkaAdminProperties admin = new KafkaAdminProperties();
+
+	public boolean isAckEachRecord() {
+		return this.ackEachRecord;
+	}
+
+	public void setAckEachRecord(boolean ackEachRecord) {
+		this.ackEachRecord = ackEachRecord;
+	}
+
 	public boolean isAutoCommitOffset() {
 		return this.autoCommitOffset;
 	}
@@ -95,6 +109,14 @@ public class KafkaConsumerProperties {
 		this.startOffset = startOffset;
 	}
 
+	public boolean isResetOffsets() {
+		return this.resetOffsets;
+	}
+
+	public void setResetOffsets(boolean resetOffsets) {
+		this.resetOffsets = resetOffsets;
+	}
+
 	public boolean isEnableDlq() {
 		return this.enableDlq;
 	}
@@ -111,10 +133,22 @@ public class KafkaConsumerProperties {
 		this.autoCommitOnError = autoCommitOnError;
 	}
 
+	/**
+	 * No longer used.
+	 * @return the interval.
+	 * @deprecated
+	 */
+	@Deprecated
 	public int getRecoveryInterval() {
 		return this.recoveryInterval;
 	}
 
+	/**
+	 * No longer used.
+	 * @param recoveryInterval the interval.
+	 * @deprecated
+	 */
+	@Deprecated
 	public void setRecoveryInterval(int recoveryInterval) {
 		this.recoveryInterval = recoveryInterval;
 	}
@@ -182,4 +216,12 @@ public class KafkaConsumerProperties {
 		this.idleEventInterval = idleEventInterval;
 	}
 
+	public KafkaAdminProperties getAdmin() {
+		return this.admin;
+	}
+
+	public void setAdmin(KafkaAdminProperties admin) {
+		this.admin = admin;
+	}
+
 }
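The new `ackEachRecord` and `resetOffsets` flags are set like any other extended consumer property. A sketch (the binding name `input` is an assumption):

[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.ackEachRecord=true
spring.cloud.stream.kafka.bindings.input.consumer.resetOffsets=false
----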
KafkaProducerProperties.java

@@ -44,6 +44,8 @@ public class KafkaProducerProperties {
 
 	private Map<String, String> configuration = new HashMap<>();
 
+	private KafkaAdminProperties admin = new KafkaAdminProperties();
+
 	public int getBufferSize() {
 		return this.bufferSize;
 	}
@@ -101,6 +103,15 @@ public class KafkaProducerProperties {
 		this.configuration = configuration;
 	}
 
+	public KafkaAdminProperties getAdmin() {
+		return this.admin;
+	}
+
+	public void setAdmin(KafkaAdminProperties admin) {
+		this.admin = admin;
+	}
+
+
 	public enum CompressionType {
 		none,
 		gzip,
KafkaTopicProvisioner.java

@@ -18,6 +18,7 @@ package org.springframework.cloud.stream.binder.kafka.provisioning;
 
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Callable;
@@ -44,6 +45,7 @@ import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
 import org.springframework.cloud.stream.binder.BinderException;
 import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
 import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
+import org.springframework.cloud.stream.binder.kafka.properties.KafkaAdminProperties;
 import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
 import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
 import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
@@ -67,6 +69,7 @@ import org.springframework.util.StringUtils;
  * @author Gary Russell
  * @author Ilayaperumal Gopinathan
  * @author Simon Flandergan
+ * @author Oleg Zhurakousky
  */
 public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
 		ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {
@@ -77,19 +80,18 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 
 	private final KafkaBinderConfigurationProperties configurationProperties;
 
-	private final AdminClient adminClient;
+	private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;
+
+	private final Map<String, Object> adminClientProperties;
 
 	private RetryOperations metadataRetryOperations;
 
-	private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;
-
 	public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
 			KafkaProperties kafkaProperties) {
 		Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
-		Map<String, Object> adminClientProperties = kafkaProperties.buildAdminProperties();
+		this.adminClientProperties = kafkaProperties.buildAdminProperties();
 		this.configurationProperties = kafkaBinderConfigurationProperties;
 		normalalizeBootPropsWithBinder(adminClientProperties, kafkaProperties, kafkaBinderConfigurationProperties);
-		this.adminClient = AdminClient.create(adminClientProperties);
 	}
 
 	/**
@@ -118,33 +120,38 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 	}
 
 	@Override
-	public ProducerDestination provisionProducerDestination(final String name, ExtendedProducerProperties<KafkaProducerProperties> properties) {
+	public ProducerDestination provisionProducerDestination(final String name,
+			ExtendedProducerProperties<KafkaProducerProperties> properties) {
+
 		if (this.logger.isInfoEnabled()) {
 			this.logger.info("Using kafka topic for outbound: " + name);
 		}
 		KafkaTopicUtils.validateTopicName(name);
-		createTopic(name, properties.getPartitionCount(), false);
-		if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
-			DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
-			KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
+		try (AdminClient adminClient = AdminClient.create(this.adminClientProperties)) {
+			createTopic(adminClient, name, properties.getPartitionCount(), false, properties.getExtension().getAdmin());
+			int partitions = 0;
+			if (this.configurationProperties.isAutoCreateTopics()) {
+				DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
+				KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
 
-			try {
-				Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
+				Map<String, TopicDescription> topicDescriptions = null;
+				try {
+					topicDescriptions = all.get(this.operationTimeout, TimeUnit.SECONDS);
+				}
+				catch (Exception e) {
+					throw new ProvisioningException("Problems encountered with partitions finding", e);
+				}
 				TopicDescription topicDescription = topicDescriptions.get(name);
-				int partitions = topicDescription.partitions().size();
-				return new KafkaProducerDestination(name, partitions);
+				partitions = topicDescription.partitions().size();
 			}
-			catch (Exception e) {
-				throw new ProvisioningException("Problems encountered with partitions finding", e);
-			}
-		}
-		else {
-			return new KafkaProducerDestination(name);
+			return new KafkaProducerDestination(name, partitions);
 		}
 	}
 
 	@Override
-	public ConsumerDestination provisionConsumerDestination(final String name, final String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
+	public ConsumerDestination provisionConsumerDestination(final String name, final String group,
+			ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
+
 		KafkaTopicUtils.validateTopicName(name);
 		boolean anonymous = !StringUtils.hasText(group);
 		Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
@@ -153,25 +160,32 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 			throw new IllegalArgumentException("Instance count cannot be zero");
 		}
 		int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
-		createTopic(name, partitionCount, properties.getExtension().isAutoRebalanceEnabled());
-		if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
-			DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
-			KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
-			try {
-				Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
-				TopicDescription topicDescription = topicDescriptions.get(name);
-				int partitions = topicDescription.partitions().size();
-				ConsumerDestination dlqTopic = createDlqIfNeedBe(name, group, properties, anonymous, partitions);
-				if (dlqTopic != null) {
-					return dlqTopic;
+		ConsumerDestination consumerDestination = new KafkaConsumerDestination(name);
+		try (AdminClient adminClient = createAdminClient()) {
+			createTopic(adminClient, name, partitionCount, properties.getExtension().isAutoRebalanceEnabled(),
+					properties.getExtension().getAdmin());
+			if (this.configurationProperties.isAutoCreateTopics()) {
+				DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
+				KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
+				try {
+					Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
+					TopicDescription topicDescription = topicDescriptions.get(name);
+					int partitions = topicDescription.partitions().size();
+					consumerDestination = createDlqIfNeedBe(adminClient, name, group, properties, anonymous, partitions);
+					if (consumerDestination == null) {
+						consumerDestination = new KafkaConsumerDestination(name, partitions);
+					}
 				}
-				return new KafkaConsumerDestination(name, partitions);
-			}
-			catch (Exception e) {
-				throw new ProvisioningException("provisioning exception", e);
-			}
-		}
-		return new KafkaConsumerDestination(name);
+				catch (Exception e) {
+					throw new ProvisioningException("provisioning exception", e);
+				}
+			}
+		}
+		return consumerDestination;
 	}
 
+	AdminClient createAdminClient() {
+		return AdminClient.create(this.adminClientProperties);
+	}
+
 	/**
@@ -209,14 +223,15 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 		});
 	}
 
-	private ConsumerDestination createDlqIfNeedBe(String name, String group,
+	private ConsumerDestination createDlqIfNeedBe(AdminClient adminClient, String name, String group,
 			ExtendedConsumerProperties<KafkaConsumerProperties> properties,
 			boolean anonymous, int partitions) {
 		if (properties.getExtension().isEnableDlq() && !anonymous) {
 			String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
 					properties.getExtension().getDlqName() : "error." + name + "." + group;
 			try {
-				createTopicAndPartitions(dlqTopic, partitions, properties.getExtension().isAutoRebalanceEnabled());
+				createTopicAndPartitions(adminClient, dlqTopic, partitions,
+						properties.getExtension().isAutoRebalanceEnabled(), properties.getExtension().getAdmin());
 			}
 			catch (Throwable throwable) {
 				if (throwable instanceof Error) {
@@ -231,9 +246,10 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 		return null;
 	}
 
-	private void createTopic(String name, int partitionCount, boolean tolerateLowerPartitionsOnBroker) {
+	private void createTopic(AdminClient adminClient, String name, int partitionCount, boolean tolerateLowerPartitionsOnBroker,
+			KafkaAdminProperties properties) {
 		try {
-			createTopicIfNecessary(name, partitionCount, tolerateLowerPartitionsOnBroker);
+			createTopicIfNecessary(adminClient, name, partitionCount, tolerateLowerPartitionsOnBroker, properties);
 		}
 		catch (Throwable throwable) {
 			if (throwable instanceof Error) {
@@ -245,16 +261,14 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 		}
 	}
 
-	private void createTopicIfNecessary(final String topicName, final int partitionCount,
-			boolean tolerateLowerPartitionsOnBroker) throws Throwable {
-		if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
-			createTopicAndPartitions(topicName, partitionCount, tolerateLowerPartitionsOnBroker);
+	private void createTopicIfNecessary(AdminClient adminClient, final String topicName, final int partitionCount,
+			boolean tolerateLowerPartitionsOnBroker, KafkaAdminProperties properties) throws Throwable {
+
+		if (this.configurationProperties.isAutoCreateTopics()) {
+			createTopicAndPartitions(adminClient, topicName, partitionCount, tolerateLowerPartitionsOnBroker,
+					properties);
 		}
-		else if (this.configurationProperties.isAutoCreateTopics() && adminClient == null) {
-			this.logger.warn("Auto creation of topics is enabled, but Kafka AdminUtils class is not present on the classpath. " +
-					"No topic will be created by the binder");
-		}
-		else if (!this.configurationProperties.isAutoCreateTopics()) {
+		else {
 			this.logger.info("Auto creation of topics is disabled.");
 		}
 	}
@@ -262,9 +276,12 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 	/**
 	 * Creates a Kafka topic if needed, or try to increase its partition count to the
 	 * desired number.
+	 * @param adminClient
+	 * @param adminProperties
 	 */
-	private void createTopicAndPartitions(final String topicName, final int partitionCount,
-			boolean tolerateLowerPartitionsOnBroker) throws Throwable {
+	private void createTopicAndPartitions(AdminClient adminClient, final String topicName, final int partitionCount,
+			boolean tolerateLowerPartitionsOnBroker, KafkaAdminProperties adminProperties) throws Throwable {
+
 		ListTopicsResult listTopicsResult = adminClient.listTopics();
 		KafkaFuture<Set<String>> namesFutures = listTopicsResult.names();
 
@@ -298,14 +315,26 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 				}
 			}
 		}
-		else if (!names.contains(topicName)) {
+		else {
 			// always consider minPartitionCount for topic creation
 			final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
 					partitionCount);
 			this.metadataRetryOperations.execute(context -> {
 
-				NewTopic newTopic = new NewTopic(topicName, effectivePartitionCount,
-						(short) configurationProperties.getReplicationFactor());
+				NewTopic newTopic;
+				Map<Integer, List<Integer>> replicasAssignments = adminProperties.getReplicasAssignments();
+				if (replicasAssignments != null && replicasAssignments.size() > 0) {
+					newTopic = new NewTopic(topicName, adminProperties.getReplicasAssignments());
+				}
+				else {
+					newTopic = new NewTopic(topicName, effectivePartitionCount,
+							adminProperties.getReplicationFactor() != null
+									? adminProperties.getReplicationFactor()
+									: configurationProperties.getReplicationFactor());
+				}
+				if (adminProperties.getConfiguration().size() > 0) {
+					newTopic.configs(adminProperties.getConfiguration());
+				}
 				CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singletonList(newTopic));
 				try {
 					createTopicsResult.all().get(operationTimeout, TimeUnit.SECONDS);
@@ -318,6 +347,10 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 						logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
 					}
 				}
+				else {
+					logger.error("Failed to create topics", e.getCause());
+					throw e.getCause();
+				}
 			}
 			else {
 				logger.error("Failed to create topics", e.getCause());
@@ -365,10 +398,6 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 
 		private final int partitions;
 
-		KafkaProducerDestination(String destinationName) {
-			this(destinationName, 0);
-		}
-
 		KafkaProducerDestination(String destinationName, Integer partitions) {
 			this.producerDestinationName = destinationName;
 			this.partitions = partitions;
@@ -420,10 +449,6 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
 			return this.consumerDestinationName;
 		}
 
-		public String getDlqName() {
-			return dlqName;
-		}
-
 		@Override
 		public String toString() {
 			return "KafkaConsumerDestination{" +
KafkaTopicProvisionerTests.java

@@ -55,10 +55,11 @@ public class KafkaTopicProvisionerTests {
 		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
 		binderConfig.setBrokers("localhost:9092");
 		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
-		AdminClient adminClient = KafkaTestUtils.getPropertyValue(provisioner, "adminClient", AdminClient.class);
+		AdminClient adminClient = provisioner.createAdminClient();
 		assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
 		Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
 		assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
+		adminClient.close();
 	}
 
 	@SuppressWarnings("rawtypes")
@@ -73,13 +74,13 @@ public class KafkaTopicProvisionerTests {
 		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
 		binderConfig.setBrokers("localhost:1234");
 		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
-		AdminClient adminClient = KafkaTestUtils.getPropertyValue(provisioner, "adminClient", AdminClient.class);
+		AdminClient adminClient = provisioner.createAdminClient();
 		assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
 		Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
 		assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
 		adminClient.close();
 	}
 
 	@SuppressWarnings("rawtypes")
 	@Test
 	public void brokersInvalid() throws Exception {
 		KafkaProperties bootConfig = new KafkaProperties();
spring-cloud-stream-binder-kafka-docs/pom.xml

@@ -5,7 +5,7 @@
 <parent>
 	<groupId>org.springframework.cloud</groupId>
 	<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-	<version>2.0.0.RC2</version>
+	<version>2.0.1.RELEASE</version>
 </parent>
 
 <artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
@@ -1,24 +1,22 @@
 [[kafka-dlq-processing]]
 == Dead-Letter Topic Processing
 
-Because it can't be anticipated how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
+Because you cannot anticipate how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
 If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
 However, if the problem is a permanent issue, that could cause an infinite loop.
-The following `spring-boot` application is an example of how to route those messages back to the original topic, but moves them to a third "parking lot" topic after three attempts.
-The application is simply another spring-cloud-stream application that reads from the dead-letter topic.
+The sample Spring Boot application within this topic is an example of how to route those messages back to the original topic, but it moves them to a "`parking lot`" topic after three attempts.
+The application is another spring-cloud-stream application that reads from the dead-letter topic.
 It terminates when no messages are received for 5 seconds.
 
 The examples assume the original destination is `so8400out` and the consumer group is `so8400`.
 
-There are several considerations.
+There are a couple of strategies to consider:
 
-- Consider only running the rerouting when the main application is not running.
-Otherwise, the retries for transient errors will be used up very quickly.
-- Alternatively, use a two-stage approach - use this application to route to a third topic, and another to route from there back to the main topic.
-- Since this technique uses a message header to keep track of retries, it won't work with `headerMode=raw`.
-In that case, consider adding some data to the payload (that can be ignored by the main application).
-- `x-retries` has to be added to the `headers` property `spring.cloud.stream.kafka.binder.headers=x-retries` on both this, and the main application so that the header is transported between the applications.
-- Since kafka is publish/subscribe, replayed messages will be sent to each consumer group, even those that successfully processed a message the first time around.
+* Consider running the rerouting only when the main application is not running.
+Otherwise, the retries for transient errors are used up very quickly.
+* Alternatively, use a two-stage approach: Use this application to route to a third topic and another to route from there back to the main topic.
 
+The following code listings show the sample application:
 
 .application.properties
 [source]

(Binary file added in this range: an image, 119 KiB; content not shown.)
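A minimal sketch of such a rerouting application follows. It is not the original sample; the `parkingLot` binding name and the three-attempt limit are illustrative assumptions:

[source,java]
----
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Output;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.messaging.Processor;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.messaging.support.MessageBuilder;

@SpringBootApplication
@EnableBinding(ReRouteDlqApplication.TwoOutputProcessor.class)
public class ReRouteDlqApplication {

	private static final String X_RETRIES_HEADER = "x-retries";

	public static void main(String[] args) {
		SpringApplication.run(ReRouteDlqApplication.class, args);
	}

	@Autowired
	private MessageChannel parkingLot;

	// input is bound to the DLQ topic (error.so8400out.so8400);
	// output is bound back to the original topic (so8400out)
	@StreamListener(Processor.INPUT)
	@SendTo(Processor.OUTPUT)
	public Message<?> reRoute(Message<?> failed) {
		Integer retries = failed.getHeaders().get(X_RETRIES_HEADER, Integer.class);
		if (retries == null || retries < 3) {
			// replay to the original topic with an incremented retry counter
			return MessageBuilder.fromMessage(failed)
					.setHeader(X_RETRIES_HEADER, retries == null ? 1 : retries + 1)
					.build();
		}
		// attempts exhausted: move the message to the parking-lot topic
		this.parkingLot.send(failed);
		return null;
	}

	public interface TwoOutputProcessor extends Processor {

		@Output("parkingLot")
		MessageChannel parkingLot();

	}
}
----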
@@ -81,7 +81,7 @@ For common configuration options and properties pertaining to binder, refer to t
 
 === Kafka Streams Properties
 
-The following properties are available at the binder level and must be prefixed with `spring.cloud.stream.kafka.binder.`
+The following properties are available at the binder level and must be prefixed with `spring.cloud.stream.kafka.streams.binder.`
 literal.
 
 configuration::
@@ -169,7 +169,7 @@ spring.cloud.stream.kafka.streams.timeWindow.length::
 The value is expressed in milliseconds.
 +
 Default: `none`.
-spring.cloud.stream.kstream.timeWindow.advanceBy::
+spring.cloud.stream.kafka.streams.timeWindow.advanceBy::
 Value is given in milliseconds.
 +
 Default: `none`.
@@ -413,9 +413,9 @@ If `nativeEncoding` is set, then you can set different SerDe's on individual out
 
 [source]
 ----
-spring.cloud.stream.kstream.bindings.output1.producer.valueSerde=IntegerSerde
-spring.cloud.stream.kstream.bindings.output2.producer.valueSerde=StringSerde
-spring.cloud.stream.kstream.bindings.output3.producer.valueSerde=JsonSerde
+spring.cloud.stream.kafka.streams.bindings.output1.producer.valueSerde=IntegerSerde
+spring.cloud.stream.kafka.streams.bindings.output2.producer.valueSerde=StringSerde
+spring.cloud.stream.kafka.streams.bindings.output3.producer.valueSerde=JsonSerde
 ----
 
 Then if you have `SendTo` like this, @SendTo({"output1", "output2", "output3"}), the `KStream[]` from the branches are
@@ -595,4 +595,18 @@ Once you gain access to this bean, then you can query for the particular state-s
 ----
 ReadOnlyKeyValueStore<Object, Object> keyValueStore =
 	queryableStoreRegistry.getQueryableStoreType("my-store", QueryableStoreTypes.keyValueStore());
 ----
 
+== Accessing the underlying KafkaStreams object
+
+`StreamsBuilderFactoryBean` from spring-kafka, which is responsible for constructing the `KafkaStreams` object, can be accessed programmatically.
+Each `StreamsBuilderFactoryBean` is registered as `stream-builder` and appended with the `StreamListener` method name.
+If your `StreamListener` method is named `process`, for example, the stream builder bean is named `stream-builder-process`.
+Since this is a factory bean, it should be accessed by prepending an ampersand (`&`) when accessing it programmatically.
+The following is an example that assumes the `StreamListener` method is named `process`:
+
+[source]
+----
+StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
+KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
+----
@@ -1,13 +1,13 @@
 [partintro]
 --
 This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
-It contains information about its design, usage and configuration options, as well as information on how the Stream Cloud Stream concepts map into Apache Kafka specific constructs.
-In addition, this guide also explains the Kafka Streams binding capabilities of Spring Cloud Stream.
+It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
+In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
 --
 
 == Usage
 
-For using the Apache Kafka binder, you just need to add it to your Spring Cloud Stream application, using the following Maven coordinates:
+To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
 
 [source,xml]
 ----
@@ -17,7 +17,7 @@ For using the Apache Kafka binder, you just need to add it to your Spring Cloud
 </dependency>
 ----
 
-Alternatively, you can also use the Spring Cloud Stream Kafka Starter.
+Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
 
 [source,xml]
 ----
@@ -29,182 +29,202 @@ Alternatively, you can also use the Spring Cloud Stream Kafka Starter.
 
 == Apache Kafka Binder Overview
 
-A simplified diagram of how the Apache Kafka binder operates can be seen below.
+The following image shows a simplified diagram of how the Apache Kafka binder operates:
 
 .Kafka Binder
-image::kafka-binder.png[width=300,scaledwidth="50%"]
+image::images/kafka-binder.png[width=300,scaledwidth="50%"]
 
 The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
 The consumer group maps directly to the same Apache Kafka concept.
 Partitioning also maps directly to Apache Kafka partitions as well.
 
 The binder currently uses the Apache Kafka `kafka-clients` 1.0.0 jar and is designed to be used with a broker of at least that version.
 This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
 For example, with versions earlier than 0.11.x.x, native headers are not supported.
 Also, 0.11.x.x does not support the `autoAddPartitions` property.
 
 == Configuration Options
 
 This section contains the configuration options used by the Apache Kafka binder.
 
-For common configuration options and properties pertaining to binder, refer to the <<binding-properties,core documentation>>.
+For common configuration options and properties pertaining to binder, see the <<binding-properties,core documentation>>.
 
 === Kafka Binder Properties
 
 spring.cloud.stream.kafka.binder.brokers::
-A list of brokers to which the Kafka binder will connect.
+A list of brokers to which the Kafka binder connects.
 +
 Default: `localhost`.
 spring.cloud.stream.kafka.binder.defaultBrokerPort::
-`brokers` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
+`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
 This sets the default port when no port is configured in the broker list.
 +
 Default: `9092`.
 spring.cloud.stream.kafka.binder.zkNodes::
 A list of ZooKeeper nodes to which the Kafka binder can connect.
 +
 Default: `localhost`.
 spring.cloud.stream.kafka.binder.defaultZkPort::
 `zkNodes` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
 This sets the default port when no port is configured in the node list.
 +
 Default: `2181`.
 spring.cloud.stream.kafka.binder.configuration::
 Key/Value map of client properties (both producers and consumer) passed to all clients created by the binder.
-Due to the fact that these properties will be used by both producers and consumers, usage should be restricted to common properties, especially security settings.
+Due to the fact that these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
 +
 Default: Empty map.
 spring.cloud.stream.kafka.binder.headers::
-The list of custom headers that will be transported by the binder.
+The list of custom headers that are transported by the binder.
 Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
 +
 Default: empty.
 spring.cloud.stream.kafka.binder.healthTimeout::
-The time to wait to get partition information in seconds; default 60.
-Health will report as down if this timer expires.
+The time to wait to get partition information, in seconds.
+Health reports as down if this timer expires.
 +
 Default: 10.
 spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
 The frequency, in milliseconds, with which offsets are saved.
 Ignored if `0`.
 +
 Default: `10000`.
 spring.cloud.stream.kafka.binder.offsetUpdateCount::
 The frequency, in number of updates, with which consumed offsets are persisted.
 Ignored if `0`.
 Mutually exclusive with `offsetUpdateTimeWindow`.
 +
 Default: `0`.
 spring.cloud.stream.kafka.binder.requiredAcks::
 The number of required acks on the broker.
+See the Kafka documentation for the producer `acks` property.
 +
 Default: `1`.
 spring.cloud.stream.kafka.binder.minPartitionCount::
 Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
-The global minimum number of partitions that the binder will configure on topics on which it produces/consumes data.
-It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount` * `concurrency` settings of the producer (if either is larger).
+The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
+It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
 +
 Default: `1`.
 spring.cloud.stream.kafka.binder.replicationFactor::
 The replication factor of auto-created topics if `autoCreateTopics` is active.
+Can be overridden on each binding.
 +
 Default: `1`.
 spring.cloud.stream.kafka.binder.autoCreateTopics::
-If set to `true`, the binder will create new topics automatically.
-If set to `false`, the binder will rely on the topics being already configured.
-In the latter case, if the topics do not exist, the binder will fail to start.
-Of note, this setting is independent of the `auto.topic.create.enable` setting of the broker and it does not influence it: if the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
+If set to `true`, the binder creates new topics automatically.
+If set to `false`, the binder relies on the topics being already configured.
+In the latter case, if the topics do not exist, the binder fails to start.
++
+NOTE: This setting is independent of the `auto.topic.create.enable` setting of the broker and does not influence it.
+If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
 +
 Default: `true`.
 spring.cloud.stream.kafka.binder.autoAddPartitions::
-If set to `true`, the binder will create add new partitions if required.
-If set to `false`, the binder will rely on the partition size of the topic being already configured.
-If the partition count of the target topic is smaller than the expected value, the binder will fail to start.
+If set to `true`, the binder creates new partitions if required.
+If set to `false`, the binder relies on the partition size of the topic being already configured.
+If the partition count of the target topic is smaller than the expected value, the binder fails to start.
 +
 Default: `false`.
 spring.cloud.stream.kafka.binder.socketBufferSize::
 Size (in bytes) of the socket buffer to be used by the Kafka consumers.
 +
 Default: `2097152`.
 spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
-Enable transactions in the binder; see `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
+Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
 When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
 +
 Default `null` (no transactions)
 spring.cloud.stream.kafka.binder.transaction.producer.*::
 Global producer properties for producers in a transactional binder.
 See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
 +
 Default: See individual producer properties.
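For example, a minimal transactional binder configuration might look like the following sketch (the prefix value and the producer override are arbitrary assumptions):

[source]
----
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix=tx-
spring.cloud.stream.kafka.binder.transaction.producer.configuration.retries=10
----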
 
 spring.cloud.stream.kafka.binder.headerMapperBeanName::
 The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
 Use this, for example, if you wish to customize the trusted packages in a `DefaultKafkaHeaderMapper` that uses JSON deserialization for the headers.
 +
 Default: none.
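A sketch of such a bean (the trusted package name is a placeholder):

[source,java]
----
@Bean
public KafkaHeaderMapper myHeaderMapper() {
	DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
	// trust additional packages for JSON-deserialized header types
	mapper.addTrustedPackages("com.example.headers");
	return mapper;
}
----

It would then be registered by setting `spring.cloud.stream.kafka.binder.headerMapperBeanName=myHeaderMapper`.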
 
 [[kafka-consumer-properties]]
 === Kafka Consumer Properties
 
 The following properties are available for Kafka consumers only and
 must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
 
+admin.configuration::
+A `Map` of Kafka topic properties used when provisioning topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0`
++
+Default: none.
+
+admin.replicas-assignment::
+A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
+Used when provisioning new topics.
+See the `NewTopic` Javadocs in the `kafka-clients` jar.
++
+Default: none.
+
+admin.replication-factor::
+The replication factor to use when provisioning topics. Overrides the binder-wide setting.
+Ignored if `replicas-assignment` is present.
++
+Default: none (the binder-wide default of 1 is used).
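Taken together, the admin properties for a consumer binding might be sketched as follows (the binding name and the values are illustrative):

[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0
spring.cloud.stream.kafka.bindings.input.consumer.admin.replication-factor=2
# when replicas-assignment is present, it takes precedence over replication-factor
spring.cloud.stream.kafka.bindings.input.consumer.admin.replicas-assignment.0=0,1
----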
 
 autoRebalanceEnabled::
-When `true`, topic partitions will be automatically rebalanced between the members of a consumer group.
-When `false`, each consumer will be assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
-This requires both `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
-The property `spring.cloud.stream.instanceCount` must typically be greater than 1 in this case.
+When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
+When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
+This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
+The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
 +
 Default: `true`.
+ackEachRecord::
+When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
+By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
+The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
+Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
+Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
++
+Default: `false`.
 autoCommitOffset::
 Whether to autocommit offsets when a message has been processed.
-If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header will be present in the inbound message.
+If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` is present in the inbound message.
 Applications may use this header for acknowledging messages.
 See the examples section for details.
-When this property is set to `false`, Kafka binder will set the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL`.
+When this property is set to `false`, Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
+Also see `ackEachRecord`.
 +
 Default: `true`.
|
||||
autoCommitOnError::
Effective only if `autoCommitOffset` is set to `true`.
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
If set to `true`, it always auto-commits (if auto-commit is enabled).
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
+
Default: not set.
recoveryInterval::
The interval between connection recovery attempts, in milliseconds.
+
Default: `5000`.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by `startOffset`.
+
Default: `false`.
startOffset::
The starting offset for new groups.
Allowed values: `earliest` and `latest`.
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
Also see `resetOffsets` (earlier in this list).
+
Default: null (equivalent to `earliest`).
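
As a sketch, replaying a topic from the beginning on startup combines the two properties above (the channel name `input` is an assumption):

[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.resetOffsets=true
spring.cloud.stream.kafka.bindings.input.consumer.startOffset=earliest
----
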
enableDlq::
When set to `true`, it enables DLQ behavior for the consumer.
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
The DLQ topic name can be configured by setting the `dlqName` property.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
See <<kafka-dlq-processing>> for more information.
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
+
Default: `false`.
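
A minimal sketch of enabling the DLQ with a custom topic name (the channel name `input` and the topic name `input-dlq` are assumptions):

[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
spring.cloud.stream.kafka.bindings.input.consumer.dlqName=input-dlq
----
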
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
+
Default: Empty map.
dlqName::
The name of the DLQ topic to receive the error messages.
+
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
dlqProducerProperties::
Using this, DLQ-specific producer properties can be set.
All the properties available through Kafka producer properties can be set through this property.
+
Default: Default Kafka producer properties.
standardHeaders::
Indicates which standard headers are populated by the inbound channel adapter.
Allowed values: `none`, `id`, `timestamp`, or `both`.
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
+
Default: `none`
converterBeanName::
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
+
Default: `null`
idleEventInterval::
The interval, in milliseconds, between events indicating that no messages have recently been received.
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
See <<pause-resume>> for a usage example.
+
Default: `30000`

@@ -214,55 +234,71 @@ Default: `30000`

[[kafka-producer-properties]]
=== Kafka Producer Properties

The following properties are available for Kafka producers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.

admin.configuration::
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.output.producer.admin.configuration.message.format.version=0.9.0.0`
+
Default: none.
admin.replicas-assignment::
A `Map<Integer, List<Integer>>` of replica assignments, with the key being the partition and the value being the assignments.
Used when provisioning new topics.
See the `NewTopic` Javadocs in the `kafka-clients` jar.
+
Default: none.
admin.replication-factor::
The replication factor to use when provisioning new topics. Overrides the binder-wide setting.
Ignored if `replicas-assignment` is present.
+
Default: none (the binder-wide default of 1 is used).
bufferSize::
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
+
Default: `16384`.
sync::
Whether the producer is synchronous.
+
Default: `false`.
batchTimeout::
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
messageKeyExpression::
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
The payload cannot be used because, by the time this expression is evaluated, the payload is already in the form of a `byte[]`.
+
Default: `none`.
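
For example, a sketch that keys outgoing records by a message header (the channel name `output` and the header name `myKey` are assumptions):

[source]
----
spring.cloud.stream.kafka.bindings.output.producer.messageKeyExpression=headers['myKey']
----
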
headerPatterns::
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
Patterns can begin or end with the wildcard character (asterisk).
Patterns can be negated by prefixing with `!`.
Matching stops after the first match (positive or negative).
For example, `!ask,as*` passes `ash` but not `ask`.
`id` and `timestamp` are never mapped.
+
Default: `*` (all headers except `id` and `timestamp`)
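
As a sketch, the following suppresses one header and maps all others (the channel and header names are assumptions):

[source]
----
spring.cloud.stream.kafka.bindings.output.producer.headerPatterns=!internalHeader,*
----
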
configuration::
Map with a key/value pair containing generic Kafka producer properties.
+
Default: Empty map.

NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.

=== Usage examples

In this section, we show the use of the preceding properties for specific scenarios.

==== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking

This example illustrates how one may manually acknowledge offsets in a consumer application.

This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
Use the corresponding input channel name for your example.

[source]
----
@@ -286,13 +322,13 @@ public class ManuallyAcknowdledgingConsumer {
}
----

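The diff shows only the end of this example, so the following is a minimal sketch of such a consumer, assuming an input channel named `input`; the `kafka_acknowledgment` header described earlier carries the `Acknowledgment` to call:

[source, java]
----
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.messaging.Sink;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.Message;

@SpringBootApplication
@EnableBinding(Sink.class)
public class ManuallyAcknowdledgingConsumer {

    public static void main(String[] args) {
        SpringApplication.run(ManuallyAcknowdledgingConsumer.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void process(Message<?> message) {
        // KafkaHeaders.ACKNOWLEDGMENT resolves to the kafka_acknowledgment header
        Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        if (acknowledgment != null) {
            acknowledgment.acknowledge();
        }
    }
}
----
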
==== Example: Security Configuration

Apache Kafka 0.9 supports secure connections between client and brokers.
To take advantage of this feature, follow the guidelines in the http://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 http://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.

For example, to set `security.protocol` to `SASL_SSL`, set the following property:

[source]
----
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
----
@@ -303,47 +339,45 @@ All the other security properties can be set in a similar manner.

When using Kerberos, follow the instructions in the http://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.

Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.

===== Using JAAS Configuration Files

The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:

[source,bash]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----

===== Using Spring Boot Properties

As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.

The following properties can be used to configure the login context of the Kafka client:

spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. Not necessary to be set in normal cases.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
Map with a key/value pair containing the login module options.
+
Default: Empty map.

The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:

[source,bash]
----
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.autoCreateTopics=false \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
@@ -353,7 +387,7 @@ Here is an example of launching a Spring Cloud Stream application with SASL and
   --spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----


The preceding example represents the equivalent of the following JAAS file:

[source]
----
@@ -366,31 +400,26 @@ KafkaClient {
};
----

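Since the diff shows only the enclosing braces, here is a minimal sketch of what such a JAAS file typically contains, based on the default `loginModule` and `controlFlag` and the `principal` option shown above (the keytab settings are assumptions):

[source]
----
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/etc/security/keytabs/kafka_client.keytab"
    principal="kafka-client-1@EXAMPLE.COM";
};
----
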

If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent.
As an alternative to setting `spring.cloud.stream.kafka.binder.autoCreateTopics`, you can remove the broker dependency from the application. See <<exclude-admin-utils>> for details.

NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.

NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` with Kerberos.
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.

[[pause-resume]]
==== Example: Pausing and Resuming the Consumer

If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
The frequency at which events are published is controlled by the `idleEventInterval` property.
Since the consumer is not thread-safe, you must call these methods on the calling thread.

The following simple application shows how to pause and resume:

[source, java]
----
@@ -421,121 +450,25 @@ public class Application {
}
----

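Because the diff elides the body of this example, here is a minimal sketch of the pattern it describes, assuming an input channel named `input` bound to a topic named `myTopic`: the listener pauses its own `Consumer`, and an idle-event listener resumes it:

[source, java]
----
import java.util.Collections;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.messaging.Sink;
import org.springframework.context.ApplicationListener;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.event.ListenerContainerIdleEvent;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;

@SpringBootApplication
@EnableBinding(Sink.class)
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
        System.out.println(in);
        // pause on the calling (consumer) thread; the consumer is not thread-safe
        consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
    }

    @Bean
    public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
        return event -> {
            // the idle event is published on the consumer thread, so resuming here is safe
            if (!event.getConsumer().paused().isEmpty()) {
                event.getConsumer().resume(event.getConsumer().paused());
            }
        };
    }

}
----
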

==== Using the binder with Apache Kafka 0.10

The default Kafka support in Spring Cloud Stream Kafka binder is for Kafka version 0.10.1.1. The binder also supports connecting to other 0.10-based versions and 0.9 clients.
In order to do this, when you create the project that contains your application, include `spring-cloud-starter-stream-kafka` as you normally would do for the default binder.
Then add these dependencies at the top of the `<dependencies>` section in the pom.xml file to override the dependencies.

Here is an example of downgrading your application to 0.10.0.1. Since it is still on the 0.10 line, the default `spring-kafka` and `spring-integration-kafka` versions can be retained.

[source,xml]
----
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka_2.11</artifactId>
  <version>0.10.0.1</version>
  <exclusions>
    <exclusion>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
    </exclusion>
  </exclusions>
</dependency>
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka-clients</artifactId>
  <version>0.10.0.1</version>
</dependency>
----

Here is another example, using the 0.9.0.1 version.

[source,xml]
----
<dependency>
  <groupId>org.springframework.kafka</groupId>
  <artifactId>spring-kafka</artifactId>
  <version>1.0.5.RELEASE</version>
</dependency>
<dependency>
  <groupId>org.springframework.integration</groupId>
  <artifactId>spring-integration-kafka</artifactId>
  <version>2.0.1.RELEASE</version>
</dependency>
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka_2.11</artifactId>
  <version>0.9.0.1</version>
  <exclusions>
    <exclusion>
      <groupId>org.slf4j</groupId>
      <artifactId>slf4j-log4j12</artifactId>
    </exclusion>
  </exclusions>
</dependency>
<dependency>
  <groupId>org.apache.kafka</groupId>
  <artifactId>kafka-clients</artifactId>
  <version>0.9.0.1</version>
</dependency>
----

[NOTE]
====
The versions above are provided only for the sake of the example.
For best results, we recommend using the most recent 0.10-compatible versions of the projects.
====

[[exclude-admin-utils]]
==== Excluding Kafka broker jar from the classpath of the binder-based application

The Apache Kafka Binder uses the administrative utilities that are part of the Apache Kafka server library to create and reconfigure topics.
If the inclusion of the Apache Kafka server library and its dependencies is not necessary at runtime because the application relies on the topics being configured administratively, the Kafka binder allows for the Apache Kafka server dependency to be excluded from the application.

If you use non-default versions for the Kafka dependencies, as advised above, all you have to do is not include the Kafka broker dependency.
If you use the default Kafka version, ensure that you exclude the Kafka broker jar from the `spring-cloud-starter-stream-kafka` dependency, as follows:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-starter-stream-kafka</artifactId>
  <exclusions>
    <exclusion>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.11</artifactId>
    </exclusion>
  </exclusions>
</dependency>
----


If you exclude the Apache Kafka server dependency and the topic is not present on the server, the Apache Kafka broker creates the topic if auto topic creation is enabled on the server.
Keep in mind that, if you rely on this, the Kafka server uses the default number of partitions and replication factor.
On the other hand, if auto topic creation is disabled on the server, care must be taken before running the application to create the topic with the desired number of partitions.

If you want full control over how partitions are allocated, leave the default settings as they are -- that is, do not exclude the Kafka broker jar and ensure that `spring.cloud.stream.kafka.binder.autoCreateTopics` is set to `true` (the default).

[[kafka-error-channels]]
== Error Channels

Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
See <<spring-cloud-stream-overview-error-handling>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with the following properties:

* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`.

There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
You can consume these exceptions with your own Spring Integration flow.
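
As a sketch of such a flow, a `@ServiceActivator` on the binding's error channel can inspect the failure; the channel name here assumes the `<destination>.<group>.errors` convention from the error-handling section referenced above, and `myTopic` and `myGroup` are placeholders:

[source, java]
----
import org.springframework.cloud.stream.binder.kafka.KafkaSendFailureException;
import org.springframework.integration.annotation.ServiceActivator;
import org.springframework.messaging.support.ErrorMessage;

public class SendFailureHandler {

    @ServiceActivator(inputChannel = "myTopic.myGroup.errors")
    public void handle(ErrorMessage message) {
        if (message.getPayload() instanceof KafkaSendFailureException) {
            KafkaSendFailureException failure = (KafkaSendFailureException) message.getPayload();
            // failedMessage is the spring-messaging Message<?>; record is the raw ProducerRecord
            System.out.println("Failed to send " + failure.getRecord());
        }
    }
}
----
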

[[kafka-metrics]]
== Kafka Metrics

Kafka binder module exposes the following metrics:

`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag`: This metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, the consumer group named `myGroup` has `1000` messages waiting to be consumed from the topic called `myTopic`.
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.

@@ -2,9 +2,9 @@

Apache Kafka supports topic partitioning natively.

Sometimes it is advantageous to send data to specific partitions -- for example, when you want to strictly order message processing (all messages for a particular customer should go to the same partition).

The following example shows how to configure the producer and consumer side:

[source, java]
----
@@ -55,14 +55,17 @@ spring:
----

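The example's body is elided in this diff; as a sketch, the producer side of such a configuration typically sets the core partitioning properties (the channel name `output` and the key expression are assumptions):

[source]
----
spring.cloud.stream.bindings.output.producer.partitionKeyExpression=payload.id
spring.cloud.stream.bindings.output.producer.partitionCount=12
----
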
IMPORTANT: The topic must be provisioned to have enough partitions to achieve the desired concurrency for all consumer groups.
The above configuration supports up to 12 consumer instances (6 if their `concurrency` is 2, 4 if their concurrency is 3, and so on).
It is generally best to "`over-provision`" the partitions to allow for future increases in consumers or concurrency.

NOTE: The preceding configuration uses the default partitioning (`key.hashCode() % partitionCount`).
This may or may not provide a suitably balanced algorithm, depending on the key values.
You can override this default by using the `partitionSelectorExpression` or `partitionSelectorClass` properties.

Since partitions are natively handled by Kafka, no special configuration is needed on the consumer side.
Kafka allocates partitions across the instances.

The following Spring Boot application listens to a Kafka stream and prints (to the console) the partition ID to which each message goes:

[source, java]
----
@@ -96,5 +99,6 @@ spring:
        group: myGroup
----


You can add instances as needed.
Kafka rebalances the partition allocations.
If the instance count (or `instance count * concurrency`) exceeds the number of partitions, some consumers are idle.

@@ -10,7 +10,7 @@
<parent>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
  <version>2.0.1.RELEASE</version>
</parent>

<dependencies>

@@ -30,7 +30,6 @@ import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.StringUtils;

/**
@@ -72,11 +71,9 @@ class KafkaStreamsMessageConversionDelegate {
 */
public KStream serializeOnOutbound(KStream<?,?> outboundBindTarget) {
	String contentType = this.kstreamBindingInformationCatalogue.getContentType(outboundBindTarget);
	MessageConverter messageConverter = compositeMessageConverterFactory.getMessageConverterForAllRegistered();

	return outboundBindTarget.mapValues((v) -> {
		Message<?> message = v instanceof Message<?> ? (Message<?>) v :
				MessageBuilder.withPayload(v).build();
		Map<String, Object> headers = new HashMap<>(message.getHeaders());
@@ -84,9 +81,9 @@ class KafkaStreamsMessageConversionDelegate {
			headers.put(MessageHeaders.CONTENT_TYPE, contentType);
		}
		MessageHeaders messageHeaders = new MessageHeaders(headers);
		return messageConverter.toMessage(message.getPayload(), messageHeaders).getPayload();
	});
}

@@ -140,10 +137,10 @@ class KafkaStreamsMessageConversionDelegate {
	processErrorFromDeserialization(bindingTarget, branch[1]);

	//first branch above is the branch where the messages are converted, let it go through further processing.
	return branch[0].mapValues((o2) -> {
		Object objectValue = keyValueThreadLocal.get().value;
		keyValueThreadLocal.remove();
		return objectValue;
	});
}

@@ -20,14 +20,12 @@ import java.lang.reflect.Method;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.Consumed;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
@@ -66,6 +64,7 @@ import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.ReflectionUtils;
import org.springframework.util.StringUtils;

/**
@@ -155,6 +154,7 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
			this.applicationContext,
			this.streamListenerParameterAdapter);
	try {
		ReflectionUtils.makeAccessible(method);
		if (Void.TYPE.equals(method.getReturnType())) {
			method.invoke(bean, adaptedInboundArguments);
		}
@@ -221,7 +221,7 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
	enableNativeDecodingForKTableAlways(parameterType, bindingProperties);
	StreamsConfig streamsConfig = null;
	//Retrieve the StreamsConfig created for this method if available.
	//Otherwise, create the StreamsBuilderFactory and get the underlying config.
	if (!methodStreamsBuilderFactoryBeanMap.containsKey(method)) {
		streamsConfig = buildStreamsBuilderAndRetrieveConfig(method, applicationContext, bindingProperties);
	}
@@ -291,24 +291,25 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
Serde<?> keySerde, Serde<?> valueSerde) {
|
||||
KStream<?, ?> stream = streamsBuilder.stream(bindingServiceProperties.getBindingDestination(inboundName),
|
||||
Consumed.with(keySerde, valueSerde));
|
||||
if (bindingProperties.getConsumer().isUseNativeDecoding()){
|
||||
final boolean nativeDecoding = bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding();
|
||||
if (nativeDecoding){
|
||||
LOG.info("Native decoding is enabled for " + inboundName + ". Inbound deserialization done at the broker.");
|
||||
}
|
||||
else {
|
||||
LOG.info("Native decoding is disabled for " + inboundName + ". Inbound message conversion done by Spring Cloud Stream.");
|
||||
}
|
||||
stream = stream.map((key, value) -> {
|
||||
KeyValue<Object, Object> keyValue;
|
||||
|
||||
stream = stream.mapValues(value -> {
|
||||
Object returnValue;
|
||||
String contentType = bindingProperties.getContentType();
|
||||
if (!StringUtils.isEmpty(contentType) && !bindingProperties.getConsumer().isUseNativeDecoding()) {
|
||||
if (!StringUtils.isEmpty(contentType) && !nativeDecoding) {
|
||||
Message<?> message = MessageBuilder.withPayload(value)
|
||||
.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
|
||||
keyValue = new KeyValue<>(key, message);
|
||||
returnValue = message;
|
||||
} else {
|
||||
returnValue = value;
|
||||
}
|
||||
else {
|
||||
keyValue = new KeyValue<>(key, value);
|
||||
}
|
||||
return keyValue;
|
||||
return returnValue;
|
||||
});
|
||||
return stream;
|
||||
}
|
||||
@@ -329,12 +330,11 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
	ConfigurableListableBeanFactory beanFactory = this.applicationContext.getBeanFactory();
	StreamsBuilderFactoryBean streamsBuilder = new StreamsBuilderFactoryBean();
	streamsBuilder.setAutoStartup(false);
	BeanDefinition streamsBuilderBeanDefinition =
			BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(), () -> streamsBuilder)
					.getRawBeanDefinition();
	((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("stream-builder-" + method.getName(), streamsBuilderBeanDefinition);
	StreamsBuilderFactoryBean streamsBuilderX = applicationContext.getBean("&stream-builder-" + method.getName(), StreamsBuilderFactoryBean.class);
	String group = bindingProperties.getGroup();
	if (!StringUtils.hasText(group)) {
		group = binderConfigurationProperties.getApplicationId();
@@ -364,7 +364,7 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
	BeanDefinition streamsConfigBeanDefinition =
			BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsConfig>) streamsConfig.getClass(), () -> streamsConfig)
					.getRawBeanDefinition();
	((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("streamsConfig-" + method.getName(), streamsConfigBeanDefinition);

	streamsBuilder.setStreamsConfig(streamsConfig);
	methodStreamsBuilderFactoryBeanMap.put(method, streamsBuilderX);

@@ -20,6 +20,7 @@ import java.util.HashSet;
import java.util.Set;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.QueryableStoreType;

/**
@@ -27,6 +28,7 @@ import org.apache.kafka.streams.state.QueryableStoreType;
 * the user applications.
 *
 * @author Soby Chacko
 * @author Renwei Han
 * @since 2.0.0
 */
public class QueryableStoreRegistry {
@@ -44,9 +46,14 @@ public class QueryableStoreRegistry {
public <T> T getQueryableStoreType(String storeName, QueryableStoreType<T> storeType) {

	for (KafkaStreams kafkaStream : kafkaStreams) {
		try {
			T store = kafkaStream.store(storeName, storeType);
			if (store != null) {
				return store;
			}
		}
		catch (InvalidStateStoreException ignored) {
			//pass through
		}
	}
	return null;

@@ -106,10 +106,10 @@ public class KafkaStreamsBinderPojoInputAndPrimitiveTypeOutputTests {
template.sendDefault("{\"id\":\"123\"}");
ConsumerRecord<Integer, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");

assertThat(cr.key()).isEqualTo(123);
ObjectMapper om = new ObjectMapper();
Long aLong = om.readValue(cr.value(), Long.class);
assertThat(aLong).isEqualTo(1L);
}

@EnableBinding(KafkaStreamsProcessor.class)

@@ -24,11 +24,14 @@ import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@@ -48,6 +51,7 @@ import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;
@@ -101,6 +105,12 @@ public class KafkaStreamsBinderWordCountIntegrationTests {
		"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
try {
	receiveAndValidate(context);
	//Assertions on StreamBuilderFactoryBean
	StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
	KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
	ReadOnlyWindowStore<Object, Object> store = kafkaStreams.store("foo-WordCounts", QueryableStoreTypes.windowStore());
	assertThat(store).isNotNull();
} finally {
	context.close();
}
@@ -119,7 +129,7 @@ public class KafkaStreamsBinderWordCountIntegrationTests {
@EnableBinding(KafkaStreamsProcessor.class)
@EnableAutoConfiguration
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
static class WordCountProcessorApplication {

	@Autowired
	private TimeWindows timeWindows;

@@ -1,5 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;io.micrometer;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99

@@ -10,7 +10,7 @@
<parent>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
  <version>2.0.1.RELEASE</version>
</parent>

<dependencies>
@@ -37,11 +37,6 @@
	<artifactId>spring-boot-autoconfigure</artifactId>
	<optional>true</optional>
</dependency>
<dependency>
	<groupId>org.apache.kafka</groupId>
	<artifactId>kafka-clients</artifactId>

@@ -1,5 +1,5 @@
/*
 * Copyright 2016-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -78,25 +78,31 @@ public class KafkaBinderHealthIndicator implements HealthIndicator {
public Health call() {
	try {
		if (metadataConsumer == null) {
			synchronized (KafkaBinderHealthIndicator.this) {
				if (metadataConsumer == null) {
					metadataConsumer = consumerFactory.createConsumer();
				}
			}
		}
		synchronized (metadataConsumer) {
			Set<String> downMessages = new HashSet<>();
			for (String topic : KafkaBinderHealthIndicator.this.binder.getTopicsInUse().keySet()) {
				List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
				for (PartitionInfo partitionInfo : partitionInfos) {
					if (KafkaBinderHealthIndicator.this.binder.getTopicsInUse().get(topic).getPartitionInfos()
							.contains(partitionInfo) && partitionInfo.leader().id() == -1) {
						downMessages.add(partitionInfo.toString());
					}
				}
			}
			if (downMessages.isEmpty()) {
				return Health.up().build();
			}
			else {
				return Health.down()
						.withDetail("Following partitions in use have no leaders: ", downMessages.toString())
						.build();
			}
		}
	}
	catch (Exception e) {

@@ -20,8 +20,15 @@ import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.TimeGauge;
import io.micrometer.core.instrument.binder.MeterBinder;

import org.apache.commons.logging.Log;
@@ -48,12 +55,17 @@ import org.springframework.util.ObjectUtils;
 * @author Soby Chacko
 * @author Artem Bilan
 * @author Oleg Zhurakousky
 * @author Jon Schneider
 * @author Thomas Cheyney
 * @author Gary Russell
 */
public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<BindingCreatedEvent> {

	private static final int DEFAULT_TIMEOUT = 60;

	private final static Log LOG = LogFactory.getLog(KafkaBinderMetrics.class);

	static final String METRIC_NAME = "spring.cloud.stream.binder.kafka.offset";

	private final KafkaMessageChannelBinder binder;

@@ -63,6 +75,10 @@ public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<Bind
private final MeterRegistry meterRegistry;

private Consumer<?, ?> metadataConsumer;

private int timeout = DEFAULT_TIMEOUT;

public KafkaBinderMetrics(KafkaMessageChannelBinder binder,
		KafkaBinderConfigurationProperties binderConfigurationProperties,
		ConsumerFactory<?, ?> defaultConsumerFactory, @Nullable MeterRegistry meterRegistry) {
@@ -79,6 +95,10 @@ public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<Bind
	this(binder, binderConfigurationProperties, null, null);
}

public void setTimeout(int timeout) {
	this.timeout = timeout;
}

@Override
public void bindTo(MeterRegistry registry) {
	for (Map.Entry<String, KafkaMessageChannelBinder.TopicInformation> topicInfo : this.binder.getTopicsInUse()
@@ -91,35 +111,66 @@ public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<Bind
		String topic = topicInfo.getKey();
		String group = topicInfo.getValue().getConsumerGroup();

		TimeGauge.builder(METRIC_NAME, this, TimeUnit.MILLISECONDS,
				o -> calculateConsumerLagOnTopic(topic, group))
				.tag("group", group)
				.tag("topic", topic)
				.description("Consumer lag for a particular group and topic")
				.register(registry);
	}
}
private double calculateConsumerLagOnTopic(String topic, String group) {
	ExecutorService exec = Executors.newSingleThreadExecutor();
	Future<Long> future = exec.submit(() -> {
		long lag = 0;
		try {
			if (metadataConsumer == null) {
				synchronized (KafkaBinderMetrics.this) {
					if (metadataConsumer == null) {
						metadataConsumer = createConsumerFactory(group).createConsumer();
					}
				}
			}
			synchronized (metadataConsumer) {
				List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
				List<TopicPartition> topicPartitions = new LinkedList<>();
				for (PartitionInfo partitionInfo : partitionInfos) {
					topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
				}

				Map<TopicPartition, Long> endOffsets = metadataConsumer.endOffsets(topicPartitions);

				for (Map.Entry<TopicPartition, Long> endOffset : endOffsets.entrySet()) {
					OffsetAndMetadata current = metadataConsumer.committed(endOffset.getKey());
					if (current != null) {
						lag += endOffset.getValue() - current.offset();
					}
					else {
						lag += endOffset.getValue();
					}
				}
			}
		}
		catch (Exception e) {
			LOG.debug("Cannot generate metric for topic: " + topic, e);
		}
		return lag;
	});
	try {
		return future.get(this.timeout, TimeUnit.SECONDS);
	}
	catch (InterruptedException e) {
		Thread.currentThread().interrupt();
		return 0L;
	}
	catch (ExecutionException | TimeoutException e) {
		return 0L;
	}
	finally {
		exec.shutdownNow();
	}
}

private ConsumerFactory<?, ?> createConsumerFactory(String group) {
@@ -28,7 +28,10 @@ import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
@@ -50,10 +53,12 @@ import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.DefaultPollableMessageSource;
import org.springframework.cloud.stream.binder.EmbeddedHeaderUtils;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.MessageValues;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties.StandardHeaders;
@@ -73,6 +78,7 @@ import org.springframework.integration.kafka.support.RawRecordHeaderErrorMessage
import org.springframework.integration.support.AcknowledgmentCallback;
import org.springframework.integration.support.AcknowledgmentCallback.Status;
import org.springframework.integration.support.ErrorMessageStrategy;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.integration.support.StaticMessageHeaderAccessor;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
@@ -80,7 +86,9 @@ import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
import org.springframework.kafka.listener.config.ContainerProperties;
import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
import org.springframework.kafka.support.KafkaHeaderMapper;
@@ -325,7 +333,8 @@ public class KafkaMessageChannelBinder extends
|
||||
|
||||
Collection<PartitionInfo> listenedPartitions;
|
||||
|
||||
if (extendedConsumerProperties.getExtension().isAutoRebalanceEnabled() ||
|
||||
boolean groupManagement = extendedConsumerProperties.getExtension().isAutoRebalanceEnabled();
|
||||
if (groupManagement ||
|
||||
extendedConsumerProperties.getInstanceCount() == 1) {
|
||||
listenedPartitions = allPartitions;
|
||||
}
|
||||
@@ -354,6 +363,7 @@ public class KafkaMessageChannelBinder extends
|
||||
}
|
||||
containerProperties.setIdleEventInterval(extendedConsumerProperties.getExtension().getIdleEventInterval());
|
||||
int concurrency = Math.min(extendedConsumerProperties.getConcurrency(), listenedPartitions.size());
|
||||
resetOffsets(extendedConsumerProperties, consumerFactory, groupManagement, containerProperties);
|
||||
@SuppressWarnings("rawtypes")
|
||||
final ConcurrentMessageListenerContainer<?, ?> messageListenerContainer =
|
||||
new ConcurrentMessageListenerContainer(consumerFactory, containerProperties) {
|
||||
@@ -366,8 +376,14 @@ public class KafkaMessageChannelBinder extends
|
||||
};
|
||||
messageListenerContainer.setConcurrency(concurrency);
|
||||
// these won't be needed if the container is made a bean
|
||||
messageListenerContainer.setApplicationEventPublisher(getApplicationContext());
|
||||
if (getApplicationEventPublisher() != null) {
|
||||
messageListenerContainer.setApplicationEventPublisher(getApplicationEventPublisher());
|
||||
}
|
||||
else if (getApplicationContext() != null) {
|
||||
messageListenerContainer.setApplicationEventPublisher(getApplicationContext());
|
||||
}
|
||||
messageListenerContainer.setBeanName(destination.getName() + ".container");
|
||||
// end of these won't be needed...
|
||||
if (!extendedConsumerProperties.getExtension().isAutoCommitOffset()) {
|
||||
messageListenerContainer.getContainerProperties()
|
||||
.setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
|
||||
@@ -376,6 +392,9 @@ public class KafkaMessageChannelBinder extends
|
||||
else {
|
||||
messageListenerContainer.getContainerProperties()
|
||||
.setAckOnError(isAutoCommitOnError(extendedConsumerProperties));
|
||||
if (extendedConsumerProperties.getExtension().isAckEachRecord()) {
|
||||
messageListenerContainer.getContainerProperties().setAckMode(AckMode.RECORD);
|
||||
}
|
||||
}
|
||||
if (this.logger.isDebugEnabled()) {
|
||||
this.logger.debug(
|
||||
@@ -397,6 +416,58 @@ public class KafkaMessageChannelBinder extends
|
||||
return kafkaMessageDrivenChannelAdapter;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reset the offsets if needed; may update the offsets in in the container's
|
||||
* topicPartitionInitialOffsets.
|
||||
*/
|
||||
private void resetOffsets(
|
||||
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties,
|
||||
final ConsumerFactory<?, ?> consumerFactory, boolean groupManagement,
|
||||
final ContainerProperties containerProperties) {
|
||||
|
||||
boolean resetOffsets = extendedConsumerProperties.getExtension().isResetOffsets();
|
||||
final Object resetTo = consumerFactory.getConfigurationProperties().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
|
||||
final AtomicBoolean initialAssignment = new AtomicBoolean(true);
|
||||
if (!"earliest".equals(resetTo) && "!latest".equals(resetTo)) {
|
||||
logger.warn("no (or unknown) " + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG +
|
||||
" property cannot reset");
|
||||
resetOffsets = false;
|
||||
}
|
||||
if (groupManagement && resetOffsets) {
|
||||
containerProperties.setConsumerRebalanceListener(new ConsumerAwareRebalanceListener() {
|
||||
|
||||
@Override
|
||||
public void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
|
||||
// no op
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPartitionsRevokedAfterCommit(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
|
||||
// no op
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
|
||||
if (initialAssignment.getAndSet(false)) {
|
||||
if ("earliest".equals(resetTo)) {
|
||||
consumer.seekToBeginning(tps);
|
||||
}
|
||||
else if ("latest".equals(resetTo)) {
|
||||
consumer.seekToEnd(tps);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
else if (resetOffsets) {
|
||||
Arrays.stream(containerProperties.getTopicPartitions())
|
||||
.map(tpio -> new TopicPartitionInitialOffset(tpio.topic(), tpio.partition(),
|
||||
// SK GH-599 "earliest".equals(resetTo) ? SeekPosition.BEGINNING : SeekPosition.END))
|
||||
"earliest".equals(resetTo) ? 0L : Long.MAX_VALUE))
|
||||
.collect(Collectors.toList()).toArray(containerProperties.getTopicPartitions());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected PolledConsumerResources createPolledConsumerResources(String name, String group,
|
||||
ConsumerDestination destination, ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
|
||||
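resetOffsets drives two distinct strategies: under group management the seek happens inside a rebalance listener on the first assignment only, while under manual assignment the container's initial offsets are rewritten (0 for earliest, Long.MAX_VALUE for latest). In application terms this is typically switched on per binding with consumer properties such as resetOffsets=true together with a startOffset of earliest or latest, as documented for this binder. A hedged sketch of the first-assignment-only guard, using the plain kafka-clients listener interface rather than the binder's internals:

    import java.util.Collection;
    import java.util.concurrent.atomic.AtomicBoolean;

    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    import org.apache.kafka.common.TopicPartition;

    // Re-seeks only on the FIRST assignment after startup; later rebalances keep
    // the committed positions, which is why the AtomicBoolean guard matters.
    public class ResetOnFirstAssignListener implements ConsumerRebalanceListener {

        private final AtomicBoolean initialAssignment = new AtomicBoolean(true);
        private final Consumer<?, ?> consumer;
        private final boolean earliest;

        public ResetOnFirstAssignListener(Consumer<?, ?> consumer, boolean earliest) {
            this.consumer = consumer;
            this.earliest = earliest;
        }

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> tps) {
            // no op
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> tps) {
            if (this.initialAssignment.getAndSet(false)) {
                if (this.earliest) {
                    this.consumer.seekToBeginning(tps);
                }
                else {
                    this.consumer.seekToEnd(tps);
                }
            }
        }
    }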
@@ -534,7 +605,8 @@ public class KafkaMessageChannelBinder extends
			DlqSender<?,?> dlqSender = new DlqSender(kafkaTemplate, dlqName);

			return message -> {
-				final ConsumerRecord<?, ?> record = message.getHeaders()
+				@SuppressWarnings("unchecked")
+				final ConsumerRecord<Object, Object> record = message.getHeaders()
						.get(KafkaHeaders.RAW_DATA, ConsumerRecord.class);

				if (properties.isUseNativeDecoding()) {
@@ -558,16 +630,40 @@ public class KafkaMessageChannelBinder extends
					return;
				}
				Headers kafkaHeaders = new RecordHeaders(record.headers().toArray());
-				kafkaHeaders.add(new RecordHeader(X_ORIGINAL_TOPIC,
-						record.topic().getBytes(StandardCharsets.UTF_8)));
+				AtomicReference<ConsumerRecord<?, ?>> recordToSend = new AtomicReference<>(record);
				if (message.getPayload() instanceof Throwable) {
					Throwable throwable = (Throwable) message.getPayload();
-					kafkaHeaders.add(new RecordHeader(X_EXCEPTION_MESSAGE,
-							throwable.getMessage().getBytes(StandardCharsets.UTF_8)));
-					kafkaHeaders.add(new RecordHeader(X_EXCEPTION_STACKTRACE,
-							getStackTraceAsString(throwable).getBytes(StandardCharsets.UTF_8)));
+					HeaderMode headerMode = properties.getHeaderMode();
+					if (headerMode == null || HeaderMode.headers.equals(headerMode)) {
+						kafkaHeaders.add(new RecordHeader(X_ORIGINAL_TOPIC,
+								record.topic().getBytes(StandardCharsets.UTF_8)));
+						kafkaHeaders.add(new RecordHeader(X_EXCEPTION_MESSAGE,
+								throwable.getMessage().getBytes(StandardCharsets.UTF_8)));
+						kafkaHeaders.add(new RecordHeader(X_EXCEPTION_STACKTRACE,
+								getStackTraceAsString(throwable).getBytes(StandardCharsets.UTF_8)));
+					}
+					else if (HeaderMode.embeddedHeaders.equals(headerMode)) {
+						try {
+							MessageValues messageValues = EmbeddedHeaderUtils
+									.extractHeaders(MessageBuilder.withPayload((byte[]) record.value()).build(),
+											false);
+							messageValues.put(X_ORIGINAL_TOPIC, record.topic());
+							messageValues.put(X_EXCEPTION_MESSAGE, throwable.getMessage());
+							messageValues.put(X_EXCEPTION_STACKTRACE, getStackTraceAsString(throwable));

+							final String[] headersToEmbed = new ArrayList<>(messageValues.keySet()).toArray(
+									new String[messageValues.keySet().size()]);
+							byte[] payload = EmbeddedHeaderUtils.embedHeaders(messageValues,
+									EmbeddedHeaderUtils.headersToEmbed(headersToEmbed));
+							recordToSend.set(new ConsumerRecord<Object, Object>(record.topic(), record.partition(),
+									record.offset(), record.key(), payload));
+						}
+						catch (Exception e) {
+							throw new RuntimeException(e);
+						}
+					}
+				}
-				dlqSender.sendToDlq(record, kafkaHeaders);
+				dlqSender.sendToDlq(recordToSend.get(), kafkaHeaders);
			};
		}
		return null;
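With enableDlq=true, failed records are republished with diagnostic headers; the hunk above makes that handling follow the binding's headerMode, embedding the diagnostics into the payload for embeddedHeaders and skipping them entirely for none. A small sketch of reading the native-header variant on the consuming side; the literal header names here (x-original-topic, x-exception-message) are what the X_* constants are assumed to carry:

    import java.nio.charset.StandardCharsets;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.common.header.Header;

    // Reading the binder's DLQ diagnostics from a record received on the
    // error.<destination>.<group> topic.
    public class DlqHeaders {

        public static String originalTopic(ConsumerRecord<?, ?> record) {
            Header header = record.headers().lastHeader("x-original-topic");
            return header == null ? null : new String(header.value(), StandardCharsets.UTF_8);
        }

        public static String exceptionMessage(ConsumerRecord<?, ?> record) {
            Header header = record.headers().lastHeader("x-exception-message");
            return header == null ? null : new String(header.value(), StandardCharsets.UTF_8);
        }
    }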
@@ -617,7 +713,7 @@ public class KafkaMessageChannelBinder extends
|
||||
}
|
||||
}
|
||||
|
||||
private ConsumerFactory<?, ?> createKafkaConsumerFactory(boolean anonymous, String consumerGroup,
|
||||
protected ConsumerFactory<?, ?> createKafkaConsumerFactory(boolean anonymous, String consumerGroup,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
|
||||
|
||||
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2017 the original author or authors.
+ * Copyright 2015-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -17,40 +17,31 @@
package org.springframework.cloud.stream.binder.kafka.config;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.MeterBinder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.lang.Nullable;
import org.springframework.util.ObjectUtils;

/**
 * @author David Turanski
@@ -61,24 +52,20 @@ import org.springframework.util.ObjectUtils;
 * @author Henryk Konsek
 * @author Gary Russell
 * @author Oleg Zhurakousky
 * @author Artem Bilan
 */
@Configuration
@ConditionalOnMissingBean(Binder.class)
-@Import({ PropertyPlaceholderAutoConfiguration.class})
+@Import({KafkaAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class, KafkaBinderHealthIndicatorConfiguration.class })
@EnableConfigurationProperties({ KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {

	protected static final Log logger = LogFactory.getLog(KafkaBinderConfiguration.class);

	@Autowired
	private KafkaExtendedBindingProperties kafkaExtendedBindingProperties;

	@Autowired
	private ProducerListener producerListener;

	@Autowired
	private ApplicationContext context;

	@Autowired
	private KafkaProperties kafkaProperties;

@@ -94,7 +81,8 @@ public class KafkaBinderConfiguration {

	@Bean
	KafkaMessageChannelBinder kafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
-			KafkaTopicProvisioner provisioningProvider) {
+											KafkaTopicProvisioner provisioningProvider) {

		KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
				configurationProperties, provisioningProvider);
		kafkaMessageChannelBinder.setProducerListener(producerListener);
@@ -108,41 +96,37 @@ public class KafkaBinderConfiguration {
		return new LoggingProducerListener();
	}

-	@Bean
-	KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder,
-			KafkaBinderConfigurationProperties configurationProperties) {
-		Map<String, Object> props = new HashMap<>();
-		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
-		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
-		if (!ObjectUtils.isEmpty(configurationProperties.getConsumerConfiguration())) {
-			props.putAll(configurationProperties.getConsumerConfiguration());
-		}
-		if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
-			props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
-		}
-		ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
-		KafkaBinderHealthIndicator indicator = new KafkaBinderHealthIndicator(kafkaMessageChannelBinder,
-				consumerFactory);
-		indicator.setTimeout(configurationProperties.getHealthTimeout());
-		return indicator;
-	}

-	@Bean
-	public MeterBinder kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder,
-			KafkaBinderConfigurationProperties configurationProperties,
-			@Nullable MeterRegistry meterRegistry) {
-		return new KafkaBinderMetrics(kafkaMessageChannelBinder, configurationProperties, null, meterRegistry);
-	}

	@Bean
	public KafkaJaasLoginModuleInitializer jaasInitializer() throws IOException {
		return new KafkaJaasLoginModuleInitializer();
	}

+	/**
+	 * A conditional configuration for the {@link KafkaBinderMetrics} bean, applied when
+	 * the {@link MeterRegistry} class is on the classpath and a {@link MeterRegistry}
+	 * bean is present in the application context.
+	 */
+	@Configuration
+	@ConditionalOnClass(MeterRegistry.class)
+	@ConditionalOnBean(MeterRegistry.class)
+	protected class KafkaBinderMetricsConfiguration {
+
+		@Bean
+		@ConditionalOnMissingBean(KafkaBinderMetrics.class)
+		public MeterBinder kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder,
+				KafkaBinderConfigurationProperties configurationProperties,
+				MeterRegistry meterRegistry) {
+
+			return new KafkaBinderMetrics(kafkaMessageChannelBinder, configurationProperties, null, meterRegistry);
+		}
+
+	}

	public static class JaasConfigurationProperties {

		private JaasLoginModuleConfiguration kafka;

		private JaasLoginModuleConfiguration zookeeper;
	}

}

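The nested KafkaBinderMetricsConfiguration replaces the earlier @Nullable injection: metrics are now registered only when micrometer-core is on the classpath and a MeterRegistry bean exists, so applications without Micrometer start cleanly. The same double guard in a generic sketch (MyFeatureMetrics and its gauge are illustrative, not binder code):

    import io.micrometer.core.instrument.MeterRegistry;

    import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
    import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
    import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;

    // @ConditionalOnClass keeps this configuration from loading at all when
    // micrometer-core is absent; @ConditionalOnBean additionally waits for a
    // registry bean, mirroring the binder's nested configuration above.
    @Configuration
    @ConditionalOnClass(MeterRegistry.class)
    @ConditionalOnBean(MeterRegistry.class)
    class MyFeatureMetricsConfiguration {

        @Bean
        @ConditionalOnMissingBean(MyFeatureMetrics.class)
        public MyFeatureMetrics myFeatureMetrics(MeterRegistry registry) {
            return new MyFeatureMetrics(registry);
        }

        // Hypothetical metrics component for the sketch.
        static class MyFeatureMetrics {

            MyFeatureMetrics(MeterRegistry registry) {
                registry.gauge("myfeature.active", 1);
            }
        }
    }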
@@ -0,0 +1,63 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.util.ObjectUtils;

/**
 * @author Oleg Zhurakousky
 */
@Configuration
@ConditionalOnClass(name = "org.springframework.boot.actuate.health.HealthIndicator")
class KafkaBinderHealthIndicatorConfiguration {

	@Bean
	KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder,
			KafkaBinderConfigurationProperties configurationProperties) {
		Map<String, Object> props = new HashMap<>();
		props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
		props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
		if (!ObjectUtils.isEmpty(configurationProperties.getConsumerConfiguration())) {
			props.putAll(configurationProperties.getConsumerConfiguration());
		}
		if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
			props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
		}
		ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
		KafkaBinderHealthIndicator indicator = new KafkaBinderHealthIndicator(kafkaMessageChannelBinder,
				consumerFactory);
		indicator.setTimeout(configurationProperties.getHealthTimeout());
		return indicator;
	}
}
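Note the string form of @ConditionalOnClass here: naming HealthIndicator by its fully-qualified name rather than a class literal lets this configuration be evaluated even when spring-boot-actuator is absent from the classpath. The same idiom in isolation (OptionalFeatureConfiguration and its marker bean are illustrative):

    import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;

    // The class-name string avoids a hard dependency on the optional library;
    // the bean is created only when the named class can actually be loaded.
    @Configuration
    @ConditionalOnClass(name = "org.springframework.boot.actuate.health.HealthIndicator")
    class OptionalFeatureConfiguration {

        @Bean
        public String healthSupportMarker() { // trivial stand-in bean for the sketch
            return "health-support-enabled";
        }
    }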
@@ -0,0 +1,64 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.Arrays;

import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaAdminProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.config.BinderFactoryConfiguration;
import org.springframework.cloud.stream.config.BindingServiceConfiguration;
import org.springframework.integration.config.EnableIntegration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Gary Russell
 * @since 2.0
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes = {KafkaBinderConfiguration.class,
		BinderFactoryConfiguration.class,
		BindingServiceConfiguration.class })
@TestPropertySource(properties = {
		"spring.cloud.stream.kafka.bindings.input.consumer.admin.replication-factor=2",
		"spring.cloud.stream.kafka.bindings.input.consumer.admin.replicas-assignments.0=0,1",
		"spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0" })
@EnableIntegration
public class AdminConfigTests {

	@Autowired
	private KafkaMessageChannelBinder binder;

	@Test
	public void testProps() {
		KafkaConsumerProperties consumerProps = this.binder.getExtendedConsumerProperties("input");
		KafkaAdminProperties admin = consumerProps.getAdmin();
		assertThat(admin.getReplicationFactor()).isEqualTo((short) 2);
		assertThat(admin.getReplicasAssignments().get(0)).isEqualTo(Arrays.asList(0, 1));
		assertThat(admin.getConfiguration().get("message.format.version")).isEqualTo("0.9.0.0");
	}
}
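AdminConfigTests pins down how the relaxed property keys bind onto the new per-binding admin settings for topic provisioning. The equivalent values, built by hand in plain JDK types (only the getters exercised by the test are assumed):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class AdminPropsExample {

        public static void main(String[] args) {
            // Mirrors:
            // spring.cloud.stream.kafka.bindings.input.consumer.admin.replication-factor=2
            // spring.cloud.stream.kafka.bindings.input.consumer.admin.replicas-assignments.0=0,1
            // spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0
            short replicationFactor = 2;
            Map<Integer, List<Integer>> replicasAssignments =
                    Collections.singletonMap(0, Arrays.asList(0, 1)); // partition 0 -> brokers 0 and 1
            Map<String, String> topicConfiguration = new HashMap<>();
            topicConfiguration.put("message.format.version", "0.9.0.0");
            System.out.println(replicationFactor + " " + replicasAssignments + " " + topicConfiguration);
        }
    }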
@@ -1,5 +1,5 @@
/*
- * Copyright 2016-2017 the original author or authors.
+ * Copyright 2016-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -28,14 +28,12 @@ import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
-import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
@@ -43,15 +41,16 @@ import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

/**
 * @author Ilayaperumal Gopinathan
 */
@RunWith(SpringJUnit4ClassRunner.class)
-@SpringBootTest(classes = { KafkaBinderAutoConfigurationPropertiesTest.KafkaBinderConfigProperties.class,
-		KafkaBinderConfiguration.class })
+@SpringBootTest(classes = {KafkaBinderConfiguration.class })
@TestPropertySource(locations = "classpath:binder-config-autoconfig.properties")
public class KafkaBinderAutoConfigurationPropertiesTest {

@@ -77,11 +76,11 @@ public class KafkaBinderAutoConfigurationPropertiesTest {
		Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField,
				producerFactory);
-		assertTrue(producerConfigs.get("batch.size").equals(10));
-		assertTrue(producerConfigs.get("key.serializer").equals(LongSerializer.class));
-		assertTrue(producerConfigs.get("key.deserializer") == null);
-		assertTrue(producerConfigs.get("value.serializer").equals(LongSerializer.class));
-		assertTrue(producerConfigs.get("value.deserializer") == null);
-		assertTrue(producerConfigs.get("compression.type").equals("snappy"));
+		assertEquals(producerConfigs.get("key.serializer"), LongSerializer.class);
+		assertNull(producerConfigs.get("key.deserializer"));
+		assertEquals(producerConfigs.get("value.serializer"), LongSerializer.class);
+		assertNull(producerConfigs.get("value.deserializer"));
+		assertEquals("snappy", producerConfigs.get("compression.type"));
		List<String> bootstrapServers = new ArrayList<>();
		bootstrapServers.add("10.98.09.199:9092");
		bootstrapServers.add("10.98.09.196:9092");
@@ -98,12 +97,12 @@ public class KafkaBinderAutoConfigurationPropertiesTest {
		ReflectionUtils.makeAccessible(consumerFactoryConfigField);
		Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField,
				consumerFactory);
-		assertTrue(consumerConfigs.get("key.deserializer").equals(LongDeserializer.class));
-		assertTrue(consumerConfigs.get("key.serializer") == null);
-		assertTrue(consumerConfigs.get("value.deserializer").equals(LongDeserializer.class));
-		assertTrue(consumerConfigs.get("value.serialized") == null);
-		assertTrue(consumerConfigs.get("group.id").equals("groupIdFromBootConfig"));
-		assertTrue(consumerConfigs.get("auto.offset.reset").equals("earliest"));
+		assertEquals(consumerConfigs.get("key.deserializer"), LongDeserializer.class);
+		assertNull(consumerConfigs.get("key.serializer"));
+		assertEquals(consumerConfigs.get("value.deserializer"), LongDeserializer.class);
+		assertNull(consumerConfigs.get("value.serializer"));
+		assertEquals("groupIdFromBootConfig", consumerConfigs.get("group.id"));
+		assertEquals("earliest", consumerConfigs.get("auto.offset.reset"));
		assertTrue((((List<String>) consumerConfigs.get("bootstrap.servers")).containsAll(bootstrapServers)));
	}

@@ -124,13 +123,4 @@ public class KafkaBinderAutoConfigurationPropertiesTest {
		bootstrapServers.add("10.98.09.196:9092");
		assertTrue(((List<String>) configs.get("bootstrap.servers")).containsAll(bootstrapServers));
	}
-
-	public static class KafkaBinderConfigProperties {
-
-		@Bean
-		KafkaProperties kafkaProperties() {
-			return new KafkaProperties();
-		}
-
-	}
}

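The assertion rewrite is not just cosmetic: assertTrue(a.equals(b)) fails with a bare AssertionError, while assertEquals reports both values. Note that JUnit's convention is assertEquals(expected, actual); a few of the converted lines still pass the class literal second, which only affects the failure message, not the result. For example:

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class AssertMessageExample {

        @Test
        public void reportsBothValuesOnFailure() {
            String actual = "gzip";
            // assertTrue(actual.equals("snappy")) would fail with just "AssertionError";
            // assertEquals("snappy", actual) would fail with: expected:<snappy> but was:<gzip>
            assertEquals("gzip", actual); // passes; swap the literal to see the message
        }
    }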
@@ -41,6 +41,7 @@ import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

@@ -75,11 +76,11 @@ public class KafkaBinderConfigurationPropertiesTest {
		ReflectionUtils.makeAccessible(producerFactoryConfigField);
		Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField,
				producerFactory);
-		assertTrue(producerConfigs.get("batch.size").equals("12345"));
-		assertTrue(producerConfigs.get("linger.ms").equals("100"));
-		assertTrue(producerConfigs.get("key.serializer").equals(ByteArraySerializer.class));
-		assertTrue(producerConfigs.get("value.serializer").equals(ByteArraySerializer.class));
-		assertTrue(producerConfigs.get("compression.type").equals("gzip"));
+		assertEquals("12345", producerConfigs.get("batch.size"));
+		assertEquals("100", producerConfigs.get("linger.ms"));
+		assertEquals(producerConfigs.get("key.serializer"), ByteArraySerializer.class);
+		assertEquals(producerConfigs.get("value.serializer"), ByteArraySerializer.class);
+		assertEquals("gzip", producerConfigs.get("compression.type"));
		List<String> bootstrapServers = new ArrayList<>();
		bootstrapServers.add("10.98.09.199:9082");
		assertTrue((((String) producerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
@@ -95,8 +96,8 @@ public class KafkaBinderConfigurationPropertiesTest {
		ReflectionUtils.makeAccessible(consumerFactoryConfigField);
		Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField,
				consumerFactory);
-		assertTrue(consumerConfigs.get("key.deserializer").equals(ByteArrayDeserializer.class));
-		assertTrue(consumerConfigs.get("value.deserializer").equals(ByteArrayDeserializer.class));
+		assertEquals(consumerConfigs.get("key.deserializer"), ByteArrayDeserializer.class);
+		assertEquals(consumerConfigs.get("value.deserializer"), ByteArrayDeserializer.class);
		assertTrue((((String) consumerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
	}

@@ -20,18 +20,22 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;

import io.micrometer.core.instrument.MeterRegistry;
-import io.micrometer.core.instrument.search.Search;
+import io.micrometer.core.instrument.TimeGauge;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.ArgumentMatchers;
import org.mockito.Mock;
+import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;

import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder.TopicInformation;
@@ -42,6 +46,7 @@ import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Henryk Konsek
+ * @author Thomas Cheyney
 */
public class KafkaBinderMetricsTest {

@@ -71,20 +76,20 @@ public class KafkaBinderMetricsTest {
		org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willReturn(consumer);
		org.mockito.BDDMockito.given(binder.getTopicsInUse()).willReturn(topicsInUse);
		metrics = new KafkaBinderMetrics(binder, kafkaBinderConfigurationProperties, consumerFactory, null);
-		org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class)))
+		org.mockito.BDDMockito.given(consumer.endOffsets(ArgumentMatchers.anyCollection()))
				.willReturn(java.util.Collections.singletonMap(new TopicPartition(TEST_TOPIC, 0), 1000L));
	}

	@Test
	public void shouldIndicateLag() {
-		org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
+		org.mockito.BDDMockito.given(consumer.committed(ArgumentMatchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
		List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
		org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
		metrics.bindTo(meterRegistry);
		assertThat(meterRegistry.getMeters()).hasSize(1);
-		Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
-		assertThat(group.gauge().value()).isEqualTo(500.0);
+		assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge()
+				.value(TimeUnit.MILLISECONDS)).isEqualTo(500.0);
	}

	@Test
@@ -92,15 +97,15 @@ public class KafkaBinderMetricsTest {
		Map<TopicPartition, Long> endOffsets = new HashMap<>();
		endOffsets.put(new TopicPartition(TEST_TOPIC, 0), 1000L);
		endOffsets.put(new TopicPartition(TEST_TOPIC, 1), 1000L);
-		org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class))).willReturn(endOffsets);
-		org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
+		org.mockito.BDDMockito.given(consumer.endOffsets(ArgumentMatchers.anyCollection())).willReturn(endOffsets);
+		org.mockito.BDDMockito.given(consumer.committed(ArgumentMatchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
		List<PartitionInfo> partitions = partitions(new Node(0, null, 0), new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
		org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
		metrics.bindTo(meterRegistry);
		assertThat(meterRegistry.getMeters()).hasSize(1);
-		Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
-		assertThat(group.gauge().value()).isEqualTo(1000.0);
+		assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge()
+				.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);
	}

	@Test
@@ -110,8 +115,8 @@ public class KafkaBinderMetricsTest {
		org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
		metrics.bindTo(meterRegistry);
		assertThat(meterRegistry.getMeters()).hasSize(1);
-		Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
-		assertThat(group.gauge().value()).isEqualTo(1000.0);
+		assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge()
+				.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);
	}

	@Test
@@ -122,6 +127,37 @@ public class KafkaBinderMetricsTest {
		assertThat(meterRegistry.getMeters()).isEmpty();
	}

	@Test
	public void createsConsumerOnceWhenInvokedMultipleTimes() {
		final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));

		metrics.bindTo(meterRegistry);

		TimeGauge gauge = meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge();
		gauge.value(TimeUnit.MILLISECONDS);
		assertThat(gauge.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);

		org.mockito.Mockito.verify(this.consumerFactory).createConsumer();
	}

	@Test
	public void consumerCreationFailsFirstTime() {
		org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willThrow(KafkaException.class)
				.willReturn(consumer);

		final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));

		metrics.bindTo(meterRegistry);

		TimeGauge gauge = meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge();
		assertThat(gauge.value(TimeUnit.MILLISECONDS)).isEqualTo(0);
		assertThat(gauge.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);

		org.mockito.Mockito.verify(this.consumerFactory, Mockito.times(2)).createConsumer();
	}

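These tests rely on the metric now being a Micrometer TimeGauge that samples lag lazily on each read, so the backing Consumer is created at most once and a failed creation is simply retried on the next sample. A hedged, standalone sketch of registering such a read-time gauge (the AtomicLong stands in for the binder's consumer-based lag calculation):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;

    import io.micrometer.core.instrument.TimeGauge;
    import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

    public class LazyLagGauge {

        public static void main(String[] args) {
            SimpleMeterRegistry registry = new SimpleMeterRegistry();
            AtomicLong lag = new AtomicLong(1000);
            // The function runs on every read of the gauge, not at registration time.
            TimeGauge.builder("spring.cloud.stream.binder.kafka.offset", lag,
                    TimeUnit.MILLISECONDS, AtomicLong::doubleValue)
                    .tag("group", "group")
                    .tag("topic", "test-topic")
                    .register(registry);
            double value = registry.get("spring.cloud.stream.binder.kafka.offset")
                    .timeGauge().value(TimeUnit.MILLISECONDS);
            System.out.println(value); // 1000.0
        }
    }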
	private List<PartitionInfo> partitions(Node... nodes) {
		List<PartitionInfo> partitions = new ArrayList<>();
		for (int i = 0; i < nodes.length; i++) {

@@ -100,6 +100,8 @@ import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
+import org.springframework.kafka.listener.AbstractMessageListenerContainer;
+import org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
@@ -571,20 +573,30 @@ public class KafkaBinderTests extends

	@Test
	public void testDlqAndRetry() throws Exception {
-		testDlqGuts(true);
+		testDlqGuts(true, null);
	}

	@Test
	public void testDlq() throws Exception {
-		testDlqGuts(false);
+		testDlqGuts(false, null);
	}

-	@SuppressWarnings("unchecked")
-	private void testDlqGuts(boolean withRetry) throws Exception {
+	@Test
+	public void testDlqNone() throws Exception {
+		testDlqGuts(false, HeaderMode.none);
+	}
+
+	@Test
+	public void testDlqEmbedded() throws Exception {
+		testDlqGuts(false, HeaderMode.embeddedHeaders);
+	}
+
+	private void testDlqGuts(boolean withRetry, HeaderMode headerMode) throws Exception {
		AbstractKafkaTestBinder binder = getBinder();

		ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
		producerProperties.getExtension().setHeaderPatterns(new String[]{MessageHeaders.CONTENT_TYPE});
+		producerProperties.setHeaderMode(headerMode);

		DirectChannel moduleOutputChannel = createBindableChannel("output",
				createProducerBindingProperties(producerProperties));
@@ -595,6 +607,7 @@ public class KafkaBinderTests extends
		consumerProperties.setBackOffMaxInterval(150);
		consumerProperties.getExtension().setEnableDlq(true);
		consumerProperties.getExtension().setAutoRebalanceEnabled(false);
+		consumerProperties.setHeaderMode(headerMode);

		DirectChannel moduleInputChannel = createBindableChannel("input", createConsumerBindingProperties(consumerProperties));

@@ -612,6 +625,7 @@ public class KafkaBinderTests extends

		ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
		dlqConsumerProperties.setMaxAttempts(1);
+		dlqConsumerProperties.setHeaderMode(headerMode);

		ApplicationContext context = TestUtils.getPropertyValue(binder.getBinder(), "applicationContext",
				ApplicationContext.class);
@@ -638,13 +652,27 @@ public class KafkaBinderTests extends
		Message<?> receivedMessage = receive(dlqChannel, 3);
		assertThat(receivedMessage).isNotNull();
		assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload.getBytes());
-		assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
-		assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC))
-				.isEqualTo(producerName.getBytes(StandardCharsets.UTF_8));
-		assertThat(new String((byte[]) receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_MESSAGE)))
-				.startsWith("failed to send Message to channel 'input'");
-		assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_STACKTRACE))
-				.isNotNull();
+		if (HeaderMode.embeddedHeaders.equals(headerMode)) {
+			assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
+			assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC))
+					.isEqualTo(producerName);
+			assertThat(((String) receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_MESSAGE)))
+					.startsWith("failed to send Message to channel 'input'");
+			assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_STACKTRACE))
+					.isNotNull();
+		}
+		else if (!HeaderMode.none.equals(headerMode)) {
+			assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
+			assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC))
+					.isEqualTo(producerName.getBytes(StandardCharsets.UTF_8));
+			assertThat(new String((byte[]) receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_MESSAGE)))
+					.startsWith("failed to send Message to channel 'input'");
+			assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_STACKTRACE))
+					.isNotNull();
+		}
+		else {
+			assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC)).isNull();
+		}
		binderBindUnbindLatency();

		// verify we got a message on the dedicated error channel and the global (via bridge)
@@ -1095,6 +1123,11 @@ public class KafkaBinderTests extends
				"testManualAckIsNotPossibleWhenAutoCommitOffsetIsEnabledOnTheBinder", "test", moduleInputChannel,
				consumerProperties);

+		AbstractMessageListenerContainer<?, ?> container = TestUtils.getPropertyValue(consumerBinding,
+				"lifecycle.messageListenerContainer", AbstractMessageListenerContainer.class);
+		assertThat(container.getContainerProperties().getAckMode()).isEqualTo(AckMode.BATCH);
+
		String testPayload1 = "foo" + UUID.randomUUID().toString();
		Message<?> message1 = org.springframework.integration.support.MessageBuilder.withPayload(
				testPayload1.getBytes()).build();
@@ -1131,12 +1164,17 @@ public class KafkaBinderTests extends
		QueueChannel inbound1 = new QueueChannel();
		ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
		consumerProperties.getExtension().setAutoRebalanceEnabled(false);
+		consumerProperties.getExtension().setAckEachRecord(true);
		Binding<MessageChannel> consumerBinding1 = binder.bindConsumer(testDestination, "test1", inbound1,
				consumerProperties);
		QueueChannel inbound2 = new QueueChannel();
		Binding<MessageChannel> consumerBinding2 = binder.bindConsumer(testDestination, "test2", inbound2,
				consumerProperties);

+		AbstractMessageListenerContainer<?, ?> container = TestUtils.getPropertyValue(consumerBinding2,
+				"lifecycle.messageListenerContainer", AbstractMessageListenerContainer.class);
+		assertThat(container.getContainerProperties().getAckMode()).isEqualTo(AckMode.RECORD);
+
		Message<?> receivedMessage1 = receive(inbound1);
		assertThat(receivedMessage1).isNotNull();
		assertThat(new String((byte[]) receivedMessage1.getPayload(), StandardCharsets.UTF_8)).isEqualTo(testPayload);

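The two new container assertions pin the binder's acknowledgement mapping: autoCommitOffset=true keeps the default BATCH mode (or RECORD when ackEachRecord=true), and autoCommitOffset=false switches the container to MANUAL. The decision logic from the earlier KafkaMessageChannelBinder hunk, mirrored as a standalone sketch:

    public class AckModeMapping {

        enum AckMode { BATCH, RECORD, MANUAL }

        // Mirrors the binder logic shown earlier: manual ack wins, then per-record.
        static AckMode resolve(boolean autoCommitOffset, boolean ackEachRecord) {
            if (!autoCommitOffset) {
                return AckMode.MANUAL; // user acks via the Acknowledgment header
            }
            return ackEachRecord ? AckMode.RECORD : AckMode.BATCH;
        }

        public static void main(String[] args) {
            System.out.println(resolve(true, false));  // BATCH
            System.out.println(resolve(true, true));   // RECORD
            System.out.println(resolve(false, false)); // MANUAL
        }
    }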
@@ -17,10 +17,21 @@
package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;

+import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
@@ -28,9 +39,24 @@ import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
+import org.springframework.cloud.stream.provisioning.ConsumerDestination;
+import org.springframework.context.support.GenericApplicationContext;
+import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.test.util.TestUtils;
+import org.springframework.kafka.core.ConsumerFactory;
+import org.springframework.messaging.MessageChannel;

import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.BDDMockito.willAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;

/**
 * @author Gary Russell
@@ -76,4 +102,137 @@ public class KafkaBinderUnitTests {
		assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("earliest");
	}

	@Test
	public void testOffsetResetWithGroupManagementEarliest() throws Exception {
		testOffsetResetWithGroupManagement(true, true);
	}

	@Test
	public void testOffsetResetWithGroupManagementLatest() throws Throwable {
		testOffsetResetWithGroupManagement(false, true);
	}

	@Test
	public void testOffsetResetWithManualAssignmentEarliest() throws Exception {
		testOffsetResetWithGroupManagement(true, false);
	}

	@Test
	public void testOffsetResetWithGroupManualAssignmentLatest() throws Throwable {
		testOffsetResetWithGroupManagement(false, false);
	}

	private void testOffsetResetWithGroupManagement(final boolean earliest, boolean groupManage) throws Exception {
		final List<TopicPartition> partitions = new ArrayList<>();
		partitions.add(new TopicPartition("foo", 0));
		partitions.add(new TopicPartition("foo", 1));
		KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties();
		KafkaTopicProvisioner provisioningProvider = mock(KafkaTopicProvisioner.class);
		ConsumerDestination dest = mock(ConsumerDestination.class);
		given(dest.getName()).willReturn("foo");
		given(provisioningProvider.provisionConsumerDestination(anyString(), anyString(), any())).willReturn(dest);
		final AtomicInteger part = new AtomicInteger();
		willAnswer(i -> {
			return partitions.stream()
					.map(p -> new PartitionInfo("foo", part.getAndIncrement(), null, null, null))
					.collect(Collectors.toList());
		}).given(provisioningProvider).getPartitionsForTopic(anyInt(), anyBoolean(), any());
		@SuppressWarnings("unchecked")
		final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
		final CountDownLatch latch = new CountDownLatch(2);
		willAnswer(i -> {
			try {
				Thread.sleep(100);
			}
			catch (InterruptedException e) {
				Thread.currentThread().interrupt();
			}
			return new ConsumerRecords<>(Collections.emptyMap());
		}).given(consumer).poll(anyLong());
		willAnswer(i -> {
			((org.apache.kafka.clients.consumer.ConsumerRebalanceListener) i.getArgument(1))
					.onPartitionsAssigned(partitions);
			latch.countDown();
			latch.countDown();
			return null;
		}).given(consumer).subscribe(eq(Collections.singletonList("foo")),
				any(org.apache.kafka.clients.consumer.ConsumerRebalanceListener.class));
		willAnswer(i -> {
			latch.countDown();
			return null;
		}).given(consumer).seek(any(), anyLong());
		KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties, provisioningProvider) {

			@Override
			protected ConsumerFactory<?, ?> createKafkaConsumerFactory(boolean anonymous, String consumerGroup,
					ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {

				return new ConsumerFactory<byte[], byte[]>() {

					@Override
					public Consumer<byte[], byte[]> createConsumer() {
						return consumer;
					}

					@Override
					public Consumer<byte[], byte[]> createConsumer(String arg0) {
						return consumer;
					}

					@Override
					public Consumer<byte[], byte[]> createConsumer(String arg0, String arg1) {
						return consumer;
					}

					@Override
					public boolean isAutoCommit() {
						return false;
					}

					@Override
					public Map<String, Object> getConfigurationProperties() {
						Map<String, Object> props = new HashMap<>();
						props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
								earliest ? "earliest" : "latest");
						props.put(ConsumerConfig.GROUP_ID_CONFIG, "bar");
						return props;
					}

				};
			}

		};
		GenericApplicationContext context = new GenericApplicationContext();
		context.refresh();
		binder.setApplicationContext(context);
		MessageChannel channel = new DirectChannel();
		KafkaConsumerProperties extension = new KafkaConsumerProperties();
		extension.setResetOffsets(true);
		extension.setAutoRebalanceEnabled(groupManage);
		ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = new ExtendedConsumerProperties<KafkaConsumerProperties>(
				extension);
		consumerProperties.setInstanceCount(1);
		binder.bindConsumer("foo", "bar", channel, consumerProperties);
		assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
		if (groupManage) {
			if (earliest) {
				verify(consumer).seekToBeginning(partitions);
			}
			else {
				verify(consumer).seekToEnd(partitions);
			}
		}
		else {
			if (earliest) {
				verify(consumer).seek(partitions.get(0), 0L);
				verify(consumer).seek(partitions.get(1), 0L);
			}
			else {
				verify(consumer).seek(partitions.get(0), Long.MAX_VALUE);
				verify(consumer).seek(partitions.get(1), Long.MAX_VALUE);
			}
		}
	}

}

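The unit test never touches a broker: the ConsumerFactory is faked and Mockito's willAnswer fires the registered rebalance listener synchronously inside subscribe(), after which the seeks can simply be verified. The core stubbing trick, reduced to a self-contained example (Subscriber is an illustrative interface, not binder API):

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.BDDMockito.willAnswer;
    import static org.mockito.Mockito.mock;

    import java.util.Collections;
    import java.util.List;
    import java.util.function.Consumer;

    // When code under test registers a callback with a mocked collaborator,
    // willAnswer() can invoke that callback immediately, so the test observes
    // the resulting behavior without any real infrastructure.
    public class CallbackStubExample {

        interface Subscriber {
            void subscribe(List<String> topics, Consumer<List<String>> onAssigned);
        }

        public static void main(String[] args) {
            Subscriber subscriber = mock(Subscriber.class);
            willAnswer(invocation -> {
                Consumer<List<String>> callback = invocation.getArgument(1);
                callback.accept(invocation.getArgument(0)); // fire "assignment" now
                return null;
            }).given(subscriber).subscribe(any(), any());

            subscriber.subscribe(Collections.singletonList("foo"),
                    topics -> System.out.println("assigned: " + topics));
        }
    }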
@@ -19,6 +19,7 @@ package org.springframework.cloud.stream.binder.kafka.bootstrap;
import org.junit.ClassRule;
import org.junit.Test;

+import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
@@ -35,7 +36,7 @@ public class KafkaBinderBootstrapTest {
	@Test
	public void testKafkaBinderConfiguration() throws Exception {
		ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
-				.web(false)
+				.web(WebApplicationType.NONE)
				.run("--spring.cloud.stream.kafka.binder.brokers=" + embeddedKafka.getBrokersAsString(),
						"--spring.cloud.stream.kafka.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
		applicationContext.close();

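This is the Spring Boot 2.0 migration: SpringApplicationBuilder.web(boolean) was removed in favor of the WebApplicationType enum. The equivalent non-web bootstrap in isolation (SimpleApplication stands in for any @SpringBootApplication class):

    import org.springframework.boot.WebApplicationType;
    import org.springframework.boot.autoconfigure.SpringBootApplication;
    import org.springframework.boot.builder.SpringApplicationBuilder;
    import org.springframework.context.ConfigurableApplicationContext;

    @SpringBootApplication
    public class SimpleApplication {

        public static void main(String[] args) {
            // NONE disables the embedded web server entirely (the Boot 2.x
            // replacement for the removed web(boolean) method).
            ConfigurableApplicationContext context = new SpringApplicationBuilder(SimpleApplication.class)
                    .web(WebApplicationType.NONE)
                    .run(args);
            context.close();
        }
    }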
@@ -17,7 +17,7 @@
package org.springframework.cloud.stream.binder.kafka.integration;

import io.micrometer.core.instrument.MeterRegistry;
-import io.micrometer.core.instrument.search.Search;
+import io.micrometer.core.instrument.binder.MeterBinder;

import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -27,7 +27,9 @@ import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
+import org.springframework.boot.test.context.FilteredClassLoader;
import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.messaging.Sink;
@@ -40,12 +42,12 @@ import static org.assertj.core.api.Assertions.assertThat;
/**
 * @author Artem Bilan
 * @author Oleg Zhurakousky
+ * @author Jon Schneider
 *
 * @since 2.0
 */
@RunWith(SpringRunner.class)
-@SpringBootTest(
-		webEnvironment = SpringBootTest.WebEnvironment.NONE,
+@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE,
		properties = "spring.cloud.stream.bindings.input.group=" + KafkaBinderActuatorTests.TEST_CONSUMER_GROUP)
public class KafkaBinderActuatorTests {

@@ -74,15 +76,24 @@ public class KafkaBinderActuatorTests {

	@Test
	public void testKafkaBinderMetricsExposed() {
-		Search search = this.meterRegistry.find(
-				String.format("%s.%s.%s.lag", "spring.cloud.stream.binder.kafka", TEST_CONSUMER_GROUP, Sink.INPUT));
-
-		assertThat(search.gauge()).isNotNull();
-
		this.kafkaTemplate.send(Sink.INPUT, null, "foo".getBytes());
		this.kafkaTemplate.flush();

-		assertThat(search.gauge().value()).isGreaterThan(0);
+		assertThat(this.meterRegistry.get("spring.cloud.stream.binder.kafka.offset")
+				.tag("group", TEST_CONSUMER_GROUP)
+				.tag("topic", Sink.INPUT)
+				.timeGauge().value()).isGreaterThan(0);
	}

	@Test
	public void testKafkaBinderMetricsWhenNoMicrometer() {
		new ApplicationContextRunner()
				.withUserConfiguration(KafkaMetricsTestConfig.class)
				.withClassLoader(new FilteredClassLoader("io.micrometer.core"))
				.run(context -> {
					assertThat(context.getBeanNamesForType(MeterRegistry.class)).isEmpty();
					assertThat(context.getBeanNamesForType(MeterBinder.class)).isEmpty();
				});
	}

	@EnableBinding(Sink.class)
