Compare commits
40 Commits
v2.0.1.REL...v2.1.0.M2
| SHA1 |
|---|
| de4b5a9443 |
| a090614709 |
| a3f7ca9756 |
| 82b07a0120 |
| 0d0cf8dcb7 |
| cbfe03be2f |
| 1f0c6cabc6 |
| 0c61cedc85 |
| 44210f1b72 |
| 7007f9494a |
| da268bb6dd |
| e03baefbaf |
| 01396b6573 |
| 3f009a8267 |
| 31bb86b002 |
| cc2dfd1d08 |
| fc768ba695 |
| 38d6deb4d5 |
| 2a0b9015de |
| 321919abc9 |
| b09def9ccc |
| 54d7c333d3 |
| 015b1a7fa1 |
| 020821f471 |
| 2e14ba99e3 |
| bd5cc2f89d |
| ca2983c881 |
| 1b00179f25 |
| f533177d21 |
| 990a5142ce |
| 13693e8e66 |
| 70cd7dc2f9 |
| 894597309b |
| 74689bf007 |
| d2012c287a |
| 109295464f |
| cf41a8a4eb |
| 77540a2027 |
| a2592698c9 |
| 5543b4ed1e |
pom.xml
@@ -2,20 +2,20 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 <modelVersion>4.0.0</modelVersion>
 <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-<version>2.0.0.RELEASE</version>
+<version>2.1.0.M2</version>
 <packaging>pom</packaging>
 <parent>
 <groupId>org.springframework.cloud</groupId>
 <artifactId>spring-cloud-build</artifactId>
-<version>2.0.0.RELEASE</version>
+<version>2.1.0.M1</version>
 <relativePath />
 </parent>
 <properties>
 <java.version>1.8</java.version>
-<spring-kafka.version>2.1.5.RELEASE</spring-kafka.version>
-<spring-integration-kafka.version>3.0.3.RELEASE</spring-integration-kafka.version>
-<kafka.version>1.0.1</kafka.version>
-<spring-cloud-stream.version>2.0.0.RELEASE</spring-cloud-stream.version>
+<spring-kafka.version>2.2.0.M2</spring-kafka.version>
+<spring-integration-kafka.version>3.1.0.M1</spring-integration-kafka.version>
+<kafka.version>2.0.0</kafka.version>
+<spring-cloud-stream.version>2.1.0.M2</spring-cloud-stream.version>
 </properties>
 <modules>
 <module>spring-cloud-stream-binder-kafka</module>
@@ -47,6 +47,13 @@
 <artifactId>kafka-clients</artifactId>
 <version>${kafka.version}</version>
 </dependency>
+<dependency>
+<groupId>org.apache.kafka</groupId>
+<artifactId>kafka-clients</artifactId>
+<version>${kafka.version}</version>
+<classifier>test</classifier>
+<scope>test</scope>
+</dependency>
 <dependency>
 <groupId>org.springframework.kafka</groupId>
 <artifactId>spring-kafka</artifactId>
@@ -101,6 +108,27 @@
 </exclusion>
 </exclusions>
 </dependency>
+<dependency>
+<groupId>org.apache.kafka</groupId>
+<artifactId>kafka_2.11</artifactId>
+<classifier>test</classifier>
+<scope>test</scope>
+<version>${kafka.version}</version>
+<exclusions>
+<exclusion>
+<groupId>jline</groupId>
+<artifactId>jline</artifactId>
+</exclusion>
+<exclusion>
+<groupId>org.slf4j</groupId>
+<artifactId>slf4j-log4j12</artifactId>
+</exclusion>
+<exclusion>
+<groupId>log4j</groupId>
+<artifactId>log4j</artifactId>
+</exclusion>
+</exclusions>
+</dependency>
 </dependencies>
 </dependencyManagement>
@@ -4,7 +4,7 @@
 <parent>
 <groupId>org.springframework.cloud</groupId>
 <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-<version>2.0.0.RELEASE</version>
+<version>2.1.0.M2</version>
 </parent>
 <artifactId>spring-cloud-starter-stream-kafka</artifactId>
 <description>Spring Cloud Starter Stream Kafka</description>
@@ -5,7 +5,7 @@
 <parent>
 <groupId>org.springframework.cloud</groupId>
 <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-<version>2.0.0.RELEASE</version>
+<version>2.1.0.M2</version>
 </parent>
 <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
 <description>Spring Cloud Stream Kafka Binder Core</description>
@@ -24,10 +24,10 @@ import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
@@ -46,13 +46,25 @@ public class KafkaBinderConfigurationProperties {

    private final Transaction transaction = new Transaction();

-   @Autowired
-   private KafkaProperties kafkaProperties;
+   private final KafkaProperties kafkaProperties;

    private String[] zkNodes = new String[] { "localhost" };

    /**
     * Arbitrary kafka properties that apply to both producers and consumers.
     */
    private Map<String, String> configuration = new HashMap<>();

+   /**
+    * Arbitrary kafka consumer properties.
+    */
+   private Map<String, String> consumerProperties = new HashMap<>();
+
+   /**
+    * Arbitrary kafka producer properties.
+    */
+   private Map<String, String> producerProperties = new HashMap<>();
+
    private String defaultZkPort = "2181";

    private String[] brokers = new String[] { "localhost" };

@@ -107,6 +119,12 @@ public class KafkaBinderConfigurationProperties {
     */
    private String headerMapperBeanName;

+   public KafkaBinderConfigurationProperties(KafkaProperties kafkaProperties) {
+       Assert.notNull(kafkaProperties, "'kafkaProperties' cannot be null");
+       this.kafkaProperties = kafkaProperties;
+   }
+
    public Transaction getTransaction() {
        return this.transaction;
    }
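For orientation, constructing the properties object now follows the pattern used in the updated tests further down in this diff; a minimal sketch (the bootstrap server value is illustrative, imports omitted):

[source,java]
----
KafkaProperties bootConfig = new KafkaProperties();
bootConfig.setBootstrapServers(Collections.singletonList("localhost:9092"));
// KafkaProperties is now a required constructor argument instead of an @Autowired field
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
----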
@@ -461,18 +479,40 @@ public class KafkaBinderConfigurationProperties {
        this.configuration = configuration;
    }

-   public Map<String, Object> getConsumerConfiguration() {
+   public Map<String, String> getConsumerProperties() {
+       return this.consumerProperties;
+   }
+
+   public void setConsumerProperties(Map<String, String> consumerProperties) {
+       Assert.notNull(consumerProperties, "'consumerProperties' cannot be null");
+       this.consumerProperties = consumerProperties;
+   }
+
+   public Map<String, String> getProducerProperties() {
+       return this.producerProperties;
+   }
+
+   public void setProducerProperties(Map<String, String> producerProperties) {
+       Assert.notNull(producerProperties, "'producerProperties' cannot be null");
+       this.producerProperties = producerProperties;
+   }
+
+   /**
+    * Merge boot consumer properties, general properties from
+    * {@link #setConfiguration(Map)} that apply to consumers, properties from
+    * {@link #setConsumerProperties(Map)}, in that order.
+    * @return the merged properties.
+    */
+   public Map<String, Object> mergedConsumerConfiguration() {
        Map<String, Object> consumerConfiguration = new HashMap<>();
-       // If Spring Boot Kafka properties are present, add them with lowest precedence
-       if (this.kafkaProperties != null) {
-           consumerConfiguration.putAll(this.kafkaProperties.buildConsumerProperties());
-       }
-       // Copy configured binder properties
+       consumerConfiguration.putAll(this.kafkaProperties.buildConsumerProperties());
+       // Copy configured binder properties that apply to consumers
        for (Map.Entry<String, String> configurationEntry : this.configuration.entrySet()) {
            if (ConsumerConfig.configNames().contains(configurationEntry.getKey())) {
                consumerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
            }
        }
+       consumerConfiguration.putAll(this.consumerProperties);
        // Override Spring Boot bootstrap server setting if left to default with the value
        // configured in the binder
        if (ObjectUtils.isEmpty(consumerConfiguration.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
@@ -492,18 +532,22 @@ public class KafkaBinderConfigurationProperties {
        return Collections.unmodifiableMap(consumerConfiguration);
    }

-   public Map<String, Object> getProducerConfiguration() {
+   /**
+    * Merge boot producer properties, general properties from
+    * {@link #setConfiguration(Map)} that apply to producers, properties from
+    * {@link #setProducerProperties(Map)}, in that order.
+    * @return the merged properties.
+    */
+   public Map<String, Object> mergedProducerConfiguration() {
        Map<String, Object> producerConfiguration = new HashMap<>();
-       // If Spring Boot Kafka properties are present, add them with lowest precedence
-       if (this.kafkaProperties != null) {
-           producerConfiguration.putAll(this.kafkaProperties.buildProducerProperties());
-       }
-       // Copy configured binder properties
+       producerConfiguration.putAll(this.kafkaProperties.buildProducerProperties());
+       // Copy configured binder properties that apply to producers
        for (Map.Entry<String, String> configurationEntry : configuration.entrySet()) {
            if (ProducerConfig.configNames().contains(configurationEntry.getKey())) {
                producerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
            }
        }
+       producerConfiguration.putAll(this.producerProperties);
        // Override Spring Boot bootstrap server setting if left to default with the value
        // configured in the binder
        if (ObjectUtils.isEmpty(producerConfiguration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
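The two merge methods above give Spring Boot properties the lowest precedence, then the binder-level `configuration` map, then the new `consumerProperties`/`producerProperties` maps. A hypothetical application.properties sketch of that ordering (property values are illustrative):

[source]
----
# lowest precedence: Spring Boot Kafka properties
spring.kafka.consumer.max-poll-records=500
# overrides Boot for all clients created by the binder
spring.cloud.stream.kafka.binder.configuration.max.poll.records=1000
# highest precedence, consumers only
spring.cloud.stream.kafka.binder.consumerProperties.max.poll.records=2000
----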
@@ -81,6 +81,8 @@ public class KafkaConsumerProperties {

    private long idleEventInterval = 30_000;

+   private boolean destinationIsPattern;
+
    private Map<String, String> configuration = new HashMap<>();

    private KafkaAdminProperties admin = new KafkaAdminProperties();

@@ -216,6 +218,14 @@ public class KafkaConsumerProperties {
        this.idleEventInterval = idleEventInterval;
    }

+   public boolean isDestinationIsPattern() {
+       return this.destinationIsPattern;
+   }
+
+   public void setDestinationIsPattern(boolean destinationIsPattern) {
+       this.destinationIsPattern = destinationIsPattern;
+   }
+
    public KafkaAdminProperties getAdmin() {
        return this.admin;
    }
@@ -151,7 +151,30 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
    @Override
    public ConsumerDestination provisionConsumerDestination(final String name, final String group,
            ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
+       if (!properties.isMultiplex()) {
+           return doProvisionConsumerDestination(name, group, properties);
+       }
+       else {
+           String[] destinations = StringUtils.commaDelimitedListToStringArray(name);
+           for (String destination : destinations) {
+               doProvisionConsumerDestination(destination.trim(), group, properties);
+           }
+           return new KafkaConsumerDestination(name);
+       }
+   }
+
+   private ConsumerDestination doProvisionConsumerDestination(final String name, final String group,
+           ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
+
+       if (properties.getExtension().isDestinationIsPattern()) {
+           Assert.isTrue(!properties.getExtension().isEnableDlq(),
+                   "enableDLQ is not allowed when listening to topic patterns");
+           if (this.logger.isDebugEnabled()) {
+               this.logger.debug("Listening to a topic pattern - " + name
+                       + " - no provisioning performed");
+           }
+           return new KafkaConsumerDestination(name);
+       }
        KafkaTopicUtils.validateTopicName(name);
        boolean anonymous = !StringUtils.hasText(group);
        Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
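A hedged configuration sketch of the comma-separated destination case handled above, assuming the standard Spring Cloud Stream `multiplex` consumer property (topic, group, and binding names are illustrative):

[source]
----
spring.cloud.stream.bindings.input.destination=orders,returns
spring.cloud.stream.bindings.input.group=audit
spring.cloud.stream.bindings.input.consumer.multiplex=true
----

With multiplex enabled, each listed topic is provisioned individually, but the provisioner still returns a single `KafkaConsumerDestination` for the whole list.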
@@ -49,7 +49,7 @@ public class KafkaTopicProvisionerTests {
        KafkaProperties bootConfig = new KafkaProperties();
        bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
        bootConfig.setBootstrapServers(Collections.singletonList("localhost:1234"));
-       KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties();
+       KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
        binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
        ClassPathResource ts = new ClassPathResource("test.truststore.ks");
        binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());

@@ -68,7 +68,7 @@ public class KafkaTopicProvisionerTests {
        KafkaProperties bootConfig = new KafkaProperties();
        bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
        bootConfig.setBootstrapServers(Collections.singletonList("localhost:9092"));
-       KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties();
+       KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
        binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
        ClassPathResource ts = new ClassPathResource("test.truststore.ks");
        binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());

@@ -84,7 +84,7 @@ public class KafkaTopicProvisionerTests {
    @Test
    public void brokersInvalid() throws Exception {
        KafkaProperties bootConfig = new KafkaProperties();
-       KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties();
+       KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
        binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:1234");
        try {
            new KafkaTopicProvisioner(binderConfig, bootConfig);
@@ -5,7 +5,7 @@
 <parent>
 <groupId>org.springframework.cloud</groupId>
 <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-<version>2.0.0.RELEASE</version>
+<version>2.1.0.M2</version>
 </parent>

 <artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
Binary file not shown (new image, 119 KiB).
@@ -1,6 +1,6 @@
== Usage

For using the Kafka Streams binder, you just need to add it to your Spring Cloud Stream application, using the following
Maven coordinates:

[source,xml]
@@ -13,26 +13,26 @@ Maven coordinates:

== Kafka Streams Binder Overview

Spring Cloud Stream's Apache Kafka support also includes a binder implementation designed explicitly for Apache Kafka
Streams binding. With this native integration, a Spring Cloud Stream "processor" application can directly use the
https://kafka.apache.org/documentation/streams/developer-guide[Apache Kafka Streams] APIs in the core business logic.

The Kafka Streams binder implementation builds on the foundation provided by the http://docs.spring.io/spring-kafka/reference/html/_reference.html#kafka-streams[Kafka Streams in Spring Kafka]
project.

As part of this native integration, the high-level https://docs.confluent.io/current/streams/developer-guide/dsl-api.html[Streams DSL]
provided by the Kafka Streams API is available for use in the business logic, too.

An early version of the https://docs.confluent.io/current/streams/developer-guide/processor-api.html[Processor API]
support is available as well.

As noted early on, Kafka Streams support in Spring Cloud Stream is only available for use in the Processor model:
a model in which messages are read from an inbound topic, business processing is applied, and the transformed messages
are written to an outbound topic. It can also be used in Processor applications with no outbound destination.

=== Streams DSL

This application consumes data from a Kafka topic (e.g., `words`), computes the word count for each unique word in a 5-second
time window, and the computed results are sent to a downstream topic (e.g., `counts`) for further processing.

[source]
@@ -65,12 +65,12 @@ Once built as a uber-jar (e.g., `wordcount-processor.jar`), you can run the abov

java -jar wordcount-processor.jar --spring.cloud.stream.bindings.input.destination=words --spring.cloud.stream.bindings.output.destination=counts
----

This application will consume messages from the Kafka topic `words` and the computed results are published to an output
topic `counts`.

Spring Cloud Stream will ensure that the messages from both the incoming and outgoing topics are automatically bound as
KStream objects. As a developer, you can exclusively focus on the business aspects of the code, i.e. writing the logic
required in the processor. Setting up the Streams DSL specific configuration required by the Kafka Streams infrastructure
is automatically handled by the framework.
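To make the description above concrete, here is a minimal sketch of such a word-count processor (this is not the documentation's own sample; `WordCount` is assumed to be a simple word/count POJO, and imports are omitted as in the other snippets):

[source,java]
----
@EnableBinding(KafkaStreamsProcessor.class)
public class WordCountProcessorApplication {

    @StreamListener("input")
    @SendTo("output")
    public KStream<?, WordCount> process(KStream<Object, String> input) {
        return input
                // split each line into words
                .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                .map((key, word) -> new KeyValue<>(word, word))
                .groupByKey()
                // 5-second time window, as described above
                .windowedBy(TimeWindows.of(5000))
                .count(Materialized.as("wordcounts"))
                .toStream()
                .map((windowedKey, count) -> new KeyValue<>(null, new WordCount(windowedKey.key(), count)));
    }
}
----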

== Configuration Options
@@ -81,7 +81,7 @@ For common configuration options and properties pertaining to binder, refer to t

=== Kafka Streams Properties

-The following properties are available at the binder level and must be prefixed with `spring.cloud.stream.kafka.binder.`
+The following properties are available at the binder level and must be prefixed with `spring.cloud.stream.kafka.streams.binder.`
literal.

configuration::
@@ -96,7 +96,7 @@ spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.a
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000
----

For more information about all the properties that may go into streams configuration, see StreamsConfig JavaDocs in
Apache Kafka Streams docs.

brokers::
@@ -119,7 +119,7 @@ applicationId::
+
Default: `default`

The following properties are _only_ available for Kafka Streams producers and must be prefixed with `spring.cloud.stream.kafka.streams.bindings.<binding name>.producer.`
literal.

keySerde::
@@ -135,7 +135,7 @@ useNativeEncoding::
+
Default: `false`.

The following properties are _only_ available for Kafka Streams consumers and must be prefixed with `spring.cloud.stream.kafka.streams.bindings.<binding name>.consumer.`
literal.

keySerde::
@@ -176,8 +176,8 @@ Default: `none`.

== Multiple Input Bindings

For use cases that require multiple incoming KStream objects or a combination of KStream and KTable objects, the Kafka
Streams binder provides multiple bindings support.

Let's see it in action.
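As a hedged illustration (the documentation's own sample is elided from this hunk), a processor with one KStream and one KTable input might look roughly like this; binding names, value types, and the join logic are illustrative, and imports are omitted:

[source,java]
----
@EnableBinding(EnrichmentProcessor.KStreamKTableBinding.class)
public class EnrichmentProcessor {

    @StreamListener
    public void process(@Input("clicks") KStream<String, String> clicks,
                        @Input("users") KTable<String, String> users) {
        // enrich each click event with the latest user record for the same key
        clicks.join(users, (click, user) -> user + " -> " + click)
              .foreach((key, enriched) -> System.out.println(enriched));
    }

    interface KStreamKTableBinding {

        @Input("clicks")
        KStream<?, ?> clicks();

        @Input("users")
        KTable<?, ?> users();
    }
}
----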
@@ -206,11 +206,11 @@ interface KStreamKTableBinding {

----

In the above example, the application is written as a sink, i.e. there are no output bindings and the application has to
decide concerning downstream processing. When you write applications in this style, you might want to send the information
downstream or store it in a state store (see below for Queryable State Stores).

In the case of an incoming KTable, if you want to materialize the computations to a state store, you have to express it
through the following property.

[source]
@@ -244,13 +244,13 @@ interface KStreamKTableBinding extends KafkaStreamsProcessor {

== Multiple Output Bindings (aka Branching)

Kafka Streams allows outbound data to be split into multiple topics based on some predicates. The Kafka Streams binder provides
support for this feature without compromising the programming model exposed through `StreamListener` in the end user application.

You can write the application in the usual way as demonstrated above in the word count example. However, when using the
branching feature, you are required to do a few things. First, you need to make sure that your return type is `KStream[]`
instead of a regular `KStream`. Second, you need to use the `SendTo` annotation containing the output bindings in order
(see example below). For each of these output bindings, you need to configure destination, content-type etc., complying with
the standard Spring Cloud Stream expectations.

Here is an example:
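The documentation's own example is elided from this hunk; as a hedged stand-in, a branching processor can look roughly like this (predicates and binding names are illustrative, imports omitted):

[source,java]
----
@StreamListener("input")
@SendTo({ "output1", "output2", "output3" })
public KStream<?, String>[] process(KStream<Object, String> input) {

    Predicate<Object, String> isEnglish = (key, value) -> value.contains("english");
    Predicate<Object, String> isFrench = (key, value) -> value.contains("french");
    Predicate<Object, String> isSpanish = (key, value) -> value.contains("spanish");

    // branch() returns one KStream per predicate, in the same order as the
    // output bindings listed in @SendTo above
    return input.branch(isEnglish, isFrench, isSpanish);
}
----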
@@ -330,21 +330,21 @@ spring.cloud.stream.bindings.input:

== Message Conversion

Similar to message-channel based binder applications, the Kafka Streams binder adapts to the out-of-the-box content-type
conversions without any compromise.

It is typical for Kafka Streams operations to know the type of SerDe's used to transform the key and value correctly.
Therefore, it may be more natural to rely on the SerDe facilities provided by the Apache Kafka Streams library itself for
the inbound and outbound conversions rather than using the content-type conversions offered by the framework.
On the other hand, you might already be familiar with the content-type conversion patterns provided by the framework and
would like to continue using them for inbound and outbound conversions.

Both options are supported in the Kafka Streams binder implementation.

==== Outbound serialization

If native encoding is disabled (which is the default), then the framework will convert the message using the contentType
set by the user (otherwise, the default `application/json` will be applied). It will ignore any SerDe set on the outbound
in this case for outbound serialization.

Here is the property to set the contentType on the outbound.
@@ -361,7 +361,7 @@ Here is the property to enable native encoding.
spring.cloud.stream.bindings.output.nativeEncoding: true
----

If native encoding is enabled on the output binding (user has to enable it as above explicitly), then the framework will
skip any form of automatic message conversion on the outbound. In that case, it will switch to the Serde set by the user.
The `valueSerde` property set on the actual output binding will be used. Here is an example.
@@ -372,7 +372,7 @@ spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde: org.apach
If this property is not set, then it will use the "default" SerDe: `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.

It is worth mentioning that the Kafka Streams binder does not serialize the keys on outbound - it simply relies on Kafka itself.
Therefore, you either have to specify the `keySerde` property on the binding or it will default to the application-wide common
`keySerde`.

Binding level key serde:
@@ -418,9 +418,9 @@ spring.cloud.stream.kafka.streams.bindings.output2.producer.valueSerde=StringSer
spring.cloud.stream.kafka.streams.bindings.output3.producer.valueSerde=JsonSerde
----

Then if you have `SendTo` like this, @SendTo({"output1", "output2", "output3"}), the `KStream[]` from the branches are
applied with proper SerDe objects as defined above. If you are not enabling `nativeEncoding`, you can then set different
contentType values on the output bindings as below. In that case, the framework will use the appropriate message converter
to convert the messages before sending to Kafka.

[source]
@@ -434,8 +434,8 @@ spring.cloud.stream.bindings.output3.contentType: application/octet-stream

Similar rules apply to data deserialization on the inbound.

If native decoding is disabled (which is the default), then the framework will convert the message using the contentType
set by the user (otherwise, the default `application/json` will be applied). It will ignore any SerDe set on the inbound
in this case for inbound deserialization.

Here is the property to set the contentType on the inbound.
@@ -452,8 +452,8 @@ Here is the property to enable native decoding.
spring.cloud.stream.bindings.input.nativeDecoding: true
----

If native decoding is enabled on the input binding (user has to enable it as above explicitly), then the framework will
skip doing any message conversion on the inbound. In that case, it will switch to the SerDe set by the user. The `valueSerde`
property set on the actual input binding will be used. Here is an example.

[source]
@@ -464,7 +464,7 @@ spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde: org.apache
If this property is not set, it will use the default SerDe: `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.

It is worth mentioning that the Kafka Streams binder does not deserialize the keys on inbound - it simply relies on Kafka itself.
Therefore, you either have to specify the `keySerde` property on the binding or it will default to the application-wide common
`keySerde`.

Binding level key serde:
@@ -481,8 +481,8 @@ Common Key serde:
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde
----

As in the case of KStream branching on the outbound, the benefit of setting value SerDe per binding is that if you have
multiple input bindings (multiple KStream objects) and they all require separate value SerDe's, then you can configure
them individually. If you use the common configuration approach, then this feature won't be applicable.

== Error Handling
@@ -490,7 +490,7 @@ them individually. If you use the common configuration approach, then this featu
Apache Kafka Streams provides the capability for natively handling exceptions from deserialization errors.
For details on this support, please see https://cwiki.apache.org/confluence/display/KAFKA/KIP-161%3A+streams+deserialization+exception+handlers[this].
Out of the box, Apache Kafka Streams provides two kinds of deserialization exception handlers - `logAndContinue` and `logAndFail`.
As the name indicates, the former will log the error and continue processing the next records and the latter will log the
error and fail. `LogAndFail` is the default deserialization exception handler.

=== Handling Deserialization Exceptions
@@ -502,7 +502,7 @@ Kafka Streams binder supports a selection of exception handlers through the foll
spring.cloud.stream.kafka.streams.binder.serdeError: logAndContinue
----

In addition to the above two deserialization exception handlers, the binder also provides a third one for sending the erroneous
records (poison pills) to a DLQ topic. Here is how you enable this DLQ exception handler.

[source]
@@ -516,30 +516,29 @@ When the above property is set, all the deserialization error records are automa
spring.cloud.stream.kafka.streams.bindings.input.consumer.dlqName: foo-dlq
----

If this is set, then the error records are sent to the topic `foo-dlq`. If this is not set, then it will create a DLQ
topic with the name `error.<input-topic-name>.<group-name>`.

A couple of things to keep in mind when using the exception handling feature in Kafka Streams binder.

* The property `spring.cloud.stream.kafka.streams.binder.serdeError` is applicable for the entire application. This implies
that if there are multiple `StreamListener` methods in the same application, this property is applied to all of them.
* The exception handling for deserialization works consistently with native deserialization and framework provided message
conversion.

=== Handling Non-Deserialization Exceptions

For general error handling in Kafka Streams binder, it is up to the end user applications to handle application level errors.
As a side effect of providing a DLQ for deserialization exception handlers, Kafka Streams binder provides a way to get
access to the DLQ sending bean directly from your application.
Once you get access to that bean, you can programmatically send any exception records from your application to the DLQ.

It continues to remain hard to do robust error handling using the high-level DSL; Kafka Streams doesn't natively support error
handling yet.

However, when you use the low-level Processor API in your application, there are options to control this behavior. See
below.

[source]
----
@Autowired
@@ -577,16 +576,47 @@ public KStream<?, WordCount> process(KStream<Object, String> input) {
}
----

== State Store

State store is created automatically by Kafka Streams when the DSL is used.
When the processor API is used, you need to register a state store manually. In order to do so, you can use the `KafkaStreamsStateStore` annotation.
You can specify the name and type of the store, flags to control log and disabling cache, etc.
Once the store is created by the binder during the bootstrapping phase, you can access this state store through the processor API.
Below are some primitives for doing this.

Creating a state store:

[source]
----
@KafkaStreamsStateStore(name="mystate", type= KafkaStreamsStateStoreProperties.StoreType.WINDOW, lengthMs=300000)
public void process(KStream<Object, Product> input) {
    ...
}
----

Accessing the state store:

[source]
----
Processor<Object, Product>() {

    WindowStore<Object, String> state;

    @Override
    public void init(ProcessorContext processorContext) {
        state = (WindowStore)processorContext.getStateStore("mystate");
    }
    ...
}
----

== Interactive Queries

-As part of the public Kafka Streams binder API, we expose a class called `QueryableStoreRegistry`. You can access this
-as a Spring bean in your application. An easy way to get access to this bean from your application is to "autowire" the bean
-in your application.
+As part of the public Kafka Streams binder API, we expose a class called `InteractiveQueryService`.
+You can access this as a Spring bean in your application. An easy way to get access to this bean from your application is to "autowire" the bean.

[source]
----
@Autowired
-private QueryableStoreRegistry queryableStoreRegistry;
+private InteractiveQueryService interactiveQueryService;
----

Once you gain access to this bean, you can query for the particular state store that you are interested in. See below.
@@ -594,5 +624,51 @@ Once you gain access to this bean, then you can query for the particular state-s
[source]
----
ReadOnlyKeyValueStore<Object, Object> keyValueStore =
-    queryableStoreRegistry.getQueryableStoreType("my-store", QueryableStoreTypes.keyValueStore());
+    interactiveQueryService.getQueryableStoreType("my-store", QueryableStoreTypes.keyValueStore());
----

If there are multiple instances of the Kafka Streams application running, then before you can query them interactively, you need to identify which application instance hosts the key.
The `InteractiveQueryService` API provides methods for identifying the host information.

In order for this to work, you must configure the property `application.server` as below:

[source]
----
spring.cloud.stream.kafka.streams.binder.configuration.application.server: <server>:<port>
----

Here are some code snippets:

[source]
----
org.apache.kafka.streams.state.HostInfo hostInfo = interactiveQueryService.getHostInfo("store-name",
    key, keySerializer);

if (interactiveQueryService.getCurrentHostInfo().equals(hostInfo)) {

    //query from the store that is locally available
}
else {
    //query from the remote host
}
----

== Accessing the underlying KafkaStreams object

The `StreamBuilderFactoryBean` from spring-kafka that is responsible for constructing the `KafkaStreams` object can be accessed programmatically.
Each `StreamBuilderFactoryBean` is registered as `stream-builder` and appended with the `StreamListener` method name.
If your `StreamListener` method is named `process`, for example, the stream builder bean is named `stream-builder-process`.
Since this is a factory bean, it should be accessed by prepending an ampersand (`&`) when accessing it programmatically.
The following is an example, and it assumes the `StreamListener` method is named `process`:

[source]
----
StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
----

== State Cleanup

By default, the `KafkaStreams.cleanup()` method is called when the binding is stopped.
See https://docs.spring.io/spring-kafka/reference/html/_reference.html#_configuration[the Spring Kafka documentation].
To modify this behavior, simply add a single `CleanupConfig` `@Bean` (configured to clean up on start, stop, or neither) to the application context; the bean will be detected and wired into the factory bean.
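A hedged sketch of such a bean, assuming spring-kafka's `CleanupConfig(cleanupOnStart, cleanupOnStop)` constructor:

[source,java]
----
@Bean
public CleanupConfig cleanupConfig() {
    // clean up local state when the binding starts, leave it in place on stop
    return new CleanupConfig(true, false);
}
----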
@@ -63,6 +63,12 @@ Default: `9092`.

spring.cloud.stream.kafka.binder.configuration::
Key/Value map of client properties (both producers and consumers) passed to all clients created by the binder.
Due to the fact that these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
Properties here supersede any properties set in boot.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.consumerProperties::
Key/Value map of arbitrary Kafka client consumer properties.
Properties here supersede any properties set in boot and in the `configuration` property above.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.headers::
@@ -86,6 +92,11 @@ The global minimum number of partitions that the binder configures on topics on
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
+
Default: `1`.
spring.cloud.stream.kafka.binder.producerProperties::
Key/Value map of arbitrary Kafka client producer properties.
Properties here supersede any properties set in boot and in the `configuration` property above.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.replicationFactor::
The replication factor of auto-created topics if `autoCreateTopics` is active.
Can be overridden on each binding.
@@ -196,6 +207,7 @@ The DLQ topic name can be configurable by setting the `dlqName` property.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
See <<kafka-dlq-processing>> processing for more information.
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
**Not allowed when `destinationIsPattern` is `true`.**
+
Default: `false`.
configuration::
@@ -227,6 +239,13 @@ Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events
See <<pause-resume>> for a usage example.
+
Default: `30000`
destinationIsPattern::
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
This can be configured using the `configuration` property above.
+
Default: `false`
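A hedged example of a pattern-based destination; the property paths are assumed to follow the binder's `spring.cloud.stream.kafka.bindings.<binding>.consumer.` convention and the topic pattern is illustrative:

[source]
----
spring.cloud.stream.bindings.input.destination: orders.*
spring.cloud.stream.kafka.bindings.input.consumer.destinationIsPattern: true
# shorten the metadata refresh so that new matching topics are detected sooner
spring.cloud.stream.kafka.bindings.input.consumer.configuration.metadata.max.age.ms: 60000
----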

[[kafka-producer-properties]]
=== Kafka Producer Properties
@@ -454,7 +473,7 @@ public class Application {

== Error Channels

Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
-See <<binder-error-channels>> for more information.
+See <<spring-cloud-stream-overview-error-handling>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:
@@ -469,6 +488,6 @@ You can consume these exceptions with your own Spring Integration flow.

Kafka binder module exposes the following metrics:

-`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag`: This metric indicates how many messages have not been yet consumed from a given binder's topic by a given consumer group.
-For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, the consumer group named `myGroup` has `1000` messages waiting to be consumed from the topic called `myTopic`.
+`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
+The metrics provided are based on the Micrometer metrics library. The metric contains the consumer group information, topic and the actual lag in committed offset from the latest offset on the topic.
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
@@ -10,7 +10,7 @@
 <parent>
 <groupId>org.springframework.cloud</groupId>
 <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-<version>2.0.0.RELEASE</version>
+<version>2.1.0.M2</version>
 </parent>

 <dependencies>
@@ -45,11 +45,6 @@
 <groupId>org.springframework.kafka</groupId>
 <artifactId>spring-kafka-test</artifactId>
 </dependency>
-<dependency>
-<groupId>org.apache.kafka</groupId>
-<artifactId>kafka_2.11</artifactId>
-<classifier>test</classifier>
-</dependency>
 <!-- Added back since Kafka still depends on it, but it has been removed by Boot due to EOL -->
 <dependency>
 <groupId>log4j</groupId>
@@ -62,5 +57,24 @@
 <artifactId>spring-cloud-stream-binder-test</artifactId>
 <scope>test</scope>
 </dependency>
+<dependency>
+<groupId>org.springframework.boot</groupId>
+<artifactId>spring-boot-autoconfigure-processor</artifactId>
+<optional>true</optional>
+</dependency>
+<!-- Following dependencies are needed to support Kafka 1.1.0 client-->
+<dependency>
+<groupId>org.apache.kafka</groupId>
+<artifactId>kafka_2.11</artifactId>
+<version>${kafka.version}</version>
+<scope>test</scope>
+</dependency>
+<dependency>
+<groupId>org.apache.kafka</groupId>
+<artifactId>kafka_2.11</artifactId>
+<version>${kafka.version}</version>
+<classifier>test</classifier>
+<scope>test</scope>
+</dependency>
 </dependencies>
</project>
@@ -0,0 +1,124 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Map;
import java.util.Optional;

import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreType;
import org.apache.kafka.streams.state.StreamsMetadata;

import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.util.StringUtils;

/**
 * Services pertinent to the interactive query capabilities of Kafka Streams. This class provides
 * services such as querying for a particular store, which instance is hosting a particular store etc.
 * This is part of the public API of the Kafka Streams binder, and users can inject this service in their
 * applications to make use of it.
 *
 * @author Soby Chacko
 * @author Renwei Han
 * @since 2.1.0
 */
public class InteractiveQueryService {

    private final KafkaStreamsRegistry kafkaStreamsRegistry;

    private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

    /**
     * @param kafkaStreamsRegistry holding {@link KafkaStreamsRegistry}
     * @param binderConfigurationProperties Kafka Streams binder configuration properties
     */
    public InteractiveQueryService(KafkaStreamsRegistry kafkaStreamsRegistry,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        this.kafkaStreamsRegistry = kafkaStreamsRegistry;
        this.binderConfigurationProperties = binderConfigurationProperties;
    }

    /**
     * Retrieve and return a queryable store by name created in the application.
     *
     * @param storeName name of the queryable store
     * @param storeType type of the queryable store
     * @param <T> generic queryable store
     * @return queryable store.
     */
    public <T> T getQueryableStore(String storeName, QueryableStoreType<T> storeType) {
        for (KafkaStreams kafkaStream : this.kafkaStreamsRegistry.getKafkaStreams()) {
            try {
                T store = kafkaStream.store(storeName, storeType);
                if (store != null) {
                    return store;
                }
            }
            catch (InvalidStateStoreException ignored) {
                // pass through
            }
        }
        return null;
    }

    /**
     * Gets the current {@link HostInfo} that the calling Kafka Streams application is running on.
     *
     * Note that the end user applications must provide `application.server` as a configuration property
     * when calling this method. If this is not available, then null is returned.
     *
     * @return the current {@link HostInfo}
     */
    public HostInfo getCurrentHostInfo() {
        Map<String, String> configuration = this.binderConfigurationProperties.getConfiguration();
        if (configuration.containsKey("application.server")) {

            String applicationServer = configuration.get("application.server");
            String[] splits = StringUtils.split(applicationServer, ":");

            return new HostInfo(splits[0], Integer.valueOf(splits[1]));
        }
        return null;
    }

    /**
     * Gets the {@link HostInfo} where the provided store and key are hosted on. This may not be the
     * current host that is running the application. Kafka Streams will look through all the consumer instances
     * under the same application id and retrieves the proper host.
     *
     * Note that the end user applications must provide `application.server` as a configuration property
     * for all the application instances when calling this method. If this is not available, then null maybe returned.
     *
     * @param store store name
     * @param key key to look for
     * @param serializer {@link Serializer} for the key
     * @return the {@link HostInfo} where the key for the provided store is hosted currently
     */
    public <K> HostInfo getHostInfo(String store, K key, Serializer<K> serializer) {
        StreamsMetadata streamsMetadata = this.kafkaStreamsRegistry.getKafkaStreams()
                .stream()
                .map(k -> Optional.ofNullable(k.metadataForKey(store, key, serializer)))
                .filter(Optional::isPresent)
                .map(Optional::get)
                .findFirst()
                .orElse(null);
        return streamsMetadata != null ? streamsMetadata.hostInfo() : null;
    }
}
@@ -42,7 +42,7 @@ import org.springframework.util.StringUtils;
/**
 * {@link org.springframework.cloud.stream.binder.Binder} implementation for {@link KStream}.
 * This implementation extends from the {@link AbstractBinder} directly.
 *
 * <p>
 * Provides both producer and consumer bindings for the bound KStream.
 *
 * @author Marius Bogoevici
@@ -67,10 +67,10 @@ class KStreamBinder extends

    private final KeyValueSerdeResolver keyValueSerdeResolver;

    KStreamBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
            KafkaTopicProvisioner kafkaTopicProvisioner,
            KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
            KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
            KeyValueSerdeResolver keyValueSerdeResolver) {
        this.binderConfigurationProperties = binderConfigurationProperties;
        this.kafkaTopicProvisioner = kafkaTopicProvisioner;
        this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
@@ -92,21 +92,34 @@ class KStreamBinder extends
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = binderConfigurationProperties.getApplicationId();
|
||||
}
|
||||
this.kafkaTopicProvisioner.provisionConsumerDestination(name, group, extendedConsumerProperties);
|
||||
StreamsConfig streamsConfig = this.KafkaStreamsBindingInformationCatalogue.getStreamsConfig(inputTarget);
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
String dlqName = StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
|
||||
"error." + name + "." + group : extendedConsumerProperties.getExtension().getDlqName();
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
|
||||
DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
|
||||
if(deserializationExceptionHandler instanceof SendToDlqAndContinue) {
|
||||
((SendToDlqAndContinue)deserializationExceptionHandler).addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
String[] inputTopics = StringUtils.commaDelimitedListToStringArray(name);
|
||||
for (String inputTopic : inputTopics) {
|
||||
this.kafkaTopicProvisioner.provisionConsumerDestination(inputTopic, group, extendedConsumerProperties);
|
||||
}
|
||||
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
StreamsConfig streamsConfig = this.KafkaStreamsBindingInformationCatalogue.getStreamsConfig(inputTarget);
|
||||
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = !StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
|
||||
new KafkaStreamsDlqDispatch(extendedConsumerProperties.getExtension().getDlqName(), binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension()) : null;
|
||||
for (String inputTopic : inputTopics) {
|
||||
if (StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName())) {
|
||||
String dlqName = "error." + inputTopic + "." + group;
|
||||
kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
}
|
||||
SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);
|
||||
|
||||
DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
|
||||
if (deserializationExceptionHandler instanceof SendToDlqAndContinue) {
|
||||
((SendToDlqAndContinue) deserializationExceptionHandler).addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
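To summarize the new consumer provisioning path: the destination is split on commas, each topic is provisioned with the group, and, when DLQ support is enabled, a KafkaStreamsDlqDispatch is registered per input topic under error.<topic>.<group> unless a single dlqName overrides all of them. A configuration sketch in the style of the tests further down; the application class is an assumption:

import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;

// Two topics bound to one input; with no dlqName configured, records failing
// deserialization are routed to error.word1.groupx and error.word2.groupx.
new SpringApplicationBuilder(WordCountApplication.class)
        .web(WebApplicationType.NONE)
        .run("--spring.cloud.stream.bindings.input.destination=word1,word2",
                "--spring.cloud.stream.bindings.input.group=groupx",
                "--spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq");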
|
||||
|
||||
@@ -125,13 +138,12 @@ class KStreamBinder extends
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void to(boolean isNativeEncoding, String name, KStream<Object, Object> outboundBindTarget,
|
||||
Serde<Object> keySerde, Serde<Object> valueSerde) {
|
||||
Serde<Object> keySerde, Serde<Object> valueSerde) {
|
||||
if (!isNativeEncoding) {
|
||||
LOG.info("Native encoding is disabled for " + name + ". Outbound message conversion done by Spring Cloud Stream.");
|
||||
kafkaStreamsMessageConversionDelegate.serializeOnOutbound(outboundBindTarget)
|
||||
.to(name, Produced.with(keySerde, valueSerde));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
LOG.info("Native encoding is enabled for " + name + ". Outbound serialization done at the broker.");
|
||||
outboundBindTarget.to(name, Produced.with(keySerde, valueSerde));
|
||||
}
|
||||
|
||||
@@ -16,17 +16,18 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
@@ -34,18 +35,30 @@ import org.springframework.context.annotation.Configuration;
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@Configuration
|
||||
@Import({KafkaAutoConfiguration.class})
|
||||
public class KStreamBinderConfiguration {
|
||||
|
||||
private static final Log logger = LogFactory.getLog(KStreamBinderConfiguration.class);
|
||||
|
||||
@Autowired
|
||||
private KafkaProperties kafkaProperties;
|
||||
|
||||
@Autowired
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
@Bean
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
|
||||
return beanFactory -> {
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory.getBean("outerContext");
|
||||
beanFactory.registerSingleton(KafkaStreamsBinderConfigurationProperties.class.getSimpleName(), outerContext
|
||||
.getBean(KafkaStreamsBinderConfigurationProperties.class));
|
||||
beanFactory.registerSingleton(KafkaStreamsMessageConversionDelegate.class.getSimpleName(), outerContext
|
||||
.getBean(KafkaStreamsMessageConversionDelegate.class));
|
||||
beanFactory.registerSingleton(KafkaStreamsBindingInformationCatalogue.class.getSimpleName(), outerContext
|
||||
.getBean(KafkaStreamsBindingInformationCatalogue.class));
|
||||
beanFactory.registerSingleton(KeyValueSerdeResolver.class.getSimpleName(), outerContext
|
||||
.getBean(KeyValueSerdeResolver.class));
|
||||
beanFactory.registerSingleton(KafkaStreamsExtendedBindingProperties.class.getSimpleName(), outerContext
|
||||
.getBean(KafkaStreamsExtendedBindingProperties.class));
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties) {
|
||||
public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
|
||||
}
|
||||
|
||||
@@ -54,7 +67,8 @@ public class KStreamBinderConfiguration {
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver) {
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
|
||||
KStreamBinder kStreamBinder = new KStreamBinder(binderConfigurationProperties, kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate, KafkaStreamsBindingInformationCatalogue,
|
||||
keyValueSerdeResolver);
|
||||
|
||||
@@ -21,6 +21,7 @@ import org.aopalliance.intercept.MethodInvocation;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
@@ -50,6 +51,9 @@ class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
|
||||
@Override
|
||||
public KStream createInput(String name) {
|
||||
ConsumerProperties consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
|
||||
//Always set multiplex to true in the kafka streams binder
|
||||
consumerProperties.setMultiplex(true);
|
||||
return createProxyForKStream(name);
|
||||
}
|
||||
|
||||
|
||||
@@ -73,20 +73,31 @@ class KTableBinder extends
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = binderConfigurationProperties.getApplicationId();
|
||||
}
|
||||
this.kafkaTopicProvisioner.provisionConsumerDestination(name, group, extendedConsumerProperties);
|
||||
|
||||
String[] inputTopics = StringUtils.commaDelimitedListToStringArray(name);
|
||||
for (String inputTopic : inputTopics) {
|
||||
this.kafkaTopicProvisioner.provisionConsumerDestination(inputTopic, group, extendedConsumerProperties);
|
||||
}
|
||||
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
String dlqName = StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
|
||||
"error." + name + "." + group : extendedConsumerProperties.getExtension().getDlqName();
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
|
||||
StreamsConfig streamsConfig = this.KafkaStreamsBindingInformationCatalogue.getStreamsConfig(inputTarget);
|
||||
DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
|
||||
if(deserializationExceptionHandler instanceof SendToDlqAndContinue) {
|
||||
((SendToDlqAndContinue)deserializationExceptionHandler).addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = !StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
|
||||
new KafkaStreamsDlqDispatch(extendedConsumerProperties.getExtension().getDlqName(), binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension()) : null;
|
||||
for (String inputTopic : inputTopics) {
|
||||
if (StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName())) {
|
||||
String dlqName = "error." + inputTopic + "." + group;
|
||||
kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
}
|
||||
SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);
|
||||
|
||||
DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
|
||||
if (deserializationExceptionHandler instanceof SendToDlqAndContinue) {
|
||||
((SendToDlqAndContinue) deserializationExceptionHandler).addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
}
|
||||
}
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
|
||||
@@ -16,27 +16,37 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@Configuration
|
||||
public class KTableBinderConfiguration {
|
||||
|
||||
@Autowired
|
||||
private KafkaProperties kafkaProperties;
|
||||
|
||||
@Autowired
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
@Bean
|
||||
@ConditionalOnBean(name = "outerContext")
|
||||
public BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
|
||||
return beanFactory -> {
|
||||
ApplicationContext outerContext = (ApplicationContext) beanFactory.getBean("outerContext");
|
||||
beanFactory.registerSingleton(KafkaStreamsBinderConfigurationProperties.class.getSimpleName(), outerContext
|
||||
.getBean(KafkaStreamsBinderConfigurationProperties.class));
|
||||
beanFactory.registerSingleton(KafkaStreamsBindingInformationCatalogue.class.getSimpleName(), outerContext
|
||||
.getBean(KafkaStreamsBindingInformationCatalogue.class));
|
||||
};
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties) {
|
||||
public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaProperties kafkaProperties) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,9 @@ import org.aopalliance.intercept.MethodInvocation;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
|
||||
@@ -33,12 +35,19 @@ import org.springframework.util.Assert;
|
||||
*/
|
||||
class KTableBoundElementFactory extends AbstractBindingTargetFactory<KTable> {
|
||||
|
||||
KTableBoundElementFactory() {
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
KTableBoundElementFactory(BindingServiceProperties bindingServiceProperties) {
|
||||
super(KTable.class);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KTable createInput(String name) {
|
||||
ConsumerProperties consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
|
||||
//Always set multiplex to true in the kafka streams binder
|
||||
consumerProperties.setMultiplex(true);
|
||||
|
||||
KTableBoundElementFactory.KTableWrapperHandler wrapper = new KTableBoundElementFactory.KTableWrapperHandler();
|
||||
ProxyFactory proxyFactory = new ProxyFactory(KTableBoundElementFactory.KTableWrapper.class, KTable.class);
|
||||
proxyFactory.addAdvice(wrapper);
|
||||
|
||||
@@ -25,31 +25,38 @@ import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
|
||||
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;
|
||||
|
||||
import org.springframework.beans.factory.ObjectProvider;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binding.BindingService;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
import org.springframework.cloud.stream.config.BindingServiceConfiguration;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
@EnableConfigurationProperties(KafkaStreamsExtendedBindingProperties.class)
|
||||
@ConditionalOnBean(BindingService.class)
|
||||
@AutoConfigureAfter(BindingServiceConfiguration.class)
|
||||
public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
|
||||
@Bean
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.streams.binder")
|
||||
public KafkaStreamsBinderConfigurationProperties binderConfigurationProperties() {
|
||||
return new KafkaStreamsBinderConfigurationProperties();
|
||||
public KafkaStreamsBinderConfigurationProperties binderConfigurationProperties(KafkaProperties kafkaProperties) {
|
||||
return new KafkaStreamsBinderConfigurationProperties(kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean("streamConfigGlobalProperties")
|
||||
@@ -97,10 +104,12 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
KStreamStreamListenerParameterAdapter kafkaStreamListenerParameterAdapter,
|
||||
Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
ObjectProvider<CleanupConfig> cleanupConfig) {
|
||||
return new KafkaStreamsStreamListenerSetupMethodOrchestrator(bindingServiceProperties,
|
||||
kafkaStreamsExtendedBindingProperties, keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue,
|
||||
kafkaStreamListenerParameterAdapter, streamListenerResultAdapters, binderConfigurationProperties);
|
||||
kafkaStreamListenerParameterAdapter, streamListenerResultAdapters, binderConfigurationProperties,
|
||||
cleanupConfig.getIfUnique());
|
||||
}
|
||||
|
||||
@Bean
|
||||
@@ -120,8 +129,8 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KTableBoundElementFactory kTableBoundElementFactory() {
|
||||
return new KTableBoundElementFactory();
|
||||
public KTableBoundElementFactory kTableBoundElementFactory(BindingServiceProperties bindingServiceProperties) {
|
||||
return new KTableBoundElementFactory(bindingServiceProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
@@ -142,14 +151,25 @@ public class KafkaStreamsBinderSupportAutoConfiguration {
|
||||
}
|
||||
|
||||
@Bean
|
||||
public QueryableStoreRegistry queryableStoreTypeRegistry() {
|
||||
return new QueryableStoreRegistry();
|
||||
public QueryableStoreRegistry queryableStoreTypeRegistry(KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
return new QueryableStoreRegistry(kafkaStreamsRegistry);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public InteractiveQueryService interactiveQueryServices(KafkaStreamsRegistry kafkaStreamsRegistry,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
return new InteractiveQueryService(kafkaStreamsRegistry, binderConfigurationProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KafkaStreamsRegistry kafkaStreamsRegistry() {
|
||||
return new KafkaStreamsRegistry();
|
||||
}
|
||||
|
||||
@Bean
|
||||
public StreamsBuilderFactoryManager streamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
QueryableStoreRegistry queryableStoreRegistry) {
|
||||
return new StreamsBuilderFactoryManager(kafkaStreamsBindingInformationCatalogue, queryableStoreRegistry);
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
return new StreamsBuilderFactoryManager(kafkaStreamsBindingInformationCatalogue, kafkaStreamsRegistry);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @author Rafal Zukowski
|
||||
* @author Gary Russell
|
||||
*/
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
@@ -105,8 +106,9 @@ class KafkaStreamsDlqDispatch {
|
||||
props.put(ProducerConfig.RETRIES_CONFIG, 0);
|
||||
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
|
||||
props.put(ProducerConfig.ACKS_CONFIG, configurationProperties.getRequiredAcks());
|
||||
if (!ObjectUtils.isEmpty(configurationProperties.getProducerConfiguration())) {
|
||||
props.putAll(configurationProperties.getProducerConfiguration());
|
||||
Map<String, Object> mergedConfig = configurationProperties.mergedProducerConfiguration();
|
||||
if (!ObjectUtils.isEmpty(mergedConfig)) {
|
||||
props.putAll(mergedConfig);
|
||||
}
|
||||
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
|
||||
|
||||
@@ -41,7 +41,7 @@ import org.springframework.util.StringUtils;
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KafkaStreamsMessageConversionDelegate {
|
||||
public class KafkaStreamsMessageConversionDelegate {
|
||||
|
||||
private static final ThreadLocal<KeyValue<Object, Object>> keyValueThreadLocal = new ThreadLocal<>();
|
||||
|
||||
@@ -73,7 +73,7 @@ class KafkaStreamsMessageConversionDelegate {
|
||||
String contentType = this.kstreamBindingInformationCatalogue.getContentType(outboundBindTarget);
|
||||
MessageConverter messageConverter = compositeMessageConverterFactory.getMessageConverterForAllRegistered();
|
||||
|
||||
return outboundBindTarget.map((k, v) -> {
|
||||
return outboundBindTarget.mapValues((v) -> {
|
||||
Message<?> message = v instanceof Message<?> ? (Message<?>) v :
|
||||
MessageBuilder.withPayload(v).build();
|
||||
Map<String, Object> headers = new HashMap<>(message.getHeaders());
|
||||
@@ -81,9 +81,9 @@ class KafkaStreamsMessageConversionDelegate {
|
||||
headers.put(MessageHeaders.CONTENT_TYPE, contentType);
|
||||
}
|
||||
MessageHeaders messageHeaders = new MessageHeaders(headers);
|
||||
return new KeyValue<>(k,
|
||||
return
|
||||
messageConverter.toMessage(message.getPayload(),
|
||||
messageHeaders).getPayload());
|
||||
messageHeaders).getPayload();
|
||||
});
|
||||
}
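The outbound conversion now uses mapValues instead of map: the key is never touched, so Kafka Streams does not flag the stream for repartitioning ahead of a downstream join or aggregation, and the converter only rewrites the value anyway. A condensed before/after sketch, where convert(v) stands in for the message-converter call:

// before: the key is re-emitted unchanged, yet map() still marks the stream
// as requiring a repartition before stateful downstream operations
KStream<Object, Object> before = stream.map((k, v) -> new KeyValue<>(k, convert(v)));

// after: a value-only transformation, no repartition flag is set
KStream<Object, Object> after = stream.mapValues(v -> convert(v));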
|
||||
|
||||
@@ -137,10 +137,10 @@ class KafkaStreamsMessageConversionDelegate {
|
||||
processErrorFromDeserialization(bindingTarget, branch[1]);
|
||||
|
||||
// The first branch above holds the successfully converted records; let it continue through further processing.
|
||||
return branch[0].map((o, o2) -> {
|
||||
KeyValue<Object, Object> objectObjectKeyValue = keyValueThreadLocal.get();
|
||||
return branch[0].mapValues((o2) -> {
|
||||
Object objectValue = keyValueThreadLocal.get().value;
|
||||
keyValueThreadLocal.remove();
|
||||
return objectObjectKeyValue;
|
||||
return objectValue;
|
||||
});
|
||||
}
|
||||
|
||||
@@ -165,7 +165,7 @@ class KafkaStreamsMessageConversionDelegate {
|
||||
@Override
|
||||
public void process(Object o, Object o2) {
|
||||
if (kstreamBindingInformationCatalogue.isDlqEnabled(bindingTarget)) {
|
||||
String destination = kstreamBindingInformationCatalogue.getDestination(bindingTarget);
|
||||
String destination = context.topic();
|
||||
if (o2 instanceof Message) {
|
||||
Message message = (Message) o2;
|
||||
sendToDlqAndContinue.sendToDlq(destination, (byte[]) o, (byte[]) message.getPayload(), context.partition());
|
||||
@@ -182,12 +182,6 @@ class KafkaStreamsMessageConversionDelegate {
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
@Override
|
||||
public void punctuate(long timestamp) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
|
||||
|
||||
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
|
||||
/**
|
||||
 * An internal registry for holding {@link KafkaStreams} objects maintained through
|
||||
* {@link StreamsBuilderFactoryManager}.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KafkaStreamsRegistry {
|
||||
|
||||
private final Set<KafkaStreams> kafkaStreams = new HashSet<>();
|
||||
|
||||
Set<KafkaStreams> getKafkaStreams() {
|
||||
return kafkaStreams;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register the {@link KafkaStreams} object created in the application.
|
||||
*
|
||||
* @param kafkaStreams {@link KafkaStreams} object created in the application
|
||||
*/
|
||||
void registerKafkaStreams(KafkaStreams kafkaStreams) {
|
||||
this.kafkaStreams.add(kafkaStreams);
|
||||
}
|
||||
}
|
||||
@@ -17,24 +17,25 @@
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.apache.kafka.streams.Consumed;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
|
||||
import org.apache.kafka.streams.kstream.Consumed;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.state.KeyValueStore;
|
||||
import org.apache.kafka.streams.state.StoreBuilder;
|
||||
import org.apache.kafka.streams.state.Stores;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.BeanInitializationException;
|
||||
@@ -45,9 +46,11 @@ import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsStateStore;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerErrorMessages;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
@@ -59,6 +62,7 @@ import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.core.annotation.AnnotationUtils;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
@@ -80,6 +84,8 @@ import org.springframework.util.StringUtils;
|
||||
* 3. Each StreamListener method that it orchestrates gets its own {@link StreamsBuilderFactoryBean} and {@link StreamsConfig}
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Lei Chen
|
||||
* @author Gary Russell
|
||||
*/
|
||||
class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListenerSetupMethodOrchestrator, ApplicationContextAware {
|
||||
|
||||
@@ -101,6 +107,8 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private final CleanupConfig cleanupConfig;
|
||||
|
||||
private ConfigurableApplicationContext applicationContext;
|
||||
|
||||
KafkaStreamsStreamListenerSetupMethodOrchestrator(BindingServiceProperties bindingServiceProperties,
|
||||
@@ -109,7 +117,8 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
StreamListenerParameterAdapter streamListenerParameterAdapter,
|
||||
Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
CleanupConfig cleanupConfig) {
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
@@ -117,6 +126,7 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
this.streamListenerParameterAdapter = streamListenerParameterAdapter;
|
||||
this.streamListenerResultAdapters = streamListenerResultAdapters;
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.cleanupConfig = cleanupConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -231,10 +241,12 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = methodStreamsBuilderFactoryBeanMap.get(method);
|
||||
StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject();
|
||||
KafkaStreamsConsumerProperties extendedConsumerProperties = kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(inboundName);
|
||||
//get state store spec
|
||||
KafkaStreamsStateStoreProperties spec = buildStateStoreSpec(method);
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver.getInboundKeySerde(extendedConsumerProperties);
|
||||
Serde<?> valueSerde = this.keyValueSerdeResolver.getInboundValueSerde(bindingProperties.getConsumer(), extendedConsumerProperties);
|
||||
if (parameterType.isAssignableFrom(KStream.class)) {
|
||||
KStream<?, ?> stream = getkStream(inboundName, bindingProperties, streamsBuilder, keySerde, valueSerde);
|
||||
KStream<?, ?> stream = getkStream(inboundName, spec, bindingProperties, streamsBuilder, keySerde, valueSerde);
|
||||
KStreamBoundElementFactory.KStreamWrapper kStreamWrapper = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
|
||||
//wrap the proxy created during the initial target type binding with real object (KStream)
|
||||
kStreamWrapper.wrap((KStream<Object, Object>) stream);
|
||||
@@ -283,15 +295,62 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
}
|
||||
|
||||
private <K,V> KTable<K,V> materializedAs(StreamsBuilder streamsBuilder, String destination, String storeName, Serde<K> k, Serde<V> v) {
|
||||
|
||||
return streamsBuilder.table(bindingServiceProperties.getBindingDestination(destination),
|
||||
Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
|
||||
.withKeySerde(k)
|
||||
.withValueSerde(v));
|
||||
}
|
||||
|
||||
private KStream<?, ?> getkStream(String inboundName, BindingProperties bindingProperties, StreamsBuilder streamsBuilder,
|
||||
private StoreBuilder buildStateStore(KafkaStreamsStateStoreProperties spec) {
|
||||
try {
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver.getStateStoreKeySerde(spec.getKeySerdeString());
|
||||
Serde<?> valueSerde = this.keyValueSerdeResolver.getStateStoreValueSerde(spec.getValueSerdeString());
|
||||
StoreBuilder builder;
|
||||
switch (spec.getType()) {
|
||||
case KEYVALUE:
|
||||
builder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(spec.getName()), keySerde, valueSerde);
|
||||
break;
|
||||
case WINDOW:
|
||||
builder = Stores.windowStoreBuilder(Stores.persistentWindowStore(spec.getName(), spec.getRetention(), 3, spec.getLength(), false),
|
||||
keySerde,
|
||||
valueSerde);
|
||||
break;
|
||||
case SESSION:
|
||||
builder = Stores.sessionStoreBuilder(Stores.persistentSessionStore(spec.getName(), spec.getRetention()), keySerde, valueSerde);
|
||||
break;
|
||||
default:
|
||||
throw new UnsupportedOperationException("state store type (" + spec.getType() + ") is not supported!");
|
||||
}
|
||||
if (spec.isCacheEnabled()) {
|
||||
builder = builder.withCachingEnabled();
|
||||
}
|
||||
if (spec.isLoggingDisabled()) {
|
||||
builder = builder.withLoggingDisabled();
|
||||
}
|
||||
|
||||
return builder;
|
||||
|
||||
} catch (Exception e) {
|
||||
LOG.error("failed to build state store exception : " + e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private KStream<?, ?> getkStream(String inboundName, KafkaStreamsStateStoreProperties storeSpec,
|
||||
BindingProperties bindingProperties, StreamsBuilder streamsBuilder,
|
||||
Serde<?> keySerde, Serde<?> valueSerde) {
|
||||
KStream<?, ?> stream = streamsBuilder.stream(bindingServiceProperties.getBindingDestination(inboundName),
|
||||
if (storeSpec != null) {
|
||||
StoreBuilder storeBuilder = buildStateStore(storeSpec);
|
||||
streamsBuilder.addStateStore(storeBuilder);
|
||||
if (LOG.isInfoEnabled()) {
|
||||
LOG.info("state store " + storeBuilder.name() + " added to topology");
|
||||
}
|
||||
}
|
||||
String[] bindingTargets = StringUtils
|
||||
.commaDelimitedListToStringArray(bindingServiceProperties.getBindingDestination(inboundName));
|
||||
KStream<?, ?> stream = streamsBuilder.stream(Arrays.asList(bindingTargets),
|
||||
Consumed.with(keySerde, valueSerde));
|
||||
final boolean nativeDecoding = bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding();
|
||||
if (nativeDecoding) {
|
||||
@@ -300,18 +359,18 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
else {
|
||||
LOG.info("Native decoding is disabled for " + inboundName + ". Inbound message conversion done by Spring Cloud Stream.");
|
||||
}
|
||||
stream = stream.map((key, value) -> {
|
||||
KeyValue<Object, Object> keyValue;
|
||||
|
||||
stream = stream.mapValues(value -> {
|
||||
Object returnValue;
|
||||
String contentType = bindingProperties.getContentType();
|
||||
if (!StringUtils.isEmpty(contentType) && !nativeDecoding) {
|
||||
Message<?> message = MessageBuilder.withPayload(value)
|
||||
.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
|
||||
keyValue = new KeyValue<>(key, message);
|
||||
returnValue = message;
|
||||
} else {
|
||||
returnValue = value;
|
||||
}
|
||||
else {
|
||||
keyValue = new KeyValue<>(key, value);
|
||||
}
|
||||
return keyValue;
|
||||
return returnValue;
|
||||
});
|
||||
return stream;
|
||||
}
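Because the binding destination is now split into a list, one StreamListener input receives records from every topic named in the binding (multiplex is forced to true in KStreamBoundElementFactory). A small sketch; the binding name and handler body are illustrative:

// spring.cloud.stream.bindings.input.destination=topicA,topicB
// Both topics feed the single KStream below.
@StreamListener
public void process(@Input("input") KStream<Object, String> input) {
    input.foreach((key, value) -> System.out.println(value));
}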
|
||||
@@ -330,14 +389,6 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
private StreamsConfig buildStreamsBuilderAndRetrieveConfig(Method method, ApplicationContext applicationContext,
|
||||
BindingProperties bindingProperties) {
|
||||
ConfigurableListableBeanFactory beanFactory = this.applicationContext.getBeanFactory();
|
||||
StreamsBuilderFactoryBean streamsBuilder = new StreamsBuilderFactoryBean();
|
||||
streamsBuilder.setAutoStartup(false);
|
||||
String uuid = UUID.randomUUID().toString();
|
||||
BeanDefinition streamsBuilderBeanDefinition =
|
||||
BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(), () -> streamsBuilder)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("stream-builder-" + uuid, streamsBuilderBeanDefinition);
|
||||
StreamsBuilderFactoryBean streamsBuilderX = applicationContext.getBean("&stream-builder-" + uuid, StreamsBuilderFactoryBean.class);
|
||||
String group = bindingProperties.getGroup();
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = binderConfigurationProperties.getApplicationId();
|
||||
@@ -364,12 +415,20 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
return super.getConfiguredInstance(key, clazz);
|
||||
}
|
||||
};
|
||||
StreamsBuilderFactoryBean streamsBuilder = this.cleanupConfig == null
|
||||
? new StreamsBuilderFactoryBean(streamsConfig)
|
||||
: new StreamsBuilderFactoryBean(streamsConfig, this.cleanupConfig);
|
||||
streamsBuilder.setAutoStartup(false);
|
||||
BeanDefinition streamsBuilderBeanDefinition =
|
||||
BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(), () -> streamsBuilder)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("stream-builder-" + method.getName(), streamsBuilderBeanDefinition);
|
||||
StreamsBuilderFactoryBean streamsBuilderX = applicationContext.getBean("&stream-builder-" + method.getName(), StreamsBuilderFactoryBean.class);
|
||||
BeanDefinition streamsConfigBeanDefinition =
|
||||
BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsConfig>) streamsConfig.getClass(), () -> streamsConfig)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("streamsConfig-" + uuid, streamsConfigBeanDefinition);
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("streamsConfig-" + method.getName(), streamsConfigBeanDefinition);
|
||||
|
||||
streamsBuilder.setStreamsConfig(streamsConfig);
|
||||
methodStreamsBuilderFactoryBeanMap.put(method, streamsBuilderX);
|
||||
return streamsConfig;
|
||||
}
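Each StreamListener method now gets its StreamsBuilderFactoryBean and StreamsConfig registered under deterministic bean names derived from the method name rather than a random UUID. A sketch of how an application could look one up, assuming a StreamListener method named process:

// "&" returns the FactoryBean itself rather than the StreamsBuilder it creates;
// "process" is an assumed StreamListener method name.
StreamsBuilderFactoryBean factory = applicationContext.getBean(
        "&stream-builder-process", StreamsBuilderFactoryBean.class);
KafkaStreams kafkaStreams = factory.getKafkaStreams();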
|
||||
@@ -432,4 +491,24 @@ class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListene
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@SuppressWarnings({"unchecked"})
|
||||
private static KafkaStreamsStateStoreProperties buildStateStoreSpec(Method method) {
|
||||
KafkaStreamsStateStore spec = AnnotationUtils.findAnnotation(method, KafkaStreamsStateStore.class);
|
||||
if (spec != null) {
|
||||
Assert.isTrue(!ObjectUtils.isEmpty(spec.name()), "name cannot be empty");
|
||||
Assert.isTrue(spec.name().length() >= 1, "name cannot be empty.");
|
||||
KafkaStreamsStateStoreProperties props = new KafkaStreamsStateStoreProperties();
|
||||
props.setName(spec.name());
|
||||
props.setType(spec.type());
|
||||
props.setLength(spec.lengthMs());
|
||||
props.setKeySerdeString(spec.keySerde());
|
||||
props.setRetention(spec.retentionMs());
|
||||
props.setValueSerdeString(spec.valueSerde());
|
||||
props.setCacheEnabled(spec.cache());
|
||||
props.setLoggingDisabled(!spec.logging());
|
||||
return props;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,12 +38,16 @@ import org.springframework.util.StringUtils;
|
||||
 * If native decoding is disabled, then the binder performs the value deserialization itself, ignoring any Serde set for the value
|
||||
 * and relying on the contentType provided. Keys are always deserialized at the broker.
|
||||
*
|
||||
*
|
||||
* Same rules apply on the outbound. If native encoding is enabled, then value serialization is done at the broker using
|
||||
 * any binder-level Serde configured for the value, falling back to the common Serde and finally to byte[].
|
||||
* If native encoding is disabled, then the binder will do serialization using a contentType. Keys are always serialized
|
||||
* by the broker.
|
||||
*
|
||||
 * For state stores, the Serde classes specified via {@link KafkaStreamsStateStore} are used to create the corresponding Serdes.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Lei Chen
|
||||
*/
|
||||
class KeyValueSerdeResolver {
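The decoding rules described above are driven by binding and binder properties. A hedged configuration sketch that reuses only property names appearing elsewhere in this change set; the application class is an assumption:

import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;

// Native decoding on the input binding plus a binder-wide default value Serde;
// with native decoding off, the contentType-based conversion applies instead.
new SpringApplicationBuilder(SomeStreamsApplication.class)
        .web(WebApplicationType.NONE)
        .run("--spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
                "--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde="
                        + "org.apache.kafka.common.serialization.Serdes$IntegerSerde");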
|
||||
|
||||
@@ -130,6 +134,31 @@ class KeyValueSerdeResolver {
|
||||
return valueSerde;
|
||||
}
|
||||
|
||||
/**
|
||||
* Provide the {@link Serde} for state store
|
||||
*
|
||||
* @param keySerdeString serde class used for key
|
||||
* @return {@link Serde} for the state store key.
|
||||
*/
|
||||
public Serde<?> getStateStoreKeySerde(String keySerdeString) {
|
||||
return getKeySerde(keySerdeString);
|
||||
}
|
||||
|
||||
/**
|
||||
* Provide the {@link Serde} for state store value
|
||||
*
|
||||
* @param valueSerdeString serde class used for value
|
||||
* @return {@link Serde} for the state store value.
|
||||
*/
|
||||
public Serde<?> getStateStoreValueSerde(String valueSerdeString) {
|
||||
try {
|
||||
return getValueSerde(valueSerdeString);
|
||||
}
|
||||
catch (ClassNotFoundException e) {
|
||||
throw new IllegalStateException("Serde class not found: ", e);
|
||||
}
|
||||
}
|
||||
|
||||
private Serde<?> getKeySerde(String keySerdeString) {
|
||||
Serde<?> keySerde;
|
||||
try {
|
||||
|
||||
@@ -16,10 +16,8 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.errors.InvalidStateStoreException;
|
||||
import org.apache.kafka.streams.state.QueryableStoreType;
|
||||
|
||||
/**
|
||||
@@ -27,11 +25,17 @@ import org.apache.kafka.streams.state.QueryableStoreType;
|
||||
* the user applications.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Renwei Han
|
||||
* @since 2.0.0
|
||||
* @deprecated in favor of {@link InteractiveQueryService}
|
||||
*/
|
||||
public class QueryableStoreRegistry {
|
||||
|
||||
private final Set<KafkaStreams> kafkaStreams = new HashSet<>();
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
public QueryableStoreRegistry(KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve and return a queryable store by name created in the application.
|
||||
@@ -40,24 +44,22 @@ public class QueryableStoreRegistry {
|
||||
* @param storeType type of the queryable store
|
||||
* @param <T> generic queryable store
|
||||
* @return queryable store.
|
||||
* @deprecated in favor of {@link InteractiveQueryService#getQueryableStore(String, QueryableStoreType)}
|
||||
*/
|
||||
public <T> T getQueryableStoreType(String storeName, QueryableStoreType<T> storeType) {
|
||||
|
||||
for (KafkaStreams kafkaStream : kafkaStreams) {
|
||||
T store = kafkaStream.store(storeName, storeType);
|
||||
if (store != null) {
|
||||
return store;
|
||||
for (KafkaStreams kafkaStream : this.kafkaStreamsRegistry.getKafkaStreams()) {
|
||||
try {
|
||||
T store = kafkaStream.store(storeName, storeType);
|
||||
if (store != null) {
|
||||
return store;
|
||||
}
|
||||
}
|
||||
catch (InvalidStateStoreException ignored) {
|
||||
//pass through
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register the {@link KafkaStreams} object created in the application.
|
||||
*
|
||||
* @param kafkaStreams {@link KafkaStreams} object created in the application
|
||||
*/
|
||||
void registerKafkaStreams(KafkaStreams kafkaStreams) {
|
||||
this.kafkaStreams.add(kafkaStreams);
|
||||
}
|
||||
}
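The registry now iterates the shared KafkaStreamsRegistry and swallows InvalidStateStoreException while an instance is still rebalancing, so callers simply get null until some instance can serve the store. A usage sketch with an assumed store name; the deprecated method and its InteractiveQueryService replacement share the same shape:

import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

// Returns null while no KafkaStreams instance can serve the store yet.
ReadOnlyKeyValueStore<String, Long> counts = queryableStoreRegistry
        .getQueryableStoreType("prod-counts", QueryableStoreTypes.keyValueStore());
if (counts != null) {
    Long total = counts.get("some-key");
}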
|
||||
|
||||
@@ -38,14 +38,14 @@ import org.springframework.kafka.core.StreamsBuilderFactoryBean;
|
||||
class StreamsBuilderFactoryManager implements SmartLifecycle {
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
private final QueryableStoreRegistry queryableStoreRegistry;
|
||||
private final KafkaStreamsRegistry kafkaStreamsRegistry;
|
||||
|
||||
private volatile boolean running;
|
||||
|
||||
StreamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
QueryableStoreRegistry queryableStoreRegistry) {
|
||||
KafkaStreamsRegistry kafkaStreamsRegistry) {
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.queryableStoreRegistry = queryableStoreRegistry;
|
||||
this.kafkaStreamsRegistry = kafkaStreamsRegistry;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -68,7 +68,7 @@ class StreamsBuilderFactoryManager implements SmartLifecycle {
|
||||
Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeans();
|
||||
for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
|
||||
streamsBuilderFactoryBean.start();
|
||||
queryableStoreRegistry.registerKafkaStreams(streamsBuilderFactoryBean.getKafkaStreams());
|
||||
kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean.getKafkaStreams());
|
||||
}
|
||||
this.running = true;
|
||||
} catch (Exception e) {
|
||||
|
||||
@@ -0,0 +1,105 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.annotations;
|
||||
|
||||
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
import java.lang.annotation.RetentionPolicy;
|
||||
import java.lang.annotation.Target;
|
||||
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;
|
||||
|
||||
|
||||
/**
|
||||
 * Annotation for declaring a Kafka Streams state store.
|
||||
*
|
||||
 * This annotation can be used to inject a state store specification into the KStream building process so
|
||||
 * that the desired store is built by the StreamsBuilder and added to the topology for later use by processors.
|
||||
 * This is particularly useful when combining the streams DSL with the low-level Processor API. In those cases,
|
||||
 * if a writable state store is needed inside a processor, it has to be created through this annotation.
|
||||
 * Here is an example.
|
||||
*
|
||||
* <pre class="code">
|
||||
* @StreamListener("input")
|
||||
 * @KafkaStreamsStateStore(name = "mystate", type = KafkaStreamsStateStoreProperties.StoreType.WINDOW, lengthMs = 300000)
|
||||
* public void process(KStream<Object, Product> input) {
|
||||
* ......
|
||||
* }
|
||||
*</pre>
|
||||
*
|
||||
* With that, you should be able to read/write this state store in your processor/transformer code.
|
||||
*
|
||||
* <pre class="code">
|
||||
* new Processor<Object, Product>() {
|
||||
* WindowStore<Object, String> state;
|
||||
* @Override
|
||||
* public void init(ProcessorContext processorContext) {
|
||||
* state = (WindowStore)processorContext.getStateStore("mystate");
|
||||
* ......
|
||||
* }
|
||||
* }
|
||||
*</pre>
|
||||
*
|
||||
* @author Lei Chen
|
||||
*/
|
||||
|
||||
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE })
|
||||
@Retention(RetentionPolicy.RUNTIME)
|
||||
|
||||
public @interface KafkaStreamsStateStore {
|
||||
|
||||
/**
|
||||
* @return name of state store.
|
||||
*/
|
||||
String name() default "";
|
||||
|
||||
/**
|
||||
* @return {@link KafkaStreamsStateStoreProperties.StoreType} of state store.
|
||||
*/
|
||||
KafkaStreamsStateStoreProperties.StoreType type() default KafkaStreamsStateStoreProperties.StoreType.KEYVALUE;
|
||||
|
||||
/**
|
||||
* @return key serde of state store.
|
||||
*/
|
||||
String keySerde() default "org.apache.kafka.common.serialization.Serdes$StringSerde";
|
||||
|
||||
/**
|
||||
* @return value serde of state store.
|
||||
*/
|
||||
String valueSerde() default "org.apache.kafka.common.serialization.Serdes$StringSerde";
|
||||
|
||||
/**
|
||||
 * @return length of the window in milliseconds (for windowed stores).
|
||||
*/
|
||||
long lengthMs() default 0;
|
||||
|
||||
/**
|
||||
 * @return the maximum period of time in milliseconds to keep each window in this store (for windowed stores).
|
||||
*/
|
||||
long retentionMs() default 0;
|
||||
|
||||
/**
|
||||
* @return whether caching should be enabled on the created store.
|
||||
*/
|
||||
boolean cache() default false;
|
||||
|
||||
/**
|
||||
* @return whether logging should be enabled on the created store.
|
||||
*/
|
||||
boolean logging() default true;
|
||||
}
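A fuller version of the javadoc fragments above, wiring the annotated window store into a low-level processor. The binding name, store name and value handling are illustrative, and the value type is kept as String so the sketch stays self-contained:

@StreamListener("input")
@KafkaStreamsStateStore(name = "mystate",
        type = KafkaStreamsStateStoreProperties.StoreType.WINDOW,
        lengthMs = 300000, retentionMs = 300000)
@SuppressWarnings("unchecked")
public void process(KStream<Object, String> input) {
    input.process(() -> new Processor<Object, String>() {

        private WindowStore<Object, String> state;

        @Override
        public void init(ProcessorContext context) {
            // the store declared by the annotation is available by its name
            this.state = (WindowStore<Object, String>) context.getStateStore("mystate");
        }

        @Override
        public void process(Object key, String value) {
            // write into the window using the current record's timestamp
            this.state.put(key, value);
        }

        @Override
        public void close() {
        }
    }, "mystate");
}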
|
||||
@@ -16,13 +16,19 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.properties;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public class KafkaStreamsBinderConfigurationProperties extends KafkaBinderConfigurationProperties {
|
||||
|
||||
public KafkaStreamsBinderConfigurationProperties(KafkaProperties kafkaProperties) {
|
||||
super(kafkaProperties);
|
||||
}
|
||||
|
||||
public enum SerdeError {
|
||||
logAndContinue,
|
||||
logAndFail,
|
||||
|
||||
@@ -0,0 +1,151 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.properties;
|
||||
|
||||
|
||||
/**
|
||||
* @author Lei Chen
|
||||
*/
|
||||
public class KafkaStreamsStateStoreProperties {
|
||||
|
||||
public enum StoreType {
|
||||
KEYVALUE("keyvalue"),
|
||||
WINDOW("window"),
|
||||
SESSION("session")
|
||||
;
|
||||
|
||||
private final String type;
|
||||
|
||||
/**
|
||||
* @param type
|
||||
*/
|
||||
StoreType(final String type) {
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return type;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* name for this state store
|
||||
*/
|
||||
private String name;
|
||||
|
||||
/**
|
||||
* type for this state store
|
||||
*/
|
||||
private StoreType type;
|
||||
|
||||
/**
|
||||
* Size/length of this state store in ms. Only applicable for window store.
|
||||
*/
|
||||
private long length;
|
||||
|
||||
/**
|
||||
* Retention period for this state store in ms.
|
||||
*/
|
||||
private long retention;
|
||||
|
||||
/**
|
||||
* Key serde class specified per state store.
|
||||
*/
|
||||
private String keySerdeString;
|
||||
|
||||
/**
|
||||
* Value serde class specified per state store.
|
||||
*/
|
||||
private String valueSerdeString;
|
||||
|
||||
/**
|
||||
 * Whether caching is enabled for this state store.
|
||||
*/
|
||||
private boolean cacheEnabled;
|
||||
|
||||
/**
|
||||
 * Whether changelog logging is disabled for this state store.
|
||||
*/
|
||||
private boolean loggingDisabled;
|
||||
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public StoreType getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public void setType(StoreType type) {
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
public long getLength() {
|
||||
return length;
|
||||
}
|
||||
|
||||
public void setLength(long length) {
|
||||
this.length = length;
|
||||
}
|
||||
|
||||
public long getRetention() {
|
||||
return retention;
|
||||
}
|
||||
|
||||
public void setRetention(long retention) {
|
||||
this.retention = retention;
|
||||
}
|
||||
|
||||
public String getKeySerdeString() {
|
||||
return keySerdeString;
|
||||
}
|
||||
|
||||
public void setKeySerdeString(String keySerdeString) {
|
||||
this.keySerdeString = keySerdeString;
|
||||
}
|
||||
|
||||
public String getValueSerdeString() {
|
||||
return valueSerdeString;
|
||||
}
|
||||
|
||||
public void setValueSerdeString(String valueSerdeString) {
|
||||
this.valueSerdeString = valueSerdeString;
|
||||
}
|
||||
|
||||
public boolean isCacheEnabled() {
|
||||
return cacheEnabled;
|
||||
}
|
||||
|
||||
public void setCacheEnabled(boolean cacheEnabled) {
|
||||
this.cacheEnabled = cacheEnabled;
|
||||
}
|
||||
|
||||
public boolean isLoggingDisabled() {
|
||||
return loggingDisabled;
|
||||
}
|
||||
|
||||
public void setLoggingDisabled(boolean loggingDisabled) {
|
||||
this.loggingDisabled = loggingDisabled;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.bootstrap;
|
||||
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.boot.builder.SpringApplicationBuilder;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@Ignore("Temporarily disabling the test as builds are getting slower due to this.")
|
||||
public class KafkaStreamsBinderBootstrapTest {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
|
||||
|
||||
@Test
|
||||
public void testKafkaStreamsBinderWithCustomEnvironmentCanStart() {
|
||||
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
|
||||
.web(WebApplicationType.NONE)
|
||||
.run("--spring.cloud.stream.bindings.input.destination=foo",
|
||||
"--spring.cloud.stream.bindings.input.binder=kBind1",
|
||||
"--spring.cloud.stream.binders.kBind1.type=kstream",
|
||||
"--spring.cloud.stream.binders.kBind1.environment.spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.binders.kBind1.environment.spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
|
||||
applicationContext.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testKafkaStreamsBinderWithStandardConfigurationCanStart() {
|
||||
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
|
||||
.web(WebApplicationType.NONE)
|
||||
.run("--spring.cloud.stream.bindings.input.destination=foo",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
|
||||
applicationContext.close();
|
||||
}
|
||||
|
||||
@SpringBootApplication
|
||||
@EnableBinding(StreamSourceProcessor.class)
|
||||
static class SimpleApplication {
|
||||
|
||||
@StreamListener
|
||||
public void handle(@Input("input") KStream<Object, String> stream) {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
interface StreamSourceProcessor {
|
||||
@Input("input")
|
||||
KStream<?, ?> inputStream();
|
||||
}
|
||||
}
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
@@ -68,10 +68,11 @@ import static org.mockito.Mockito.verify;
|
||||
public abstract class DeserializationErrorHandlerByKafkaTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts", "error.words.group");
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts", "error.words.group",
|
||||
"error.word1.groupx", "error.word2.groupx");
|
||||
|
||||
@SpyBean
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;
|
||||
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
@@ -130,6 +131,47 @@ public abstract class DeserializationErrorHandlerByKafkaTests {
|
||||
}
|
||||
}
|
||||
|
||||
@SpringBootTest(properties = {
|
||||
"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
|
||||
"spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"spring.cloud.stream.bindings.input.destination=word1,word2",
|
||||
"spring.cloud.stream.bindings.input.group=groupx",
|
||||
"spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
|
||||
"spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=" +
|
||||
"org.apache.kafka.common.serialization.Serdes$IntegerSerde"},
|
||||
webEnvironment= SpringBootTest.WebEnvironment.NONE
|
||||
)
|
||||
public static class DeserializationByKafkaAndDlqTestsWithMultipleInputs extends DeserializationErrorHandlerByKafkaTests {
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void test() throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("word1");
|
||||
template.sendDefault("foobar");
|
||||
|
||||
template.setDefaultTopic("word2");
|
||||
template.sendDefault("foobar");
|
||||
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobarx", "false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
Consumer<String, String> consumer1 = cf.createConsumer();
|
||||
embeddedKafka.consumeFromEmbeddedTopics(consumer1, "error.word1.groupx", "error.word2.groupx");
|
||||
|
||||
//TODO: Investigate why the ordering matters below: i.e. if we consume from error.word1.groupx first, an exception is thrown.
|
||||
ConsumerRecord<String, String> cr1 = KafkaTestUtils.getSingleRecord(consumer1, "error.word2.groupx");
|
||||
assertThat(cr1.value().equals("foobar")).isTrue();
|
||||
ConsumerRecord<String, String> cr2 = KafkaTestUtils.getSingleRecord(consumer1, "error.word1.groupx");
|
||||
assertThat(cr2.value().equals("foobar")).isTrue();
|
||||
|
||||
//Ensuring that the deserialization was indeed done by Kafka natively
|
||||
verify(KafkaStreamsMessageConversionDelegate, never()).deserializeOnInbound(any(Class.class), any(KStream.class));
|
||||
verify(KafkaStreamsMessageConversionDelegate, never()).serializeOnOutbound(any(KStream.class));
|
||||
}
|
||||
}
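The DLQ topics pre-created by the class rule follow from the binder's naming for deserialization failures: with spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq, a record of destination word1 that cannot be deserialized by consumer group groupx is republished to error.word1.groupx. A tiny illustrative helper (hypothetical, not part of this codebase) that spells out the convention the test relies on:

public class DlqTopicNames {

    /** Dead-letter topic for records of the given destination that fail deserialization in the given group. */
    static String forDeserializationError(String destination, String group) {
        return "error." + destination + "." + group;
    }

    public static void main(String[] args) {
        // Matches the embedded topics declared by the test above.
        System.out.println(forDeserializationError("word1", "groupx")); // error.word1.groupx
        System.out.println(forDeserializationError("word2", "groupx")); // error.word2.groupx
    }
}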
|
||||
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
@@ -62,10 +62,11 @@ import static org.mockito.Mockito.verify;
|
||||
public abstract class DeserializtionErrorHandlerByBinderTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id", "error.foos.foobar-group");
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id", "error.foos.foobar-group",
|
||||
"error.foos1.fooz-group", "error.foos2.fooz-group");
|
||||
|
||||
@SpyBean
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;
|
||||
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private static Consumer<Integer, String> consumer;
|
||||
|
||||
@@ -128,6 +129,50 @@ public abstract class DeserializtionErrorHandlerByBinderTests {
|
||||
}
|
||||
}
|
||||
|
||||
@SpringBootTest(properties = {
|
||||
"spring.cloud.stream.bindings.input.destination=foos1,foos2",
|
||||
"spring.cloud.stream.bindings.output.destination=counts-id",
|
||||
"spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
|
||||
"spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
|
||||
"spring.cloud.stream.bindings.input.group=fooz-group"},
|
||||
webEnvironment= SpringBootTest.WebEnvironment.NONE
|
||||
)
|
||||
public static class DeserializationByBinderAndDlqTestsWithMultipleInputs extends DeserializtionErrorHandlerByBinderTests {
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void test() throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foos1");
|
||||
template.sendDefault("hello");
|
||||
|
||||
template.setDefaultTopic("foos2");
|
||||
template.sendDefault("hello");
|
||||
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar1", "false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
Consumer<String, String> consumer1 = cf.createConsumer();
|
||||
embeddedKafka.consumeFromEmbeddedTopics(consumer1, "error.foos1.fooz-group", "error.foos2.fooz-group");
|
||||
|
||||
ConsumerRecord<String, String> cr1 = KafkaTestUtils.getSingleRecord(consumer1, "error.foos1.fooz-group");
|
||||
assertThat(cr1.value().equals("hello")).isTrue();
|
||||
|
||||
ConsumerRecord<String, String> cr2 = KafkaTestUtils.getSingleRecord(consumer1, "error.foos2.fooz-group");
|
||||
assertThat(cr2.value().equals("hello")).isTrue();
|
||||
|
||||
//Ensuring that the deserialization was indeed done by the binder
|
||||
verify(KafkaStreamsMessageConversionDelegate).deserializeOnInbound(any(Class.class), any(KStream.class));
|
||||
}
|
||||
}
|
||||
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class ProductCountApplication {
|
||||
@@ -0,0 +1,195 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Sarath Shyam
|
||||
*
|
||||
* This test case demonstrates a Kafka Streams topology that consumes messages from
* multiple Kafka topics (destinations).
* See {@link KafkaStreamsBinderMultipleInputTopicsTest#testKstreamWordCountWithStringInputAndPojoOuput} where
* the input topic names are specified as comma-separated String values for
* the property spring.cloud.stream.bindings.input.destination.
*
|
||||
*/
|
||||
public class KafkaStreamsBinderMultipleInputTopicsTest {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");
|
||||
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() throws Exception {
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testKstreamWordCountWithStringInputAndPojoOuput() throws Exception {
|
||||
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.input.destination=words1,words2",
|
||||
"--spring.cloud.stream.bindings.output.destination=counts",
|
||||
"--spring.cloud.stream.bindings.output.contentType=application/json",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kafka.streams.timeWindow.length=5000",
|
||||
"--spring.cloud.stream.kafka.streams.timeWindow.advanceBy=0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidate(context);
|
||||
} finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("words1");
|
||||
template.sendDefault("foobar1");
|
||||
template.setDefaultTopic("words2");
|
||||
template.sendDefault("foobar2");
|
||||
|
||||
//Sleep a bit so that both messages are processed before reading from the output topic.
//Otherwise the assertions may fail intermittently.
|
||||
Thread.sleep(5000);
|
||||
|
||||
ConsumerRecords<String, String> received = KafkaTestUtils.getRecords(consumer);
|
||||
|
||||
List<String> wordCounts = new ArrayList<>(2);
|
||||
|
||||
received.records("counts").forEach((consumerRecord) -> {
|
||||
wordCounts.add((consumerRecord.value()));
|
||||
});
|
||||
System.out.println(wordCounts);
|
||||
assertThat(wordCounts.contains("{\"word\":\"foobar1\",\"count\":1}")).isTrue();
|
||||
assertThat(wordCounts.contains("{\"word\":\"foobar2\",\"count\":1}")).isTrue();
|
||||
|
||||
}
|
||||
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
|
||||
static class WordCountProcessorApplication {
|
||||
|
||||
|
||||
@StreamListener
|
||||
@SendTo("output")
|
||||
public KStream<?, WordCount> process(@Input("input") KStream<Object, String> input) {
|
||||
|
||||
input.map((k,v) -> {
|
||||
System.out.println(k);
|
||||
System.out.println(v);
|
||||
return new KeyValue<>(k,v);
|
||||
});
|
||||
return input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.map((key, value) -> new KeyValue<>(value, value))
|
||||
.groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
|
||||
.count(Materialized.as("WordCounts"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key, value)));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class WordCount {
|
||||
|
||||
private String word;
|
||||
|
||||
private long count;
|
||||
|
||||
WordCount(String word, long count) {
|
||||
this.word = word;
|
||||
this.count = count;
|
||||
}
|
||||
|
||||
public String getWord() {
|
||||
return word;
|
||||
}
|
||||
|
||||
public void setWord(String word) {
|
||||
this.word = word;
|
||||
}
|
||||
|
||||
public long getCount() {
|
||||
return count;
|
||||
}
|
||||
|
||||
public void setCount(long count) {
|
||||
this.count = count;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
@@ -106,10 +106,10 @@ public class KafkaStreamsBinderPojoInputAndPrimitiveTypeOutputTests {
|
||||
template.sendDefault("{\"id\":\"123\"}");
|
||||
ConsumerRecord<Integer, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
|
||||
|
||||
assertThat(cr.key().equals(123));
|
||||
assertThat(cr.key()).isEqualTo(123);
|
||||
ObjectMapper om = new ObjectMapper();
|
||||
Long aLong = om.readValue(cr.value(), Long.class);
|
||||
assertThat(aLong.equals(1L));
|
||||
assertThat(aLong).isEqualTo(1L);
|
||||
}
|
||||
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@@ -1,5 +1,5 @@
/*
* Copyright 2017 the original author or authors.
* Copyright 2017-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.streams;
package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Arrays;
import java.util.Date;
@@ -24,11 +24,14 @@ import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
import org.apache.kafka.streams.state.QueryableStoreTypes;
|
||||
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
@@ -45,9 +48,13 @@ import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.integration.test.util.TestUtils;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
@@ -101,7 +108,17 @@ public class KafkaStreamsBinderWordCountIntegrationTests {
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidate(context);
|
||||
} finally {
|
||||
//Assertions on StreamBuilderFactoryBean
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
|
||||
KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
|
||||
ReadOnlyWindowStore<Object, Object> store = kafkaStreams.store("foo-WordCounts", QueryableStoreTypes.windowStore());
|
||||
assertThat(store).isNotNull();
|
||||
CleanupConfig cleanup = TestUtils.getPropertyValue(streamsBuilderFactoryBean, "cleanupConfig",
|
||||
CleanupConfig.class);
|
||||
assertThat(cleanup.cleanupOnStart()).isTrue();
|
||||
assertThat(cleanup.cleanupOnStop()).isFalse();
|
||||
}
|
||||
finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
@@ -129,8 +146,6 @@ public class KafkaStreamsBinderWordCountIntegrationTests {
|
||||
public KStream<?, WordCount> process(@Input("input") KStream<Object, String> input) {
|
||||
|
||||
input.map((k,v) -> {
|
||||
System.out.println(k);
|
||||
System.out.println(v);
|
||||
return new KeyValue<>(k,v);
|
||||
});
|
||||
return input
|
||||
@@ -143,6 +158,11 @@ public class KafkaStreamsBinderWordCountIntegrationTests {
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public CleanupConfig cleanupConfig() {
|
||||
return new CleanupConfig(true, false);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class WordCount {
|
||||
@@ -14,17 +14,20 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.IntegerSerializer;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.state.HostInfo;
|
||||
import org.apache.kafka.streams.state.QueryableStoreTypes;
|
||||
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
|
||||
import org.junit.AfterClass;
|
||||
@@ -32,12 +35,12 @@ import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.InteractiveQueryService;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
@@ -89,6 +92,7 @@ public class KafkaStreamsInteractiveQueryIntegrationTests {
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.application.server=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
@@ -109,15 +113,23 @@ public class KafkaStreamsInteractiveQueryIntegrationTests {
|
||||
|
||||
ProductCountApplication.Foo foo = context.getBean(ProductCountApplication.Foo.class);
|
||||
assertThat(foo.getProductStock(123).equals(1L));
|
||||
|
||||
//perform assertions on HostInfo related methods in InteractiveQueryService
|
||||
InteractiveQueryService interactiveQueryService = context.getBean(InteractiveQueryService.class);
|
||||
HostInfo currentHostInfo = interactiveQueryService.getCurrentHostInfo();
|
||||
assertThat(currentHostInfo.host() + ":" + currentHostInfo.port()).isEqualTo(embeddedKafka.getBrokersAsString());
|
||||
|
||||
HostInfo hostInfo = interactiveQueryService.getHostInfo("prod-id-count-store", 123, new IntegerSerializer());
|
||||
assertThat(hostInfo.host() + ":" + hostInfo.port()).isEqualTo(embeddedKafka.getBrokersAsString());
|
||||
|
||||
HostInfo hostInfoFoo = interactiveQueryService.getHostInfo("prod-id-count-store-foo", 123, new IntegerSerializer());
|
||||
assertThat(hostInfoFoo).isNull();
|
||||
}
|
||||
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class ProductCountApplication {
|
||||
|
||||
@Autowired
|
||||
private QueryableStoreRegistry queryableStoreRegistry;
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
@SuppressWarnings("deprecation")
|
||||
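The hunk above moves the test from the deprecated QueryableStoreRegistry to InteractiveQueryService and asserts its HostInfo lookups. A hedged sketch of how an application might combine those same calls to serve an interactive query across several instances; the class name and the HTTP-forwarding step are illustrative assumptions, only the InteractiveQueryService and Kafka Streams calls that appear in the diff are used:

import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

import org.springframework.cloud.stream.binder.kafka.streams.InteractiveQueryService;

public class ProductCountClient {

    private final InteractiveQueryService interactiveQueryService;

    public ProductCountClient(InteractiveQueryService interactiveQueryService) {
        this.interactiveQueryService = interactiveQueryService;
    }

    public Long productCount(Integer productId) {
        // Ask the binder which instance hosts the key for this store.
        HostInfo owner = this.interactiveQueryService.getHostInfo("prod-id-count-store", productId, new IntegerSerializer());
        if (owner.equals(this.interactiveQueryService.getCurrentHostInfo())) {
            // The key is local: query the state store directly.
            ReadOnlyKeyValueStore<Integer, Long> store = this.interactiveQueryService
                    .getQueryableStore("prod-id-count-store", QueryableStoreTypes.<Integer, Long>keyValueStore());
            return store.get(productId);
        }
        // Otherwise the data lives on owner.host():owner.port(); forward the request there (for example over HTTP).
        return null;
    }
}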
@@ -127,26 +139,27 @@ public class KafkaStreamsInteractiveQueryIntegrationTests {
|
||||
.filter((key, product) -> product.getId() == 123)
|
||||
.map((key, value) -> new KeyValue<>(value.id, value))
|
||||
.groupByKey(Serialized.with(new Serdes.IntegerSerde(), new JsonSerde<>(Product.class)))
|
||||
.count("prod-id-count-store")
|
||||
.count(Materialized.as("prod-id-count-store"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, "Count for product with ID 123: " + value));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Foo foo(QueryableStoreRegistry queryableStoreRegistry) {
|
||||
return new Foo(queryableStoreRegistry);
|
||||
public Foo foo(InteractiveQueryService interactiveQueryService) {
|
||||
return new Foo(interactiveQueryService);
|
||||
}
|
||||
|
||||
static class Foo {
|
||||
QueryableStoreRegistry queryableStoreRegistry;
|
||||
InteractiveQueryService interactiveQueryService;
|
||||
|
||||
Foo(QueryableStoreRegistry queryableStoreRegistry) {
|
||||
this.queryableStoreRegistry = queryableStoreRegistry;
|
||||
Foo(InteractiveQueryService interactiveQueryService) {
|
||||
this.interactiveQueryService = interactiveQueryService;
|
||||
}
|
||||
|
||||
public Long getProductStock(Integer id) {
|
||||
ReadOnlyKeyValueStore<Object, Object> keyValueStore =
|
||||
queryableStoreRegistry.getQueryableStoreType("prod-id-count-store", QueryableStoreTypes.keyValueStore());
|
||||
interactiveQueryService.getQueryableStore("prod-id-count-store", QueryableStoreTypes.keyValueStore());
|
||||
|
||||
return (Long) keyValueStore.get(id);
|
||||
}
|
||||
}
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
@@ -71,7 +71,7 @@ public abstract class KafkaStreamsNativeEncodingDecodingTests {
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");
|
||||
|
||||
@SpyBean
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;
|
||||
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
@@ -0,0 +1,147 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.processor.Processor;
|
||||
import org.apache.kafka.streams.processor.ProcessorContext;
|
||||
import org.apache.kafka.streams.state.WindowStore;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsStateStore;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Lei Chen
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KafkaStreamsStateStoreIntegrationTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");
|
||||
|
||||
@Test
|
||||
public void testKstreamStateStore() throws Exception {
|
||||
SpringApplication app = new SpringApplication(ProductCountApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.input.destination=foobar",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
Thread.sleep(2000);
|
||||
receiveAndValidateFoo(context);
|
||||
} catch (Exception e) {
|
||||
throw e;
|
||||
} finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foobar");
|
||||
template.sendDefault("{\"id\":\"123\"}");
|
||||
Thread.sleep(1000);
|
||||
|
||||
//assertions
|
||||
ProductCountApplication productCount = context.getBean(ProductCountApplication.class);
|
||||
WindowStore<Object, String> state = productCount.state;
|
||||
assertThat(state != null).isTrue();
|
||||
assertThat(state.name()).isEqualTo("mystate");
|
||||
assertThat(state.persistent()).isTrue();
|
||||
assertThat(productCount.processed).isTrue();
|
||||
}
|
||||
|
||||
@EnableBinding(KafkaStreamsProcessorX.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class ProductCountApplication {
|
||||
|
||||
WindowStore<Object, String> state;
|
||||
boolean processed;
|
||||
|
||||
@StreamListener("input")
|
||||
@KafkaStreamsStateStore(name = "mystate", type = KafkaStreamsStateStoreProperties.StoreType.WINDOW, lengthMs = 300000)
|
||||
@SuppressWarnings({"deprecation", "unchecked"})
|
||||
public void process(KStream<Object, Product> input) {
|
||||
|
||||
input
|
||||
.process(() -> new Processor<Object, Product>() {
|
||||
|
||||
@Override
|
||||
public void init(ProcessorContext processorContext) {
|
||||
state = (WindowStore) processorContext.getStateStore("mystate");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process(Object s, Product product) {
|
||||
processed = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
if (state != null) {
|
||||
state.close();
|
||||
}
|
||||
}
|
||||
}, "mystate");
|
||||
}
|
||||
}
|
||||
|
||||
public static class Product {
|
||||
|
||||
Integer id;
|
||||
|
||||
public Integer getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(Integer id) {
|
||||
this.id = id;
|
||||
}
|
||||
}
|
||||
|
||||
interface KafkaStreamsProcessorX {
|
||||
|
||||
@Input("input")
|
||||
KStream<?, ?> input();
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
@@ -38,9 +38,12 @@ import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.integration.test.util.TestUtils;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.support.serializer.JsonSerde;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
@@ -92,7 +95,15 @@ public class KafkastreamsBinderPojoInputStringOutputIntegrationTests {
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidateFoo(context);
|
||||
} finally {
|
||||
//Assertions on StreamBuilderFactoryBean
|
||||
StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process",
|
||||
StreamsBuilderFactoryBean.class);
|
||||
CleanupConfig cleanup = TestUtils.getPropertyValue(streamsBuilderFactoryBean, "cleanupConfig",
|
||||
CleanupConfig.class);
|
||||
assertThat(cleanup.cleanupOnStart()).isFalse();
|
||||
assertThat(cleanup.cleanupOnStop()).isTrue();
|
||||
}
|
||||
finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams.integration;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Date;
|
||||
@@ -1,5 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.importorder=java;javax;com;io.micrometer;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99
@@ -10,7 +10,7 @@
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>2.0.0.RELEASE</version>
<version>2.1.0.M2</version>
</parent>

<dependencies>
@@ -37,11 +37,6 @@
<artifactId>spring-boot-autoconfigure</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
@@ -65,6 +60,26 @@
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<!-- Following dependencies are needed to support Kafka 1.1.0 client-->
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
<classifier>test</classifier>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>${kafka.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>${kafka.version}</version>
<classifier>test</classifier>
<scope>test</scope>
</dependency>
</dependencies>

</project>
@@ -1,5 +1,5 @@
/*
* Copyright 2016-2017 the original author or authors.
* Copyright 2016-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -78,25 +78,31 @@ public class KafkaBinderHealthIndicator implements HealthIndicator {
|
||||
public Health call() {
|
||||
try {
|
||||
if (metadataConsumer == null) {
|
||||
metadataConsumer = consumerFactory.createConsumer();
|
||||
}
|
||||
Set<String> downMessages = new HashSet<>();
|
||||
for (String topic : KafkaBinderHealthIndicator.this.binder.getTopicsInUse().keySet()) {
|
||||
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
|
||||
for (PartitionInfo partitionInfo : partitionInfos) {
|
||||
if (KafkaBinderHealthIndicator.this.binder.getTopicsInUse().get(topic).getPartitionInfos()
|
||||
.contains(partitionInfo) && partitionInfo.leader().id() == -1) {
|
||||
downMessages.add(partitionInfo.toString());
|
||||
synchronized(KafkaBinderHealthIndicator.this) {
|
||||
if (metadataConsumer == null) {
|
||||
metadataConsumer = consumerFactory.createConsumer();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (downMessages.isEmpty()) {
|
||||
return Health.up().build();
|
||||
}
|
||||
else {
|
||||
return Health.down()
|
||||
.withDetail("Following partitions in use have no leaders: ", downMessages.toString())
|
||||
.build();
|
||||
synchronized (metadataConsumer) {
|
||||
Set<String> downMessages = new HashSet<>();
|
||||
for (String topic : KafkaBinderHealthIndicator.this.binder.getTopicsInUse().keySet()) {
|
||||
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
|
||||
for (PartitionInfo partitionInfo : partitionInfos) {
|
||||
if (KafkaBinderHealthIndicator.this.binder.getTopicsInUse().get(topic).getPartitionInfos()
|
||||
.contains(partitionInfo) && partitionInfo.leader().id() == -1) {
|
||||
downMessages.add(partitionInfo.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (downMessages.isEmpty()) {
|
||||
return Health.up().build();
|
||||
}
|
||||
else {
|
||||
return Health.down()
|
||||
.withDetail("Following partitions in use have no leaders: ", downMessages.toString())
|
||||
.build();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
|
||||
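The hunk above changes KafkaBinderHealthIndicator to create its metadata consumer lazily and to serialize access to it, since KafkaConsumer is not thread-safe. A simplified sketch of the same idea in isolation, assuming only a ConsumerFactory; it keeps all work under one lock rather than reproducing the binder's exact double-checked structure:

import java.util.function.Function;

import org.apache.kafka.clients.consumer.Consumer;

import org.springframework.kafka.core.ConsumerFactory;

public class LazyMetadataConsumer {

    private final ConsumerFactory<?, ?> consumerFactory;

    private Consumer<?, ?> metadataConsumer;

    public LazyMetadataConsumer(ConsumerFactory<?, ?> consumerFactory) {
        this.consumerFactory = consumerFactory;
    }

    /** Creates the consumer on first use and hands it out under the same lock. */
    public synchronized <T> T withConsumer(Function<Consumer<?, ?>, T> action) {
        if (this.metadataConsumer == null) {
            this.metadataConsumer = this.consumerFactory.createConsumer();
        }
        return action.apply(this.metadataConsumer);
    }
}

A health check would then call something like lazy.withConsumer(c -> c.partitionsFor(topic)) from its timed executor.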
@@ -20,11 +20,17 @@ import java.util.HashMap;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import io.micrometer.core.instrument.TimeGauge;
|
||||
import io.micrometer.core.instrument.binder.MeterBinder;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
@@ -50,9 +56,13 @@ import org.springframework.util.ObjectUtils;
|
||||
* @author Artem Bilan
|
||||
* @author Oleg Zhurakousky
|
||||
* @author Jon Schneider
|
||||
* @author Thomas Cheyney
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<BindingCreatedEvent> {
|
||||
|
||||
private static final int DEFAULT_TIMEOUT = 60;
|
||||
|
||||
private final static Log LOG = LogFactory.getLog(KafkaBinderMetrics.class);
|
||||
|
||||
static final String METRIC_NAME = "spring.cloud.stream.binder.kafka.offset";
|
||||
@@ -65,6 +75,10 @@ public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<Bind
|
||||
|
||||
private final MeterRegistry meterRegistry;
|
||||
|
||||
private Consumer<?, ?> metadataConsumer;
|
||||
|
||||
private int timeout = DEFAULT_TIMEOUT;
|
||||
|
||||
public KafkaBinderMetrics(KafkaMessageChannelBinder binder,
|
||||
KafkaBinderConfigurationProperties binderConfigurationProperties,
|
||||
ConsumerFactory<?, ?> defaultConsumerFactory, @Nullable MeterRegistry meterRegistry) {
|
||||
@@ -81,6 +95,10 @@ public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<Bind
|
||||
this(binder, binderConfigurationProperties, null, null);
|
||||
}
|
||||
|
||||
public void setTimeout(int timeout) {
|
||||
this.timeout = timeout;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void bindTo(MeterRegistry registry) {
|
||||
for (Map.Entry<String, KafkaMessageChannelBinder.TopicInformation> topicInfo : this.binder.getTopicsInUse()
|
||||
@@ -103,30 +121,56 @@ public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<Bind
|
||||
}
|
||||
|
||||
private double calculateConsumerLagOnTopic(String topic, String group) {
|
||||
long lag = 0;
|
||||
try (Consumer<?, ?> metadataConsumer = createConsumerFactory(group).createConsumer()) {
|
||||
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
|
||||
List<TopicPartition> topicPartitions = new LinkedList<>();
|
||||
for (PartitionInfo partitionInfo : partitionInfos) {
|
||||
topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
|
||||
}
|
||||
ExecutorService exec = Executors.newSingleThreadExecutor();
|
||||
Future<Long> future = exec.submit(() -> {
|
||||
|
||||
Map<TopicPartition, Long> endOffsets = metadataConsumer.endOffsets(topicPartitions);
|
||||
|
||||
for (Map.Entry<TopicPartition, Long> endOffset : endOffsets.entrySet()) {
|
||||
OffsetAndMetadata current = metadataConsumer.committed(endOffset.getKey());
|
||||
if (current != null) {
|
||||
lag += endOffset.getValue() - current.offset();
|
||||
long lag = 0;
|
||||
try {
|
||||
if (metadataConsumer == null) {
|
||||
synchronized(KafkaBinderMetrics.this) {
|
||||
if (metadataConsumer == null) {
|
||||
metadataConsumer = createConsumerFactory(group).createConsumer();
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
lag += endOffset.getValue();
|
||||
synchronized (metadataConsumer) {
|
||||
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
|
||||
List<TopicPartition> topicPartitions = new LinkedList<>();
|
||||
for (PartitionInfo partitionInfo : partitionInfos) {
|
||||
topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
|
||||
}
|
||||
|
||||
Map<TopicPartition, Long> endOffsets = metadataConsumer.endOffsets(topicPartitions);
|
||||
|
||||
for (Map.Entry<TopicPartition, Long> endOffset : endOffsets.entrySet()) {
|
||||
OffsetAndMetadata current = metadataConsumer.committed(endOffset.getKey());
|
||||
if (current != null) {
|
||||
lag += endOffset.getValue() - current.offset();
|
||||
}
|
||||
else {
|
||||
lag += endOffset.getValue();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOG.debug("Cannot generate metric for topic: " + topic, e);
|
||||
}
|
||||
return lag;
|
||||
});
|
||||
try {
|
||||
return future.get(this.timeout, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOG.debug("Cannot generate metric for topic: " + topic, e);
|
||||
catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return 0L;
|
||||
}
|
||||
catch (ExecutionException | TimeoutException e) {
|
||||
return 0L;
|
||||
}
|
||||
finally {
|
||||
exec.shutdownNow();
|
||||
}
|
||||
return lag;
|
||||
}
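The enlarged method above computes the value behind the spring.cloud.stream.binder.kafka.offset gauge: per partition, lag is the end offset minus the committed offset, or the full end offset when the group has not committed anything yet. A standalone sketch of that calculation with a plain Kafka Consumer (the class and method names here are illustrative; the diff runs the equivalent logic inside a single-thread executor bounded by future.get(timeout, TimeUnit.SECONDS), so a slow or unreachable broker degrades the metric to 0 instead of blocking metrics collection):

import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public final class ConsumerLag {

    private ConsumerLag() {
    }

    /** Sums (endOffset - committedOffset) over all partitions of the topic. */
    public static long forTopic(Consumer<?, ?> consumer, String topic) {
        List<TopicPartition> partitions = new LinkedList<>();
        for (PartitionInfo info : consumer.partitionsFor(topic)) {
            partitions.add(new TopicPartition(info.topic(), info.partition()));
        }
        long lag = 0;
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
        for (Map.Entry<TopicPartition, Long> end : endOffsets.entrySet()) {
            OffsetAndMetadata committed = consumer.committed(end.getKey());
            lag += (committed != null) ? end.getValue() - committed.offset() : end.getValue();
        }
        return lag;
    }
}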
|
||||
|
||||
private ConsumerFactory<?, ?> createConsumerFactory(String group) {
|
||||
@@ -134,8 +178,9 @@ public class KafkaBinderMetrics implements MeterBinder, ApplicationListener<Bind
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
|
||||
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
|
||||
if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConsumerConfiguration())) {
|
||||
props.putAll(binderConfigurationProperties.getConsumerConfiguration());
|
||||
Map<String, Object> mergedConfig = this.binderConfigurationProperties.mergedConsumerConfiguration();
|
||||
if (!ObjectUtils.isEmpty(mergedConfig)) {
|
||||
props.putAll(mergedConfig);
|
||||
}
|
||||
if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
|
||||
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
|
||||
|
||||
@@ -18,10 +18,12 @@ package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.io.PrintWriter;
|
||||
import java.io.StringWriter;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
@@ -31,6 +33,7 @@ import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
@@ -65,31 +68,32 @@ import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerPro
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binding.MessageConverterConfigurer.PartitioningInterceptor;
|
||||
import org.springframework.cloud.stream.config.ListenerContainerCustomizer;
|
||||
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
|
||||
import org.springframework.cloud.stream.provisioning.ProducerDestination;
|
||||
import org.springframework.context.Lifecycle;
|
||||
import org.springframework.expression.common.LiteralExpression;
|
||||
import org.springframework.expression.spel.standard.SpelExpressionParser;
|
||||
import org.springframework.integration.StaticMessageHeaderAccessor;
|
||||
import org.springframework.integration.acks.AcknowledgmentCallback;
|
||||
import org.springframework.integration.channel.ChannelInterceptorAware;
|
||||
import org.springframework.integration.core.MessageProducer;
|
||||
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
|
||||
import org.springframework.integration.kafka.inbound.KafkaMessageSource;
|
||||
import org.springframework.integration.kafka.outbound.KafkaProducerMessageHandler;
|
||||
import org.springframework.integration.kafka.support.RawRecordHeaderErrorMessageStrategy;
|
||||
import org.springframework.integration.support.AcknowledgmentCallback;
|
||||
import org.springframework.integration.support.AcknowledgmentCallback.Status;
|
||||
import org.springframework.integration.support.ErrorMessageStrategy;
|
||||
import org.springframework.integration.support.MessageBuilder;
|
||||
import org.springframework.integration.support.StaticMessageHeaderAccessor;
|
||||
import org.springframework.kafka.core.ConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.ProducerFactory;
|
||||
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
|
||||
import org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode;
|
||||
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
|
||||
import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
|
||||
import org.springframework.kafka.listener.config.ContainerProperties;
|
||||
import org.springframework.kafka.listener.ContainerProperties;
|
||||
import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
|
||||
import org.springframework.kafka.support.KafkaHeaderMapper;
|
||||
import org.springframework.kafka.support.KafkaHeaders;
|
||||
@@ -102,6 +106,7 @@ import org.springframework.messaging.MessageChannel;
|
||||
import org.springframework.messaging.MessageHandler;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.MessagingException;
|
||||
import org.springframework.messaging.support.ChannelInterceptor;
|
||||
import org.springframework.messaging.support.ErrorMessage;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
@@ -127,12 +132,21 @@ public class KafkaMessageChannelBinder extends
|
||||
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>, KafkaTopicProvisioner>
|
||||
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties> {
|
||||
|
||||
public static final String X_EXCEPTION_FQCN = "x-exception-fqcn";
|
||||
|
||||
public static final String X_EXCEPTION_STACKTRACE = "x-exception-stacktrace";
|
||||
|
||||
public static final String X_EXCEPTION_MESSAGE = "x-exception-message";
|
||||
|
||||
public static final String X_ORIGINAL_TOPIC = "x-original-topic";
|
||||
|
||||
public static final String X_ORIGINAL_PARTITION = "x-original-partition";
|
||||
|
||||
public static final String X_ORIGINAL_OFFSET = "x-original-offset";
|
||||
|
||||
public static final String X_ORIGINAL_TIMESTAMP = "x-original-timestamp";
|
||||
|
||||
public static final String X_ORIGINAL_TIMESTAMP_TYPE = "x-original-timestamp-type";
|
||||
|
||||
private final KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
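The constants introduced above are the record headers the binder attaches when it republishes a failed record, for example to a dead-letter topic, so a downstream consumer can see where the record came from and why it failed. A hedged sketch of reading them back with a plain consumer; the method is illustrative and assumes the header values are UTF-8 strings:

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.header.Header;

public class DlqHeaderDump {

    /** Prints the diagnostic headers carried by records on the given dead-letter topic. */
    public static void dump(Consumer<byte[], byte[]> consumer, String dlqTopic) {
        consumer.subscribe(Collections.singletonList(dlqTopic));
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
        for (ConsumerRecord<byte[], byte[]> record : records) {
            Header originalTopic = record.headers().lastHeader("x-original-topic");
            Header exceptionMessage = record.headers().lastHeader("x-exception-message");
            System.out.println("failed record from "
                    + (originalTopic == null ? "?" : new String(originalTopic.value(), StandardCharsets.UTF_8))
                    + ": "
                    + (exceptionMessage == null ? "?" : new String(exceptionMessage.value(), StandardCharsets.UTF_8)));
        }
    }
}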
@@ -144,9 +158,13 @@ public class KafkaMessageChannelBinder extends
|
||||
|
||||
private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();
|
||||
|
||||
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties, KafkaTopicProvisioner provisioningProvider) {
|
||||
this(configurationProperties, provisioningProvider, null);
|
||||
}
|
||||
|
||||
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
|
||||
KafkaTopicProvisioner provisioningProvider) {
|
||||
super(headersToMap(configurationProperties), provisioningProvider);
|
||||
KafkaTopicProvisioner provisioningProvider, ListenerContainerCustomizer<AbstractMessageListenerContainer<?, ?>> containerCustomizer) {
|
||||
super(headersToMap(configurationProperties), provisioningProvider, containerCustomizer);
|
||||
this.configurationProperties = configurationProperties;
|
||||
if (StringUtils.hasText(configurationProperties.getTransaction().getTransactionIdPrefix())) {
|
||||
this.transactionManager = new KafkaTransactionManager<>(
|
||||
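The extra constructor argument above gives the binder a ListenerContainerCustomizer hook that is applied to each message listener container it creates. A sketch of how an application could register one; the three-argument configure(container, destinationName, group) shape of the Spring Cloud Stream interface and the idle-event tweak are assumptions for illustration:

import org.springframework.cloud.stream.config.ListenerContainerCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;

@Configuration
public class ContainerCustomizerConfiguration {

    @Bean
    public ListenerContainerCustomizer<AbstractMessageListenerContainer<?, ?>> containerCustomizer() {
        // Invoked once per binding, before the container is started.
        return (container, destinationName, group) ->
                container.getContainerProperties().setIdleEventInterval(30_000L);
    }
}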
@@ -200,6 +218,14 @@ public class KafkaMessageChannelBinder extends
|
||||
protected MessageHandler createProducerMessageHandler(final ProducerDestination destination,
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties, MessageChannel errorChannel)
|
||||
throws Exception {
|
||||
throw new IllegalStateException("The abstract binder should not call this method");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected MessageHandler createProducerMessageHandler(final ProducerDestination destination,
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
|
||||
MessageChannel channel, MessageChannel errorChannel)
|
||||
throws Exception {
|
||||
/*
|
||||
* IMPORTANT: With a transactional binder, individual producer properties for Kafka are
|
||||
* ignored; the global binder (spring.cloud.stream.kafka.binder.transaction.producer.*)
|
||||
@@ -219,20 +245,18 @@ public class KafkaMessageChannelBinder extends
return partitionsFor;
});
this.topicsInUse.put(destination.getName(), new TopicInformation(null, partitions));
if (producerProperties.getPartitionCount() < partitions.size()) {
if (producerProperties.isPartitioned() && producerProperties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + destination.getName() + " is "
+ producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
+ partitions.size() + " for the topic. The larger number will be used instead.");
}
/*
* This is dirty; it relies on the fact that we, and the partition interceptor, share a
* hard reference to the producer properties instance. But I don't see another way to fix
* it since the interceptor has already been added to the channel, and we don't have
* access to the channel here; if we did, we could inject the proper partition count
* there. TODO: Consider this when doing the 2.0 binder restructuring.
*/
producerProperties.setPartitionCount(partitions.size());
List<ChannelInterceptor> interceptors = ((ChannelInterceptorAware) channel).getChannelInterceptors();
interceptors.forEach(interceptor -> {
if (interceptor instanceof PartitioningInterceptor) {
((PartitioningInterceptor) interceptor).setPartitionCount(partitions.size());
}
});
}
KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFB);
@@ -286,8 +310,9 @@ public class KafkaMessageChannelBinder extends
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.ACKS_CONFIG, String.valueOf(this.configurationProperties.getRequiredAcks()));
if (!ObjectUtils.isEmpty(configurationProperties.getProducerConfiguration())) {
props.putAll(configurationProperties.getProducerConfiguration());
Map<String, Object> mergedConfig = this.configurationProperties.mergedProducerConfiguration();
if (!ObjectUtils.isEmpty(mergedConfig)) {
props.putAll(mergedConfig);
}
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
@@ -328,41 +353,45 @@ public class KafkaMessageChannelBinder extends
int partitionCount = extendedConsumerProperties.getInstanceCount()
* extendedConsumerProperties.getConcurrency();
Collection<PartitionInfo> allPartitions = getPartitionInfo(destination, extendedConsumerProperties,
consumerFactory, partitionCount);
Collection<PartitionInfo> listenedPartitions;
Collection<PartitionInfo> listenedPartitions = new ArrayList<>();
boolean usingPatterns = extendedConsumerProperties.getExtension().isDestinationIsPattern();
Assert.isTrue(!usingPatterns || !extendedConsumerProperties.isMultiplex(),
"Cannot use a pattern with multiplexed destinations; "
+ "use the regex pattern to specify multiple topics instead");
boolean groupManagement = extendedConsumerProperties.getExtension().isAutoRebalanceEnabled();
if (groupManagement ||
extendedConsumerProperties.getInstanceCount() == 1) {
listenedPartitions = allPartitions;
if (!extendedConsumerProperties.isMultiplex()) {
listenedPartitions.addAll(processTopic(group, extendedConsumerProperties, consumerFactory,
partitionCount, usingPatterns, groupManagement, destination.getName()));
}
else {
listenedPartitions = new ArrayList<>();
for (PartitionInfo partition : allPartitions) {
// divide partitions across modules
if ((partition.partition()
% extendedConsumerProperties.getInstanceCount()) == extendedConsumerProperties
.getInstanceIndex()) {
listenedPartitions.add(partition);
}
for (String name : StringUtils.commaDelimitedListToStringArray(destination.getName())) {
listenedPartitions.addAll(processTopic(group, extendedConsumerProperties, consumerFactory,
partitionCount, usingPatterns, groupManagement, name.trim()));
}
}
this.topicsInUse.put(destination.getName(), new TopicInformation(group, listenedPartitions));
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
String[] topics = extendedConsumerProperties.isMultiplex() ? StringUtils.commaDelimitedListToStringArray(destination.getName())
: new String[] { destination.getName() };
for (int i = 0; i < topics.length; i++) {
topics[i] = topics[i].trim();
}
Assert.isTrue(usingPatterns
|| !CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
final TopicPartitionInitialOffset[] topicPartitionInitialOffsets = getTopicPartitionInitialOffsets(
listenedPartitions);
final ContainerProperties containerProperties = anonymous
|| extendedConsumerProperties.getExtension().isAutoRebalanceEnabled()
? new ContainerProperties(destination.getName())
? usingPatterns
? new ContainerProperties(Pattern.compile(topics[0]))
: new ContainerProperties(topics)
: new ContainerProperties(topicPartitionInitialOffsets);
if (this.transactionManager != null) {
containerProperties.setTransactionManager(this.transactionManager);
}
containerProperties.setIdleEventInterval(extendedConsumerProperties.getExtension().getIdleEventInterval());
int concurrency = Math.min(extendedConsumerProperties.getConcurrency(), listenedPartitions.size());
int concurrency = usingPatterns ? extendedConsumerProperties.getConcurrency()
: Math.min(extendedConsumerProperties.getConcurrency(), listenedPartitions.size());
resetOffsets(extendedConsumerProperties, consumerFactory, groupManagement, containerProperties);
@SuppressWarnings("rawtypes")
final ConcurrentMessageListenerContainer<?, ?> messageListenerContainer =
@@ -382,24 +411,25 @@ public class KafkaMessageChannelBinder extends
else if (getApplicationContext() != null) {
messageListenerContainer.setApplicationEventPublisher(getApplicationContext());
}
messageListenerContainer.setBeanName(destination.getName() + ".container");
messageListenerContainer.setBeanName(topics + ".container");
// end of these won't be needed...
if (!extendedConsumerProperties.getExtension().isAutoCommitOffset()) {
messageListenerContainer.getContainerProperties()
.setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
.setAckMode(ContainerProperties.AckMode.MANUAL);
messageListenerContainer.getContainerProperties().setAckOnError(false);
}
else {
messageListenerContainer.getContainerProperties()
.setAckOnError(isAutoCommitOnError(extendedConsumerProperties));
if (extendedConsumerProperties.getExtension().isAckEachRecord()) {
messageListenerContainer.getContainerProperties().setAckMode(AckMode.RECORD);
messageListenerContainer.getContainerProperties().setAckMode(ContainerProperties.AckMode.RECORD);
}
}
if (this.logger.isDebugEnabled()) {
this.logger.debug(
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
}
this.getContainerCustomizer().configure(messageListenerContainer, destination.getName(), group);
final KafkaMessageDrivenChannelAdapter<?, ?> kafkaMessageDrivenChannelAdapter =
new KafkaMessageDrivenChannelAdapter<>(messageListenerContainer);
kafkaMessageDrivenChannelAdapter.setMessageConverter(getMessageConverter(extendedConsumerProperties));
@@ -416,6 +446,33 @@ public class KafkaMessageChannelBinder extends
return kafkaMessageDrivenChannelAdapter;
}
public Collection<PartitionInfo> processTopic(final String group,
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties,
final ConsumerFactory<?, ?> consumerFactory, int partitionCount, boolean usingPatterns,
boolean groupManagement, String topic) {
Collection<PartitionInfo> listenedPartitions;
Collection<PartitionInfo> allPartitions = usingPatterns ? Collections.emptyList()
: getPartitionInfo(topic, extendedConsumerProperties, consumerFactory, partitionCount);
if (groupManagement ||
extendedConsumerProperties.getInstanceCount() == 1) {
listenedPartitions = allPartitions;
}
else {
listenedPartitions = new ArrayList<>();
for (PartitionInfo partition : allPartitions) {
// divide partitions across modules
if ((partition.partition()
% extendedConsumerProperties.getInstanceCount()) == extendedConsumerProperties
.getInstanceIndex()) {
listenedPartitions.add(partition);
}
}
}
this.topicsInUse.put(topic, new TopicInformation(group, listenedPartitions));
return listenedPartitions;
}
/*
* Reset the offsets if needed; may update the offsets in the container's
* topicPartitionInitialOffsets.
@@ -477,15 +534,29 @@ public class KafkaMessageChannelBinder extends
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
final ConsumerFactory<?, ?> consumerFactory = createKafkaConsumerFactory(anonymous, consumerGroup,
consumerProperties);
KafkaMessageSource<?, ?> source = new KafkaMessageSource<>(consumerFactory, destination.getName());
String[] topics = consumerProperties.isMultiplex() ? StringUtils.commaDelimitedListToStringArray(destination.getName())
: new String[] { destination.getName() };
for (int i = 0; i < topics.length; i++) {
topics[i] = topics[i].trim();
}
KafkaMessageSource<?, ?> source = new KafkaMessageSource<>(consumerFactory, topics);
source.setMessageConverter(getMessageConverter(consumerProperties));
source.setRawMessageHeader(consumerProperties.getExtension().isEnableDlq());
// I copied this from the regular consumer - it looks bogus to me - includes all partitions
// not just the ones this binding is listening to; doesn't seem right for a health check.
Collection<PartitionInfo> partitionInfos = getPartitionInfo(destination, consumerProperties, consumerFactory,
-1);
this.topicsInUse.put(destination.getName(), new TopicInformation(group, partitionInfos));
if (!consumerProperties.isMultiplex()) {
// I copied this from the regular consumer - it looks bogus to me - includes all partitions
// not just the ones this binding is listening to; doesn't seem right for a health check.
Collection<PartitionInfo> partitionInfos = getPartitionInfo(destination.getName(), consumerProperties,
consumerFactory, -1);
this.topicsInUse.put(destination.getName(), new TopicInformation(group, partitionInfos));
}
else {
for (int i = 0; i < topics.length; i++) {
Collection<PartitionInfo> partitionInfos = getPartitionInfo(topics[i], consumerProperties,
consumerFactory, -1);
this.topicsInUse.put(topics[i], new TopicInformation(group, partitionInfos));
}
}
source.setRebalanceListener(new ConsumerRebalanceListener() {
@@ -567,16 +638,16 @@ public class KafkaMessageChannelBinder extends
return mapper;
}
private Collection<PartitionInfo> getPartitionInfo(final ConsumerDestination destination,
private Collection<PartitionInfo> getPartitionInfo(String topic,
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties,
final ConsumerFactory<?, ?> consumerFactory, int partitionCount) {
Collection<PartitionInfo> allPartitions = provisioningProvider.getPartitionsForTopic(partitionCount,
extendedConsumerProperties.getExtension().isAutoRebalanceEnabled(),
() -> {
Consumer<?, ?> consumer = consumerFactory.createConsumer();
List<PartitionInfo> partitionsFor = consumer.partitionsFor(destination.getName());
consumer.close();
return partitionsFor;
try (Consumer<?, ?> consumer = consumerFactory.createConsumer()) {
List<PartitionInfo> partitionsFor = consumer.partitionsFor(topic);
return partitionsFor;
}
});
return allPartitions;
}
@@ -597,15 +668,12 @@ public class KafkaMessageChannelBinder extends
: getProducerFactory(null,
new ExtendedProducerProperties<>(dlqProducerProperties));
final KafkaTemplate<?,?> kafkaTemplate = new KafkaTemplate<>(producerFactory);
String dlqName = StringUtils.hasText(kafkaConsumerProperties.getDlqName())
? kafkaConsumerProperties.getDlqName()
: "error." + destination.getName() + "." + group;
@SuppressWarnings({"unchecked", "rawtypes"})
DlqSender<?,?> dlqSender = new DlqSender(kafkaTemplate, dlqName);
DlqSender<?,?> dlqSender = new DlqSender(kafkaTemplate);
return message -> {
@SuppressWarnings("unchecked")
final ConsumerRecord<Object, Object> record = message.getHeaders()
.get(KafkaHeaders.RAW_DATA, ConsumerRecord.class);
@@ -632,11 +700,25 @@ public class KafkaMessageChannelBinder extends
Headers kafkaHeaders = new RecordHeaders(record.headers().toArray());
AtomicReference<ConsumerRecord<?, ?>> recordToSend = new AtomicReference<>(record);
if (message.getPayload() instanceof Throwable) {
Throwable throwable = (Throwable) message.getPayload();
HeaderMode headerMode = properties.getHeaderMode();
if (headerMode == null || HeaderMode.headers.equals(headerMode)) {
kafkaHeaders.add(new RecordHeader(X_ORIGINAL_TOPIC,
record.topic().getBytes(StandardCharsets.UTF_8)));
kafkaHeaders.add(
new RecordHeader(X_ORIGINAL_TOPIC, record.topic().getBytes(StandardCharsets.UTF_8)));
kafkaHeaders.add(new RecordHeader(X_ORIGINAL_PARTITION,
ByteBuffer.allocate(Integer.BYTES).putInt(record.partition()).array()));
kafkaHeaders.add(new RecordHeader(X_ORIGINAL_OFFSET,
ByteBuffer.allocate(Long.BYTES).putLong(record.offset()).array()));
kafkaHeaders.add(new RecordHeader(X_ORIGINAL_TIMESTAMP,
ByteBuffer.allocate(Long.BYTES).putLong(record.timestamp()).array()));
kafkaHeaders.add(new RecordHeader(X_ORIGINAL_TIMESTAMP_TYPE,
record.timestampType().toString().getBytes(StandardCharsets.UTF_8)));
kafkaHeaders.add(new RecordHeader(X_EXCEPTION_FQCN,
throwable.getClass().getName().getBytes(StandardCharsets.UTF_8)));
kafkaHeaders.add(new RecordHeader(X_EXCEPTION_MESSAGE,
throwable.getMessage().getBytes(StandardCharsets.UTF_8)));
kafkaHeaders.add(new RecordHeader(X_EXCEPTION_STACKTRACE,
@@ -645,14 +727,18 @@ public class KafkaMessageChannelBinder extends
else if (HeaderMode.embeddedHeaders.equals(headerMode)) {
try {
MessageValues messageValues = EmbeddedHeaderUtils
.extractHeaders(MessageBuilder.withPayload((byte[]) record.value()).build(),
false);
.extractHeaders(MessageBuilder.withPayload((byte[]) record.value()).build(), false);
messageValues.put(X_ORIGINAL_TOPIC, record.topic());
messageValues.put(X_ORIGINAL_PARTITION, record.partition());
messageValues.put(X_ORIGINAL_OFFSET, record.offset());
messageValues.put(X_ORIGINAL_TIMESTAMP, record.timestamp());
messageValues.put(X_ORIGINAL_TIMESTAMP_TYPE, record.timestampType().toString());
messageValues.put(X_EXCEPTION_FQCN, throwable.getClass().getName());
messageValues.put(X_EXCEPTION_MESSAGE, throwable.getMessage());
messageValues.put(X_EXCEPTION_STACKTRACE, getStackTraceAsString(throwable));
final String[] headersToEmbed = new ArrayList<>(messageValues.keySet()).toArray(
new String[messageValues.keySet().size()]);
final String[] headersToEmbed = new ArrayList<>(messageValues.keySet())
.toArray(new String[messageValues.keySet().size()]);
byte[] payload = EmbeddedHeaderUtils.embedHeaders(messageValues,
EmbeddedHeaderUtils.headersToEmbed(headersToEmbed));
recordToSend.set(new ConsumerRecord<Object, Object>(record.topic(), record.partition(),
@@ -663,7 +749,9 @@ public class KafkaMessageChannelBinder extends
}
}
}
dlqSender.sendToDlq(recordToSend.get(), kafkaHeaders);
String dlqName = StringUtils.hasText(kafkaConsumerProperties.getDlqName())
? kafkaConsumerProperties.getDlqName() : "error." + record.topic() + "." + group;
dlqSender.sendToDlq(recordToSend.get(), kafkaHeaders, dlqName);
};
}
return null;
@@ -693,10 +781,10 @@ public class KafkaMessageChannelBinder extends
((MessagingException) message.getPayload()).getFailedMessage());
if (ack != null) {
if (isAutoCommitOnError(properties)) {
ack.acknowledge(Status.REJECT);
ack.acknowledge(AcknowledgmentCallback.Status.REJECT);
}
else {
ack.acknowledge(Status.REQUEUE);
ack.acknowledge(AcknowledgmentCallback.Status.REQUEUE);
}
}
}
@@ -723,8 +811,9 @@ public class KafkaMessageChannelBinder extends
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, anonymous ? "latest" : "earliest");
props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
if (!ObjectUtils.isEmpty(configurationProperties.getConsumerConfiguration())) {
props.putAll(configurationProperties.getConsumerConfiguration());
Map<String, Object> mergedConfig = configurationProperties.mergedConsumerConfiguration();
if (!ObjectUtils.isEmpty(mergedConfig)) {
props.putAll(mergedConfig);
}
if (ObjectUtils.isEmpty(props.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
@@ -851,18 +940,16 @@ public class KafkaMessageChannelBinder extends
private final class DlqSender<K,V> {
private final KafkaTemplate<K,V> kafkaTemplate;
private final String dlqName;
DlqSender(KafkaTemplate<K, V> kafkaTemplate, String dlqName) {
DlqSender(KafkaTemplate<K, V> kafkaTemplate) {
this.kafkaTemplate = kafkaTemplate;
this.dlqName = dlqName;
}
@SuppressWarnings("unchecked")
void sendToDlq(ConsumerRecord<?, ?> consumerRecord, Headers headers) {
void sendToDlq(ConsumerRecord<?, ?> consumerRecord, Headers headers, String dlqName) {
K key = (K)consumerRecord.key();
V value = (V)consumerRecord.value();
ProducerRecord<K,V> producerRecord = new ProducerRecord<>(this.dlqName, consumerRecord.partition(),
ProducerRecord<K,V> producerRecord = new ProducerRecord<>(dlqName, consumerRecord.partition(),
key, value, headers);
StringBuilder sb = new StringBuilder().append(" a message with key='")
@@ -18,6 +18,8 @@ package org.springframework.cloud.stream.binder.kafka.config;
import java.io.IOException;
import javax.security.auth.login.AppConfigurationEntry;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.MeterBinder;
@@ -36,12 +38,15 @@ import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleC
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.config.ListenerContainerCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.lang.Nullable;
/**
* @author David Turanski
@@ -56,7 +61,8 @@ import org.springframework.kafka.support.ProducerListener;
*/
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({KafkaAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class, KafkaBinderHealthIndicatorConfiguration.class })
@Import({ KafkaAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class,
KafkaBinderHealthIndicatorConfiguration.class })
@EnableConfigurationProperties({ KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {
@@ -70,8 +76,8 @@ public class KafkaBinderConfiguration {
private KafkaProperties kafkaProperties;
@Bean
KafkaBinderConfigurationProperties configurationProperties() {
return new KafkaBinderConfigurationProperties();
KafkaBinderConfigurationProperties configurationProperties(KafkaProperties kafkaProperties) {
return new KafkaBinderConfigurationProperties(kafkaProperties);
}
@Bean
@@ -81,10 +87,10 @@ public class KafkaBinderConfiguration {
@Bean
KafkaMessageChannelBinder kafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
KafkaTopicProvisioner provisioningProvider) {
KafkaTopicProvisioner provisioningProvider, @Nullable ListenerContainerCustomizer<AbstractMessageListenerContainer<?, ?>> listenerContainerCustomizer) {
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
configurationProperties, provisioningProvider);
configurationProperties, provisioningProvider, listenerContainerCustomizer);
kafkaMessageChannelBinder.setProducerListener(producerListener);
kafkaMessageChannelBinder.setExtendedBindingProperties(this.kafkaExtendedBindingProperties);
return kafkaMessageChannelBinder;
@@ -97,8 +103,34 @@ public class KafkaBinderConfiguration {
}
@Bean
public KafkaJaasLoginModuleInitializer jaasInitializer() throws IOException {
return new KafkaJaasLoginModuleInitializer();
@ConditionalOnMissingBean(KafkaJaasLoginModuleInitializer.class)
public KafkaJaasLoginModuleInitializer jaasInitializer(KafkaBinderConfigurationProperties configurationProperties) throws IOException {
KafkaJaasLoginModuleInitializer kafkaJaasLoginModuleInitializer = new KafkaJaasLoginModuleInitializer();
JaasLoginModuleConfiguration jaas = configurationProperties.getJaas();
if (jaas != null) {
kafkaJaasLoginModuleInitializer.setLoginModule(jaas.getLoginModule());
KafkaJaasLoginModuleInitializer.ControlFlag controlFlag = null;
AppConfigurationEntry.LoginModuleControlFlag controlFlagValue = jaas.getControlFlagValue();
if (AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL.equals(controlFlagValue)) {
controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag.OPTIONAL;
}
else if (AppConfigurationEntry.LoginModuleControlFlag.REQUIRED.equals(controlFlagValue)) {
controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag.REQUIRED;
}
else if (AppConfigurationEntry.LoginModuleControlFlag.REQUISITE.equals(controlFlagValue)) {
controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag.REQUISITE;
}
else if (AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT.equals(controlFlagValue)) {
controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag.SUFFICIENT;
}
if (controlFlag != null) {
kafkaJaasLoginModuleInitializer.setControlFlag(controlFlag);
}
kafkaJaasLoginModuleInitializer.setOptions(jaas.getOptions());
}
return kafkaJaasLoginModuleInitializer;
}
/**
@@ -33,7 +33,7 @@ import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.util.ObjectUtils;
/**
*
*
* @author Oleg Zhurakousky
*
*/
@@ -48,8 +48,9 @@ class KafkaBinderHealthIndicatorConfiguration {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
if (!ObjectUtils.isEmpty(configurationProperties.getConsumerConfiguration())) {
props.putAll(configurationProperties.getConsumerConfiguration());
Map<String, Object> mergedConfig = configurationProperties.mergedConsumerConfiguration();
if (!ObjectUtils.isEmpty(mergedConfig)) {
props.putAll(mergedConfig);
}
if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
@@ -46,7 +46,8 @@ import static org.assertj.core.api.Assertions.assertThat;
@TestPropertySource(properties = {
"spring.cloud.stream.kafka.bindings.input.consumer.admin.replication-factor=2",
"spring.cloud.stream.kafka.bindings.input.consumer.admin.replicas-assignments.0=0,1",
"spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0" })
"spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0",
"spring.main.allow-bean-definition-overriding=true"})
@EnableIntegration
public class AdminConfigTests {
@@ -41,7 +41,9 @@ import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
@@ -74,11 +76,11 @@ public class KafkaBinderAutoConfigurationPropertiesTest {
Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField,
producerFactory);
assertTrue(producerConfigs.get("batch.size").equals(10));
assertTrue(producerConfigs.get("key.serializer").equals(LongSerializer.class));
assertTrue(producerConfigs.get("key.deserializer") == null);
assertTrue(producerConfigs.get("value.serializer").equals(LongSerializer.class));
assertTrue(producerConfigs.get("value.deserializer") == null);
assertTrue(producerConfigs.get("compression.type").equals("snappy"));
assertEquals(producerConfigs.get("key.serializer"), LongSerializer.class);
assertNull(producerConfigs.get("key.deserializer"));
assertEquals(producerConfigs.get("value.serializer"), LongSerializer.class);
assertNull(producerConfigs.get("value.deserializer"));
assertEquals("snappy", producerConfigs.get("compression.type"));
List<String> bootstrapServers = new ArrayList<>();
bootstrapServers.add("10.98.09.199:9092");
bootstrapServers.add("10.98.09.196:9092");
@@ -95,12 +97,12 @@ public class KafkaBinderAutoConfigurationPropertiesTest {
ReflectionUtils.makeAccessible(consumerFactoryConfigField);
Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField,
consumerFactory);
assertTrue(consumerConfigs.get("key.deserializer").equals(LongDeserializer.class));
assertTrue(consumerConfigs.get("key.serializer") == null);
assertTrue(consumerConfigs.get("value.deserializer").equals(LongDeserializer.class));
assertTrue(consumerConfigs.get("value.serialized") == null);
assertTrue(consumerConfigs.get("group.id").equals("groupIdFromBootConfig"));
assertTrue(consumerConfigs.get("auto.offset.reset").equals("earliest"));
assertEquals(consumerConfigs.get("key.deserializer"), LongDeserializer.class);
assertNull(consumerConfigs.get("key.serializer"));
assertEquals(consumerConfigs.get("value.deserializer"), LongDeserializer.class);
assertNull(consumerConfigs.get("value.serialized"));
assertEquals("groupIdFromBootConfig", consumerConfigs.get("group.id"));
assertEquals("earliest", consumerConfigs.get("auto.offset.reset"));
assertTrue((((List<String>) consumerConfigs.get("bootstrap.servers")).containsAll(bootstrapServers)));
}
@@ -41,6 +41,7 @@ import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@@ -75,11 +76,11 @@ public class KafkaBinderConfigurationPropertiesTest {
ReflectionUtils.makeAccessible(producerFactoryConfigField);
Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField,
producerFactory);
assertTrue(producerConfigs.get("batch.size").equals("12345"));
assertTrue(producerConfigs.get("linger.ms").equals("100"));
assertTrue(producerConfigs.get("key.serializer").equals(ByteArraySerializer.class));
assertTrue(producerConfigs.get("value.serializer").equals(ByteArraySerializer.class));
assertTrue(producerConfigs.get("compression.type").equals("gzip"));
assertEquals("12345", producerConfigs.get("batch.size"));
assertEquals("100", producerConfigs.get("linger.ms"));
assertEquals(producerConfigs.get("key.serializer"), ByteArraySerializer.class);
assertEquals(producerConfigs.get("value.serializer"), ByteArraySerializer.class);
assertEquals("gzip", producerConfigs.get("compression.type"));
List<String> bootstrapServers = new ArrayList<>();
bootstrapServers.add("10.98.09.199:9082");
assertTrue((((String) producerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
@@ -95,8 +96,8 @@ public class KafkaBinderConfigurationPropertiesTest {
ReflectionUtils.makeAccessible(consumerFactoryConfigField);
Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField,
consumerFactory);
assertTrue(consumerConfigs.get("key.deserializer").equals(ByteArrayDeserializer.class));
assertTrue(consumerConfigs.get("value.deserializer").equals(ByteArrayDeserializer.class));
assertEquals(consumerConfigs.get("key.deserializer"), ByteArrayDeserializer.class);
assertEquals(consumerConfigs.get("value.deserializer"), ByteArrayDeserializer.class);
assertTrue((((String) consumerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
}
@@ -73,7 +73,7 @@ public class KafkaBinderHealthIndicatorTest {
@Test
public void kafkaBinderIsUp() {
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group1-healthIndicator", partitions));
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
Health health = indicator.health();
assertThat(health.getStatus()).isEqualTo(Status.UP);
@@ -82,7 +82,7 @@ public class KafkaBinderHealthIndicatorTest {
@Test
public void kafkaBinderIsDown() {
final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group2-healthIndicator", partitions));
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
Health health = indicator.health();
assertThat(health.getStatus()).isEqualTo(Status.DOWN);
@@ -91,7 +91,7 @@ public class KafkaBinderHealthIndicatorTest {
@Test(timeout = 5000)
public void kafkaBinderDoesNotAnswer() {
final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group3-healthIndicator", partitions));
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willAnswer(new Answer<Object>() {
@Override
@@ -110,7 +110,7 @@ public class KafkaBinderHealthIndicatorTest {
@Test
public void createsConsumerOnceWhenInvokedMultipleTimes() {
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group4-healthIndicator", partitions));
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
indicator.health();
@@ -23,9 +23,11 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.TimeGauge;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
@@ -33,6 +35,7 @@ import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentMatchers;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder.TopicInformation;
@@ -43,6 +46,7 @@ import static org.assertj.core.api.Assertions.assertThat;
/**
* @author Henryk Konsek
* @author Thomas Cheyney
*/
public class KafkaBinderMetricsTest {
@@ -80,11 +84,11 @@ public class KafkaBinderMetricsTest {
public void shouldIndicateLag() {
org.mockito.BDDMockito.given(consumer.committed(ArgumentMatchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group1-metrics", partitions));
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
metrics.bindTo(meterRegistry);
assertThat(meterRegistry.getMeters()).hasSize(1);
assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge()
assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group1-metrics").tag("topic", TEST_TOPIC).timeGauge()
.value(TimeUnit.MILLISECONDS)).isEqualTo(500.0);
}
@@ -96,22 +100,22 @@ public class KafkaBinderMetricsTest {
org.mockito.BDDMockito.given(consumer.endOffsets(ArgumentMatchers.anyCollection())).willReturn(endOffsets);
org.mockito.BDDMockito.given(consumer.committed(ArgumentMatchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
List<PartitionInfo> partitions = partitions(new Node(0, null, 0), new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group2-metrics", partitions));
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
metrics.bindTo(meterRegistry);
assertThat(meterRegistry.getMeters()).hasSize(1);
assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge()
assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group2-metrics").tag("topic", TEST_TOPIC).timeGauge()
.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);
}
@Test
public void shouldIndicateFullLagForNotCommittedGroups() {
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group3-metrics", partitions));
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
metrics.bindTo(meterRegistry);
assertThat(meterRegistry.getMeters()).hasSize(1);
assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group").tag("topic", TEST_TOPIC).timeGauge()
assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group3-metrics").tag("topic", TEST_TOPIC).timeGauge()
.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);
}
@@ -123,6 +127,37 @@ public class KafkaBinderMetricsTest {
assertThat(meterRegistry.getMeters()).isEmpty();
}
@Test
public void createsConsumerOnceWhenInvokedMultipleTimes() {
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group4-metrics", partitions));
metrics.bindTo(meterRegistry);
TimeGauge gauge = meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group4-metrics").tag("topic", TEST_TOPIC).timeGauge();
gauge.value(TimeUnit.MILLISECONDS);
assertThat(gauge.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);
org.mockito.Mockito.verify(this.consumerFactory).createConsumer();
}
@Test
public void consumerCreationFailsFirstTime() {
org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willThrow(KafkaException.class)
.willReturn(consumer);
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
topicsInUse.put(TEST_TOPIC, new TopicInformation("group5-metrics", partitions));
metrics.bindTo(meterRegistry);
TimeGauge gauge = meterRegistry.get(KafkaBinderMetrics.METRIC_NAME).tag("group", "group5-metrics").tag("topic", TEST_TOPIC).timeGauge();
assertThat(gauge.value(TimeUnit.MILLISECONDS)).isEqualTo(0);
assertThat(gauge.value(TimeUnit.MILLISECONDS)).isEqualTo(1000.0);
org.mockito.Mockito.verify(this.consumerFactory, Mockito.times(2)).createConsumer();
}
private List<PartitionInfo> partitions(Node... nodes) {
List<PartitionInfo> partitions = new ArrayList<>();
for (int i = 0; i < nodes.length; i++) {
@@ -17,6 +17,7 @@
package org.springframework.cloud.stream.binder.kafka;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
@@ -30,10 +31,10 @@ import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsResult;
@@ -48,6 +49,7 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.Deserializer;
@@ -63,7 +65,6 @@ import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.springframework.beans.DirectFieldAccessor;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.Binding;
@@ -81,6 +82,7 @@ import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerPro
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
import org.springframework.cloud.stream.binding.MessageConverterConfigurer.PartitioningInterceptor;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.provisioning.ProvisioningException;
import org.springframework.context.ApplicationContext;
@@ -101,15 +103,16 @@ import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.MessageListenerContainer;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TopicPartitionInitialOffset;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
@@ -118,11 +121,11 @@ import org.springframework.messaging.MessageHandlingException;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.MessagingException;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.ChannelInterceptor;
import org.springframework.messaging.support.ErrorMessage;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
import org.springframework.util.MimeType;
import org.springframework.util.MimeTypeUtils;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.SettableListenableFuture;
@@ -149,7 +152,7 @@ public class KafkaBinderTests extends
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
@ClassRule
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10, "error.pollableDlq.group");
public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, 10, "error.pollableDlq.group-pcWithDlq");
private KafkaTestBinder binder;
@@ -182,7 +185,7 @@ public class KafkaBinderTests extends
protected KafkaTestBinder getBinder() {
if (binder == null) {
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
KafkaTopicProvisioner kafkaTopicProvisioner = new KafkaTopicProvisioner(binderConfiguration, new KafkaProperties());
KafkaTopicProvisioner kafkaTopicProvisioner = new KafkaTopicProvisioner(binderConfiguration, new TestKafkaProperties());
try {
kafkaTopicProvisioner.afterPropertiesSet();
}
@@ -196,7 +199,7 @@ public class KafkaBinderTests extends
private Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
KafkaTopicProvisioner provisioningProvider =
new KafkaTopicProvisioner(kafkaBinderConfigurationProperties, new KafkaProperties());
new KafkaTopicProvisioner(kafkaBinderConfigurationProperties, new TestKafkaProperties());
try {
provisioningProvider.afterPropertiesSet();
}
@@ -207,15 +210,15 @@ public class KafkaBinderTests extends
}
private KafkaBinderConfigurationProperties createConfigurationProperties() {
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties(
new TestKafkaProperties());
BrokerAddress[] brokerAddresses = embeddedKafka.getEmbeddedKafka().getBrokerAddresses();
List<String> bAddresses = new ArrayList<>();
for (BrokerAddress bAddress : brokerAddresses) {
bAddresses.add(bAddress.toString());
}
String[] foo = new String[bAddresses.size()];
binderConfiguration.setBrokers(bAddresses.toArray(foo));
binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
return binderConfiguration;
}
@@ -223,7 +226,7 @@ public class KafkaBinderTests extends
return consumerFactory().createConsumer().partitionsFor(topic).size();
}
private void invokeCreateTopic(String topic, int partitions, int replicationFactor) throws Throwable {
private void invokeCreateTopic(String topic, int partitions, int replicationFactor) throws Exception {
NewTopic newTopic = new NewTopic(topic, partitions,
(short) replicationFactor);
@@ -242,7 +245,7 @@ public class KafkaBinderTests extends
timeoutMultiplier = Double.parseDouble(multiplier);
}
BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
BrokerAddress[] brokerAddresses = embeddedKafka.getEmbeddedKafka().getBrokerAddresses();
List<String> bAddresses = new ArrayList<>();
for (BrokerAddress bAddress : brokerAddresses) {
bAddresses.add(bAddress.toString());
@@ -334,9 +337,9 @@ public class KafkaBinderTests extends
Assertions.assertThat(inboundMessageRef.get().getHeaders().get(BinderHeaders.BINDER_ORIGINAL_CONTENT_TYPE)).isNull();
Assertions.assertThat(inboundMessageRef.get().getHeaders().get(MessageHeaders.CONTENT_TYPE))
.isEqualTo(MimeTypeUtils.TEXT_PLAIN);
Assertions.assertThat(inboundMessageRef.get().getHeaders().get("foo")).isInstanceOf(MimeType.class);
MimeType actual = (MimeType) inboundMessageRef.get().getHeaders().get("foo");
Assertions.assertThat(actual).isEqualTo(MimeTypeUtils.TEXT_PLAIN);
Assertions.assertThat(inboundMessageRef.get().getHeaders().get("foo")).isInstanceOf(String.class);
String actual = (String) inboundMessageRef.get().getHeaders().get("foo");
Assertions.assertThat(actual).isEqualTo(MimeTypeUtils.TEXT_PLAIN.toString());
producerBinding.unbind();
consumerBinding.unbind();
}
@@ -493,7 +496,7 @@ public class KafkaBinderTests extends
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC))
.isEqualTo("foo.bar".getBytes(StandardCharsets.UTF_8));
assertThat(new String((byte[]) receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_MESSAGE)))
.startsWith("failed to send Message to channel 'input'");
.startsWith("Dispatcher failed to deliver Message; nested exception is java.lang.RuntimeException: fail");
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_STACKTRACE))
.isNotNull();
binderBindUnbindLatency();
@@ -608,6 +611,7 @@ public class KafkaBinderTests extends
consumerProperties.getExtension().setEnableDlq(true);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
consumerProperties.setHeaderMode(headerMode);
consumerProperties.setMultiplex(true);
DirectChannel moduleInputChannel = createBindableChannel("input", createConsumerBindingProperties(consumerProperties));
@@ -620,9 +624,14 @@ public class KafkaBinderTests extends
String producerName = "dlqTest." + uniqueBindingId + ".0";
Binding<MessageChannel> producerBinding = binder.bindProducer(producerName,
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer(producerName,
String consumerDest = producerName + ", " + producerName.replaceAll("0", "1");
Binding<MessageChannel> consumerBinding = binder.bindConsumer(consumerDest,
"testGroup", moduleInputChannel, consumerProperties);
MessageListenerContainer container = TestUtils.getPropertyValue(consumerBinding,
"lifecycle.messageListenerContainer", MessageListenerContainer.class);
assertThat(container.getContainerProperties().getTopicPartitions().length).isEqualTo(2);
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
dlqConsumerProperties.setMaxAttempts(1);
dlqConsumerProperties.setHeaderMode(headerMode);
@@ -630,7 +639,7 @@ public class KafkaBinderTests extends
ApplicationContext context = TestUtils.getPropertyValue(binder.getBinder(), "applicationContext",
ApplicationContext.class);
SubscribableChannel boundErrorChannel = context
.getBean(producerName + ".testGroup.errors-0", SubscribableChannel.class);
.getBean(consumerDest + ".testGroup.errors-0", SubscribableChannel.class);
SubscribableChannel globalErrorChannel = context.getBean("errorChannel", SubscribableChannel.class);
final AtomicReference<Message<?>> boundErrorChannelMessage = new AtomicReference<>();
final AtomicReference<Message<?>> globalErrorChannelMessage = new AtomicReference<>();
@@ -654,21 +663,51 @@ public class KafkaBinderTests extends
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload.getBytes());
if (HeaderMode.embeddedHeaders.equals(headerMode)) {
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC))
.isEqualTo(producerName);
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_PARTITION)).isEqualTo(0);
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_OFFSET))
.isEqualTo(0);
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TIMESTAMP)).isNotNull();
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TIMESTAMP_TYPE))
.isEqualTo(TimestampType.CREATE_TIME.toString());
assertThat(((String) receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_MESSAGE)))
.startsWith("failed to send Message to channel 'input'");
.startsWith("Dispatcher failed to deliver Message; nested exception is java.lang.RuntimeException: fail");
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_STACKTRACE))
.isNotNull();
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_FQCN)).isNotNull();
}
else if (!HeaderMode.none.equals(headerMode)) {
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC))
.isEqualTo(producerName.getBytes(StandardCharsets.UTF_8));
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_PARTITION))
.isEqualTo(ByteBuffer.allocate(Integer.BYTES).putInt(0).array());
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_OFFSET))
.isEqualTo(ByteBuffer.allocate(Long.BYTES).putLong(0).array());
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TIMESTAMP)).isNotNull();
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TIMESTAMP_TYPE))
.isEqualTo(TimestampType.CREATE_TIME.toString().getBytes());
assertThat(new String((byte[]) receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_MESSAGE)))
.startsWith("failed to send Message to channel 'input'");
.startsWith("Dispatcher failed to deliver Message; nested exception is java.lang.RuntimeException: fail");
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_STACKTRACE))
.isNotNull();
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_EXCEPTION_FQCN)).isNotNull();
}
else {
assertThat(receivedMessage.getHeaders().get(KafkaMessageChannelBinder.X_ORIGINAL_TOPIC)).isNull();
@@ -1126,7 +1165,7 @@ public class KafkaBinderTests extends
AbstractMessageListenerContainer<?, ?> container = TestUtils.getPropertyValue(consumerBinding,
"lifecycle.messageListenerContainer", AbstractMessageListenerContainer.class);
assertThat(container.getContainerProperties().getAckMode()).isEqualTo(AckMode.BATCH);
assertThat(container.getContainerProperties().getAckMode()).isEqualTo(ContainerProperties.AckMode.BATCH);
String testPayload1 = "foo" + UUID.randomUUID().toString();
Message<?> message1 = org.springframework.integration.support.MessageBuilder.withPayload(
@@ -1173,7 +1212,7 @@ public class KafkaBinderTests extends
AbstractMessageListenerContainer<?, ?> container = TestUtils.getPropertyValue(consumerBinding2,
"lifecycle.messageListenerContainer", AbstractMessageListenerContainer.class);
assertThat(container.getContainerProperties().getAckMode()).isEqualTo(AckMode.RECORD);
assertThat(container.getContainerProperties().getAckMode()).isEqualTo(ContainerProperties.AckMode.RECORD);
Message<?> receivedMessage1 = receive(inbound1);
assertThat(receivedMessage1).isNotNull();
@@ -1215,6 +1254,7 @@ public class KafkaBinderTests extends
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
producerProperties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
producerProperties.setPartitionCount(3);
invokeCreateTopic("output", 6, 1);
DirectChannel output = createBindableChannel("output", createProducerBindingProperties(producerProperties));
output.setBeanName("test.output");
@@ -1226,7 +1266,14 @@ public class KafkaBinderTests extends
}
catch (UnsupportedOperationException ignored) {
}
List<ChannelInterceptor> interceptors = output.getChannelInterceptors();
AtomicInteger count = new AtomicInteger();
interceptors.forEach(interceptor -> {
if (interceptor instanceof PartitioningInterceptor) {
count.set(TestUtils.getPropertyValue(interceptor, "partitionHandler.partitionCount", Integer.class));
}
});
assertThat(count.get()).isEqualTo(6);
Message<Integer> message2 = org.springframework.integration.support.MessageBuilder.withPayload(2)
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
@@ -1779,8 +1826,6 @@ public class KafkaBinderTests extends
@SuppressWarnings("unchecked")
|
||||
public void testDefaultConsumerStartsAtEarliest() throws Exception {
|
||||
Binder binder = getBinder(createConfigurationProperties());
|
||||
GenericApplicationContext context = new GenericApplicationContext();
|
||||
context.refresh();
|
||||
|
||||
BindingProperties producerBindingProperties = createProducerBindingProperties(createProducerProperties());
|
||||
DirectChannel output = createBindableChannel("output", producerBindingProperties);
|
||||
@@ -2419,7 +2464,7 @@ public class KafkaBinderTests extends
|
||||
assertThat(inbound.getHeaders().get(BinderHeaders.NATIVE_HEADERS_PRESENT)).isNull();
|
||||
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("testSendAndReceiveWithMixedMode", "false",
|
||||
embeddedKafka);
|
||||
embeddedKafka.getEmbeddedKafka());
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
|
||||
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
|
||||
@@ -2454,9 +2499,11 @@ public class KafkaBinderTests extends
|
||||
public void testPolledConsumer() throws Exception {
|
||||
KafkaTestBinder binder = getBinder();
|
||||
PollableSource<MessageHandler> inboundBindTarget = new DefaultPollableMessageSource(this.messageConverter);
|
||||
Binding<PollableSource<MessageHandler>> binding = binder.bindPollableConsumer("pollable", "group",
|
||||
inboundBindTarget, createConsumerProperties());
|
||||
Map<String, Object> producerProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProps = createConsumerProperties();
|
||||
consumerProps.setMultiplex(true);
|
||||
Binding<PollableSource<MessageHandler>> binding = binder.bindPollableConsumer("pollable,anotherOne", "group-polledConsumer",
|
||||
inboundBindTarget, consumerProps);
|
||||
Map<String, Object> producerProps = KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka());
|
||||
KafkaTemplate template = new KafkaTemplate(new DefaultKafkaProducerFactory<>(producerProps));
|
||||
template.send("pollable", "testPollable");
|
||||
boolean polled = inboundBindTarget.poll(m -> {
|
||||
@@ -2470,6 +2517,19 @@ public class KafkaBinderTests extends
|
||||
Thread.sleep(100);
|
||||
}
|
||||
assertThat(polled).isTrue();
|
||||
|
||||
template.send("anotherOne", "testPollable2");
|
||||
polled = inboundBindTarget.poll(m -> {
|
||||
assertThat(m.getPayload()).isEqualTo("testPollable2");
|
||||
});
|
||||
n = 0;
|
||||
while (n++ < 100 && !polled) {
|
||||
polled = inboundBindTarget.poll(m -> {
|
||||
assertThat(m.getPayload()).isEqualTo("testPollable2".getBytes());
|
||||
});
|
||||
Thread.sleep(100);
|
||||
}
|
||||
assertThat(polled).isTrue();
|
||||
binding.unbind();
|
||||
}
|
||||
|
||||
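The reworked testPolledConsumer now binds one pollable source to two destinations ("pollable,anotherOne") with multiplex enabled, so a single Kafka consumer serves both topics, and it polls in a loop until each payload arrives. A hedged sketch of the consuming side, assuming a PollableSource<MessageHandler> bound the same way (the loop and class name are illustrative, not part of the diff):

import java.util.concurrent.atomic.AtomicBoolean;

import org.springframework.cloud.stream.binder.PollableSource;
import org.springframework.messaging.MessageHandler;

public class PolledConsumerLoop {

	private final AtomicBoolean running = new AtomicBoolean(true);

	// Repeatedly polls the bound source; poll(...) returns false when no record was
	// handed to the handler, in which case we back off briefly, as the test above does.
	public void pollUntilStopped(PollableSource<MessageHandler> source) throws InterruptedException {
		while (running.get()) {
			boolean processed = source.poll(message -> System.out.println(message.getPayload()));
			if (!processed) {
				Thread.sleep(100);
			}
		}
	}

	public void stop() {
		running.set(false);
	}
}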
@@ -2482,8 +2542,8 @@ public class KafkaBinderTests extends
properties.setMaxAttempts(2);
properties.setBackOffInitialInterval(0);
properties.getExtension().setEnableDlq(true);
Map<String, Object> producerProps = KafkaTestUtils.producerProps(embeddedKafka);
Binding<PollableSource<MessageHandler>> binding = binder.bindPollableConsumer("pollableDlq", "group",
Map<String, Object> producerProps = KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka());
Binding<PollableSource<MessageHandler>> binding = binder.bindPollableConsumer("pollableDlq", "group-pcWithDlq",
inboundBindTarget, properties);
KafkaTemplate template = new KafkaTemplate(new DefaultKafkaProducerFactory<>(producerProps));
template.send("pollableDlq", "testPollableDLQ");

@@ -2499,18 +2559,47 @@ public class KafkaBinderTests extends
catch (MessageHandlingException e) {
assertThat(e.getCause().getMessage()).isEqualTo("test DLQ");
}
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dlq", "false", embeddedKafka);
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("dlq", "false", embeddedKafka.getEmbeddedKafka());
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
ConsumerFactory cf = new DefaultKafkaConsumerFactory<>(consumerProps);
Consumer consumer = cf.createConsumer();
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "error.pollableDlq.group");
ConsumerRecord deadLetter = KafkaTestUtils.getSingleRecord(consumer, "error.pollableDlq.group");
embeddedKafka.getEmbeddedKafka().consumeFromAnEmbeddedTopic(consumer, "error.pollableDlq.group-pcWithDlq");
ConsumerRecord deadLetter = KafkaTestUtils.getSingleRecord(consumer, "error.pollableDlq.group-pcWithDlq");
assertThat(deadLetter).isNotNull();
assertThat(deadLetter.value()).isEqualTo("testPollableDLQ");
binding.unbind();
consumer.close();
}

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testTopicPatterns() throws Exception {
try (AdminClient admin = AdminClient.create(Collections.singletonMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
embeddedKafka.getEmbeddedKafka().getBrokersAsString()))) {
admin.createTopics(Collections.singletonList(new NewTopic("topicPatterns.1", 1, (short) 1))).all().get();
Binder binder = getBinder();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.getExtension().setDestinationIsPattern(true);
DirectChannel moduleInputChannel = createBindableChannel("input", createConsumerBindingProperties(consumerProperties));
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<String> topic = new AtomicReference<>();
moduleInputChannel.subscribe(m -> {
topic.set(m.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC, String.class));
latch.countDown();
});
Binding<MessageChannel> consumerBinding = binder.bindConsumer("topicPatterns\\..*",
"testTopicPatterns", moduleInputChannel, consumerProperties);
DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory(
KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka()));
KafkaTemplate template = new KafkaTemplate(pf);
template.send("topicPatterns.1", "foo");
assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
assertThat(topic.get()).isEqualTo("topicPatterns.1");
consumerBinding.unbind();
pf.destroy();
}
}

private final class FailingInvocationCountingMessageHandler implements MessageHandler {

private int invocationCount;
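The new testTopicPatterns test drives the 2.1 destinationIsPattern consumer option: when it is set, the destination passed to bindConsumer(...) ("topicPatterns\..*" above) is treated as a regular expression instead of a literal topic name. A small sketch of just the property setup, using the same types the test uses (class and method names are illustrative):

import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;

public class PatternConsumerSettings {

	// Consumer-side settings the test relies on; the bound destination is then a regex.
	public static ExtendedConsumerProperties<KafkaConsumerProperties> patternProperties() {
		KafkaConsumerProperties kafka = new KafkaConsumerProperties();
		kafka.setDestinationIsPattern(true);
		return new ExtendedConsumerProperties<>(kafka);
	}
}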
@@ -17,6 +17,7 @@
package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Method;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;

@@ -30,11 +31,13 @@ import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;

@@ -67,8 +70,10 @@ public class KafkaBinderUnitTests {

@Test
public void testPropertyOverrides() throws Exception {
KafkaBinderConfigurationProperties binderConfigurationProperties = new KafkaBinderConfigurationProperties();
KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(binderConfigurationProperties, new KafkaProperties());
KafkaProperties kafkaProperties = new TestKafkaProperties();
KafkaBinderConfigurationProperties binderConfigurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfigurationProperties,
provisioningProvider);
KafkaConsumerProperties consumerProps = new KafkaConsumerProperties();

@@ -79,62 +84,87 @@ public class KafkaBinderUnitTests {
method.setAccessible(true);

// test default for anon
Object factory = method.invoke(binder, true, "foo", ecp);
Object factory = method.invoke(binder, true, "foo-1", ecp);
Map<?, ?> configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("latest");

// test default for named
factory = method.invoke(binder, false, "foo", ecp);
factory = method.invoke(binder, false, "foo-2", ecp);
configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("earliest");

// binder level setting
binderConfigurationProperties.setConfiguration(
Collections.singletonMap(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"));
factory = method.invoke(binder, false, "foo", ecp);
factory = method.invoke(binder, false, "foo-3", ecp);
configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("latest");

// consumer level setting
consumerProps.setConfiguration(Collections.singletonMap(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"));
factory = method.invoke(binder, false, "foo", ecp);
factory = method.invoke(binder, false, "foo-4", ecp);
configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("earliest");
}

@Test
public void testMergedConsumerProperties() {
KafkaProperties bootProps = new TestKafkaProperties();
bootProps.getConsumer().getProperties().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "bar");
KafkaBinderConfigurationProperties props = new KafkaBinderConfigurationProperties(bootProps);
assertThat(props.mergedConsumerConfiguration().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("bar");
props.getConfiguration().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "baz");
assertThat(props.mergedConsumerConfiguration().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("baz");
props.getConsumerProperties().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "qux");
assertThat(props.mergedConsumerConfiguration().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("qux");
}

@Test
public void testMergedProducerProperties() {
KafkaProperties bootProps = new TestKafkaProperties();
bootProps.getProducer().getProperties().put(ProducerConfig.RETRIES_CONFIG, "bar");
KafkaBinderConfigurationProperties props = new KafkaBinderConfigurationProperties(bootProps);
assertThat(props.mergedProducerConfiguration().get(ProducerConfig.RETRIES_CONFIG)).isEqualTo("bar");
props.getConfiguration().put(ProducerConfig.RETRIES_CONFIG, "baz");
assertThat(props.mergedProducerConfiguration().get(ProducerConfig.RETRIES_CONFIG)).isEqualTo("baz");
props.getProducerProperties().put(ProducerConfig.RETRIES_CONFIG, "qux");
assertThat(props.mergedProducerConfiguration().get(ProducerConfig.RETRIES_CONFIG)).isEqualTo("qux");
}
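testMergedConsumerProperties and testMergedProducerProperties pin down the merge order for Kafka client settings: values from Spring Boot's spring.kafka.* properties are overridden by the binder-level configuration map, which is in turn overridden by the binder's consumer- or producer-specific maps. A condensed sketch of that precedence, reusing the same API calls (class and method names are illustrative):

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;

public class MergedConfigSketch {

	public static Object effectiveAutoOffsetReset() {
		// Lowest precedence: Spring Boot properties
		KafkaProperties boot = new KafkaProperties();
		boot.getConsumer().getProperties().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

		KafkaBinderConfigurationProperties binderProps = new KafkaBinderConfigurationProperties(boot);
		// Next: binder-wide configuration
		binderProps.getConfiguration().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		// Highest precedence: binder consumer-specific properties
		binderProps.getConsumerProperties().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

		// Returns "none": the consumer-specific map wins over the shared configuration and boot defaults.
		return binderProps.mergedConsumerConfiguration().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
	}
}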
@Test
public void testOffsetResetWithGroupManagementEarliest() throws Exception {
testOffsetResetWithGroupManagement(true, true);
testOffsetResetWithGroupManagement(true, true, "foo-100", "testOffsetResetWithGroupManagementEarliest");
}

@Test
public void testOffsetResetWithGroupManagementLatest() throws Throwable {
testOffsetResetWithGroupManagement(false, true);
testOffsetResetWithGroupManagement(false, true, "foo-101", "testOffsetResetWithGroupManagementLatest");
}

@Test
public void testOffsetResetWithManualAssignmentEarliest() throws Exception {
testOffsetResetWithGroupManagement(true, false);
testOffsetResetWithGroupManagement(true, false, "foo-102", "testOffsetResetWithManualAssignmentEarliest");
}

@Test
public void testOffsetResetWithGroupManualAssignmentLatest() throws Throwable {
testOffsetResetWithGroupManagement(false, false);
testOffsetResetWithGroupManagement(false, false, "foo-103", "testOffsetResetWithGroupManualAssignmentLatest");
}

private void testOffsetResetWithGroupManagement(final boolean earliest, boolean groupManage) throws Exception {
private void testOffsetResetWithGroupManagement(final boolean earliest, boolean groupManage, String topic, String group) throws Exception {
final List<TopicPartition> partitions = new ArrayList<>();
partitions.add(new TopicPartition("foo", 0));
partitions.add(new TopicPartition("foo", 1));
KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties();
partitions.add(new TopicPartition(topic, 0));
partitions.add(new TopicPartition(topic, 1));
KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties(
new TestKafkaProperties());
KafkaTopicProvisioner provisioningProvider = mock(KafkaTopicProvisioner.class);
ConsumerDestination dest = mock(ConsumerDestination.class);
given(dest.getName()).willReturn("foo");
given(dest.getName()).willReturn(topic);
given(provisioningProvider.provisionConsumerDestination(anyString(), anyString(), any())).willReturn(dest);
final AtomicInteger part = new AtomicInteger();
willAnswer(i -> {
return partitions.stream()
.map(p -> new PartitionInfo("foo", part.getAndIncrement(), null, null, null))
.map(p -> new PartitionInfo(topic, part.getAndIncrement(), null, null, null))
.collect(Collectors.toList());
}).given(provisioningProvider).getPartitionsForTopic(anyInt(), anyBoolean(), any());
@SuppressWarnings("unchecked")

@@ -148,14 +178,14 @@ public class KafkaBinderUnitTests {
Thread.currentThread().interrupt();
}
return new ConsumerRecords<>(Collections.emptyMap());
}).given(consumer).poll(anyLong());
}).given(consumer).poll(any(Duration.class));
willAnswer(i -> {
((org.apache.kafka.clients.consumer.ConsumerRebalanceListener) i.getArgument(1))
.onPartitionsAssigned(partitions);
latch.countDown();
return null;
}).given(consumer).subscribe(eq(Collections.singletonList("foo")),
}).given(consumer).subscribe(eq(Collections.singletonList(topic)),
any(org.apache.kafka.clients.consumer.ConsumerRebalanceListener.class));
willAnswer(i -> {
latch.countDown();
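The stubbing change from poll(anyLong()) to poll(any(Duration.class)) follows the kafka-clients 2.0 API, where Consumer.poll(long) is deprecated in favour of poll(Duration). A minimal raw-client sketch of the newer call (configuration and names are left illustrative):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class PollDurationSketch {

	// Subscribes and performs a single bounded poll using the Duration-based API.
	public static void pollOnce(Properties consumerConfig, String topic) {
		try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig)) {
			consumer.subscribe(Collections.singletonList(topic));
			ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
			System.out.println("fetched " + records.count() + " records");
		}
	}
}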
@@ -165,7 +195,7 @@ public class KafkaBinderUnitTests {

@Override
protected ConsumerFactory<?, ?> createKafkaConsumerFactory(boolean anonymous, String consumerGroup,
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {

return new ConsumerFactory<byte[], byte[]>() {

@@ -184,6 +214,11 @@ public class KafkaBinderUnitTests {
return consumer;
}

@Override
public Consumer<byte[], byte[]> createConsumer(String groupId, String clientIdPrefix, String clientIdSuffix) {
return consumer;
}

@Override
public boolean isAutoCommit() {
return false;

@@ -194,7 +229,7 @@ public class KafkaBinderUnitTests {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
earliest ? "earliest" : "latest");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "bar");
props.put(ConsumerConfig.GROUP_ID_CONFIG, group);
return props;
}

@@ -212,7 +247,7 @@ public class KafkaBinderUnitTests {
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = new ExtendedConsumerProperties<KafkaConsumerProperties>(
extension);
consumerProperties.setInstanceCount(1);
binder.bindConsumer("foo", "bar", channel, consumerProperties);
Binding<MessageChannel> messageChannelBinding = binder.bindConsumer(topic, group, channel, consumerProperties);
assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
if (groupManage) {
if (earliest) {

@@ -232,7 +267,7 @@ public class KafkaBinderUnitTests {
verify(consumer).seek(partitions.get(1), Long.MAX_VALUE);
}
}
messageChannelBinding.unbind();
}

}

@@ -34,7 +34,7 @@ import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProv
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.retry.support.RetryTemplate;

@@ -53,14 +53,15 @@ import static org.mockito.Mockito.spy;
public class KafkaTransactionTests {

@ClassRule
public static final KafkaEmbedded embeddedKafka = new KafkaEmbedded(1);
public static final EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1);

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testProducerRunsInTx() {
KafkaProperties kafkaProperties = new KafkaProperties();
kafkaProperties.setBootstrapServers(Collections.singletonList(embeddedKafka.getBrokersAsString()));
KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties();
KafkaProperties kafkaProperties = new TestKafkaProperties();
kafkaProperties.setBootstrapServers(Collections.singletonList(embeddedKafka.getEmbeddedKafka().getBrokersAsString()));
KafkaBinderConfigurationProperties configurationProperties =
new KafkaBinderConfigurationProperties(kafkaProperties);
configurationProperties.getTransaction().setTransactionIdPrefix("foo-");
KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(configurationProperties, kafkaProperties);
provisioningProvider.setMetadataRetryOperations(new RetryTemplate());
@@ -0,0 +1,41 @@
/*
* Copyright 2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka;

import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;

/**
* Test {@link KafkaProperties} initialized in the same way as the
* {@code KafkaBinderEnvironmentPostProcessor} initializes the properties.
*
* @author Gary Russell
* @since 2.1
*
*/
public class TestKafkaProperties extends KafkaProperties {

public TestKafkaProperties() {
getConsumer().setKeyDeserializer(ByteArrayDeserializer.class);
getConsumer().setValueDeserializer(ByteArrayDeserializer.class);
getProducer().setKeySerializer(ByteArraySerializer.class);
getProducer().setValueSerializer(ByteArraySerializer.class);
}

}
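The new TestKafkaProperties helper gives the unit tests a Spring Boot KafkaProperties instance preconfigured with byte-array serializers and deserializers, mirroring what KafkaBinderEnvironmentPostProcessor sets up at runtime. The hunks above all wire it the same way; a condensed sketch of that wiring (the wrapper class name is invented for the example):

package org.springframework.cloud.stream.binder.kafka;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;

public class TestKafkaPropertiesWiring {

	// Boot properties feed both the binder configuration properties and the topic provisioner.
	public static KafkaMessageChannelBinder newBinder() {
		KafkaProperties kafkaProperties = new TestKafkaProperties();
		KafkaBinderConfigurationProperties binderProps = new KafkaBinderConfigurationProperties(kafkaProperties);
		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderProps, kafkaProperties);
		return new KafkaMessageChannelBinder(binderProps, provisioner);
	}
}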
@@ -19,10 +19,11 @@ package org.springframework.cloud.stream.binder.kafka.bootstrap;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;

/**
* @author Marius Bogoevici

@@ -30,14 +31,14 @@ import org.springframework.kafka.test.rule.KafkaEmbedded;
public class KafkaBinderBootstrapTest {

@ClassRule
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, 10);

@Test
public void testKafkaBinderConfiguration() throws Exception {
ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
.web(false)
.run("--spring.cloud.stream.kafka.binder.brokers=" + embeddedKafka.getBrokersAsString(),
"--spring.cloud.stream.kafka.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
.web(WebApplicationType.NONE)
.run("--spring.cloud.stream.kafka.binder.brokers=" + embeddedKafka.getEmbeddedKafka().getBrokersAsString(),
"--spring.cloud.stream.kafka.binder.zkNodes=" + embeddedKafka.getEmbeddedKafka().getZookeeperConnectionString());
applicationContext.close();
}
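A recurring change in these test classes is the move from the KafkaEmbedded JUnit rule to EmbeddedKafkaRule: broker details such as getBrokersAsString() and getZookeeperConnectionString() are now reached through the nested broker returned by getEmbeddedKafka(). A minimal sketch of the new rule usage (topic name is illustrative):

import org.junit.ClassRule;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;

public class EmbeddedKafkaRuleSketch {

	@ClassRule
	public static EmbeddedKafkaRule embeddedKafka = new EmbeddedKafkaRule(1, true, "some-test-topic");

	// Broker accessors now live on the embedded broker, not on the rule itself.
	protected static String brokers() {
		return embeddedKafka.getEmbeddedKafka().getBrokersAsString();
	}
}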
@@ -16,15 +16,18 @@

package org.springframework.cloud.stream.binder.kafka.integration;

import java.util.List;
import java.util.Map;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.MeterBinder;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.DirectFieldAccessor;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.FilteredClassLoader;

@@ -32,9 +35,15 @@ import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binding.BindingService;
import org.springframework.cloud.stream.config.ListenerContainerCustomizer;
import org.springframework.cloud.stream.messaging.Sink;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.test.rule.EmbeddedKafkaRule;
import org.springframework.messaging.MessageChannel;
import org.springframework.test.context.junit4.SpringRunner;

import static org.assertj.core.api.Assertions.assertThat;

@@ -51,16 +60,16 @@ import static org.assertj.core.api.Assertions.assertThat;
properties = "spring.cloud.stream.bindings.input.group=" + KafkaBinderActuatorTests.TEST_CONSUMER_GROUP)
public class KafkaBinderActuatorTests {

static final String TEST_CONSUMER_GROUP = "testGroup";
static final String TEST_CONSUMER_GROUP = "testGroup-actuatorTests";

private static final String KAFKA_BROKERS_PROPERTY = "spring.kafka.bootstrap-servers";

@ClassRule
public static KafkaEmbedded kafkaEmbedded = new KafkaEmbedded(1, true);
public static EmbeddedKafkaRule kafkaEmbedded = new EmbeddedKafkaRule(1, true);

@BeforeClass
public static void setup() {
System.setProperty(KAFKA_BROKERS_PROPERTY, kafkaEmbedded.getBrokersAsString());
System.setProperty(KAFKA_BROKERS_PROPERTY, kafkaEmbedded.getEmbeddedKafka().getBrokersAsString());
}

@AfterClass

@@ -93,6 +102,13 @@ public class KafkaBinderActuatorTests {
.run(context -> {
assertThat(context.getBeanNamesForType(MeterRegistry.class)).isEmpty();
assertThat(context.getBeanNamesForType(MeterBinder.class)).isEmpty();

DirectFieldAccessor channelBindingServiceAccessor = new DirectFieldAccessor(context.getBean(BindingService.class));
@SuppressWarnings("unchecked")
Map<String, List<Binding<MessageChannel>>> consumerBindings = (Map<String, List<Binding<MessageChannel>>>) channelBindingServiceAccessor
.getPropertyValue("consumerBindings");
assertThat(new DirectFieldAccessor(consumerBindings.get("input").get(0)).getPropertyValue("lifecycle.messageListenerContainer.beanName"))
.isEqualTo("setByCustomizer:input");
});
}

@@ -100,6 +116,11 @@ public class KafkaBinderActuatorTests {
@EnableAutoConfiguration
public static class KafkaMetricsTestConfig {

@Bean
public ListenerContainerCustomizer<AbstractMessageListenerContainer<?, ?>> containerCustomizer() {
return (c, q, g) -> c.setBeanName("setByCustomizer:" + q);
}

@StreamListener(Sink.INPUT)
public void process(String payload) throws InterruptedException {
// Artificial slow listener to emulate consumer lag
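The added KafkaMetricsTestConfig bean exercises the new ListenerContainerCustomizer hook: the binder invokes the bean with each message listener container, the destination name, and the consumer group before the binding starts, and the test then asserts the customized bean name on the bound container. A condensed sketch of registering such a customizer (the configuration class name is illustrative):

import org.springframework.cloud.stream.config.ListenerContainerCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;

@Configuration
public class ContainerCustomizerConfig {

	// Called for every Kafka listener container the binder creates;
	// the lambda receives the container, the destination name, and the consumer group.
	@Bean
	public ListenerContainerCustomizer<AbstractMessageListenerContainer<?, ?>> containerCustomizer() {
		return (container, destination, group) -> container.setBeanName("setByCustomizer:" + destination);
	}
}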