Compare commits: v1.3.0.M2...v2.0.0.RC1
84 commits
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 99ff426b92 |  |
|  | 0a59b4c628 |  |
|  | a654382e99 |  |
|  | 9a4e86a750 |  |
|  | addb40bab5 |  |
|  | cdffcd844c |  |
|  | a5344655cb |  |
|  | 72e2aeec2a |  |
|  | 090125fa71 |  |
|  | 0fcb972cb6 |  |
|  | dbe19776f5 |  |
|  | f44923e480 |  |
|  | b31f902905 |  |
|  | 13cc4e0155 |  |
|  | 5e0688df72 |  |
|  | abd43cfffa |  |
|  | 8057b1f764 |  |
|  | 78ae3d1867 |  |
|  | a7378c9132 |  |
|  | 9500ccfe46 |  |
|  | 3a5aed61c9 |  |
|  | 86146bfe81 |  |
|  | f2abb59b19 |  |
|  | 36c39974ad |  |
|  | 16a195a887 |  |
|  | bc562e3a77 |  |
|  | 39dd68a8ad |  |
|  | 8daaed43a9 |  |
|  | d13d92131c |  |
|  | 0b4ccdefce |  |
|  | ae269e729b |  |
|  | 9182adcf56 |  |
|  | 0b9e211e27 |  |
|  | a7fe39fef5 |  |
|  | 50b8955dfc |  |
|  | 9a02fa69ac |  |
|  | 77cbfe2858 |  |
|  | 5db3ede9c4 |  |
|  | e59cb569a6 |  |
|  | b0c4f0cfcd |  |
|  | 2f6d5df7bc |  |
|  | 66f194dd93 |  |
|  | 4cb49f9ee4 |  |
|  | 035cc1a005 |  |
|  | b3f42fe67d |  |
|  | a9f40ac084 |  |
|  | db02abe531 |  |
|  | f1dc14b5c3 |  |
|  | 50ce8ca2ba |  |
|  | 6a312592a4 |  |
|  | 43d786f701 |  |
|  | 68811cad28 |  |
|  | 4382dab8f8 |  |
|  | 20a8158a56 |  |
|  | 3ad0d7c465 |  |
|  | 5b3974c932 |  |
|  | 3c7615f7a3 |  |
|  | 8ae0157135 |  |
|  | 08658ffa6c |  |
|  | 8d797deaf9 |  |
|  | 561b4b7e73 |  |
|  | 93fdd2ef0f |  |
|  | fd48a1d0eb |  |
|  | a07a0017bb |  |
|  | 62b40b852f |  |
|  | c396c5c756 |  |
|  | b20f4a0e08 |  |
|  | 77f4bc3fb8 |  |
|  | 2aa8e9eefa |  |
|  | e3460d6fce |  |
|  | 29bb8513c0 |  |
|  | 69227166c7 |  |
|  | 4ff4507741 |  |
|  | f2e1b63460 |  |
|  | 73f1ed9523 |  |
|  | dc7662e17d |  |
|  | b76fff31b8 |  |
|  | 1f4f0c3858 |  |
|  | 1aecd02404 |  |
|  | 6485bd2abd |  |
|  | 02913cd177 |  |
|  | 0865602141 |  |
|  | 790b141799 |  |
|  | 60e620e36e |  |
107  pom.xml
@@ -2,30 +2,27 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
  <version>1.3.0.M2</version>
  <version>2.0.0.RC1</version>
  <packaging>pom</packaging>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-build</artifactId>
    <version>1.3.3.RELEASE</version>
    <version>2.0.0.RC1</version>
    <relativePath />
  </parent>
  <properties>
    <java.version>1.7</java.version>
    <kafka.version>0.10.1.1</kafka.version>
    <spring-kafka.version>1.1.6.RELEASE</spring-kafka.version>
    <spring-integration-kafka.version>2.1.1.RELEASE</spring-integration-kafka.version>
    <spring-cloud-stream.version>1.3.0.M2</spring-cloud-stream.version>
    <spring-cloud-build.version>1.3.3.RELEASE</spring-cloud-build.version>
    <java.version>1.8</java.version>
    <spring-kafka.version>2.1.3.RELEASE</spring-kafka.version>
    <spring-integration-kafka.version>3.0.2.RELEASE</spring-integration-kafka.version>
    <kafka.version>1.0.0</kafka.version>
    <spring-cloud-stream.version>2.0.0.RC1</spring-cloud-stream.version>
  </properties>
  <modules>
    <module>spring-cloud-stream-binder-kafka</module>
    <module>spring-cloud-starter-stream-kafka</module>
    <module>spring-cloud-stream-binder-kafka-docs</module>
    <module>spring-cloud-stream-binder-kafka-0.10.1-test</module>
    <module>spring-cloud-stream-binder-kafka-0.10.2-test</module>
    <module>spring-cloud-stream-binder-kafka-core</module>
    <module>spring-cloud-stream-binder-kstream</module>
    <module>spring-cloud-stream-binder-kafka-streams</module>
  </modules>

  <dependencyManagement>
@@ -45,30 +42,6 @@
        <artifactId>spring-cloud-stream</artifactId>
        <version>${spring-cloud-stream.version}</version>
      </dependency>
      <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-codec</artifactId>
        <version>${spring-cloud-stream.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <version>${kafka.version}</version>
        <exclusions>
          <exclusion>
            <groupId>jline</groupId>
            <artifactId>jline</artifactId>
          </exclusion>
          <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
          </exclusion>
          <exclusion>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
          </exclusion>
        </exclusions>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
@@ -96,12 +69,6 @@
        <scope>test</scope>
        <version>${spring-kafka.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <classifier>test</classifier>
        <version>${kafka.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams</artifactId>
@@ -113,6 +80,27 @@
          </exclusion>
        </exclusions>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <classifier>test</classifier>
        <scope>test</scope>
        <version>${kafka.version}</version>
        <exclusions>
          <exclusion>
            <groupId>jline</groupId>
            <artifactId>jline</artifactId>
          </exclusion>
          <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
          </exclusion>
          <exclusion>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
          </exclusion>
        </exclusions>
      </dependency>
    </dependencies>
  </dependencyManagement>

@@ -124,18 +112,6 @@
        <artifactId>maven-antrun-plugin</artifactId>
        <version>1.7</version>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-checkstyle-plugin</artifactId>
        <version>2.17</version>
        <dependencies>
          <dependency>
            <groupId>com.puppycrawl.tools</groupId>
            <artifactId>checkstyle</artifactId>
            <version>7.1</version>
          </dependency>
        </dependencies>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-javadoc-plugin</artifactId>
@@ -159,26 +135,15 @@
        <dependencies>
          <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-build-tools</artifactId>
            <version>${spring-cloud-build.version}</version>
            <artifactId>spring-cloud-stream-tools</artifactId>
            <version>${spring-cloud-stream.version}</version>
          </dependency>
        </dependencies>
        <executions>
          <execution>
            <id>checkstyle-validation</id>
            <phase>validate</phase>
            <configuration>
              <configLocation>checkstyle.xml</configLocation>
              <encoding>UTF-8</encoding>
              <consoleOutput>true</consoleOutput>
              <failsOnError>true</failsOnError>
              <includeTestSourceDirectory>true</includeTestSourceDirectory>
            </configuration>
            <goals>
              <goal>check</goal>
            </goals>
          </execution>
        </executions>
        <configuration>
          <configLocation>checkstyle.xml</configLocation>
          <headerLocation>checkstyle-header.txt</headerLocation>
          <includeTestSourceDirectory>true</includeTestSourceDirectory>
        </configuration>
      </plugin>
    </plugins>
  </build>
0  spring-cloud-starter-stream-kafka/.jdk8 (Normal file)
@@ -4,7 +4,7 @@
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
    <version>1.3.0.M2</version>
    <version>2.0.0.RC1</version>
  </parent>
  <artifactId>spring-cloud-starter-stream-kafka</artifactId>
  <description>Spring Cloud Starter Stream Kafka</description>
@@ -1,126 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>1.3.0.M2</version>
|
||||
</parent>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-0.10.1-test</artifactId>
|
||||
<description>Spring Cloud Stream Kafka Binder 0.10.1 Tests</description>
|
||||
<url>http://projects.spring.io/spring-cloud</url>
|
||||
<organization>
|
||||
<name>Pivotal Software, Inc.</name>
|
||||
<url>http://www.spring.io</url>
|
||||
</organization>
|
||||
<properties>
|
||||
<main.basedir>${basedir}/../..</main.basedir>
|
||||
<kafka.version>0.10.1.1</kafka.version>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
<scope>test</scope>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.integration</groupId>
|
||||
<artifactId>spring-integration-kafka</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
<type>test-jar</type>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-0.10.2-test</artifactId>
|
||||
<version>${project.version}</version>
|
||||
<type>test-jar</type>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-schema</artifactId>
|
||||
<version>${spring-cloud-stream.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.confluent</groupId>
|
||||
<artifactId>kafka-avro-serializer</artifactId>
|
||||
<version>3.1.2</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.confluent</groupId>
|
||||
<artifactId>kafka-schema-registry</artifactId>
|
||||
<version>3.1.2</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>confluent</id>
|
||||
<url>http://packages.confluent.io/maven/</url>
|
||||
</repository>
|
||||
</repositories>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>3.0.2</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>test-jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
</project>
|
||||
@@ -1,241 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.UUID;
|
||||
|
||||
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
|
||||
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication;
|
||||
import kafka.utils.ZKStringSerializer$;
|
||||
import kafka.utils.ZkUtils;
|
||||
import org.I0Itec.zkclient.ZkClient;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
|
||||
import org.apache.kafka.common.serialization.Deserializer;
|
||||
import org.assertj.core.api.Assertions;
|
||||
import org.eclipse.jetty.server.Server;
|
||||
import org.junit.Before;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.Spy;
|
||||
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.integration.channel.DirectChannel;
|
||||
import org.springframework.integration.channel.QueueChannel;
|
||||
import org.springframework.kafka.core.ConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.support.KafkaHeaders;
|
||||
import org.springframework.kafka.test.core.BrokerAddress;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageChannel;
|
||||
import org.springframework.messaging.SubscribableChannel;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Integration tests for the {@link KafkaMessageChannelBinder}.
|
||||
*
|
||||
* This test specifically tests for the 0.10.1.x version of Kafka.
|
||||
*
|
||||
* @author Eric Bottard
|
||||
* @author Marius Bogoevici
|
||||
* @author Mark Fisher
|
||||
* @author Ilayaperumal Gopinathan
|
||||
*/
|
||||
public class Kafka_0_10_1_BinderTests extends Kafka_0_10_2_BinderTests {
|
||||
|
||||
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
|
||||
|
||||
private Kafka10TestBinder binder;
|
||||
|
||||
private Kafka10AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
|
||||
|
||||
@Override
|
||||
protected void binderBindUnbindLatency() throws InterruptedException {
|
||||
Thread.sleep(500);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Kafka10TestBinder getBinder() {
|
||||
if (binder == null) {
|
||||
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
|
||||
binder = new Kafka10TestBinder(binderConfiguration);
|
||||
}
|
||||
return binder;
|
||||
}
|
||||
|
||||
protected KafkaBinderConfigurationProperties createConfigurationProperties() {
|
||||
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
|
||||
BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
|
||||
List<String> bAddresses = new ArrayList<>();
|
||||
for (BrokerAddress bAddress : brokerAddresses) {
|
||||
bAddresses.add(bAddress.toString());
|
||||
}
|
||||
String[] foo = new String[bAddresses.size()];
|
||||
binderConfiguration.setBrokers(bAddresses.toArray(foo));
|
||||
binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
|
||||
return binderConfiguration;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int partitionSize(String topic) {
|
||||
return consumerFactory().createConsumer().partitionsFor(topic).size();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
|
||||
final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
|
||||
kafkaBinderConfigurationProperties.getZkSessionTimeout(), kafkaBinderConfigurationProperties.getZkConnectionTimeout(),
|
||||
ZKStringSerializer$.MODULE$);
|
||||
|
||||
return new ZkUtils(zkClient, null, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions, int replicationFactor, Properties topicConfig) {
|
||||
adminUtilsOperation.invokeCreateTopic(zkUtils, topic, partitions, replicationFactor, new Properties());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int invokePartitionSize(String topic, ZkUtils zkUtils) {
|
||||
return adminUtilsOperation.partitionSize(topic, zkUtils);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getKafkaOffsetHeaderKey() {
|
||||
return KafkaHeaders.OFFSET;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
|
||||
return new Kafka10TestBinder(kafkaBinderConfigurationProperties);
|
||||
}
|
||||
|
||||
@Before
|
||||
public void init() {
|
||||
String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
|
||||
if (multiplier != null) {
|
||||
timeoutMultiplier = Double.parseDouble(multiplier);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean usesExplicitRouting() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getClassUnderTestName() {
|
||||
return CLASS_UNDER_TEST_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Spy spyOn(final String name) {
|
||||
throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
|
||||
}
|
||||
|
||||
|
||||
private ConsumerFactory<byte[], byte[]> consumerFactory() {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
|
||||
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
|
||||
props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
|
||||
Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
|
||||
Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
|
||||
|
||||
return new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testCustomAvroSerialization() throws Exception {
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
|
||||
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
|
||||
ZKStringSerializer$.MODULE$);
|
||||
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
|
||||
Map<String, Object> schemaRegistryProps = new HashMap<>();
|
||||
schemaRegistryProps.put("kafkastore.connection.url", configurationProperties.getZkConnectionString());
|
||||
schemaRegistryProps.put("listeners", "http://0.0.0.0:8082");
|
||||
schemaRegistryProps.put("port", "8082");
|
||||
schemaRegistryProps.put("kafkastore.topic", "_schemas");
|
||||
SchemaRegistryConfig config = new SchemaRegistryConfig(schemaRegistryProps);
|
||||
SchemaRegistryRestApplication app = new SchemaRegistryRestApplication(config);
|
||||
Server server = app.createServer();
|
||||
server.start();
|
||||
long endTime = System.currentTimeMillis() + 5000;
|
||||
while(true) {
|
||||
if (server.isRunning()) {
|
||||
break;
|
||||
}
|
||||
else if (System.currentTimeMillis() > endTime) {
|
||||
Assertions.fail("Kafka Schema Registry Server failed to start");
|
||||
}
|
||||
}
|
||||
User1 firstOutboundFoo = new User1();
|
||||
String userName1 = "foo-name" + UUID.randomUUID().toString();
|
||||
String favColor1 = "foo-color" + UUID.randomUUID().toString();
|
||||
firstOutboundFoo.setName(userName1);
|
||||
firstOutboundFoo.setFavoriteColor(favColor1);
|
||||
Message<?> message = MessageBuilder.withPayload(firstOutboundFoo).build();
|
||||
SubscribableChannel moduleOutputChannel = new DirectChannel();
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
invokeCreateTopic(zkUtils, testTopicName, 6, 1, new Properties());
|
||||
configurationProperties.setAutoAddPartitions(true);
|
||||
Binder binder = getBinder(configurationProperties);
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.getExtension().getConfiguration().put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
|
||||
producerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
|
||||
producerProperties.setUseNativeEncoding(true);
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel, producerProperties);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
|
||||
consumerProperties.getExtension().getConfiguration().put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
|
||||
consumerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "test", moduleInputChannel, consumerProperties);
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
Assertions.assertThat(inbound).isNotNull();
|
||||
assertTrue(message.getPayload() instanceof User1);
|
||||
User1 receivedUser = (User1) message.getPayload();
|
||||
Assertions.assertThat(receivedUser.getName()).isEqualTo(userName1);
|
||||
Assertions.assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>1.3.0.M2</version>
|
||||
</parent>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-0.10.2-test</artifactId>
|
||||
<description>Spring Cloud Stream Kafka Binder 0.10.2 Tests</description>
|
||||
<url>http://projects.spring.io/spring-cloud</url>
|
||||
<organization>
|
||||
<name>Pivotal Software, Inc.</name>
|
||||
<url>http://www.spring.io</url>
|
||||
</organization>
|
||||
<properties>
|
||||
<main.basedir>${basedir}/../..</main.basedir>
|
||||
<kafka.version>0.10.2.1</kafka.version>
|
||||
<spring-kafka.version>1.2.2.RELEASE</spring-kafka.version>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
<scope>test</scope>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-clients</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.integration</groupId>
|
||||
<artifactId>spring-integration-kafka</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
<type>test-jar</type>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-schema</artifactId>
|
||||
<version>${spring-cloud-stream.version}</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.confluent</groupId>
|
||||
<artifactId>kafka-avro-serializer</artifactId>
|
||||
<version>3.2.2</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.confluent</groupId>
|
||||
<artifactId>kafka-schema-registry</artifactId>
|
||||
<version>3.2.2</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>confluent</id>
|
||||
<url>http://packages.confluent.io/maven/</url>
|
||||
</repository>
|
||||
</repositories>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>3.0.2</version>
|
||||
<executions>
|
||||
<execution>
|
||||
<goals>
|
||||
<goal>test-jar</goal>
|
||||
</goals>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
|
||||
</project>
|
||||
@@ -1,241 +0,0 @@
|
||||
/*
|
||||
* Copyright 2014-2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.UUID;
|
||||
|
||||
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
|
||||
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication;
|
||||
import kafka.utils.ZKStringSerializer$;
|
||||
import kafka.utils.ZkUtils;
|
||||
import org.I0Itec.zkclient.ZkClient;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
|
||||
import org.apache.kafka.common.serialization.Deserializer;
|
||||
import org.assertj.core.api.Assertions;
|
||||
import org.eclipse.jetty.server.Server;
|
||||
import org.junit.Before;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.Spy;
|
||||
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.integration.channel.DirectChannel;
|
||||
import org.springframework.integration.channel.QueueChannel;
|
||||
import org.springframework.kafka.core.ConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.support.KafkaHeaders;
|
||||
import org.springframework.kafka.test.core.BrokerAddress;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageChannel;
|
||||
import org.springframework.messaging.SubscribableChannel;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
/**
|
||||
* Integration tests for the {@link KafkaMessageChannelBinder}.
|
||||
*
|
||||
* This test specifically tests for the 0.10.2.x version of Kafka.
|
||||
*
|
||||
* @author Eric Bottard
|
||||
* @author Marius Bogoevici
|
||||
* @author Mark Fisher
|
||||
* @author Ilayaperumal Gopinathan
|
||||
*/
|
||||
public class Kafka_0_10_2_BinderTests extends KafkaBinderTests {
|
||||
|
||||
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
|
||||
|
||||
private Kafka10TestBinder binder;
|
||||
|
||||
private Kafka10AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
|
||||
|
||||
@Override
|
||||
protected void binderBindUnbindLatency() throws InterruptedException {
|
||||
Thread.sleep(500);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Kafka10TestBinder getBinder() {
|
||||
if (binder == null) {
|
||||
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
|
||||
binder = new Kafka10TestBinder(binderConfiguration);
|
||||
}
|
||||
return binder;
|
||||
}
|
||||
|
||||
protected KafkaBinderConfigurationProperties createConfigurationProperties() {
|
||||
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
|
||||
BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
|
||||
List<String> bAddresses = new ArrayList<>();
|
||||
for (BrokerAddress bAddress : brokerAddresses) {
|
||||
bAddresses.add(bAddress.toString());
|
||||
}
|
||||
String[] foo = new String[bAddresses.size()];
|
||||
binderConfiguration.setBrokers(bAddresses.toArray(foo));
|
||||
binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
|
||||
return binderConfiguration;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int partitionSize(String topic) {
|
||||
return consumerFactory().createConsumer().partitionsFor(topic).size();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
|
||||
final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
|
||||
kafkaBinderConfigurationProperties.getZkSessionTimeout(), kafkaBinderConfigurationProperties.getZkConnectionTimeout(),
|
||||
ZKStringSerializer$.MODULE$);
|
||||
|
||||
return new ZkUtils(zkClient, null, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions, int replicationFactor, Properties topicConfig) {
|
||||
adminUtilsOperation.invokeCreateTopic(zkUtils, topic, partitions, replicationFactor, new Properties());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int invokePartitionSize(String topic, ZkUtils zkUtils) {
|
||||
return adminUtilsOperation.partitionSize(topic, zkUtils);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getKafkaOffsetHeaderKey() {
|
||||
return KafkaHeaders.OFFSET;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
|
||||
return new Kafka10TestBinder(kafkaBinderConfigurationProperties);
|
||||
}
|
||||
|
||||
@Before
|
||||
public void init() {
|
||||
String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
|
||||
if (multiplier != null) {
|
||||
timeoutMultiplier = Double.parseDouble(multiplier);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean usesExplicitRouting() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getClassUnderTestName() {
|
||||
return CLASS_UNDER_TEST_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Spy spyOn(final String name) {
|
||||
throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
|
||||
}
|
||||
|
||||
|
||||
private ConsumerFactory<byte[], byte[]> consumerFactory() {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
|
||||
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
|
||||
props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
|
||||
Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
|
||||
Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
|
||||
|
||||
return new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testCustomAvroSerialization() throws Exception {
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
|
||||
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
|
||||
ZKStringSerializer$.MODULE$);
|
||||
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
|
||||
Map<String, Object> schemaRegistryProps = new HashMap<>();
|
||||
schemaRegistryProps.put("kafkastore.connection.url", configurationProperties.getZkConnectionString());
|
||||
schemaRegistryProps.put("listeners", "http://0.0.0.0:8082");
|
||||
schemaRegistryProps.put("port", "8082");
|
||||
schemaRegistryProps.put("kafkastore.topic", "_schemas");
|
||||
SchemaRegistryConfig config = new SchemaRegistryConfig(schemaRegistryProps);
|
||||
SchemaRegistryRestApplication app = new SchemaRegistryRestApplication(config);
|
||||
Server server = app.createServer();
|
||||
server.start();
|
||||
long endTime = System.currentTimeMillis() + 5000;
|
||||
while(true) {
|
||||
if (server.isRunning()) {
|
||||
break;
|
||||
}
|
||||
else if (System.currentTimeMillis() > endTime) {
|
||||
Assertions.fail("Kafka Schema Registry Server failed to start");
|
||||
}
|
||||
}
|
||||
User1 firstOutboundFoo = new User1();
|
||||
String userName1 = "foo-name" + UUID.randomUUID().toString();
|
||||
String favColor1 = "foo-color" + UUID.randomUUID().toString();
|
||||
firstOutboundFoo.setName(userName1);
|
||||
firstOutboundFoo.setFavoriteColor(favColor1);
|
||||
Message<?> message = MessageBuilder.withPayload(firstOutboundFoo).build();
|
||||
SubscribableChannel moduleOutputChannel = new DirectChannel();
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
invokeCreateTopic(zkUtils, testTopicName, 6, 1, new Properties());
|
||||
configurationProperties.setAutoAddPartitions(true);
|
||||
Binder binder = getBinder(configurationProperties);
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.getExtension().getConfiguration().put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
|
||||
producerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
|
||||
producerProperties.setUseNativeEncoding(true);
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel, producerProperties);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
|
||||
consumerProperties.getExtension().getConfiguration().put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
|
||||
consumerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "test", moduleInputChannel, consumerProperties);
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
Assertions.assertThat(inbound).isNotNull();
|
||||
assertTrue(message.getPayload() instanceof User1);
|
||||
User1 receivedUser = (User1) message.getPayload();
|
||||
Assertions.assertThat(receivedUser.getName()).isEqualTo(userName1);
|
||||
Assertions.assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
}
|
||||
@@ -1,85 +0,0 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.reflect.Nullable;
import org.apache.avro.specific.SpecificRecordBase;

import org.springframework.core.io.ClassPathResource;

/**
 * @author Marius Bogoevici
 * @author Ilayaperumal Gopinathan
 */
public class User1 extends SpecificRecordBase {

  @Nullable
  private String name;

  @Nullable
  private String favoriteColor;

  public String getName() {
    return this.name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public String getFavoriteColor() {
    return this.favoriteColor;
  }

  public void setFavoriteColor(String favoriteColor) {
    this.favoriteColor = favoriteColor;
  }

  @Override
  public Schema getSchema() {
    try {
      return new Schema.Parser().parse(new ClassPathResource("schemas/users_v1.schema").getInputStream());
    }
    catch (IOException e) {
      throw new IllegalStateException(e);
    }
  }

  @Override
  public Object get(int i) {
    if (i == 0) {
      return getName().toString();
    }
    if (i == 1) {
      return getFavoriteColor().toString();
    }
    return null;
  }

  @Override
  public void put(int i, Object o) {
    if (i == 0) {
      setName((String) o);
    }
    if (i == 1) {
      setFavoriteColor((String) o);
    }
  }
}
0  spring-cloud-stream-binder-kafka-core/.jdk8 (Normal file)
@@ -0,0 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99
@@ -1,46 +1,49 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
    <version>1.3.0.M2</version>
  </parent>
  <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
  <description>Spring Cloud Stream Kafka Binder Core</description>
  <url>http://projects.spring.io/spring-cloud</url>
  <organization>
    <name>Pivotal Software, Inc.</name>
    <url>http://www.spring.io</url>
  </organization>
  <properties>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
    <version>2.0.0.RC1</version>
  </parent>
  <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
  <description>Spring Cloud Stream Kafka Binder Core</description>
  <url>http://projects.spring.io/spring-cloud</url>
  <organization>
    <name>Pivotal Software, Inc.</name>
    <url>http://www.spring.io</url>
  </organization>

  </properties>

  <dependencies>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.11</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.integration</groupId>
      <artifactId>spring-integration-kafka</artifactId>
      <version>${spring-integration-kafka.version}</version>
      <exclusions>
        <exclusion>
          <groupId>org.apache.avro</groupId>
          <artifactId>avro-compiler</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
  </dependencies>
  <dependencies>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.integration</groupId>
      <artifactId>spring-integration-kafka</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-configuration-processor</artifactId>
      <optional>true</optional>
    </dependency>
    <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-test</artifactId>
      <scope>test</scope>
    </dependency>
  </dependencies>

</project>
@@ -1,75 +0,0 @@
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.admin;

import java.util.Properties;

import kafka.utils.ZkUtils;

/**
 * API around {@link kafka.admin.AdminUtils} to support
 * various versions of Kafka brokers.
 *
 * Note: Implementations that support Kafka brokers other than 0.10, need to use
 * a possible strategy that involves reflection around {@link kafka.admin.AdminUtils}.
 *
 * @author Soby Chacko
 */
public interface AdminUtilsOperation {

  /**
   * Invoke {@link kafka.admin.AdminUtils#addPartitions}
   *
   * @param zkUtils Zookeeper utils
   * @param topic name of the topic
   * @param numPartitions
   * @param replicaAssignmentStr
   * @param checkBrokerAvailable
   */
  void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
      String replicaAssignmentStr, boolean checkBrokerAvailable);

  /**
   * Invoke {@link kafka.admin.AdminUtils#fetchTopicMetadataFromZk}
   *
   * @param topic name
   * @param zkUtils zookeeper utils
   * @return error code
   */
  short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils);

  /**
   * Find partition size from Kafka broker using {@link kafka.admin.AdminUtils}
   *
   * @param topic name
   * @param zkUtils zookeeper utils
   * @return partition size
   */
  int partitionSize(String topic, ZkUtils zkUtils);

  /**
   * Invoke {@link kafka.admin.AdminUtils#createTopic}
   *
   * @param zkUtils zookeeper utils
   * @param topic name
   * @param partitions
   * @param replicationFactor
   * @param topicConfig
   */
  void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
      int replicationFactor, Properties topicConfig);
}
@@ -1,145 +0,0 @@
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.admin;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Properties;

import kafka.utils.ZkUtils;

import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;

/**
 * @author Soby Chacko
 */
public class Kafka09AdminUtilsOperation implements AdminUtilsOperation {

  private static Class<?> ADMIN_UTIL_CLASS;

  static {
    try {
      ADMIN_UTIL_CLASS = ClassUtils.forName("kafka.admin.AdminUtils", null);
    }
    catch (ClassNotFoundException e) {
      throw new IllegalStateException("AdminUtils class not found", e);
    }
  }

  public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
      String replicaAssignmentStr, boolean checkBrokerAvailable) {
    try {
      Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
      Method addPartitions = null;
      for (Method m : declaredMethods) {
        if (m.getName().equals("addPartitions")) {
          addPartitions = m;
        }
      }
      if (addPartitions != null) {
        addPartitions.invoke(null, zkUtils, topic, numPartitions,
            replicaAssignmentStr, checkBrokerAvailable);
      }
      else {
        throw new InvocationTargetException(
            new RuntimeException("method not found"));
      }
    }
    catch (InvocationTargetException e) {
      ReflectionUtils.handleInvocationTargetException(e);
    }
    catch (IllegalAccessException e) {
      ReflectionUtils.handleReflectionException(e);
    }

  }

  public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
    try {
      Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
      Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
      Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);
      Method errorCodeMethod = ReflectionUtils.findMethod(topicMetadataClass, "errorCode");
      return (short) errorCodeMethod.invoke(result);
    }
    catch (ClassNotFoundException e) {
      throw new IllegalStateException("AdminUtils class not found", e);
    }
    catch (InvocationTargetException e) {
      ReflectionUtils.handleInvocationTargetException(e);
    }
    catch (IllegalAccessException e) {
      ReflectionUtils.handleReflectionException(e);
    }
    return 0;
  }

  @SuppressWarnings("unchecked")
  public int partitionSize(String topic, ZkUtils zkUtils) {
    try {
      Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
      Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
      Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);

      Method partitionsMetadata = ReflectionUtils.findMethod(topicMetadataClass, "partitionsMetadata");
      scala.collection.Seq<kafka.api.PartitionMetadata> partitionSize =
          (scala.collection.Seq<kafka.api.PartitionMetadata>)partitionsMetadata.invoke(result);

      return partitionSize.size();
    }
    catch (ClassNotFoundException e) {
      throw new IllegalStateException("AdminUtils class not found", e);
    }
    catch (InvocationTargetException e) {
      ReflectionUtils.handleInvocationTargetException(e);
    }
    catch (IllegalAccessException e) {
      ReflectionUtils.handleReflectionException(e);
    }
    return 0;

  }

  public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
      int replicationFactor, Properties topicConfig) {
    try {
      Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
      Method createTopic = null;
      for (Method m : declaredMethods) {
        if (m.getName().equals("createTopic")) {
          createTopic = m;
          break;
        }
      }
      if (createTopic != null) {
        createTopic.invoke(null, zkUtils, topic, partitions,
            replicationFactor, topicConfig);
      }
      else {
        throw new InvocationTargetException(
            new RuntimeException("method not found"));
      }
    }
    catch (InvocationTargetException e) {
      ReflectionUtils.handleInvocationTargetException(e);
    }
    catch (IllegalAccessException e) {
      ReflectionUtils.handleReflectionException(e);
    }
  }
}
@@ -1,54 +0,0 @@
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.admin;

import java.util.Properties;

import kafka.admin.AdminUtils;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.requests.MetadataResponse;

/**
 * @author Soby Chacko
 */
public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {

  public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
      String replicaAssignmentStr, boolean checkBrokerAvailable) {
    AdminUtils.addPartitions(zkUtils, topic, numPartitions, replicaAssignmentStr, checkBrokerAvailable, null);
  }

  public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {

    MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
    return topicMetadata.error().code();
  }

  @SuppressWarnings("unchecked")
  public int partitionSize(String topic, ZkUtils zkUtils) {

    MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
    return topicMetadata.partitionMetadata().size();
  }

  public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
      int replicationFactor, Properties topicConfig) {

    AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor,
        topicConfig, null);
  }
}
@@ -18,6 +18,7 @@ package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.security.auth.login.AppConfigurationEntry;
|
||||
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2015-2016 the original author or authors.
|
||||
* Copyright 2015-2017 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -35,10 +35,16 @@ import org.springframework.util.StringUtils;
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
* @author Rafal Zukowski
|
||||
*/
|
||||
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
|
||||
public class KafkaBinderConfigurationProperties {
|
||||
|
||||
private static final String DEFAULT_KAFKA_CONNECTION_STRING = "localhost:9092";
|
||||
|
||||
private final Transaction transaction = new Transaction();
|
||||
|
||||
@Autowired(required = false)
|
||||
private KafkaProperties kafkaProperties;
|
||||
|
||||
@@ -78,7 +84,7 @@ public class KafkaBinderConfigurationProperties {
|
||||
*/
|
||||
private int zkConnectionTimeout = 10000;
|
||||
|
||||
private int requiredAcks = 1;
|
||||
private String requiredAcks = "1";
|
||||
|
||||
private int replicationFactor = 1;
|
||||
|
||||
@@ -88,8 +94,22 @@ public class KafkaBinderConfigurationProperties {
|
||||
|
||||
private int queueSize = 8192;
|
||||
|
||||
/**
|
||||
* Time to wait to get partition information in seconds; default 60.
|
||||
*/
|
||||
private int healthTimeout = 60;
|
||||
|
||||
private JaasLoginModuleConfiguration jaas;
|
||||
|
||||
/**
|
||||
* The bean name of a custom header mapper to use instead of a {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
|
||||
*/
|
||||
private String headerMapperBeanName;
|
||||
|
||||
public Transaction getTransaction() {
|
||||
return this.transaction;
|
||||
}
|
||||
|
||||
public String getZkConnectionString() {
|
||||
return toConnectionString(this.zkNodes, this.defaultZkPort);
|
||||
}
|
||||
@@ -98,6 +118,10 @@ public class KafkaBinderConfigurationProperties {
|
||||
return toConnectionString(this.brokers, this.defaultBrokerPort);
|
||||
}
|
||||
|
||||
public String getDefaultKafkaConnectionString() {
|
||||
return DEFAULT_KAFKA_CONNECTION_STRING;
|
||||
}
|
||||
|
||||
public String[] getHeaders() {
|
||||
return this.headers;
|
||||
}
|
||||
@@ -196,11 +220,15 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.maxWait = maxWait;
|
||||
}
|
||||
|
||||
public int getRequiredAcks() {
|
||||
public String getRequiredAcks() {
|
||||
return this.requiredAcks;
|
||||
}
|
||||
|
||||
public void setRequiredAcks(int requiredAcks) {
|
||||
this.requiredAcks = String.valueOf(requiredAcks);
|
||||
}
|
||||
|
||||
public void setRequiredAcks(String requiredAcks) {
|
||||
this.requiredAcks = requiredAcks;
|
||||
}
|
||||
|
||||
@@ -228,6 +256,14 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.minPartitionCount = minPartitionCount;
|
||||
}
|
||||
|
||||
public int getHealthTimeout() {
|
||||
return this.healthTimeout;
|
||||
}
|
||||
|
||||
public void setHealthTimeout(int healthTimeout) {
|
||||
this.healthTimeout = healthTimeout;
|
||||
}
|
||||
|
||||
public int getQueueSize() {
|
||||
return this.queueSize;
|
||||
}
|
||||
@@ -338,4 +374,32 @@ public class KafkaBinderConfigurationProperties {
|
||||
this.jaas = jaas;
|
||||
}
|
||||
|
||||
public String getHeaderMapperBeanName() {
|
||||
return this.headerMapperBeanName;
|
||||
}
|
||||
|
||||
public void setHeaderMapperBeanName(String headerMapperBeanName) {
|
||||
this.headerMapperBeanName = headerMapperBeanName;
|
||||
}
|
||||
|
||||
public static class Transaction {
|
||||
|
||||
private final KafkaProducerProperties producer = new KafkaProducerProperties();
|
||||
|
||||
private String transactionIdPrefix;
|
||||
|
||||
public String getTransactionIdPrefix() {
|
||||
return this.transactionIdPrefix;
|
||||
}
|
||||
|
||||
public void setTransactionIdPrefix(String transactionIdPrefix) {
|
||||
this.transactionIdPrefix = transactionIdPrefix;
|
||||
}
|
||||
|
||||
public KafkaProducerProperties getProducer() {
|
||||
return this.producer;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ import java.util.Map;
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*
|
||||
* <p>
|
||||
* Thanks to Laszlo Szabo for providing the initial patch for generic property support.
|
||||
@@ -29,6 +31,28 @@ import java.util.Map;
|
||||
*/
|
||||
public class KafkaConsumerProperties {
|
||||
|
||||
public enum StartOffset {
|
||||
earliest(-2L),
|
||||
latest(-1L);
|
||||
|
||||
private final long referencePoint;
|
||||
|
||||
StartOffset(long referencePoint) {
|
||||
this.referencePoint = referencePoint;
|
||||
}
|
||||
|
||||
public long getReferencePoint() {
|
||||
return this.referencePoint;
|
||||
}
|
||||
}
|
||||
|
||||
public enum StandardHeaders {
|
||||
none,
|
||||
id,
|
||||
timestamp,
|
||||
both
|
||||
}
|
||||
|
||||
private boolean autoRebalanceEnabled = true;
|
||||
|
||||
private boolean autoCommitOffset = true;
|
||||
@@ -41,8 +65,18 @@ public class KafkaConsumerProperties {
|
||||
|
||||
private String dlqName;
|
||||
|
||||
private KafkaProducerProperties dlqProducerProperties = new KafkaProducerProperties();
|
||||
|
||||
private int recoveryInterval = 5000;
|
||||
|
||||
private String[] trustedPackages;
|
||||
|
||||
private StandardHeaders standardHeaders = StandardHeaders.none;
|
||||
|
||||
private String converterBeanName;
|
||||
|
||||
private long idleEventInterval = 30_000;
|
||||
|
||||
private Map<String, String> configuration = new HashMap<>();
|
||||
|
||||
public boolean isAutoCommitOffset() {
|
||||
@@ -93,21 +127,6 @@ public class KafkaConsumerProperties {
|
||||
this.autoRebalanceEnabled = autoRebalanceEnabled;
|
||||
}
|
||||
|
||||
public enum StartOffset {
|
||||
earliest(-2L),
|
||||
latest(-1L);
|
||||
|
||||
private final long referencePoint;
|
||||
|
||||
StartOffset(long referencePoint) {
|
||||
this.referencePoint = referencePoint;
|
||||
}
|
||||
|
||||
public long getReferencePoint() {
|
||||
return this.referencePoint;
|
||||
}
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
}
|
||||
@@ -123,4 +142,44 @@ public class KafkaConsumerProperties {
|
||||
public void setDlqName(String dlqName) {
|
||||
this.dlqName = dlqName;
|
||||
}
|
||||
|
||||
public String[] getTrustedPackages() {
|
||||
return trustedPackages;
|
||||
}
|
||||
|
||||
public void setTrustedPackages(String[] trustedPackages) {
|
||||
this.trustedPackages = trustedPackages;
|
||||
}
|
||||
|
||||
public KafkaProducerProperties getDlqProducerProperties() {
|
||||
return dlqProducerProperties;
|
||||
}
|
||||
|
||||
public void setDlqProducerProperties(KafkaProducerProperties dlqProducerProperties) {
|
||||
this.dlqProducerProperties = dlqProducerProperties;
|
||||
}
|
||||
public StandardHeaders getStandardHeaders() {
|
||||
return this.standardHeaders;
|
||||
}
|
||||
|
||||
public void setStandardHeaders(StandardHeaders standardHeaders) {
|
||||
this.standardHeaders = standardHeaders;
|
||||
}
|
||||
|
||||
public String getConverterBeanName() {
|
||||
return this.converterBeanName;
|
||||
}
|
||||
|
||||
public void setConverterBeanName(String converterBeanName) {
|
||||
this.converterBeanName = converterBeanName;
|
||||
}
|
||||
|
||||
public long getIdleEventInterval() {
|
||||
return this.idleEventInterval;
|
||||
}
|
||||
|
||||
public void setIdleEventInterval(long idleEventInterval) {
|
||||
this.idleEventInterval = idleEventInterval;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ import org.springframework.cloud.stream.binder.ExtendedBindingProperties;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Gary Russell
|
||||
*/
|
||||
@ConfigurationProperties("spring.cloud.stream.kafka")
|
||||
public class KafkaExtendedBindingProperties
|
||||
@@ -40,22 +41,45 @@ public class KafkaExtendedBindingProperties
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getConsumer() != null) {
|
||||
return this.bindings.get(channelName).getConsumer();
|
||||
public synchronized KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
if (bindings.containsKey(channelName)) {
|
||||
if (bindings.get(channelName).getConsumer() != null) {
|
||||
return bindings.get(channelName).getConsumer();
|
||||
}
|
||||
else {
|
||||
KafkaConsumerProperties properties = new KafkaConsumerProperties();
|
||||
this.bindings.get(channelName).setConsumer(properties);
|
||||
return properties;
|
||||
}
|
||||
}
|
||||
else {
|
||||
return new KafkaConsumerProperties();
|
||||
KafkaConsumerProperties properties = new KafkaConsumerProperties();
|
||||
KafkaBindingProperties rbp = new KafkaBindingProperties();
|
||||
rbp.setConsumer(properties);
|
||||
bindings.put(channelName, rbp);
|
||||
return properties;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getProducer() != null) {
|
||||
return this.bindings.get(channelName).getProducer();
|
||||
public synchronized KafkaProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
if (bindings.containsKey(channelName)) {
|
||||
if (bindings.get(channelName).getProducer() != null) {
|
||||
return bindings.get(channelName).getProducer();
|
||||
}
|
||||
else {
|
||||
KafkaProducerProperties properties = new KafkaProducerProperties();
|
||||
this.bindings.get(channelName).setProducer(properties);
|
||||
return properties;
|
||||
}
|
||||
}
|
||||
else {
|
||||
return new KafkaProducerProperties();
|
||||
KafkaProducerProperties properties = new KafkaProducerProperties();
|
||||
KafkaBindingProperties rbp = new KafkaBindingProperties();
|
||||
rbp.setProducer(properties);
|
||||
bindings.put(channelName, rbp);
|
||||
return properties;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -16,16 +16,17 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.properties;
|
||||
|
||||
import org.springframework.expression.Expression;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.validation.constraints.NotNull;
|
||||
|
||||
import org.springframework.expression.Expression;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Henryk Konsek
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public class KafkaProducerProperties {
|
||||
|
||||
@@ -39,6 +40,8 @@ public class KafkaProducerProperties {
|
||||
|
||||
private Expression messageKeyExpression;
|
||||
|
||||
private String[] headerPatterns;
|
||||
|
||||
private Map<String, String> configuration = new HashMap<>();
|
||||
|
||||
public int getBufferSize() {
|
||||
@@ -82,6 +85,14 @@ public class KafkaProducerProperties {
|
||||
this.messageKeyExpression = messageKeyExpression;
|
||||
}
|
||||
|
||||
public String[] getHeaderPatterns() {
|
||||
return this.headerPatterns;
|
||||
}
|
||||
|
||||
public void setHeaderPatterns(String[] headerPatterns) {
|
||||
this.headerPatterns = headerPatterns;
|
||||
}
|
||||
|
||||
public Map<String, String> getConfiguration() {
|
||||
return this.configuration;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2014-2017 the original author or authors.
|
||||
* Copyright 2014-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -17,21 +17,33 @@
|
||||
package org.springframework.cloud.stream.binder.kafka.provisioning;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Properties;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import kafka.common.ErrorMapping;
|
||||
import kafka.utils.ZkUtils;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.AdminClientConfig;
|
||||
import org.apache.kafka.clients.admin.CreatePartitionsResult;
|
||||
import org.apache.kafka.clients.admin.CreateTopicsResult;
|
||||
import org.apache.kafka.clients.admin.DescribeTopicsResult;
|
||||
import org.apache.kafka.clients.admin.ListTopicsResult;
|
||||
import org.apache.kafka.clients.admin.NewPartitions;
|
||||
import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.common.KafkaFuture;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.apache.kafka.common.security.JaasUtils;
|
||||
|
||||
import org.springframework.beans.factory.InitializingBean;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.BinderException;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
@@ -40,13 +52,12 @@ import org.springframework.cloud.stream.provisioning.ConsumerDestination;
|
||||
import org.springframework.cloud.stream.provisioning.ProducerDestination;
|
||||
import org.springframework.cloud.stream.provisioning.ProvisioningException;
|
||||
import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
|
||||
import org.springframework.retry.RetryCallback;
|
||||
import org.springframework.retry.RetryContext;
|
||||
import org.springframework.retry.RetryOperations;
|
||||
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
|
||||
import org.springframework.retry.policy.SimpleRetryPolicy;
|
||||
import org.springframework.retry.support.RetryTemplate;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
@@ -60,18 +71,25 @@ import org.springframework.util.StringUtils;
|
||||
public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
|
||||
ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {
|
||||
|
||||
private static final int DEFAULT_OPERATION_TIMEOUT = 30;
|
||||
|
||||
private final Log logger = LogFactory.getLog(getClass());
|
||||
|
||||
private final KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
private final AdminUtilsOperation adminUtilsOperation;
|
||||
private final AdminClient adminClient;
|
||||
|
||||
private RetryOperations metadataRetryOperations;
|
||||
|
||||
private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;
|
||||
|
||||
public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
|
||||
AdminUtilsOperation adminUtilsOperation) {
|
||||
KafkaProperties kafkaProperties) {
|
||||
Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
|
||||
Map<String, Object> adminClientProperties = kafkaProperties.buildAdminProperties();
|
||||
this.configurationProperties = kafkaBinderConfigurationProperties;
|
||||
this.adminUtilsOperation = adminUtilsOperation;
|
||||
normalalizeBootPropsWithBinder(adminClientProperties, kafkaProperties, kafkaBinderConfigurationProperties);
|
||||
this.adminClient = AdminClient.create(adminClientProperties);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -105,14 +123,20 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
this.logger.info("Using kafka topic for outbound: " + name);
|
||||
}
|
||||
KafkaTopicUtils.validateTopicName(name);
|
||||
createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, properties.getPartitionCount(), false);
|
||||
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
|
||||
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
|
||||
this.configurationProperties.getZkSessionTimeout(),
|
||||
this.configurationProperties.getZkConnectionTimeout(),
|
||||
JaasUtils.isZkSecurityEnabled());
|
||||
int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
|
||||
return new KafkaProducerDestination(name, partitions);
|
||||
createTopic(name, properties.getPartitionCount(), false);
|
||||
if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
|
||||
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
|
||||
|
||||
try {
|
||||
Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(name);
|
||||
int partitions = topicDescription.partitions().size();
|
||||
return new KafkaProducerDestination(name, partitions);
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new ProvisioningException("Problems encountered with partitions finding", e);
|
||||
}
|
||||
}
|
||||
else {
|
||||
return new KafkaProducerDestination(name);
|
||||
@@ -129,30 +153,104 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
throw new IllegalArgumentException("Instance count cannot be zero");
|
||||
}
|
||||
int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
|
||||
createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, partitionCount, properties.getExtension().isAutoRebalanceEnabled());
|
||||
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
|
||||
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
|
||||
this.configurationProperties.getZkSessionTimeout(),
|
||||
this.configurationProperties.getZkConnectionTimeout(),
|
||||
JaasUtils.isZkSecurityEnabled());
|
||||
int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
|
||||
if (properties.getExtension().isEnableDlq() && !anonymous) {
|
||||
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
|
||||
properties.getExtension().getDlqName() : "error." + name + "." + group;
|
||||
createTopicAndPartitions(dlqTopic, partitions, properties.getExtension().isAutoRebalanceEnabled());
|
||||
return new KafkaConsumerDestination(name, partitions, dlqTopic);
|
||||
createTopic(name, partitionCount, properties.getExtension().isAutoRebalanceEnabled());
|
||||
if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
|
||||
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
|
||||
try {
|
||||
Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(name);
|
||||
int partitions = topicDescription.partitions().size();
|
||||
ConsumerDestination dlqTopic = createDlqIfNeedBe(name, group, properties, anonymous, partitions);
|
||||
if (dlqTopic != null) {
|
||||
return dlqTopic;
|
||||
}
|
||||
return new KafkaConsumerDestination(name, partitions);
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new ProvisioningException("provisioning exception", e);
|
||||
}
|
||||
return new KafkaConsumerDestination(name, partitions);
|
||||
}
|
||||
return new KafkaConsumerDestination(name);
|
||||
}
|
||||
|
||||
private void createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker) {
|
||||
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
|
||||
/**
|
||||
* In general, binder properties supersede boot kafka properties.
|
||||
* The one exception is the bootstrap servers. In that case, we should only override
|
||||
* the boot properties if (there is a binder property AND it is a non-default value)
|
||||
* OR (if there is no boot property); this is needed because the binder property
|
||||
* never returns a null value.
|
||||
* @param adminProps the admin properties to normalize.
|
||||
* @param bootProps the boot kafka properties.
|
||||
* @param binderProps the binder kafka properties.
|
||||
*/
|
||||
private void normalalizeBootPropsWithBinder(Map<String, Object> adminProps, KafkaProperties bootProps,
|
||||
KafkaBinderConfigurationProperties binderProps) {
|
||||
// First deal with the outlier
|
||||
String kafkaConnectionString = binderProps.getKafkaConnectionString();
|
||||
if (ObjectUtils.isEmpty(adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
|
||||
|| !kafkaConnectionString.equals(binderProps.getDefaultKafkaConnectionString())) {
|
||||
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectionString);
|
||||
}
|
||||
// Now override any boot values with binder values
|
||||
Map<String, String> binderProperties = binderProps.getConfiguration();
|
||||
Set<String> adminConfigNames = AdminClientConfig.configNames();
|
||||
binderProperties.forEach((key, value) -> {
|
||||
if (key.equals(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)) {
|
||||
throw new IllegalStateException(
|
||||
"Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
|
||||
}
|
||||
if (adminConfigNames.contains(key)) {
|
||||
Object replaced = adminProps.put(key, value);
|
||||
if (replaced != null && this.logger.isDebugEnabled()) {
|
||||
logger.debug("Overrode boot property: [" + key + "], from: [" + replaced + "] to: [" + value + "]");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private ConsumerDestination createDlqIfNeedBe(String name, String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties,
|
||||
boolean anonymous, int partitions) {
|
||||
if (properties.getExtension().isEnableDlq() && !anonymous) {
|
||||
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
|
||||
properties.getExtension().getDlqName() : "error." + name + "." + group;
|
||||
try {
|
||||
createTopicAndPartitions(dlqTopic, partitions, properties.getExtension().isAutoRebalanceEnabled());
|
||||
}
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("provisioning exception", throwable);
|
||||
}
|
||||
}
|
||||
return new KafkaConsumerDestination(name, partitions, dlqTopic);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private void createTopic(String name, int partitionCount, boolean tolerateLowerPartitionsOnBroker) {
|
||||
try {
|
||||
createTopicIfNecessary(name, partitionCount, tolerateLowerPartitionsOnBroker);
|
||||
}
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("provisioning exception", throwable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void createTopicIfNecessary(final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker) throws Throwable {
|
||||
if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
|
||||
createTopicAndPartitions(topicName, partitionCount, tolerateLowerPartitionsOnBroker);
|
||||
}
|
||||
else if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation == null) {
|
||||
else if (this.configurationProperties.isAutoCreateTopics() && adminClient == null) {
|
||||
this.logger.warn("Auto creation of topics is enabled, but Kafka AdminUtils class is not present on the classpath. " +
|
||||
"No topic will be created by the binder");
|
||||
}
|
||||
@@ -166,75 +264,68 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
* desired number.
|
||||
*/
|
||||
private void createTopicAndPartitions(final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker) {
|
||||
boolean tolerateLowerPartitionsOnBroker) throws Throwable {
|
||||
ListTopicsResult listTopicsResult = adminClient.listTopics();
|
||||
KafkaFuture<Set<String>> namesFutures = listTopicsResult.names();
|
||||
|
||||
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
|
||||
this.configurationProperties.getZkSessionTimeout(),
|
||||
this.configurationProperties.getZkConnectionTimeout(),
|
||||
JaasUtils.isZkSecurityEnabled());
|
||||
try {
|
||||
short errorCode = adminUtilsOperation.errorCodeFromTopicMetadata(topicName, zkUtils);
|
||||
if (errorCode == ErrorMapping.NoError()) {
|
||||
// only consider minPartitionCount for resizing if autoAddPartitions is true
|
||||
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
|
||||
? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
|
||||
: partitionCount;
|
||||
int partitionSize = adminUtilsOperation.partitionSize(topicName, zkUtils);
|
||||
|
||||
if (partitionSize < effectivePartitionCount) {
|
||||
if (this.configurationProperties.isAutoAddPartitions()) {
|
||||
adminUtilsOperation.invokeAddPartitions(zkUtils, topicName, effectivePartitionCount, null, false);
|
||||
}
|
||||
else if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling " +
|
||||
"`autoAddPartitions`");
|
||||
}
|
||||
Set<String> names = namesFutures.get(operationTimeout, TimeUnit.SECONDS);
|
||||
if (names.contains(topicName)) {
|
||||
// only consider minPartitionCount for resizing if autoAddPartitions is true
|
||||
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
|
||||
? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
|
||||
: partitionCount;
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(topicName));
|
||||
KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult.all();
|
||||
Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture.get(operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(topicName);
|
||||
int partitionSize = topicDescription.partitions().size();
|
||||
if (partitionSize < effectivePartitionCount) {
|
||||
if (this.configurationProperties.isAutoAddPartitions()) {
|
||||
CreatePartitionsResult partitions = adminClient.createPartitions(
|
||||
Collections.singletonMap(topicName, NewPartitions.increaseTo(effectivePartitionCount)));
|
||||
partitions.all().get(operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
else if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling " +
|
||||
"`autoAddPartitions`");
|
||||
}
|
||||
}
|
||||
else if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) {
|
||||
// always consider minPartitionCount for topic creation
|
||||
final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount);
|
||||
|
||||
this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
|
||||
|
||||
@Override
|
||||
public Object doWithRetry(RetryContext context) throws RuntimeException {
|
||||
|
||||
try {
|
||||
adminUtilsOperation.invokeCreateTopic(zkUtils, topicName, effectivePartitionCount,
|
||||
configurationProperties.getReplicationFactor(), new Properties());
|
||||
}
|
||||
catch (Exception e) {
|
||||
String exceptionClass = e.getClass().getName();
|
||||
if (exceptionClass.equals("kafka.common.TopicExistsException")
|
||||
|| exceptionClass.equals("org.apache.kafka.common.errors.TopicExistsException")) {
|
||||
if (logger.isWarnEnabled()) {
|
||||
logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("Error fetching Kafka topic metadata: ",
|
||||
ErrorMapping.exceptionFor(errorCode));
|
||||
}
|
||||
}
|
||||
finally {
|
||||
zkUtils.close();
|
||||
else if (!names.contains(topicName)) {
|
||||
// always consider minPartitionCount for topic creation
|
||||
final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount);
|
||||
this.metadataRetryOperations.execute(context -> {
|
||||
|
||||
NewTopic newTopic = new NewTopic(topicName, effectivePartitionCount,
|
||||
(short) configurationProperties.getReplicationFactor());
|
||||
CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singletonList(newTopic));
|
||||
try {
|
||||
createTopicsResult.all().get(operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (Exception e) {
|
||||
if (e instanceof ExecutionException) {
|
||||
String exceptionMessage = e.getMessage();
|
||||
if (exceptionMessage.contains("org.apache.kafka.common.errors.TopicExistsException")) {
|
||||
if (logger.isWarnEnabled()) {
|
||||
logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
logger.error("Failed to create topics", e.getCause());
|
||||
throw e.getCause();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -243,27 +334,23 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
final Callable<Collection<PartitionInfo>> callable) {
|
||||
try {
|
||||
return this.metadataRetryOperations
|
||||
.execute(new RetryCallback<Collection<PartitionInfo>, Exception>() {
|
||||
|
||||
@Override
|
||||
public Collection<PartitionInfo> doWithRetry(RetryContext context) throws Exception {
|
||||
Collection<PartitionInfo> partitions = callable.call();
|
||||
// do a sanity check on the partition set
|
||||
int partitionSize = partitions.size();
|
||||
if (partitionSize < partitionCount) {
|
||||
if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (partitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ") + "been found instead");
|
||||
}
|
||||
.execute(context -> {
|
||||
Collection<PartitionInfo> partitions = callable.call();
|
||||
// do a sanity check on the partition set
|
||||
int partitionSize = partitions.size();
|
||||
if (partitionSize < partitionCount) {
|
||||
if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (partitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ") + "been found instead");
|
||||
}
|
||||
return partitions;
|
||||
}
|
||||
return partitions;
|
||||
});
|
||||
}
|
||||
catch (Exception e) {
|
||||
@@ -333,6 +420,10 @@ public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsu
|
||||
return this.consumerDestinationName;
|
||||
}
|
||||
|
||||
public String getDlqName() {
|
||||
return dlqName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "KafkaConsumerDestination{" +
|
||||
|
||||
@@ -0,0 +1,98 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.provisioning;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.CommonClientConfigs;
|
||||
import org.apache.kafka.clients.admin.AdminClient;
|
||||
import org.apache.kafka.clients.admin.AdminClientConfig;
|
||||
import org.apache.kafka.common.config.SslConfigs;
|
||||
import org.apache.kafka.common.network.SslChannelBuilder;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.core.io.ClassPathResource;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.fail;
|
||||
|
||||
|
||||
/**
|
||||
* @author Gary Russell
|
||||
* @since 2.0
|
||||
*
|
||||
*/
|
||||
public class KafkaTopicProvisionerTests {
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Test
|
||||
public void bootPropertiesOverriddenExceptServers() throws Exception {
|
||||
KafkaProperties bootConfig = new KafkaProperties();
|
||||
bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
|
||||
bootConfig.setBootstrapServers(Collections.singletonList("localhost:1234"));
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties();
|
||||
binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
|
||||
ClassPathResource ts = new ClassPathResource("test.truststore.ks");
|
||||
binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
|
||||
binderConfig.setBrokers("localhost:9092");
|
||||
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
|
||||
AdminClient adminClient = KafkaTestUtils.getPropertyValue(provisioner, "adminClient", AdminClient.class);
|
||||
assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
|
||||
Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
|
||||
assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
|
||||
}
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Test
|
||||
public void bootPropertiesOverriddenIncludingServers() throws Exception {
|
||||
KafkaProperties bootConfig = new KafkaProperties();
|
||||
bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
|
||||
bootConfig.setBootstrapServers(Collections.singletonList("localhost:9092"));
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties();
|
||||
binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
|
||||
ClassPathResource ts = new ClassPathResource("test.truststore.ks");
|
||||
binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
|
||||
binderConfig.setBrokers("localhost:1234");
|
||||
KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
|
||||
AdminClient adminClient = KafkaTestUtils.getPropertyValue(provisioner, "adminClient", AdminClient.class);
|
||||
assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
|
||||
Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
|
||||
assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
|
||||
}
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Test
|
||||
public void brokersInvalid() throws Exception {
|
||||
KafkaProperties bootConfig = new KafkaProperties();
|
||||
KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties();
|
||||
binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:1234");
|
||||
try {
|
||||
new KafkaTopicProvisioner(binderConfig, bootConfig);
|
||||
fail("Expected illegal state");
|
||||
}
|
||||
catch (IllegalStateException e) {
|
||||
assertThat(e.getMessage())
|
||||
.isEqualTo("Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
Binary file not shown.
0
spring-cloud-stream-binder-kafka-docs/.jdk8
Normal file
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>1.3.0.M2</version>
|
||||
<version>2.0.0.RC1</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[[spring-cloud-stream-binder-kafka-reference]]
|
||||
= Spring Cloud Stream Kafka Binder Reference Guide
|
||||
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek
|
||||
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell
|
||||
:doctype: book
|
||||
:toc:
|
||||
:toclevels: 4
|
||||
@@ -23,12 +23,15 @@ Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinat
|
||||
|
||||
= Reference Guide
|
||||
include::overview.adoc[]
|
||||
|
||||
include::dlq.adoc[]
|
||||
include::metrics.adoc[]
|
||||
|
||||
include::partitions.adoc[]
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
|
||||
include::contributing.adoc[]
|
||||
|
||||
// ======================================================================================
|
||||
|
||||
@@ -0,0 +1,558 @@
|
||||
== Kafka Streams Binding Capabilities of Spring Cloud Stream
|
||||
|
||||
Spring Cloud Stream Kafka support also includes a binder designed specifically for Apache Kafka Streams bindings.
Using this binder, applications can be written that leverage the Apache Kafka Streams API.
For more information on Kafka Streams, see the https://kafka.apache.org/documentation/streams/developer-guide[Kafka Streams API Developer Manual].

Kafka Streams support in Spring Cloud Stream is based on the foundations provided by the Spring Kafka project.
For details on that support, see http://docs.spring.io/spring-kafka/reference/html/_reference.html#kafka-streams[Kafka Streams Support in Spring Kafka].
|
||||
|
||||
Here are the maven coordinates for the Spring Cloud Stream Kafka Streams binder artifact.
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
The high-level streams DSL provided by the Kafka Streams API can be used through the Spring Cloud Stream support.
Some minimal support for writing applications using the processor API is also available through the binder.
Kafka Streams applications using the Spring Cloud Stream support can be written in the processor style, where messages are read from an inbound topic and written to an outbound topic, or in the sink style, where there is no output binding.
|
||||
|
||||
=== Usage example of high level streams DSL
|
||||
|
||||
This application listens to a Kafka topic and writes the word count for each unique word that it sees in a 5-second time window.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
@EnableBinding(KStreamProcessor.class)
public class WordCountProcessorApplication {

	@StreamListener("input")
	@SendTo("output")
	public KStream<?, WordCount> process(KStream<?, String> input) {
		return input
				.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
				.groupBy((key, value) -> value)
				.windowedBy(TimeWindows.of(5000))
				.count(Materialized.as("WordCounts-multi"))
				.toStream()
				.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
	}

	public static void main(String[] args) {
		SpringApplication.run(WordCountProcessorApplication.class, args);
	}
}
----
|
||||
|
||||
If you build it as a Spring Boot uber jar, you can run the above example in the following way:
|
||||
|
||||
[source]
|
||||
----
|
||||
java -jar uber.jar --spring.cloud.stream.bindings.input.destination=words --spring.cloud.stream.bindings.output.destination=counts
|
||||
----
|
||||
|
||||
This means that the application will listen to the incoming Kafka topic `words` and write to the output topic `counts`.

Spring Cloud Stream ensures that the messages from both the incoming and outgoing topics are bound as KStream objects.
Applications can focus exclusively on the business aspects of the code, i.e. writing the logic required in the processor, rather than setting up the streams-specific configuration required by the Kafka Streams infrastructure.
All such infrastructure details are handled by the framework.
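The example above assumes a `WordCount` value object that is not shown in the snippet.
A minimal sketch of such a class could look like the following (the field names and types here are illustrative assumptions, not part of the published sample):

[source]
----
// Hypothetical value object for the word count example above.
public class WordCount {

	private String word;

	private long count;

	private Date start;

	private Date end;

	public WordCount(String word, long count, Date start, Date end) {
		this.word = word;
		this.count = count;
		this.start = start;
		this.end = end;
	}

	// getters and setters omitted for brevity
}
----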
|
||||
|
||||
=== Multiple Input bindings on the inbound
|
||||
|
||||
The Spring Cloud Stream Kafka Streams binder allows users to write applications with multiple bindings.
|
||||
There are use cases in which you may want to have multiple incoming KStream objects or a combination of KStream and KTable objects.
|
||||
Both of these flavors are supported.
|
||||
Here are some examples.
|
||||
|
||||
[source]
|
||||
----
|
||||
@EnableBinding(KStreamKTableBinding.class)
|
||||
.....
|
||||
.....
|
||||
@StreamListener
|
||||
public void process(@Input("inputStream") KStream<String, PlayEvent> playEvents,
|
||||
@Input("inputTable") KTable<Long, Song> songTable) {
|
||||
....
|
||||
....
|
||||
}
|
||||
|
||||
interface KStreamKTableBinding {
|
||||
|
||||
@Input("inputStream")
|
||||
KStream<?, ?> inputStream();
|
||||
|
||||
@Input("inputTable")
|
||||
KTable<?, ?> inputTable();
|
||||
}
|
||||
|
||||
----
|
||||
|
||||
In the above example, the application is written in sink style, i.e. there are no output bindings and the application has to decide what to do with the data.
Most likely, when you write applications this way, you will want to send the information downstream or store it in a state store (see below for Queryable State Stores).

In the case of an incoming KTable, if you want to materialize it as a state store, you have to express that through the following property.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.inputTable.consumer.materializedAs: all-songs
|
||||
----
|
||||
|
||||
Here is an example for multiple input bindings and an output binding (processor style).
|
||||
|
||||
[source]
|
||||
----
|
||||
@EnableBinding(KStreamKTableBinding.class)
|
||||
....
|
||||
....
|
||||
|
||||
@StreamListener
|
||||
@SendTo("output")
|
||||
public KStream<String, Long> process(@Input("input") KStream<String, Long> userClicksStream,
|
||||
@Input("inputTable") KTable<String, String> userRegionsTable) {
|
||||
....
|
||||
....
|
||||
}
|
||||
|
||||
interface KStreamKTableBinding extends KafkaStreamsProcessor {

	@Input("inputTable")
	KTable<?, ?> inputTable();
}

----
|
||||
|
||||
=== Support for branching in Kafka Streams API
|
||||
|
||||
Kafka Streams allows outbound data to be split into multiple topics based on some predicates.
The Spring Cloud Stream Kafka Streams binder provides support for this feature without losing the overall programming model exposed through `StreamListener` in the end user application.
You write the application in the usual way, as demonstrated above in the word count example.
When using the branching feature, you are required to do a few things.
First, you need to make sure that your return type is `KStream[]` instead of a regular `KStream`.
Then you need to use the `SendTo` annotation, listing the output bindings in order (see the example below).
For each of these output bindings, you need to configure destination, content-type, and so on, as required by any other standard Spring Cloud Stream application.
|
||||
|
||||
Here is an example:
|
||||
|
||||
[source]
|
||||
----
|
||||
@EnableBinding(KStreamProcessorWithBranches.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class WordCountProcessorApplication {
|
||||
|
||||
@Autowired
|
||||
private TimeWindows timeWindows;
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo({"output1","output2","output3})
|
||||
public KStream<?, WordCount>[] process(KStream<Object, String> input) {
|
||||
|
||||
Predicate<Object, WordCount> isEnglish = (k, v) -> v.word.equals("english");
|
||||
Predicate<Object, WordCount> isFrench = (k, v) -> v.word.equals("french");
|
||||
Predicate<Object, WordCount> isSpanish = (k, v) -> v.word.equals("spanish");
|
||||
|
||||
return input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.windowedBy(timeWindows)
|
||||
.count(Materialized.as("WordCounts-1"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))))
|
||||
.branch(isEnglish, isFrench, isSpanish);
|
||||
}
|
||||
|
||||
interface KStreamProcessorWithBranches {
|
||||
|
||||
@Input("input")
|
||||
KStream<?, ?> input();
|
||||
|
||||
@Output("output1")
|
||||
KStream<?, ?> output1();
|
||||
|
||||
@Output("output2")
|
||||
KStream<?, ?> output2();
|
||||
|
||||
@Output("output3")
|
||||
KStream<?, ?> output3();
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
Then in the properties:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output1.contentType: application/json
|
||||
spring.cloud.stream.bindings.output2.contentType: application/json
|
||||
spring.cloud.stream.bindings.output3.contentType: application/json
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms: 1000
|
||||
spring.cloud.stream.kafka.streams.binder.configuration:
|
||||
default.key.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
default.value.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.bindings.output1:
|
||||
destination: foo
|
||||
producer:
|
||||
headerMode: raw
|
||||
spring.cloud.stream.bindings.output2:
|
||||
destination: bar
|
||||
producer:
|
||||
headerMode: raw
|
||||
spring.cloud.stream.bindings.output3:
|
||||
destination: fox
|
||||
producer:
|
||||
headerMode: raw
|
||||
spring.cloud.stream.bindings.input:
|
||||
destination: words
|
||||
consumer:
|
||||
headerMode: raw
|
||||
----
|
||||
|
||||
=== Message conversion in Spring Cloud Stream Kafka Streams applications
|
||||
|
||||
The Spring Cloud Stream Kafka Streams binder allows the usual patterns for content type conversion, as in other message channel based binder applications.
Many Kafka Streams operations - the ones that are part of the actual application logic rather than the inbound and outbound boundaries - need to know the SerDes used to correctly transform key and value data.
Therefore, it may be more natural to rely on the SerDe facilities provided by the Apache Kafka Streams library itself for inbound and outbound conversions rather than using the content type conversions offered by the framework.
On the other hand, you might already be familiar with the content type conversion patterns in Spring Cloud Stream and want to keep using them for inbound and outbound conversions.
Both options are supported in the Spring Cloud Stream binder for Apache Kafka Streams.
|
||||
|
||||
==== Outbound serialization
|
||||
|
||||
If native encoding is disabled (which is the default), then the framework will convert the message using the contentType set by the user (or the default content type of application/json).
In this case, it will ignore any Serde set on the output binding for outbound serialization.
|
||||
|
||||
Here is the property to set the contentType on the outbound.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output.contentType: application/json
|
||||
----
|
||||
|
||||
Here is the property to enable native encoding.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output.nativeEncoding: true
|
||||
----
|
||||
|
||||
If native encoding is enabled on the output binding (the user has to explicitly enable it as above), then the framework will skip any message conversion on the outbound and use the Serde set by the user.
First, it checks for the `valueSerde` property set on the actual output binding. Here is an example:
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
----
|
||||
If this property is not set, then it will default to the common value Serde - `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.
|
||||
|
||||
It is worth mentioning that the Spring Cloud Stream Kafka Streams binder does not serialize the keys on the outbound - that is always done by Kafka itself.
Therefore, you either have to specify the keySerde property on the binding, or it will default to the application-wide common keySerde set on the streams configuration.
|
||||
|
||||
Binding level key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde
|
||||
----
|
||||
|
||||
Common Key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde
|
||||
----
|
||||
|
||||
If branching is used, then you need to use multiple output bindings. For example,
|
||||
|
||||
[source]
|
||||
----
|
||||
interface KStreamProcessorWithBranches {
|
||||
|
||||
@Input("input")
|
||||
KStream<?, ?> input();
|
||||
|
||||
@Output("output1")
|
||||
KStream<?, ?> output1();
|
||||
|
||||
@Output("output2")
|
||||
KStream<?, ?> output2();
|
||||
|
||||
@Output("output3")
|
||||
KStream<?, ?> output3();
|
||||
}
|
||||
----
|
||||
|
||||
If nativeEncoding is set, then you can set different Serde values on these individual output bindings as below.
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.output1.producer.valueSerde=IntegerSerde
spring.cloud.stream.kafka.streams.bindings.output2.producer.valueSerde=StringSerde
spring.cloud.stream.kafka.streams.bindings.output3.producer.valueSerde=JsonSerde
|
||||
----
|
||||
|
||||
Then, if you have `SendTo` like this, @SendTo({"output1", "output2", "output3"}), the `KStream[]` returned from the branches is serialized with the corresponding Serde objects as defined above.
If you are not enabling nativeEncoding, you can instead set different contentType values on the output bindings as below.
In that case, the framework will use the appropriate message converter to convert the messages before sending them to Kafka.
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.output1.contentType: application/json
|
||||
spring.cloud.stream.bindings.output2.contentType: application/java-serialized-object
|
||||
spring.cloud.stream.bindings.output3.contentType: application/octet-stream
|
||||
----
|
||||
|
||||
==== Inbound Deserialization
|
||||
|
||||
Similar rules apply to data deserialization on the inbound as in the case of outbound serialization.
|
||||
|
||||
If native decoding is disabled (which is the default), then the framework will convert the message using the contentType set by the user (or the default content type of application/json).
In this case, it will ignore any Serde set on the input binding for inbound deserialization.
|
||||
|
||||
Here is the property to set the contentType on the inbound.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.input.contentType: application/json
|
||||
----
|
||||
|
||||
Here is the property to enable native decoding.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.input.nativeDecoding: true
|
||||
----
|
||||
|
||||
If native decoding is enabled on the input binding (the user has to explicitly enable it as above), then the framework will skip any message conversion on the inbound and use the Serde set by the user.
First, it checks for the `valueSerde` property set on the actual input binding. Here is an example:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde: org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
----
|
||||
If this property is not set, then it will default to the common value Serde - `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.
|
||||
|
||||
It is worth to mention that Spring Cloud Stream Kafka Streams binder does not deserialize the keys on inbound, rather it is always done by Kafka itself.
|
||||
Therefore, you either have to specify the `keySerde` property on the binding, or it defaults to the application-wide common `keySerde` set in the streams configuration.
|
||||
|
||||
Binding level key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde
|
||||
----
|
||||
|
||||
Common Key serde:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde
|
||||
----
|
||||
|
||||
As in the case of KStream branching on the outbound, the benefit of setting value Serde per binding is that if you have multiple input bindings (multiple KStreams) and they all require separate value Serdes, then you can configure them individually.
|
||||
If you use the common configuration approach, then that is not possible.
|
||||
|
||||
==== Error handling on Deserialization exceptions
|
||||
|
||||
Apache Kafka Streams now provides the capability to natively handle deserialization exceptions.
|
||||
For details on this support, see https://cwiki.apache.org/confluence/display/KAFKA/KIP-161%3A+streams+deserialization+exception+handlers[KIP-161].
|
||||
Out of the box, Apache Kafka Streams provides two kinds of deserialization exception handlers: `logAndContinue` and `logAndFail`.
|
||||
As the names indicate, the former logs the error and continues processing the next records, while the latter logs the error and fails.
|
||||
`logAndFail` is the default deserialization exception handler.
|
||||
|
||||
The Spring Cloud Stream binder for Apache Kafka Streams allows you to specify these exception handlers through the following property:
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.serdeError: logAndContinue
|
||||
----
|
||||
|
||||
In addition to the above two deserialization exception handlers, the binder also provides a third one for sending the bad records (poison pills) to a DLQ topic.
|
||||
Here is how you enable this DLQ exception handler.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.serdeError: sendToDlq
|
||||
----
|
||||
When the above property is set, all the records that fail deserialization are sent to the DLQ topic.
|
||||
First, it checks whether a `dlqName` is set on the binding itself, using the following property.
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.bindings.input.consumer.dlqName: foo-dlq
|
||||
----
|
||||
If this is set, then the records in error are sent to the topic `foo-dlq`.
|
||||
If this is not set, then it will create a DLQ topic called `error.<input-topic-name>.<group-name>`.
|
||||
|
||||
Keep a couple of things in mind when using the exception handling feature through the Spring Cloud Stream binder for Apache Kafka Streams.
|
||||
|
||||
* The property `spring.cloud.stream.kafka.streams.binder.serdeError` is applicable for the entire application.
|
||||
This implies that if there are multiple `StreamListener` methods in the same application, this property is applied to all of them.
|
||||
* The exception handling for deserialization works consistently with both native deserialization and framework-provided message conversion.
|
||||
|
||||
==== Handling Non-Deserialization exceptions
|
||||
|
||||
Support for other kinds of error handling is currently limited in Apache Kafka Streams, and it is up to the end user application to handle any such application-level errors.
|
||||
One side effect of providing a DLQ for deserialization exception handlers, as described above, is that it gives you a way to access the DLQ-sending bean directly from your application.
|
||||
Once you get access to that bean, you can programmatically send any exception records from your application to the DLQ.
|
||||
Here is an example of how you may do that.
|
||||
Keep in mind that this approach only works out of the box when you use the low-level processor API in your application, as below.
|
||||
Achieving the same with the high-level DSL remains hard without native error handling support in the library, but this example provides some hints for working around that limitation.
|
||||
|
||||
[source]
|
||||
----
|
||||
@Autowired
|
||||
private SendToDlqAndContinue dlqHandler;
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
public KStream<?, WordCount> process(KStream<Object, String> input) {
|
||||
|
||||
input.process(() -> new Processor() {
|
||||
ProcessorContext context;
|
||||
|
||||
@Override
|
||||
public void init(ProcessorContext context) {
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void process(Object o, Object o2) {
|
||||
|
||||
try {
|
||||
.....
|
||||
.....
|
||||
}
|
||||
catch(Exception e) {
|
||||
//explicitly provide the kafka topic corresponding to the input binding as the first argument.
|
||||
//DLQ handler will correctly map to the dlq topic from the actual incoming destination.
|
||||
dlqHandler.sendToDlq("topic-name", (byte[]) o1, (byte[]) o2, context.partition());
|
||||
}
|
||||
}
|
||||
|
||||
.....
|
||||
.....
|
||||
});
|
||||
}
|
||||
----
|
||||
|
||||
=== Support for interactive queries
|
||||
|
||||
As part of its public API, the binder now exposes a class called `QueryableStoreRegistry`.
|
||||
You can access this as a Spring bean in your application.
|
||||
One easy way to get access to this bean from your application is to autowire it, as below.
|
||||
|
||||
[source]
|
||||
----
|
||||
@Autowired
|
||||
private QueryableStoreRegistry queryableStoreRegistry;
|
||||
----
|
||||
|
||||
Once you gain access to this bean, you can query for the particular state store that you are interested in.
|
||||
Here is an example:
|
||||
|
||||
[source]
|
||||
----
|
||||
ReadOnlyKeyValueStore<Object, Object> keyValueStore =
|
||||
queryableStoreRegistry.getQueryableStoreType("my-store", QueryableStoreTypes.keyValueStore());
|
||||
----
|
||||
Then you can retrieve the data that you stored in this store during the execution of your application.
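For example, assuming the store holds counts keyed by word (the key `"foo"` and the `Long` value type are illustrative assumptions), you can look up a single value or iterate over all entries:

[source, java]
----
// Point lookup; "foo" is only an example key.
Long count = (Long) keyValueStore.get("foo");

// Iterate over every entry currently held by the store.
try (KeyValueIterator<Object, Object> all = keyValueStore.all()) {
    while (all.hasNext()) {
        KeyValue<Object, Object> entry = all.next();
        System.out.println(entry.key + " : " + entry.value);
    }
}
----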
|
||||
|
||||
=== Kafka Streams properties
|
||||
|
||||
The preceding sections covered all the relevant properties for writing Kafka Streams applications with Spring Cloud Stream; they are consolidated here again for reference.
|
||||
|
||||
The following properties are available at the binder level and must be prefixed with `spring.cloud.stream.kafka.streams.binder.`.
|
||||
|
||||
configuration::
|
||||
Map with a key/value pair containing properties pertaining to Apache Kafka Streams API.
|
||||
This property must be prefixed with `spring.cloud.stream.kafka.streams.binder.`.
|
||||
Following are some examples of using this property.
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000
|
||||
----
|
||||
|
||||
For more information about all the properties that may go into the streams configuration, see the `StreamsConfig` JavaDocs in the Apache Kafka Streams documentation.
|
||||
|
||||
brokers::
|
||||
Broker URL
|
||||
+
|
||||
Default: `localhost`
|
||||
zkNodes::
|
||||
Zookeeper URL
|
||||
+
|
||||
Default: `localhost`
|
||||
serdeError::
|
||||
Deserialization error handler type.
|
||||
Possible values are `logAndContinue`, `logAndFail`, or `sendToDlq`.
|
||||
+
|
||||
Default: `logAndFail`
|
||||
applicationId::
|
||||
Application ID for all the stream configurations in the current application context.
|
||||
You can override the application id for an individual `StreamListener` method using the `group` property on the binding.
|
||||
You have to ensure that you are using the same group name for all input bindings in the case of multiple inputs on the same method.
|
||||
+
|
||||
Default: `default`
|
||||
|
||||
|
||||
The following properties are available for Kafka Streams producers only and must be prefixed with `spring.cloud.stream.kafka.streams.bindings.<binding name>.producer.`.
|
||||
|
||||
keySerde::
|
||||
key serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
valueSerde::
|
||||
value serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
useNativeEncoding::
|
||||
flag to enable native encoding
|
||||
+
|
||||
Default: `false`.
|
||||
|
||||
The following properties are available for Kafka Streams consumers only and must be prefixed with `spring.cloud.stream.kafka.streams.bindings.<binding name>.consumer.`.
|
||||
|
||||
keySerde::
|
||||
key serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
valueSerde::
|
||||
value serde to use
|
||||
+
|
||||
Default: `none`.
|
||||
materializedAs::
|
||||
state store to materialize when using incoming KTable types
|
||||
+
|
||||
Default: `none`.
|
||||
useNativeDecoding::
|
||||
flag to enable native decoding
|
||||
+
|
||||
Default: `false`.
|
||||
dlqName::
|
||||
DLQ topic name.
|
||||
+
|
||||
Default: `none`.
|
||||
|
||||
Other common properties used from core Spring Cloud Stream:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.bindings.<binding name>.destination
|
||||
spring.cloud.stream.bindings.<binding name>.group
|
||||
----
|
||||
|
||||
TimeWindow properties:
|
||||
|
||||
Windowing is an important concept in stream processing applications.
|
||||
The following properties are available for configuring time windows.
|
||||
|
||||
spring.cloud.stream.kafka.streams.timeWindow.length::
|
||||
When this property is given, you can autowire a `TimeWindows` bean into the application (see the sketch after these properties).
|
||||
The value is expressed in milliseconds.
|
||||
+
|
||||
Default: `none`.
|
||||
spring.cloud.stream.kafka.streams.timeWindow.advanceBy::
|
||||
Value is given in milliseconds.
|
||||
+
|
||||
Default: `none`.
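Below is a minimal sketch showing how the autowired `TimeWindows` bean (built from the two properties above) might be used; the bindings and the word-splitting logic mirror the earlier word count example and are otherwise illustrative only:

[source, java]
----
@Autowired
private TimeWindows timeWindows;    // length/advanceBy come from the timeWindow properties above

@StreamListener("input")
@SendTo("output")
public KStream<?, String> process(KStream<Object, String> input) {
    return input
            .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
            .map((key, word) -> new KeyValue<>(word, word))
            .groupByKey()
            .windowedBy(timeWindows)    // the windowing is driven entirely by configuration
            .count()
            .toStream()
            .map((window, count) -> new KeyValue<>(null, "Count for " + window.key() + ": " + count));
}
----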
|
||||
@@ -1,10 +0,0 @@
|
||||
[[kafka-metrics]]
|
||||
== Kafka metrics
|
||||
|
||||
Kafka binder module exposes the following metrics:
|
||||
|
||||
`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag` - this metric indicates how many messages
|
||||
have not yet been consumed from the given binder's topic (`someTopic`) by the given consumer group (`someGroup`).
|
||||
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, then
|
||||
consumer group `myGroup` has `1000` messages waiting to be consumed from topic `myTopic`. This metric is
|
||||
particularly useful to provide auto-scaling feedback to PaaS platform of your choice.
|
||||
@@ -73,6 +73,11 @@ spring.cloud.stream.kafka.binder.headers::
|
||||
The list of custom headers that will be transported by the binder.
|
||||
+
|
||||
Default: empty.
|
||||
spring.cloud.stream.kafka.binder.healthTimeout::
|
||||
The time to wait to get partition information, in seconds.
|
||||
Health will report as down if this timer expires.
|
||||
+
|
||||
Default: 10.
|
||||
spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
|
||||
The frequency, in milliseconds, with which offsets are saved.
|
||||
Ignored if `0`.
|
||||
@@ -115,7 +120,18 @@ spring.cloud.stream.kafka.binder.socketBufferSize::
|
||||
Size (in bytes) of the socket buffer to be used by the Kafka consumers.
|
||||
+
|
||||
Default: `2097152`.
|
||||
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
|
||||
Enable transactions in the binder; see `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
|
||||
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
|
||||
+
|
||||
Default `null` (no transactions)
|
||||
spring.cloud.stream.kafka.binder.transaction.producer.*::
|
||||
Global producer properties for producers in a transactional binder.
|
||||
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
|
||||
+
|
||||
Default: See individual producer properties.
|
||||
|
||||
[[kafka-consumer-properties]]
|
||||
=== Kafka Consumer Properties
|
||||
|
||||
The following properties are available for Kafka consumers only and
|
||||
@@ -158,6 +174,8 @@ enableDlq::
|
||||
By default, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
|
||||
The DLQ topic name can be configured via the `dlqName` property.
|
||||
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
|
||||
See <<kafka-dlq-processing>> for more information.
|
||||
Starting with _version 2.0_, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message` and `x-exception-stacktrace` as `byte[]`.
|
||||
+
|
||||
Default: `false`.
|
||||
configuration::
|
||||
@@ -168,7 +186,29 @@ dlqName::
|
||||
The name of the DLQ topic to receive the error messages.
|
||||
+
|
||||
Default: null (If not specified, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`).
|
||||
dlqProducerProperties::
|
||||
Using this, DLQ-specific producer properties can be set.
|
||||
All the properties available as Kafka producer properties can be set through this property.
|
||||
+
|
||||
Default: Default Kafka producer properties.
|
||||
standardHeaders::
|
||||
Indicates which standard headers are populated by the inbound channel adapter.
|
||||
`none`, `id`, `timestamp` or `both`.
|
||||
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
|
||||
+
|
||||
Default: `none`
|
||||
converterBeanName::
|
||||
The name of a bean that implements `RecordMessageConverter`; used in the inbound channel adapter to replace the default `MessagingMessageConverter` (see the example after this list).
|
||||
+
|
||||
Default: `null`
|
||||
idleEventInterval::
|
||||
The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
|
||||
See <<pause-resume>> for a usage example.
|
||||
+
|
||||
Default: `30000`
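The following is a minimal, hypothetical sketch of registering such a converter bean; the bean name `fullRecordConverter` is an assumption for illustration, and the only requirement imposed by the binder is that the bean implements `RecordMessageConverter`:

[source, java]
----
@Bean("fullRecordConverter")
public RecordMessageConverter fullRecordConverter() {
    // org.springframework.kafka.support.converter.MessagingMessageConverter is the
    // spring-kafka default; subclass or replace it to customize record-to-message conversion.
    return new MessagingMessageConverter();
}
----

The bean can then be referenced from the consumer binding, for example with `spring.cloud.stream.kafka.bindings.input.consumer.converterBeanName=fullRecordConverter` (the binding name `input` is, again, only an example).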
|
||||
|
||||
[[kafka-producer-properties]]
|
||||
=== Kafka Producer Properties
|
||||
|
||||
The following properties are available for Kafka producers only and
|
||||
@@ -192,6 +232,14 @@ messageKeyExpression::
|
||||
For example `headers.key` or `payload.myKey`.
|
||||
+
|
||||
Default: `none`.
|
||||
headerPatterns::
|
||||
A comma-delimited list of simple patterns to match spring-messaging headers to be mapped to the kafka `Headers` in the `ProducerRecord`.
|
||||
Patterns can begin or end with the wildcard character (asterisk).
|
||||
Patterns can be negated by prefixing with `!`; matching stops after the first match (positive or negative).
|
||||
For example `!foo,fo*` will pass `fox` but not `foo`.
|
||||
`id` and `timestamp` are never mapped.
|
||||
+
|
||||
Default: `*` (all headers - except the `id` and `timestamp`)
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka producer properties.
|
||||
+
|
||||
@@ -334,6 +382,46 @@ Usually applications may use principals that do not have administrative rights i
|
||||
In secure environments, we strongly recommend creating topics and managing ACLs administratively using Kafka tooling.
|
||||
====
|
||||
|
||||
[[pause-resume]]
|
||||
==== Example: Pausing and Resuming the Consumer
|
||||
|
||||
If you wish to suspend consumption, but not cause a partition rebalance, you can pause and resume the consumer.
|
||||
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
|
||||
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` s; the frequency at which events are published is controlled by the `idleEventInterval` property.
|
||||
Since the consumer is not thread-safe, you must call these methods on the calling thread.
|
||||
|
||||
The following simple application shows how to pause and resume.
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class Application {
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(Application.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
|
||||
System.out.println(in);
|
||||
consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
|
||||
return event -> {
|
||||
System.out.println(event);
|
||||
if (event.getConsumer().paused().size() > 0) {
|
||||
event.getConsumer().resume(event.getConsumer().paused());
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
|
||||
==== Using the binder with Apache Kafka 0.10
|
||||
|
||||
The default Kafka support in Spring Cloud Stream Kafka binder is for Kafka version 0.10.1.1. The binder also supports connecting to other 0.10 based versions and 0.9 clients.
|
||||
@@ -430,98 +518,24 @@ On the other hand, if auto topic creation is disabled on the server, then care m
|
||||
|
||||
If you want to have full control over how partitions are allocated, then leave the default settings as they are, i.e. do not exclude the kafka broker jar and ensure that `spring.cloud.stream.kafka.binder.autoCreateTopics` is set to `true`, which is the default.
|
||||
|
||||
== Kafka Streams Binding Capabilities of Spring Cloud Stream
|
||||
[[kafka-error-channels]]
|
||||
== Error Channels
|
||||
|
||||
Spring Cloud Stream Kafka support also includes a binder specifically designed for Kafka Streams binding.
|
||||
Using this binder, applications can be written that leverage the Kafka Streams API.
|
||||
For more information on Kafka Streams, see https://kafka.apache.org/documentation/streams/developer-guide[Kafka Streams API Developer Manual]
|
||||
Starting with _version 1.3_, the binder unconditionally sends exceptions to an error channel for each consumer destination, and can be configured to send async producer send failures to an error channel too.
|
||||
See <<binder-error-channels>> for more information.
|
||||
|
||||
Kafka Streams support in Spring Cloud Stream is based on the foundations provided by the Spring Kafka project. For details on that support, see http://docs.spring.io/spring-kafka/reference/html/_reference.html#kafka-streams[Kafka Streams Support in Spring Kafka].
|
||||
The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:
|
||||
|
||||
Here are the maven coordinates for the Spring Cloud Stream KStream binder artifact.
|
||||
* `failedMessage` - the spring-messaging `Message<?>` that failed to be sent.
|
||||
* `record` - the raw `ProducerRecord` that was created from the `failedMessage`
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kstream</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
In addition to leveraging the Spring Cloud Stream programming model (which is based on Spring Boot), one of the other main benefits of the KStream binder is that it avoids the boilerplate configuration you would otherwise need to write when using the Kafka Streams API directly.
|
||||
High level streams DSL provided through the Kafka Streams API can be used through Spring Cloud Stream in the current support.
|
||||
|
||||
=== Usage example of high level streams DSL
|
||||
|
||||
This application listens to a Kafka topic and writes the word count for each unique word that it sees within a 5-second time window.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(KStreamProcessor.class)
|
||||
public class WordCountProcessorApplication {
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
public KStream<?, String> process(KStream<?, String> input) {
|
||||
return input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.map((key, word) -> new KeyValue<>(word, word))
|
||||
.groupByKey(Serdes.String(), Serdes.String())
|
||||
.count(TimeWindows.of(5000), "store-name")
|
||||
.toStream()
|
||||
.map((w, c) -> new KeyValue<>(null, "Count for " + w.key() + ": " + c));
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(WordCountProcessorApplication.class, args);
|
||||
}
|
||||
}
----
|
||||
|
||||
If you build it as a runnable Spring Boot fat JAR, you can run the above example in the following way:
|
||||
|
||||
[source]
|
||||
----
|
||||
java -jar uber.jar --spring.cloud.stream.bindings.input.destination=words --spring.cloud.stream.bindings.output.destination=counts
|
||||
----
|
||||
|
||||
This means that the application listens to the incoming Kafka topic `words` and writes to the output topic `counts`.
|
||||
|
||||
Spring Cloud Stream will ensure that the messages from both the incoming and outgoing topics are bound as KStream objects.
|
||||
As one may observe, the developer can focus exclusively on the business aspects of the code, i.e. writing the logic required in the processor, rather than setting up the streams-specific configuration required by the Kafka Streams infrastructure.
|
||||
All of that boilerplate is handled by Spring Cloud Stream behind the scenes.
|
||||
|
||||
=== Support for interactive queries
|
||||
|
||||
If access to the `KafkaStreams` is needed for interactive queries, the internal `KafkaStreams` instance can be accessed via `KStreamBuilderFactoryBean.getKafkaStreams()`.
|
||||
You can autowire the `KStreamBuilderFactoryBean` instance provided by the KStream binder. Then you can get the `KafkaStreams` instance from it, retrieve the underlying store, execute queries on it, and so on.
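A minimal sketch, assuming a hypothetical key-value state store named `my-store` has been materialized by the topology (the method shown and its types are illustrative only):

[source, java]
----
@Autowired
private KStreamBuilderFactoryBean kStreamBuilderFactoryBean;

public Long countFor(String word) {
    KafkaStreams kafkaStreams = kStreamBuilderFactoryBean.getKafkaStreams();
    // "my-store" is an assumed key-value store name, not one from the examples above.
    ReadOnlyKeyValueStore<String, Long> store =
            kafkaStreams.store("my-store", QueryableStoreTypes.keyValueStore());
    return store.get(word);
}
----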
|
||||
|
||||
=== Kafka Streams properties
|
||||
|
||||
configuration::
|
||||
Map with a key/value pair containing properties pertaining to Kafka Streams API.
|
||||
This property must be prefixed with `spring.cloud.stream.kstream.binder.`.
|
||||
|
||||
Following are some examples of using this property.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
|
||||
spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000
|
||||
----
|
||||
|
||||
For more information about all the properties that may go into streams configuration, see StreamsConfig JavaDocs.
|
||||
|
||||
There can also be binding-specific properties.
|
||||
|
||||
For instance, you can use a different Serde for your input or output destination.
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde
|
||||
spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde
|
||||
----
|
||||
There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>); you can consume these exceptions with your own Spring Integration flow.
|
||||
|
||||
[[kafka-metrics]]
|
||||
== Kafka Metrics
|
||||
|
||||
Kafka binder module exposes the following metrics:
|
||||
|
||||
`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag` - this metric indicates how many messages have not yet been consumed from the given binder's topic by the given consumer group.
|
||||
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, then consumer group `myGroup` has `1000` messages waiting to be consumed from topic `myTopic`.
|
||||
This metric is particularly useful to provide auto-scaling feedback to PaaS platform of your choice.
|
||||
|
||||
@@ -0,0 +1,100 @@
|
||||
== Partitioning with the Kafka Binder
|
||||
|
||||
Apache Kafka supports topic partitioning natively.
|
||||
|
||||
Sometimes it is advantageous to send data to specific partitions, for example when you want to strictly order message processing - all messages for a particular customer should go to the same partition.
|
||||
|
||||
The following illustrates how to configure the producer and consumer side:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Source.class)
|
||||
public class KafkaPartitionProducerApplication {
|
||||
|
||||
private static final Random RANDOM = new Random(System.currentTimeMillis());
|
||||
|
||||
private static final String[] data = new String[] {
|
||||
"foo1", "bar1", "qux1",
|
||||
"foo2", "bar2", "qux2",
|
||||
"foo3", "bar3", "qux3",
|
||||
"foo4", "bar4", "qux4",
|
||||
};
|
||||
|
||||
public static void main(String[] args) {
|
||||
new SpringApplicationBuilder(KafkaPartitionProducerApplication.class)
|
||||
.web(false)
|
||||
.run(args);
|
||||
}
|
||||
|
||||
@InboundChannelAdapter(channel = Source.OUTPUT, poller = @Poller(fixedRate = "5000"))
|
||||
public Message<?> generate() {
|
||||
String value = data[RANDOM.nextInt(data.length)];
|
||||
System.out.println("Sending: " + value);
|
||||
return MessageBuilder.withPayload(value)
|
||||
.setHeader("partitionKey", value)
|
||||
.build();
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
.application.yml
|
||||
[source, yaml]
|
||||
----
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
bindings:
|
||||
output:
|
||||
destination: partitioned.topic
|
||||
producer:
|
||||
partitioned: true
|
||||
partition-key-expression: headers['partitionKey']
|
||||
partition-count: 12
|
||||
----
|
||||
|
||||
IMPORTANT: The topic must be provisioned to have enough partitions to achieve the desired concurrency for all consumer groups.
|
||||
The above configuration will support up to 12 consumer instances (or 6 if their `concurrency` is 2, etc.).
|
||||
It is generally best to "over provision" the partitions to allow for future increases in consumers and/or concurrency.
|
||||
|
||||
NOTE: The above configuration uses the default partitioning (`key.hashCode() % partitionCount`).
|
||||
This may or may not provide a suitably balanced algorithm, depending on the key values; you can override this default by using the `partitionSelectorExpression` or `partitionSelectorClass` properties.
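As a sketch of the class-based override, a custom `PartitionSelectorStrategy` (the Spring Cloud Stream extension point for this) might look as follows; the selection logic shown is purely illustrative:

[source, java]
----
public class CustomPartitionSelector implements PartitionSelectorStrategy {

    @Override
    public int selectPartition(Object key, int partitionCount) {
        // Illustrative only: mask the sign bit so the result is always non-negative.
        return (key.toString().hashCode() & Integer.MAX_VALUE) % partitionCount;
    }
}
----

Such a class would then be referenced through the `partitionSelectorClass` producer property (or, alternatively, a SpEL expression can be supplied with `partitionSelectorExpression`).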
|
||||
|
||||
Since partitions are natively handled by Kafka, no special configuration is needed on the consumer side.
|
||||
Kafka will allocate partitions across the instances.
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class KafkaPartitionConsumerApplication {
|
||||
|
||||
public static void main(String[] args) {
|
||||
new SpringApplicationBuilder(KafkaPartitionConsumerApplication.class)
|
||||
.web(false)
|
||||
.run(args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void listen(@Payload String in, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
|
||||
System.out.println(in + " received from partition " + partition);
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
.application.yml
|
||||
[source, yaml]
|
||||
----
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
bindings:
|
||||
input:
|
||||
destination: partitioned.topic
|
||||
group: myGroup
|
||||
----
|
||||
|
||||
You can add instances as needed; Kafka will rebalance the partition allocations.
|
||||
If the instance count (or `instance count * concurrency`) exceeds the number of partitions, some consumers will be idle.
|
||||
@@ -0,0 +1,4 @@
|
||||
include::overview.adoc[leveloffset=+1]
|
||||
include::dlq.adoc[leveloffset=+1]
|
||||
include::partitions.adoc[leveloffset=+1]
|
||||
include::kafka-streams.adoc[leveloffset=+1]
|
||||
0
spring-cloud-stream-binder-kafka-streams/.jdk8
Normal file
@@ -2,15 +2,15 @@
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<artifactId>spring-cloud-stream-binder-kstream</artifactId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
|
||||
<packaging>jar</packaging>
|
||||
<name>spring-cloud-stream-binder-kstream</name>
|
||||
<name>spring-cloud-stream-binder-kafka-streams</name>
|
||||
<description>Kafka Streams Binder Implementation</description>
|
||||
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>1.3.0.M2</version>
|
||||
<version>2.0.0.RC1</version>
|
||||
</parent>
|
||||
|
||||
<dependencies>
|
||||
@@ -23,19 +23,11 @@
|
||||
<artifactId>spring-boot-configuration-processor</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-codec</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
<artifactId>spring-boot-autoconfigure</artifactId>
|
||||
<optional>true</optional>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka-streams</artifactId>
|
||||
@@ -52,12 +44,17 @@
|
||||
<dependency>
|
||||
<groupId>org.springframework.kafka</groupId>
|
||||
<artifactId>spring-kafka-test</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.kafka</groupId>
|
||||
<artifactId>kafka_2.11</artifactId>
|
||||
<classifier>test</classifier>
|
||||
</dependency>
|
||||
<!-- Added back since Kafka still depends on it, but it has been removed by Boot due to EOL -->
|
||||
<dependency>
|
||||
<groupId>log4j</groupId>
|
||||
<artifactId>log4j</artifactId>
|
||||
<version>1.2.17</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
@@ -0,0 +1,153 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Produced;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* {@link org.springframework.cloud.stream.binder.Binder} implementation for {@link KStream}.
|
||||
* This implementation extends the {@link AbstractBinder} directly.
|
||||
*
|
||||
* Provides both producer and consumer bindings for the bound KStream.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KStreamBinder extends
|
||||
AbstractBinder<KStream<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
|
||||
implements ExtendedPropertiesBinder<KStream<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {
|
||||
|
||||
private final static Log LOG = LogFactory.getLog(KStreamBinder.class);
|
||||
|
||||
private final KafkaTopicProvisioner kafkaTopicProvisioner;
|
||||
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
|
||||
KStreamBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver) {
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
|
||||
this.KafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected Binding<KStream<Object, Object>> doBindConsumer(String name, String group,
|
||||
KStream<Object, Object> inputTarget,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
this.KafkaStreamsBindingInformationCatalogue.registerConsumerProperties(inputTarget, properties.getExtension());
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<>(
|
||||
properties.getExtension());
|
||||
if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
|
||||
extendedConsumerProperties.getExtension().setEnableDlq(true);
|
||||
}
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = binderConfigurationProperties.getApplicationId();
|
||||
}
|
||||
this.kafkaTopicProvisioner.provisionConsumerDestination(name, group, extendedConsumerProperties);
|
||||
StreamsConfig streamsConfig = this.KafkaStreamsBindingInformationCatalogue.getStreamsConfig(inputTarget);
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
String dlqName = StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
|
||||
"error." + name + "." + group : extendedConsumerProperties.getExtension().getDlqName();
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
|
||||
DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
|
||||
if(deserializationExceptionHandler instanceof SendToDlqAndContinue) {
|
||||
((SendToDlqAndContinue)deserializationExceptionHandler).addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
}
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected Binding<KStream<Object, Object>> doBindProducer(String name, KStream<Object, Object> outboundBindTarget,
|
||||
ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
|
||||
ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties = new ExtendedProducerProperties<>(
|
||||
new KafkaProducerProperties());
|
||||
this.kafkaTopicProvisioner.provisionProducerDestination(name, extendedProducerProperties);
|
||||
Serde<?> keySerde = this.keyValueSerdeResolver.getOuboundKeySerde(properties.getExtension());
|
||||
Serde<?> valueSerde = this.keyValueSerdeResolver.getOutboundValueSerde(properties, properties.getExtension());
|
||||
to(properties.isUseNativeEncoding(), name, outboundBindTarget, (Serde<Object>) keySerde, (Serde<Object>) valueSerde);
|
||||
return new DefaultBinding<>(name, null, outboundBindTarget, null);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void to(boolean isNativeEncoding, String name, KStream<Object, Object> outboundBindTarget,
|
||||
Serde<Object> keySerde, Serde<Object> valueSerde) {
|
||||
if (!isNativeEncoding) {
|
||||
LOG.info("Native encoding is disabled for " + name + ". Outbound message conversion done by Spring Cloud Stream.");
|
||||
kafkaStreamsMessageConversionDelegate.serializeOnOutbound(outboundBindTarget)
|
||||
.to(name, Produced.with(keySerde, valueSerde));
|
||||
}
|
||||
else {
|
||||
LOG.info("Native encoding is enabled for " + name + ". Outbound serialization done at the broker.");
|
||||
outboundBindTarget.to(name, Produced.with(keySerde, valueSerde));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(channelName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getExtendedProducerProperties(channelName);
|
||||
}
|
||||
|
||||
public void setKafkaStreamsExtendedBindingProperties(KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,65 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Gary Russell
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@Configuration
|
||||
public class KStreamBinderConfiguration {
|
||||
|
||||
private static final Log logger = LogFactory.getLog(KStreamBinderConfiguration.class);
|
||||
|
||||
@Autowired
|
||||
private KafkaProperties kafkaProperties;
|
||||
|
||||
@Autowired
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
|
||||
@Bean
|
||||
public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties) {
|
||||
return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public KStreamBinder kStreamBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver) {
|
||||
KStreamBinder kStreamBinder = new KStreamBinder(binderConfigurationProperties, kafkaTopicProvisioner,
|
||||
KafkaStreamsMessageConversionDelegate, KafkaStreamsBindingInformationCatalogue,
|
||||
keyValueSerdeResolver);
|
||||
kStreamBinder.setKafkaStreamsExtendedBindingProperties(kafkaStreamsExtendedBindingProperties);
|
||||
return kStreamBinder;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.aopalliance.intercept.MethodInterceptor;
|
||||
import org.aopalliance.intercept.MethodInvocation;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
|
||||
import org.springframework.aop.framework.ProxyFactory;
|
||||
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
/**
|
||||
* {@link org.springframework.cloud.stream.binding.BindingTargetFactory} for {@link KStream}.
|
||||
*
|
||||
* The implementation creates proxies for both input and output binding.
|
||||
* The actual target will be created downstream through further binding process.
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
KStreamBoundElementFactory(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
super(KStream.class);
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KStream createInput(String name) {
|
||||
return createProxyForKStream(name);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public KStream createOutput(final String name) {
|
||||
return createProxyForKStream(name);
|
||||
}
|
||||
|
||||
private KStream createProxyForKStream(String name) {
|
||||
KStreamWrapperHandler wrapper= new KStreamWrapperHandler();
|
||||
ProxyFactory proxyFactory = new ProxyFactory(KStreamWrapper.class, KStream.class);
|
||||
proxyFactory.addAdvice(wrapper);
|
||||
|
||||
KStream proxy = (KStream) proxyFactory.getProxy();
|
||||
|
||||
//Add the binding properties to the catalogue for later retrieval during further binding steps downstream.
|
||||
BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(name);
|
||||
this.kafkaStreamsBindingInformationCatalogue.registerBindingProperties(proxy, bindingProperties);
|
||||
return proxy;
|
||||
}
|
||||
|
||||
public interface KStreamWrapper {
|
||||
|
||||
void wrap(KStream<Object, Object> delegate);
|
||||
|
||||
}
|
||||
|
||||
private static class KStreamWrapperHandler implements KStreamWrapper, MethodInterceptor {
|
||||
|
||||
private KStream<Object, Object> delegate;
|
||||
|
||||
public void wrap(KStream<Object, Object> delegate) {
|
||||
Assert.notNull(delegate, "delegate cannot be null");
|
||||
Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
|
||||
this.delegate = delegate;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
|
||||
if (methodInvocation.getMethod().getDeclaringClass().equals(KStream.class)) {
|
||||
Assert.notNull(delegate, "Trying to invoke " + methodInvocation
|
||||
.getMethod() + " but no delegate has been set.");
|
||||
return methodInvocation.getMethod().invoke(delegate, methodInvocation.getArguments());
|
||||
}
|
||||
else if (methodInvocation.getMethod().getDeclaringClass().equals(KStreamWrapper.class)) {
|
||||
return methodInvocation.getMethod().invoke(this, methodInvocation.getArguments());
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException("Only KStream method invocations are permitted");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
* Copyright 2017-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kstream;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
@@ -23,20 +23,20 @@ import org.apache.kafka.streams.kstream.KeyValueMapper;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.core.ResolvableType;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.converter.MessageConverter;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KStreamListenerParameterAdapter implements StreamListenerParameterAdapter<KStream<?,?>, KStream<?, ?>> {
|
||||
class KStreamStreamListenerParameterAdapter implements StreamListenerParameterAdapter<KStream<?,?>, KStream<?, ?>> {
|
||||
|
||||
private final MessageConverter messageConverter;
|
||||
private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;
|
||||
private final KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
public KStreamListenerParameterAdapter(MessageConverter messageConverter) {
|
||||
this.messageConverter = messageConverter;
|
||||
KStreamStreamListenerParameterAdapter(KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
|
||||
KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
|
||||
this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
|
||||
this.KafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -51,25 +51,11 @@ public class KStreamListenerParameterAdapter implements StreamListenerParameterA
|
||||
ResolvableType resolvableType = ResolvableType.forMethodParameter(parameter);
|
||||
final Class<?> valueClass = (resolvableType.getGeneric(1).getRawClass() != null)
|
||||
? (resolvableType.getGeneric(1).getRawClass()) : Object.class;
|
||||
|
||||
return bindingTarget.map(new KeyValueMapper() {
|
||||
@Override
|
||||
public Object apply(Object o, Object o2) {
|
||||
if (valueClass.isAssignableFrom(o2.getClass())) {
|
||||
return new KeyValue<>(o, o2);
|
||||
}
|
||||
else if (o2 instanceof Message) {
|
||||
return new KeyValue<>(o, messageConverter.fromMessage((Message) o2, valueClass));
|
||||
}
|
||||
else if(o2 instanceof String || o2 instanceof byte[]) {
|
||||
Message<Object> message = MessageBuilder.withPayload(o2).build();
|
||||
return new KeyValue<>(o, messageConverter.fromMessage(message, valueClass));
|
||||
}
|
||||
else {
|
||||
return new KeyValue<>(o, o2);
|
||||
}
|
||||
}
|
||||
});
|
||||
if (this.KafkaStreamsBindingInformationCatalogue.isUseNativeDecoding(bindingTarget)) {
|
||||
return bindingTarget.map((KeyValueMapper) KeyValue::new);
|
||||
}
|
||||
else {
|
||||
return kafkaStreamsMessageConversionDelegate.deserializeOnInbound(valueClass, bindingTarget);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -14,23 +14,21 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kstream;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KeyValueMapper;
|
||||
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class KStreamStreamListenerResultAdapter implements StreamListenerResultAdapter<KStream, KStreamBoundElementFactory.KStreamWrapper> {
|
||||
class KStreamStreamListenerResultAdapter implements StreamListenerResultAdapter<KStream, KStreamBoundElementFactory.KStreamWrapper> {
|
||||
|
||||
@Override
|
||||
public boolean supports(Class<?> resultType, Class<?> boundElement) {
|
||||
@@ -40,17 +38,7 @@ public class KStreamStreamListenerResultAdapter implements StreamListenerResultA
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public Closeable adapt(KStream streamListenerResult, KStreamBoundElementFactory.KStreamWrapper boundElement) {
|
||||
boundElement.wrap(streamListenerResult.map(new KeyValueMapper() {
|
||||
@Override
|
||||
public Object apply(Object k, Object v) {
|
||||
if (v instanceof Message<?>) {
|
||||
return new KeyValue<>(k, v);
|
||||
}
|
||||
else {
|
||||
return new KeyValue<>(k, MessageBuilder.withPayload(v).build());
|
||||
}
|
||||
}
|
||||
}));
|
||||
boundElement.wrap(streamListenerResult.map(KeyValue::new));
|
||||
return new NoOpCloseable();
|
||||
}
|
||||
|
||||
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* {@link org.springframework.cloud.stream.binder.Binder} implementation for {@link KTable}.
|
||||
* This implementation extends the {@link AbstractBinder} directly.
|
||||
*
|
||||
* Provides only consumer binding for the bound KTable as output bindings are not allowed on it.
|
||||
*
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
class KTableBinder extends
|
||||
AbstractBinder<KTable<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
|
||||
implements ExtendedPropertiesBinder<KTable<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private final KafkaTopicProvisioner kafkaTopicProvisioner;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();
|
||||
|
||||
KTableBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties, KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
this.KafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected Binding<KTable<Object, Object>> doBindConsumer(String name, String group, KTable<Object, Object> inputTarget,
|
||||
ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<>(
|
||||
properties.getExtension());
|
||||
if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
|
||||
extendedConsumerProperties.getExtension().setEnableDlq(true);
|
||||
}
|
||||
if (!StringUtils.hasText(group)) {
|
||||
group = binderConfigurationProperties.getApplicationId();
|
||||
}
|
||||
this.kafkaTopicProvisioner.provisionConsumerDestination(name, group, extendedConsumerProperties);
|
||||
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
String dlqName = StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
|
||||
"error." + name + "." + group : extendedConsumerProperties.getExtension().getDlqName();
|
||||
KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
|
||||
StreamsConfig streamsConfig = this.KafkaStreamsBindingInformationCatalogue.getStreamsConfig(inputTarget);
|
||||
DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
|
||||
if(deserializationExceptionHandler instanceof SendToDlqAndContinue) {
|
||||
((SendToDlqAndContinue)deserializationExceptionHandler).addKStreamDlqDispatch(name, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
}
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binding<KTable<Object, Object>> doBindProducer(String name, KTable<Object, Object> outboundBindTarget,
|
||||
ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
|
||||
throw new UnsupportedOperationException("No producer level binding is allowed for KTable");
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(channelName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getExtendedProducerProperties(channelName);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,51 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.context.annotation.Bean;

/**
 * @author Soby Chacko
 */
public class KTableBinderConfiguration {

    @Autowired
    private KafkaProperties kafkaProperties;

    @Autowired
    private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;

    @Bean
    public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties) {
        return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
    }

    @Bean
    public KTableBinder kTableBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
            KafkaTopicProvisioner kafkaTopicProvisioner,
            KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
        KTableBinder kTableBinder = new KTableBinder(binderConfigurationProperties, kafkaTopicProvisioner,
                KafkaStreamsBindingInformationCatalogue);
        return kTableBinder;
    }
}
@@ -0,0 +1,84 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.kafka.streams.kstream.KTable;

import org.springframework.aop.framework.ProxyFactory;
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
import org.springframework.util.Assert;

/**
 * {@link org.springframework.cloud.stream.binding.BindingTargetFactory} for {@link KTable}.
 *
 * Only input bindings are created, as output bindings on KTable are not allowed.
 *
 * @author Soby Chacko
 */
class KTableBoundElementFactory extends AbstractBindingTargetFactory<KTable> {

    KTableBoundElementFactory() {
        super(KTable.class);
    }

    @Override
    public KTable createInput(String name) {
        KTableBoundElementFactory.KTableWrapperHandler wrapper = new KTableBoundElementFactory.KTableWrapperHandler();
        ProxyFactory proxyFactory = new ProxyFactory(KTableBoundElementFactory.KTableWrapper.class, KTable.class);
        proxyFactory.addAdvice(wrapper);

        return (KTable) proxyFactory.getProxy();
    }

    @Override
    @SuppressWarnings("unchecked")
    public KTable createOutput(final String name) {
        throw new UnsupportedOperationException("Outbound operations are not allowed on target type KTable");
    }

    public interface KTableWrapper {

        void wrap(KTable<Object, Object> delegate);
    }

    private static class KTableWrapperHandler implements KTableBoundElementFactory.KTableWrapper, MethodInterceptor {

        private KTable<Object, Object> delegate;

        public void wrap(KTable<Object, Object> delegate) {
            Assert.notNull(delegate, "delegate cannot be null");
            Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
            this.delegate = delegate;
        }

        @Override
        public Object invoke(MethodInvocation methodInvocation) throws Throwable {
            if (methodInvocation.getMethod().getDeclaringClass().equals(KTable.class)) {
                Assert.notNull(delegate, "Trying to invoke " + methodInvocation
                        .getMethod() + " but no delegate has been set.");
                return methodInvocation.getMethod().invoke(delegate, methodInvocation.getArguments());
            }
            else if (methodInvocation.getMethod().getDeclaringClass().equals(KTableBoundElementFactory.KTableWrapper.class)) {
                return methodInvocation.getMethod().invoke(this, methodInvocation.getArguments());
            }
            else {
                throw new IllegalStateException("Only KTable method invocations are permitted");
            }
        }
    }
}
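For context, the KTable target created by this factory can only be used on the input side of a StreamListener. A minimal usage sketch follows; the binding interface, channel names and value types are illustrative assumptions, not part of this change set:

// Hypothetical usage sketch: only input bindings are supported for KTable,
// so results are emitted through a KStream output binding.
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;

import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.messaging.handler.annotation.SendTo;

interface KTableBindings {

    @Input("word-counts-table")
    KTable<String, Long> wordCountsTable();

    @Output("filtered-counts")
    KStream<String, Long> filteredCounts();
}

@EnableBinding(KTableBindings.class)
class FilteredCountsProcessor {

    @StreamListener
    @SendTo("filtered-counts")
    public KStream<String, Long> process(@Input("word-counts-table") KTable<String, Long> counts) {
        // The KTable proxy created by KTableBoundElementFactory is wrapped with the real KTable
        // by the orchestrator before this method is invoked.
        return counts.toStream().filter((word, count) -> count != null && count > 0);
    }
}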
@@ -0,0 +1,41 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.apache.kafka.streams.kstream.TimeWindows;

import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * @author Soby Chacko
 */
@Configuration
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
public class KafkaStreamsApplicationSupportAutoConfiguration {

    @Bean
    @ConditionalOnProperty("spring.cloud.stream.kafka.streams.timeWindow.length")
    public TimeWindows configuredTimeWindow(KafkaStreamsApplicationSupportProperties processorProperties) {
        return processorProperties.getTimeWindow().getAdvanceBy() > 0
                ? TimeWindows.of(processorProperties.getTimeWindow().getLength()).advanceBy(processorProperties.getTimeWindow().getAdvanceBy())
                : TimeWindows.of(processorProperties.getTimeWindow().getLength());
    }
}
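The TimeWindows bean above is registered only when spring.cloud.stream.kafka.streams.timeWindow.length is set. A sketch of how an application might pick it up in a windowed aggregation follows; the processor class and the "input"/"output" binding names are assumptions for illustration:

// Hypothetical consumer of the auto-configured TimeWindows bean; assumes an
// @EnableBinding-ed processor with "input" and "output" Kafka Streams bindings.
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.TimeWindows;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.messaging.handler.annotation.SendTo;

class WindowedCountProcessor {

    @Autowired
    private TimeWindows timeWindows; // populated from spring.cloud.stream.kafka.streams.timeWindow.*

    @StreamListener("input")
    @SendTo("output")
    public KStream<String, Long> process(KStream<String, String> words) {
        // Count occurrences per key within the configured time window.
        return words
                .groupByKey()
                .windowedBy(timeWindows)
                .count()
                .toStream()
                .map((windowedKey, count) -> new KeyValue<>(windowedKey.key(), count));
    }
}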
@@ -0,0 +1,152 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;

import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.util.ObjectUtils;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
@EnableConfigurationProperties(KafkaStreamsExtendedBindingProperties.class)
public class KafkaStreamsBinderSupportAutoConfiguration {

    @Bean
    @ConfigurationProperties(prefix = "spring.cloud.stream.kafka.streams.binder")
    public KafkaStreamsBinderConfigurationProperties binderConfigurationProperties() {
        return new KafkaStreamsBinderConfigurationProperties();
    }

    @Bean("streamConfigGlobalProperties")
    public Map<String, Object> streamConfigGlobalProperties(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        Map<String, Object> props = new HashMap<>();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, binderConfigurationProperties.getKafkaConnectionString());
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, binderConfigurationProperties.getApplicationId());

        if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndContinue) {
            props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                    LogAndContinueExceptionHandler.class);
        }
        else if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndFail) {
            props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                    LogAndFailExceptionHandler.class);
        }
        else if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
            props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                    SendToDlqAndContinue.class);
        }

        if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConfiguration())) {
            props.putAll(binderConfigurationProperties.getConfiguration());
        }

        return props;
    }

    @Bean
    public KStreamStreamListenerResultAdapter kstreamStreamListenerResultAdapter() {
        return new KStreamStreamListenerResultAdapter();
    }

    @Bean
    public KStreamStreamListenerParameterAdapter kstreamStreamListenerParameterAdapter(
            KafkaStreamsMessageConversionDelegate kstreamBoundMessageConversionDelegate, KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
        return new KStreamStreamListenerParameterAdapter(kstreamBoundMessageConversionDelegate, KafkaStreamsBindingInformationCatalogue);
    }

    @Bean
    public KafkaStreamsStreamListenerSetupMethodOrchestrator kafkaStreamsStreamListenerSetupMethodOrchestrator(
            BindingServiceProperties bindingServiceProperties,
            KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
            KeyValueSerdeResolver keyValueSerdeResolver,
            KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
            KStreamStreamListenerParameterAdapter kafkaStreamListenerParameterAdapter,
            Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        return new KafkaStreamsStreamListenerSetupMethodOrchestrator(bindingServiceProperties,
                kafkaStreamsExtendedBindingProperties, keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue,
                kafkaStreamListenerParameterAdapter, streamListenerResultAdapters, binderConfigurationProperties);
    }

    @Bean
    public KafkaStreamsMessageConversionDelegate messageConversionDelegate(CompositeMessageConverterFactory compositeMessageConverterFactory,
            SendToDlqAndContinue sendToDlqAndContinue,
            KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        return new KafkaStreamsMessageConversionDelegate(compositeMessageConverterFactory, sendToDlqAndContinue,
                KafkaStreamsBindingInformationCatalogue, binderConfigurationProperties);
    }

    @Bean
    public KStreamBoundElementFactory kStreamBoundElementFactory(BindingServiceProperties bindingServiceProperties,
            KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
        return new KStreamBoundElementFactory(bindingServiceProperties,
                KafkaStreamsBindingInformationCatalogue);
    }

    @Bean
    public KTableBoundElementFactory kTableBoundElementFactory() {
        return new KTableBoundElementFactory();
    }

    @Bean
    public SendToDlqAndContinue sendToDlqAndContinue() {
        return new SendToDlqAndContinue();
    }

    @Bean
    public KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue() {
        return new KafkaStreamsBindingInformationCatalogue();
    }

    @Bean
    @SuppressWarnings("unchecked")
    public KeyValueSerdeResolver keyValueSerdeResolver(@Qualifier("streamConfigGlobalProperties") Object streamConfigGlobalProperties,
            KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties) {
        return new KeyValueSerdeResolver((Map<String, Object>) streamConfigGlobalProperties, kafkaStreamsBinderConfigurationProperties);
    }

    @Bean
    public QueryableStoreRegistry queryableStoreTypeRegistry() {
        return new QueryableStoreRegistry();
    }

    @Bean
    public StreamsBuilderFactoryManager streamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
            QueryableStoreRegistry queryableStoreRegistry) {
        return new StreamsBuilderFactoryManager(kafkaStreamsBindingInformationCatalogue, queryableStoreRegistry);
    }

}
@@ -0,0 +1,143 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;

/**
 * A catalogue that provides binding information for Kafka Streams target types such as KStream.
 * It also keeps a catalogue for the underlying {@link StreamsBuilderFactoryBean} and
 * {@link StreamsConfig} associated with various {@link org.springframework.cloud.stream.annotation.StreamListener}
 * methods in the {@link org.springframework.context.ApplicationContext}.
 *
 * @author Soby Chacko
 */
class KafkaStreamsBindingInformationCatalogue {

    private final Map<KStream<?, ?>, BindingProperties> bindingProperties = new ConcurrentHashMap<>();

    private final Map<KStream<?, ?>, KafkaStreamsConsumerProperties> consumerProperties = new ConcurrentHashMap<>();

    private final Map<Object, StreamsConfig> streamsConfigs = new ConcurrentHashMap<>();

    private final Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = new HashSet<>();

    /**
     * For a given bound {@link KStream}, retrieve its corresponding destination
     * on the broker.
     *
     * @param bindingTarget KStream binding target
     * @return destination topic on Kafka
     */
    String getDestination(KStream<?, ?> bindingTarget) {
        BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
        return bindingProperties.getDestination();
    }

    /**
     * Whether native decoding is enabled on this {@link KStream}.
     *
     * @param bindingTarget KStream binding target
     * @return true if native decoding is enabled, false otherwise.
     */
    boolean isUseNativeDecoding(KStream<?, ?> bindingTarget) {
        BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
        if (bindingProperties.getConsumer() == null) {
            bindingProperties.setConsumer(new ConsumerProperties());
        }
        return bindingProperties.getConsumer().isUseNativeDecoding();
    }

    /**
     * Whether DLQ is enabled for this {@link KStream}.
     *
     * @param bindingTarget KStream binding target
     * @return true if DLQ is enabled, false otherwise.
     */
    boolean isDlqEnabled(KStream<?, ?> bindingTarget) {
        return consumerProperties.get(bindingTarget).isEnableDlq();
    }

    /**
     * Retrieve the content type associated with a given {@link KStream}.
     *
     * @param bindingTarget KStream binding target
     * @return the associated content type
     */
    String getContentType(KStream<?, ?> bindingTarget) {
        BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
        return bindingProperties.getContentType();
    }

    /**
     * Retrieve the registered {@link StreamsConfig} for the given binding target.
     *
     * @param bindingTarget KStream or KTable binding target
     * @return corresponding {@link StreamsConfig}
     */
    StreamsConfig getStreamsConfig(Object bindingTarget) {
        return streamsConfigs.get(bindingTarget);
    }

    /**
     * Register a mapping for a bound KStream -> {@link BindingProperties}.
     *
     * @param bindingTarget KStream binding target
     * @param bindingProperties {@link BindingProperties} for this KStream
     */
    void registerBindingProperties(KStream<?, ?> bindingTarget, BindingProperties bindingProperties) {
        this.bindingProperties.put(bindingTarget, bindingProperties);
    }

    /**
     * Register a mapping for a bound KStream -> {@link KafkaStreamsConsumerProperties}.
     *
     * @param bindingTarget KStream binding target
     * @param kafkaStreamsConsumerProperties consumer properties for this KStream
     */
    void registerConsumerProperties(KStream<?, ?> bindingTarget, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
        this.consumerProperties.put(bindingTarget, kafkaStreamsConsumerProperties);
    }

    /**
     * Register the {@link StreamsBuilderFactoryBean} used for a StreamListener method.
     *
     * @param streamsBuilderFactoryBean the {@link StreamsBuilderFactoryBean} to register
     */
    void addStreamBuilderFactory(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
        this.streamsBuilderFactoryBeans.add(streamsBuilderFactoryBean);
    }

    void addStreamsConfigs(Object bindingTarget, StreamsConfig streamsConfig) {
        this.streamsConfigs.put(bindingTarget, streamsConfig);
    }

    Set<StreamsBuilderFactoryBean> getStreamsBuilderFactoryBeans() {
        return streamsBuilderFactoryBeans;
    }
}
@@ -0,0 +1,142 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.ObjectUtils;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

/**
 * @author Soby Chacko
 * @author Rafal Zukowski
 */
class KafkaStreamsDlqDispatch {

    private final Log logger = LogFactory.getLog(getClass());

    private final KafkaTemplate<byte[], byte[]> kafkaTemplate;

    private final String dlqName;

    KafkaStreamsDlqDispatch(String dlqName,
            KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
            KafkaConsumerProperties kafkaConsumerProperties) {
        ProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(
                new ExtendedProducerProperties<>(kafkaConsumerProperties.getDlqProducerProperties()),
                kafkaBinderConfigurationProperties);

        this.kafkaTemplate = new KafkaTemplate<>(producerFactory);
        this.dlqName = dlqName;
    }

    @SuppressWarnings("unchecked")
    public void sendToDlq(byte[] key, byte[] value, int partittion) {
        ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(this.dlqName, partittion,
                key, value, null);

        StringBuilder sb = new StringBuilder().append(" a message with key='")
                .append(toDisplayString(ObjectUtils.nullSafeToString(key))).append("'")
                .append(" and payload='")
                .append(toDisplayString(ObjectUtils.nullSafeToString(value)))
                .append("'").append(" received from ")
                .append(partittion);
        ListenableFuture<SendResult<byte[], byte[]>> sentDlq = null;
        try {
            sentDlq = this.kafkaTemplate.send(producerRecord);
            sentDlq.addCallback(new ListenableFutureCallback<SendResult<byte[], byte[]>>() {

                @Override
                public void onFailure(Throwable ex) {
                    KafkaStreamsDlqDispatch.this.logger.error(
                            "Error sending to DLQ " + sb.toString(), ex);
                }

                @Override
                public void onSuccess(SendResult<byte[], byte[]> result) {
                    if (KafkaStreamsDlqDispatch.this.logger.isDebugEnabled()) {
                        KafkaStreamsDlqDispatch.this.logger.debug(
                                "Sent to DLQ " + sb.toString());
                    }
                }
            });
        }
        catch (Exception ex) {
            if (sentDlq == null) {
                KafkaStreamsDlqDispatch.this.logger.error(
                        "Error sending to DLQ " + sb.toString(), ex);
            }
        }
    }

    private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
            KafkaBinderConfigurationProperties configurationProperties) {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.ACKS_CONFIG, configurationProperties.getRequiredAcks());
        if (!ObjectUtils.isEmpty(configurationProperties.getProducerConfiguration())) {
            props.putAll(configurationProperties.getProducerConfiguration());
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.BATCH_SIZE_CONFIG))) {
            props.put(ProducerConfig.BATCH_SIZE_CONFIG,
                    String.valueOf(producerProperties.getExtension().getBufferSize()));
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.LINGER_MS_CONFIG))) {
            props.put(ProducerConfig.LINGER_MS_CONFIG,
                    String.valueOf(producerProperties.getExtension().getBatchTimeout()));
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))) {
            props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
                    producerProperties.getExtension().getCompressionType().toString());
        }
        if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
            props.putAll(producerProperties.getExtension().getConfiguration());
        }
        // Always send as byte[] on the DLQ (the same byte[] that the consumer received)
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

        return new DefaultKafkaProducerFactory<>(props);
    }

    private String toDisplayString(String original) {
        if (original.length() <= 50) {
            return original;
        }
        return original.substring(0, 50) + "...";
    }
}
@@ -0,0 +1,200 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;

import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.MimeType;
import org.springframework.util.StringUtils;

/**
 * Delegate for handling all framework level message conversions inbound and outbound on {@link KStream}.
 * If native encoding is not enabled, then serialization will be performed on outbound messages based
 * on a contentType. Similarly, if native decoding is not enabled, deserialization will be performed on
 * inbound messages based on a contentType. Based on the contentType, a {@link MessageConverter} will
 * be resolved.
 *
 * @author Soby Chacko
 */
class KafkaStreamsMessageConversionDelegate {

    private static final ThreadLocal<KeyValue<Object, Object>> keyValueThreadLocal = new ThreadLocal<>();

    private final CompositeMessageConverterFactory compositeMessageConverterFactory;

    private final SendToDlqAndContinue sendToDlqAndContinue;

    private final KafkaStreamsBindingInformationCatalogue kstreamBindingInformationCatalogue;

    private final KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties;

    KafkaStreamsMessageConversionDelegate(CompositeMessageConverterFactory compositeMessageConverterFactory,
            SendToDlqAndContinue sendToDlqAndContinue,
            KafkaStreamsBindingInformationCatalogue kstreamBindingInformationCatalogue,
            KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties) {
        this.compositeMessageConverterFactory = compositeMessageConverterFactory;
        this.sendToDlqAndContinue = sendToDlqAndContinue;
        this.kstreamBindingInformationCatalogue = kstreamBindingInformationCatalogue;
        this.kstreamBinderConfigurationProperties = kstreamBinderConfigurationProperties;
    }

    /**
     * Serialize {@link KStream} records on the outbound based on contentType.
     *
     * @param outboundBindTarget outbound KStream target
     * @return serialized KStream
     */
    public KStream serializeOnOutbound(KStream<?, ?> outboundBindTarget) {
        String contentType = this.kstreamBindingInformationCatalogue.getContentType(outboundBindTarget);
        MessageConverter messageConverter = StringUtils.hasText(contentType) ? compositeMessageConverterFactory
                .getMessageConverterForType(MimeType.valueOf(contentType))
                : null;

        return outboundBindTarget.map((k, v) -> {
            Message<?> message = v instanceof Message<?> ? (Message<?>) v :
                    MessageBuilder.withPayload(v).build();
            Map<String, Object> headers = new HashMap<>(message.getHeaders());
            if (!StringUtils.isEmpty(contentType)) {
                headers.put(MessageHeaders.CONTENT_TYPE, contentType);
            }
            MessageHeaders messageHeaders = new MessageHeaders(headers);
            return new KeyValue<>(k,
                    messageConverter.toMessage(message.getPayload(),
                            messageHeaders).getPayload());
        });
    }

    /**
     * Deserialize incoming {@link KStream} records based on contentType.
     *
     * @param valueClass the declared value class of the KStream
     * @param bindingTarget inbound KStream target
     * @return deserialized KStream
     */
    @SuppressWarnings("unchecked")
    public KStream deserializeOnInbound(Class<?> valueClass, KStream<?, ?> bindingTarget) {
        MessageConverter messageConverter = compositeMessageConverterFactory.getMessageConverterForAllRegistered();

        //Deserialize using a branching strategy
        KStream<?, ?>[] branch = bindingTarget.branch(
                //First filter where the message is converted; returns true if everything went well, false otherwise.
                (o, o2) -> {
                    boolean isValidRecord = false;

                    try {
                        if (valueClass.isAssignableFrom(o2.getClass())) {
                            keyValueThreadLocal.set(new KeyValue<>(o, o2));
                        }
                        else if (o2 instanceof Message) {
                            if (valueClass.isAssignableFrom(((Message) o2).getPayload().getClass())) {
                                keyValueThreadLocal.set(new KeyValue<>(o, ((Message) o2).getPayload()));
                            }
                            else {
                                convertAndSetMessage(o, valueClass, messageConverter, (Message) o2);
                            }
                        }
                        else if (o2 instanceof String || o2 instanceof byte[]) {
                            Message<?> message = MessageBuilder.withPayload(o2).build();
                            convertAndSetMessage(o, valueClass, messageConverter, message);
                        }
                        else {
                            keyValueThreadLocal.set(new KeyValue<>(o, o2));
                        }
                        isValidRecord = true;
                    }
                    catch (Exception ignored) {
                        //pass through
                    }
                    return isValidRecord;
                },
                //Second filter that catches any messages for which an exception was thrown in the first filter above.
                (k, v) -> true
        );
        //Process errors from the second filter in the branch above.
        processErrorFromDeserialization(bindingTarget, branch[1]);

        //The first branch above is the one where the messages were converted; let it go through further processing.
        return branch[0].map((o, o2) -> {
            KeyValue<Object, Object> objectObjectKeyValue = keyValueThreadLocal.get();
            keyValueThreadLocal.remove();
            return objectObjectKeyValue;
        });
    }

    private void convertAndSetMessage(Object o, Class<?> valueClass, MessageConverter messageConverter, Message<?> msg) {
        Object messageConverted = messageConverter.fromMessage(msg, valueClass);
        if (messageConverted == null) {
            throw new IllegalStateException("Inbound data conversion failed.");
        }
        keyValueThreadLocal.set(new KeyValue<>(o, messageConverted));
    }

    @SuppressWarnings("unchecked")
    private void processErrorFromDeserialization(KStream<?, ?> bindingTarget, KStream<?, ?> branch) {
        branch.process(() -> new Processor() {
            ProcessorContext context;

            @Override
            public void init(ProcessorContext context) {
                this.context = context;
            }

            @Override
            public void process(Object o, Object o2) {
                if (kstreamBindingInformationCatalogue.isDlqEnabled(bindingTarget)) {
                    String destination = kstreamBindingInformationCatalogue.getDestination(bindingTarget);
                    if (o2 instanceof Message) {
                        Message message = (Message) o2;
                        sendToDlqAndContinue.sendToDlq(destination, (byte[]) o, (byte[]) message.getPayload(), context.partition());
                    }
                    else {
                        sendToDlqAndContinue.sendToDlq(destination, (byte[]) o, (byte[]) o2, context.partition());
                    }
                }
                else if (kstreamBinderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndFail) {
                    throw new IllegalStateException("Inbound deserialization failed.");
                }
                else if (kstreamBinderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndContinue) {
                    //quietly pass through. No action needed, this is similar to log and continue.
                }
            }

            @SuppressWarnings("deprecation")
            @Override
            public void punctuate(long timestamp) {

            }

            @Override
            public void close() {

            }
        });
    }
}
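When native decoding is disabled, the delegate above converts each inbound payload into the value type declared on the StreamListener parameter, using a MessageConverter resolved from the binding's contentType. A minimal sketch of a processor relying on this conversion follows; the PageViewEvent POJO and the "input"/"output" binding names are illustrative assumptions:

// Hypothetical sketch relying on framework-level conversion (useNativeDecoding=false);
// assumes an @EnableBinding-ed processor with "input" and "output" Kafka Streams bindings.
import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.messaging.handler.annotation.SendTo;

class PageViewEvent {

    private String page;

    private long durationMillis;

    public String getPage() {
        return page;
    }

    public void setPage(String page) {
        this.page = page;
    }

    public long getDurationMillis() {
        return durationMillis;
    }

    public void setDurationMillis(long durationMillis) {
        this.durationMillis = durationMillis;
    }
}

class PageViewProcessor {

    @StreamListener("input")
    @SendTo("output")
    public KStream<String, PageViewEvent> process(KStream<String, PageViewEvent> views) {
        // The raw byte[] payload is converted to PageViewEvent by KafkaStreamsMessageConversionDelegate
        // because the declared value type of the parameter is PageViewEvent.
        return views.filter((key, view) -> view != null && view.getDurationMillis() > 0);
    }
}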
@@ -0,0 +1,432 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.lang.reflect.Method;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.Consumed;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.state.KeyValueStore;

import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanInitializationException;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.cloud.stream.binding.StreamListenerErrorMessages;
import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
import org.springframework.cloud.stream.binding.StreamListenerSetupMethodOrchestrator;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.core.MethodParameter;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

/**
 * Kafka Streams specific implementation of {@link StreamListenerSetupMethodOrchestrator}
 * that overrides the default mechanisms for invoking StreamListener adapters.
 *
 * The orchestration primarily focuses on the following areas:
 *
 * 1. Allow multiple KStream output bindings (KStream branching) by allowing more than one output value on {@link SendTo}.
 * 2. Allow multiple inbound bindings for multiple KStream and/or KTable types.
 * 3. Each StreamListener method that it orchestrates gets its own {@link StreamsBuilderFactoryBean} and {@link StreamsConfig}.
 *
 * @author Soby Chacko
 */
class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListenerSetupMethodOrchestrator, ApplicationContextAware {

    private static final Log LOG = LogFactory.getLog(KafkaStreamsStreamListenerSetupMethodOrchestrator.class);

    private final StreamListenerParameterAdapter streamListenerParameterAdapter;

    private final Collection<StreamListenerResultAdapter> streamListenerResultAdapters;

    private final BindingServiceProperties bindingServiceProperties;

    private final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;

    private final KeyValueSerdeResolver keyValueSerdeResolver;

    private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;

    private final Map<Method, StreamsBuilderFactoryBean> methodStreamsBuilderFactoryBeanMap = new HashMap<>();

    private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

    private ConfigurableApplicationContext applicationContext;

    KafkaStreamsStreamListenerSetupMethodOrchestrator(BindingServiceProperties bindingServiceProperties,
            KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
            KeyValueSerdeResolver keyValueSerdeResolver,
            KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
            StreamListenerParameterAdapter streamListenerParameterAdapter,
            Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        this.bindingServiceProperties = bindingServiceProperties;
        this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
        this.keyValueSerdeResolver = keyValueSerdeResolver;
        this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
        this.streamListenerParameterAdapter = streamListenerParameterAdapter;
        this.streamListenerResultAdapters = streamListenerResultAdapters;
        this.binderConfigurationProperties = binderConfigurationProperties;
    }

    @Override
    public boolean supports(Method method) {
        return methodParameterSupports(method) &&
                (methodReturnTypeSuppports(method) || Void.TYPE.equals(method.getReturnType()));
    }

    private boolean methodReturnTypeSuppports(Method method) {
        Class<?> returnType = method.getReturnType();
        if (returnType.equals(KStream.class) ||
                (returnType.isArray() && returnType.getComponentType().equals(KStream.class))) {
            return true;
        }
        return false;
    }

    private boolean methodParameterSupports(Method method) {
        boolean supports = false;
        for (int i = 0; i < method.getParameterCount(); i++) {
            MethodParameter methodParameter = MethodParameter.forExecutable(method, i);
            Class<?> parameterType = methodParameter.getParameterType();
            if (parameterType.equals(KStream.class) || parameterType.equals(KTable.class)) {
                supports = true;
            }
        }
        return supports;
    }

    @Override
    @SuppressWarnings({"rawtypes", "unchecked"})
    public void orchestrateStreamListenerSetupMethod(StreamListener streamListener, Method method, Object bean) {
        String[] methodAnnotatedOutboundNames = getOutboundBindingTargetNames(method);
        validateStreamListenerMethod(streamListener, method, methodAnnotatedOutboundNames);
        String methodAnnotatedInboundName = streamListener.value();
        Object[] adaptedInboundArguments = adaptAndRetrieveInboundArguments(method, methodAnnotatedInboundName,
                this.applicationContext,
                this.streamListenerParameterAdapter);
        try {
            if (Void.TYPE.equals(method.getReturnType())) {
                method.invoke(bean, adaptedInboundArguments);
            }
            else {
                Object result = method.invoke(bean, adaptedInboundArguments);

                if (result.getClass().isArray()) {
                    Assert.isTrue(methodAnnotatedOutboundNames.length == ((Object[]) result).length,
                            "Result does not match with the number of declared outbounds");
                }
                else {
                    Assert.isTrue(methodAnnotatedOutboundNames.length == 1,
                            "Result does not match with the number of declared outbounds");
                }
                if (result.getClass().isArray()) {
                    Object[] outboundKStreams = (Object[]) result;
                    int i = 0;
                    for (Object outboundKStream : outboundKStreams) {
                        Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[i++]);
                        for (StreamListenerResultAdapter streamListenerResultAdapter : streamListenerResultAdapters) {
                            if (streamListenerResultAdapter.supports(outboundKStream.getClass(), targetBean.getClass())) {
                                streamListenerResultAdapter.adapt(outboundKStream, targetBean);
                                break;
                            }
                        }
                    }
                }
                else {
                    Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[0]);
                    for (StreamListenerResultAdapter streamListenerResultAdapter : streamListenerResultAdapters) {
                        if (streamListenerResultAdapter.supports(result.getClass(), targetBean.getClass())) {
                            streamListenerResultAdapter.adapt(result, targetBean);
                            break;
                        }
                    }
                }
            }
        }
        catch (Exception e) {
            throw new BeanInitializationException("Cannot setup StreamListener for " + method, e);
        }
    }

    @Override
    @SuppressWarnings({"unchecked"})
    public Object[] adaptAndRetrieveInboundArguments(Method method, String inboundName,
            ApplicationContext applicationContext,
            StreamListenerParameterAdapter... streamListenerParameterAdapters) {
        Object[] arguments = new Object[method.getParameterTypes().length];
        for (int parameterIndex = 0; parameterIndex < arguments.length; parameterIndex++) {
            MethodParameter methodParameter = MethodParameter.forExecutable(method, parameterIndex);
            Class<?> parameterType = methodParameter.getParameterType();
            Object targetReferenceValue = null;
            if (methodParameter.hasParameterAnnotation(Input.class)) {
                targetReferenceValue = AnnotationUtils.getValue(methodParameter.getParameterAnnotation(Input.class));
                Input methodAnnotation = methodParameter.getParameterAnnotation(Input.class);
                inboundName = methodAnnotation.value();
            }
            else if (arguments.length == 1 && StringUtils.hasText(inboundName)) {
                targetReferenceValue = inboundName;
            }
            if (targetReferenceValue != null) {
                Assert.isInstanceOf(String.class, targetReferenceValue, "Annotation value must be a String");
                Object targetBean = applicationContext.getBean((String) targetReferenceValue);
                BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(inboundName);
                enableNativeDecodingForKTableAlways(parameterType, bindingProperties);
                StreamsConfig streamsConfig = null;
                //Retrieve the StreamsConfig created for this method if available.
                //Otherwise, create the StreamsBuilderFactoryBean and retrieve the underlying config.
                if (!methodStreamsBuilderFactoryBeanMap.containsKey(method)) {
                    streamsConfig = buildStreamsBuilderAndRetrieveConfig(method, applicationContext, bindingProperties);
                }
                try {
                    StreamsBuilderFactoryBean streamsBuilderFactoryBean = methodStreamsBuilderFactoryBeanMap.get(method);
                    StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject();
                    KafkaStreamsConsumerProperties extendedConsumerProperties = kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(inboundName);
                    Serde<?> keySerde = this.keyValueSerdeResolver.getInboundKeySerde(extendedConsumerProperties);
                    Serde<?> valueSerde = this.keyValueSerdeResolver.getInboundValueSerde(bindingProperties.getConsumer(), extendedConsumerProperties);
                    if (parameterType.isAssignableFrom(KStream.class)) {
                        KStream<?, ?> stream = getkStream(inboundName, bindingProperties, streamsBuilder, keySerde, valueSerde);
                        KStreamBoundElementFactory.KStreamWrapper kStreamWrapper = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
                        //wrap the proxy created during the initial target type binding with the real object (KStream)
                        kStreamWrapper.wrap((KStream<Object, Object>) stream);
                        kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
                        if (streamsConfig != null) {
                            kafkaStreamsBindingInformationCatalogue.addStreamsConfigs(kStreamWrapper, streamsConfig);
                        }
                        for (StreamListenerParameterAdapter streamListenerParameterAdapter : streamListenerParameterAdapters) {
                            if (streamListenerParameterAdapter.supports(stream.getClass(), methodParameter)) {
                                arguments[parameterIndex] = streamListenerParameterAdapter.adapt(kStreamWrapper, methodParameter);
                                break;
                            }
                        }
                        if (arguments[parameterIndex] == null && parameterType.isAssignableFrom(stream.getClass())) {
                            arguments[parameterIndex] = stream;
                        }
                        Assert.notNull(arguments[parameterIndex], "Cannot convert argument " + parameterIndex + " of " + method
                                + " from " + stream.getClass() + " to " + parameterType);
                    }
                    else if (parameterType.isAssignableFrom(KTable.class)) {
                        String materializedAs = extendedConsumerProperties.getMaterializedAs();
                        String bindingDestination = bindingServiceProperties.getBindingDestination(inboundName);
                        KTable<?, ?> table = materializedAs != null ?
                                materializedAs(streamsBuilder, bindingDestination, materializedAs, keySerde, valueSerde) :
                                streamsBuilder.table(bindingDestination,
                                        Consumed.with(keySerde, valueSerde));
                        KTableBoundElementFactory.KTableWrapper kTableWrapper = (KTableBoundElementFactory.KTableWrapper) targetBean;
                        //wrap the proxy created during the initial target type binding with the real object (KTable)
                        kTableWrapper.wrap((KTable<Object, Object>) table);
                        kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
                        if (streamsConfig != null) {
                            kafkaStreamsBindingInformationCatalogue.addStreamsConfigs(kTableWrapper, streamsConfig);
                        }
                        arguments[parameterIndex] = table;
                    }
                }
                catch (Exception e) {
                    throw new IllegalStateException(e);
                }
            }
            else {
                throw new IllegalStateException(StreamListenerErrorMessages.INVALID_DECLARATIVE_METHOD_PARAMETERS);
            }
        }
        return arguments;
    }

    private <K, V> KTable<K, V> materializedAs(StreamsBuilder streamsBuilder, String destination, String storeName, Serde<K> k, Serde<V> v) {
        return streamsBuilder.table(bindingServiceProperties.getBindingDestination(destination),
                Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
                        .withKeySerde(k)
                        .withValueSerde(v));
    }

    private KStream<?, ?> getkStream(String inboundName, BindingProperties bindingProperties, StreamsBuilder streamsBuilder,
            Serde<?> keySerde, Serde<?> valueSerde) {
        KStream<?, ?> stream = streamsBuilder.stream(bindingServiceProperties.getBindingDestination(inboundName),
                Consumed.with(keySerde, valueSerde));
        if (bindingProperties.getConsumer().isUseNativeDecoding()) {
            LOG.info("Native decoding is enabled for " + inboundName + ". Inbound deserialization done at the broker.");
        }
        else {
            LOG.info("Native decoding is disabled for " + inboundName + ". Inbound message conversion done by Spring Cloud Stream.");
        }
        stream = stream.map((key, value) -> {
            KeyValue<Object, Object> keyValue;
            String contentType = bindingProperties.getContentType();
            if (!StringUtils.isEmpty(contentType) && !bindingProperties.getConsumer().isUseNativeDecoding()) {
                Message<?> message = MessageBuilder.withPayload(value)
                        .setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
                keyValue = new KeyValue<>(key, message);
            }
            else {
                keyValue = new KeyValue<>(key, value);
            }
            return keyValue;
        });
        return stream;
    }

    private void enableNativeDecodingForKTableAlways(Class<?> parameterType, BindingProperties bindingProperties) {
        if (parameterType.isAssignableFrom(KTable.class)) {
            if (bindingProperties.getConsumer() == null) {
                bindingProperties.setConsumer(new ConsumerProperties());
            }
            //No framework level message conversion is provided for KTable; it is done by the broker.
            bindingProperties.getConsumer().setUseNativeDecoding(true);
        }
    }

    @SuppressWarnings({"unchecked"})
    private StreamsConfig buildStreamsBuilderAndRetrieveConfig(Method method, ApplicationContext applicationContext,
            BindingProperties bindingProperties) {
        ConfigurableListableBeanFactory beanFactory = this.applicationContext.getBeanFactory();
        StreamsBuilderFactoryBean streamsBuilder = new StreamsBuilderFactoryBean();
        streamsBuilder.setAutoStartup(false);
        String uuid = UUID.randomUUID().toString();
        BeanDefinition streamsBuilderBeanDefinition =
                BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(), () -> streamsBuilder)
                        .getRawBeanDefinition();
        ((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("stream-builder-" + uuid, streamsBuilderBeanDefinition);
        StreamsBuilderFactoryBean streamsBuilderX = applicationContext.getBean("&stream-builder-" + uuid, StreamsBuilderFactoryBean.class);
        String group = bindingProperties.getGroup();
        if (!StringUtils.hasText(group)) {
            group = binderConfigurationProperties.getApplicationId();
        }
        Map<String, Object> streamConfigGlobalProperties = applicationContext.getBean("streamConfigGlobalProperties", Map.class);
        streamConfigGlobalProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, group);

        //Custom StreamsConfig implementation that overrides getConfiguredInstance to guarantee that the deserialization handler is cached.
        StreamsConfig streamsConfig = new StreamsConfig(streamConfigGlobalProperties) {
            DeserializationExceptionHandler deserializationExceptionHandler;

            @Override
            @SuppressWarnings("unchecked")
            public <T> T getConfiguredInstance(String key, Class<T> clazz) {
                if (key.equals(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG)) {
                    if (deserializationExceptionHandler != null) {
                        return (T) deserializationExceptionHandler;
                    }
                    else {
                        T t = super.getConfiguredInstance(key, clazz);
                        deserializationExceptionHandler = (DeserializationExceptionHandler) t;
|
||||
return t;
|
||||
}
|
||||
}
|
||||
return super.getConfiguredInstance(key, clazz);
|
||||
}
|
||||
};
|
||||
BeanDefinition streamsConfigBeanDefinition =
|
||||
BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsConfig>) streamsConfig.getClass(), () -> streamsConfig)
|
||||
.getRawBeanDefinition();
|
||||
((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("streamsConfig-" + uuid, streamsConfigBeanDefinition);
|
||||
|
||||
streamsBuilder.setStreamsConfig(streamsConfig);
|
||||
methodStreamsBuilderFactoryBeanMap.put(method, streamsBuilderX);
|
||||
return streamsConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
|
||||
this.applicationContext = (ConfigurableApplicationContext) applicationContext;
|
||||
}
|
||||
|
||||
private void validateStreamListenerMethod(StreamListener streamListener, Method method, String[] methodAnnotatedOutboundNames) {
|
||||
String methodAnnotatedInboundName = streamListener.value();
|
||||
if (methodAnnotatedOutboundNames != null) {
|
||||
for (String s : methodAnnotatedOutboundNames) {
|
||||
if (StringUtils.hasText(s)) {
|
||||
Assert.isTrue(isDeclarativeOutput(method, s), "Method must be declarative");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (StringUtils.hasText(methodAnnotatedInboundName)) {
|
||||
int methodArgumentsLength = method.getParameterTypes().length;
|
||||
|
||||
for (int parameterIndex = 0; parameterIndex < methodArgumentsLength; parameterIndex++) {
|
||||
MethodParameter methodParameter = MethodParameter.forExecutable(method, parameterIndex);
|
||||
Assert.isTrue(isDeclarativeInput(methodAnnotatedInboundName, methodParameter), "Method must be declarative");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private boolean isDeclarativeOutput(Method m, String targetBeanName) {
|
||||
boolean declarative;
|
||||
Class<?> returnType = m.getReturnType();
|
||||
if (returnType.isArray()){
|
||||
Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
|
||||
declarative = this.streamListenerResultAdapters.stream()
|
||||
.anyMatch(slpa -> slpa.supports(returnType.getComponentType(), targetBeanClass));
|
||||
return declarative;
|
||||
}
|
||||
Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
|
||||
declarative = this.streamListenerResultAdapters.stream()
|
||||
.anyMatch(slpa -> slpa.supports(returnType, targetBeanClass));
|
||||
return declarative;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private boolean isDeclarativeInput(String targetBeanName, MethodParameter methodParameter) {
|
||||
if (!methodParameter.getParameterType().isAssignableFrom(Object.class) && this.applicationContext.containsBean(targetBeanName)) {
|
||||
Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
|
||||
return this.streamListenerParameterAdapter.supports(targetBeanClass, methodParameter);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private static String[] getOutboundBindingTargetNames(Method method) {
|
||||
SendTo sendTo = AnnotationUtils.findAnnotation(method, SendTo.class);
|
||||
if (sendTo != null) {
|
||||
Assert.isTrue(!ObjectUtils.isEmpty(sendTo.value()), StreamListenerErrorMessages.ATLEAST_ONE_OUTPUT);
|
||||
Assert.isTrue(sendTo.value().length >= 1, "At least one outbound destination need to be provided.");
|
||||
return sendTo.value();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
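The adapter logic above is what backs a declarative @StreamListener method whose parameters are KStream or KTable bindings. A minimal sketch of such a processor, assuming the default "input"/"output" bindings of KafkaStreamsProcessor; the listener body itself is illustrative:

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.messaging.handler.annotation.SendTo;

@EnableBinding(KafkaStreamsProcessor.class)
public class UppercaseProcessor {

    // The orchestrator above resolves the Serdes for "input", wraps the KStream proxy
    // created at binding time, and hands the real KStream to this method parameter.
    @StreamListener("input")
    @SendTo("output")
    public KStream<?, String> process(KStream<Object, String> input) {
        return input.mapValues(String::toUpperCase);
    }
}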
@@ -0,0 +1,163 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Map;

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Utils;

import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.ProducerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
import org.springframework.util.StringUtils;

/**
 * Resolver for key and value Serde.
 *
 * On the inbound, if native decoding is enabled, any deserialization of the value is handled by Kafka.
 * First, any key/value Serde set on the binding itself is used; if that is not available, the common
 * Serde set at the global level is used. If that also fails, it falls back to byte[].
 * If native decoding is disabled, the binder performs the deserialization of the value, ignores any Serde
 * set for the value, and relies on the contentType provided. Keys are always deserialized at the broker.
 *
 * The same rules apply on the outbound. If native encoding is enabled, value serialization is done at the
 * broker using the binding-level Serde for the value, falling back to the common Serde, and finally to byte[].
 * If native encoding is disabled, the binder performs the serialization using a contentType. Keys are always
 * serialized by the broker.
 *
 * @author Soby Chacko
 */
class KeyValueSerdeResolver {

    private final Map<String, Object> streamConfigGlobalProperties;

    private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

    KeyValueSerdeResolver(Map<String, Object> streamConfigGlobalProperties,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        this.streamConfigGlobalProperties = streamConfigGlobalProperties;
        this.binderConfigurationProperties = binderConfigurationProperties;
    }

    /**
     * Provide the {@link Serde} for the inbound key.
     *
     * @param extendedConsumerProperties binding-level extended {@link KafkaStreamsConsumerProperties}
     * @return configured {@link Serde} for the inbound key.
     */
    public Serde<?> getInboundKeySerde(KafkaStreamsConsumerProperties extendedConsumerProperties) {
        String keySerdeString = extendedConsumerProperties.getKeySerde();

        return getKeySerde(keySerdeString);
    }

    /**
     * Provide the {@link Serde} for the inbound value.
     *
     * @param consumerProperties {@link ConsumerProperties} on the binding
     * @param extendedConsumerProperties binding-level extended {@link KafkaStreamsConsumerProperties}
     * @return configured {@link Serde} for the inbound value.
     */
    public Serde<?> getInboundValueSerde(ConsumerProperties consumerProperties, KafkaStreamsConsumerProperties extendedConsumerProperties) {
        Serde<?> valueSerde;

        String valueSerdeString = extendedConsumerProperties.getValueSerde();
        try {
            if (consumerProperties != null &&
                    consumerProperties.isUseNativeDecoding()) {
                valueSerde = getValueSerde(valueSerdeString);
            }
            else {
                valueSerde = Serdes.ByteArray();
            }
            valueSerde.configure(streamConfigGlobalProperties, false);
        }
        catch (ClassNotFoundException e) {
            throw new IllegalStateException("Serde class not found: ", e);
        }
        return valueSerde;
    }

    /**
     * Provide the {@link Serde} for the outbound key.
     *
     * @param properties binding-level extended {@link KafkaStreamsProducerProperties}
     * @return configured {@link Serde} for the outbound key.
     */
    public Serde<?> getOuboundKeySerde(KafkaStreamsProducerProperties properties) {
        return getKeySerde(properties.getKeySerde());
    }

    /**
     * Provide the {@link Serde} for the outbound value.
     *
     * @param producerProperties {@link ProducerProperties} on the binding
     * @param kafkaStreamsProducerProperties binding-level extended {@link KafkaStreamsProducerProperties}
     * @return configured {@link Serde} for the outbound value.
     */
    public Serde<?> getOutboundValueSerde(ProducerProperties producerProperties, KafkaStreamsProducerProperties kafkaStreamsProducerProperties) {
        Serde<?> valueSerde;
        try {
            if (producerProperties.isUseNativeEncoding()) {
                valueSerde = getValueSerde(kafkaStreamsProducerProperties.getValueSerde());
            }
            else {
                valueSerde = Serdes.ByteArray();
            }
            valueSerde.configure(streamConfigGlobalProperties, false);
        }
        catch (ClassNotFoundException e) {
            throw new IllegalStateException("Serde class not found: ", e);
        }
        return valueSerde;
    }

    private Serde<?> getKeySerde(String keySerdeString) {
        Serde<?> keySerde;
        try {
            if (StringUtils.hasText(keySerdeString)) {
                keySerde = Utils.newInstance(keySerdeString, Serde.class);
            }
            else {
                keySerde = this.binderConfigurationProperties.getConfiguration().containsKey("default.key.serde") ?
                        Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("default.key.serde"), Serde.class) : Serdes.ByteArray();
            }
            keySerde.configure(streamConfigGlobalProperties, true);
        }
        catch (ClassNotFoundException e) {
            throw new IllegalStateException("Serde class not found: ", e);
        }
        return keySerde;
    }

    private Serde<?> getValueSerde(String valueSerdeString) throws ClassNotFoundException {
        Serde<?> valueSerde;
        if (StringUtils.hasText(valueSerdeString)) {
            valueSerde = Utils.newInstance(valueSerdeString, Serde.class);
        }
        else {
            valueSerde = this.binderConfigurationProperties.getConfiguration().containsKey("default.value.serde") ?
                    Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("default.value.serde"), Serde.class) : Serdes.ByteArray();
        }
        return valueSerde;
    }
}
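A configuration sketch of the resolution order described above: a binding-level Serde wins, then the binder-wide default Serde, then byte[]. The binding name "input" and the chosen Serde classes are illustrative assumptions:

# Binding-level Serdes (checked first, via the extended consumer properties of "input")
spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde=org.springframework.kafka.support.serializer.JsonSerde

# Binder-wide defaults (used when no binding-level Serde is set)
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde

# Value Serdes are only honored when native decoding/encoding is enabled on the binding
spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true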
@@ -0,0 +1,63 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashSet;
import java.util.Set;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.QueryableStoreType;

/**
 * Registry that keeps track of the {@link KafkaStreams} objects created by the binder, so that
 * {@link QueryableStoreType}s created by user applications can be looked up by store name.
 *
 * @author Soby Chacko
 * @since 2.0.0
 */
public class QueryableStoreRegistry {

    private final Set<KafkaStreams> kafkaStreams = new HashSet<>();

    /**
     * Retrieve and return a queryable store by name created in the application.
     *
     * @param storeName name of the queryable store
     * @param storeType type of the queryable store
     * @param <T> generic queryable store
     * @return queryable store.
     */
    public <T> T getQueryableStoreType(String storeName, QueryableStoreType<T> storeType) {

        for (KafkaStreams kafkaStream : kafkaStreams) {
            T store = kafkaStream.store(storeName, storeType);
            if (store != null) {
                return store;
            }
        }
        return null;
    }

    /**
     * Register the {@link KafkaStreams} object created in the application.
     *
     * @param kafkaStreams {@link KafkaStreams} object created in the application
     */
    void registerKafkaStreams(KafkaStreams kafkaStreams) {
        this.kafkaStreams.add(kafkaStreams);
    }
}
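A sketch of how an application might query a state store through this registry, assuming a store named "word-counts" (for example, one created via the materializedAs consumer property) and the standard Kafka Streams QueryableStoreTypes factory; the class and method names here are illustrative:

import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class WordCountQueryService {

    @Autowired
    private QueryableStoreRegistry queryableStoreRegistry;

    public Long countFor(String word) {
        // "word-counts" is a hypothetical store name registered by the running topology.
        ReadOnlyKeyValueStore<String, Long> store = queryableStoreRegistry.getQueryableStoreType(
                "word-counts", QueryableStoreTypes.keyValueStore());
        return store == null ? null : store.get(word);
    }
}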
@@ -0,0 +1,108 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.internals.ProcessorContextImpl;
import org.apache.kafka.streams.processor.internals.StreamTask;

import org.springframework.util.ReflectionUtils;

/**
 * Custom implementation of {@link DeserializationExceptionHandler} that sends records in error
 * to a DLQ topic and then continues stream processing with new records.
 *
 * @since 2.0.0
 *
 * @author Soby Chacko
 */
public class SendToDlqAndContinue implements DeserializationExceptionHandler {

    /**
     * DLQ dispatcher per topic in the application context. The key here is not the actual DLQ topic
     * but the incoming topic that caused the error.
     */
    private Map<String, KafkaStreamsDlqDispatch> dlqDispatchers = new HashMap<>();

    /**
     * For a given topic, send the key/value record to the DLQ topic.
     *
     * @param topic incoming topic that caused the error
     * @param key to send
     * @param value to send
     * @param partition for the topic where this record should be sent
     */
    public void sendToDlq(String topic, byte[] key, byte[] value, int partition) {
        KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = dlqDispatchers.get(topic);
        kafkaStreamsDlqDispatch.sendToDlq(key, value, partition);
    }

    @Override
    @SuppressWarnings("unchecked")
    public DeserializationHandlerResponse handle(ProcessorContext context, ConsumerRecord<byte[], byte[]> record, Exception exception) {
        KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = dlqDispatchers.get(record.topic());
        kafkaStreamsDlqDispatch.sendToDlq(record.key(), record.value(), record.partition());
        context.commit();

        // The following conditional block should be reconsidered when there is a solution for this SO question:
        // https://stackoverflow.com/questions/48470899/kafka-streams-deserialization-handler
        // Currently, when a deserialization error happens, no commit takes place, so the code below uses
        // reflection to get access to the underlying KafkaConsumer. It works with Kafka 1.0.0, but there is
        // no guarantee that it will keep working in future versions of Kafka, since private fields are
        // accessed by name through reflection; it is a temporary fix.
        if (context instanceof ProcessorContextImpl) {
            ProcessorContextImpl processorContextImpl = (ProcessorContextImpl) context;
            Field task = ReflectionUtils.findField(ProcessorContextImpl.class, "task");
            ReflectionUtils.makeAccessible(task);
            Object taskField = ReflectionUtils.getField(task, processorContextImpl);

            if (taskField.getClass().isAssignableFrom(StreamTask.class)) {
                StreamTask streamTask = (StreamTask) taskField;
                Field consumer = ReflectionUtils.findField(StreamTask.class, "consumer");
                ReflectionUtils.makeAccessible(consumer);
                Object kafkaConsumerField = ReflectionUtils.getField(consumer, streamTask);
                if (kafkaConsumerField.getClass().isAssignableFrom(KafkaConsumer.class)) {
                    KafkaConsumer kafkaConsumer = (KafkaConsumer) kafkaConsumerField;
                    final Map<TopicPartition, OffsetAndMetadata> consumedOffsetsAndMetadata = new HashMap<>();
                    TopicPartition tp = new TopicPartition(record.topic(), record.partition());
                    OffsetAndMetadata oam = new OffsetAndMetadata(record.offset() + 1);
                    consumedOffsetsAndMetadata.put(tp, oam);
                    kafkaConsumer.commitSync(consumedOffsetsAndMetadata);
                }
            }
        }
        return DeserializationHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(Map<String, ?> configs) {

    }

    void addKStreamDlqDispatch(String topic, KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch) {
        dlqDispatchers.put(topic, kafkaStreamsDlqDispatch);
    }
}
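Applications do not wire this handler directly; it is selected through the binder's serdeError property, and the DLQ topic follows the error.<destination>.<group> naming used by the tests further down. An illustrative configuration, with destination and group names as assumptions:

spring.cloud.stream.bindings.input.destination=words
spring.cloud.stream.bindings.input.group=group
# Possible values: logAndContinue, logAndFail, sendToDlq
spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq
# With the settings above, records that fail deserialization end up on error.words.group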
@@ -0,0 +1,108 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Set;

import org.springframework.context.SmartLifecycle;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;

/**
 * Iterates through all {@link StreamsBuilderFactoryBean}s in the application context and starts them.
 * As each one completes starting, the associated KafkaStreams object is registered in the
 * {@link QueryableStoreRegistry}.
 *
 * This {@link SmartLifecycle} implementation ensures that the bean created from it is started very late
 * in the bootstrap process by setting the phase value close to Integer.MAX_VALUE. This guarantees that
 * the {@link StreamsBuilderFactoryBean} backing a
 * {@link org.springframework.cloud.stream.annotation.StreamListener} method with multiple bindings is
 * only started after all the binding phases have completed successfully.
 *
 * @author Soby Chacko
 */
class StreamsBuilderFactoryManager implements SmartLifecycle {

    private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
    private final QueryableStoreRegistry queryableStoreRegistry;

    private volatile boolean running;

    StreamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
            QueryableStoreRegistry queryableStoreRegistry) {
        this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
        this.queryableStoreRegistry = queryableStoreRegistry;
    }

    @Override
    public boolean isAutoStartup() {
        return true;
    }

    @Override
    public void stop(Runnable callback) {
        stop();
        if (callback != null) {
            callback.run();
        }
    }

    @Override
    public synchronized void start() {
        if (!this.running) {
            try {
                Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeans();
                for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
                    streamsBuilderFactoryBean.start();
                    queryableStoreRegistry.registerKafkaStreams(streamsBuilderFactoryBean.getKafkaStreams());
                }
                this.running = true;
            }
            catch (Exception e) {
                throw new KafkaException("Could not start stream: ", e);
            }
        }
    }

    @Override
    public synchronized void stop() {
        if (this.running) {
            try {
                Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeans();
                for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
                    streamsBuilderFactoryBean.stop();
                }
            }
            catch (Exception e) {
                throw new IllegalStateException(e);
            }
            finally {
                this.running = false;
            }
        }
    }

    @Override
    public synchronized boolean isRunning() {
        return this.running;
    }

    @Override
    public int getPhase() {
        return Integer.MAX_VALUE - 100;
    }

}
@@ -0,0 +1,85 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.annotations;

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;

/**
 * Bindable interface for {@link KStream} input and output.
 *
 * This interface can be used as a bindable interface with {@link org.springframework.cloud.stream.annotation.EnableBinding}
 * when both the input and output types are a single KStream. In other scenarios where multiple types are required,
 * similar bindable interfaces can be created and used. For example, there are cases in which multiple KStreams are
 * required on the outbound (KStream branching), or multiple input types are required, either as multiple KStreams or
 * as a combination of KStreams and KTables. In those cases, new bindable interfaces that match the requirements must
 * be created. Here are some examples.
 *
 * <pre class="code">
 * interface KStreamBranchProcessor {
 *     @Input("input")
 *     KStream<?, ?> input();
 *
 *     @Output("output-1")
 *     KStream<?, ?> output1();
 *
 *     @Output("output-2")
 *     KStream<?, ?> output2();
 *
 *     @Output("output-3")
 *     KStream<?, ?> output3();
 *
 *     ......
 *
 * }
 * </pre>
 *
 * <pre class="code">
 * interface KStreamKtableProcessor {
 *     @Input("input-1")
 *     KStream<?, ?> input1();
 *
 *     @Input("input-2")
 *     KTable<?, ?> input2();
 *
 *     @Output("output")
 *     KStream<?, ?> output();
 *
 *     ......
 *
 * }
 * </pre>
 *
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public interface KafkaStreamsProcessor {

    /**
     * @return {@link Input} binding for {@link KStream} type.
     */
    @Input("input")
    KStream<?, ?> input();

    /**
     * @return {@link Output} binding for {@link KStream} type.
     */
    @Output("output")
    KStream<?, ?> output();
}
@@ -0,0 +1,64 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * {@link ConfigurationProperties} that can be used by end-user Kafka Streams applications. This class provides
 * convenient access to commonly used Kafka Streams properties from the user application. For example, windowing
 * operations are a common use case in stream processing; window-specific properties can be provided at runtime
 * and accessed in the application through this class.
 *
 * @author Soby Chacko
 */
@ConfigurationProperties("spring.cloud.stream.kafka.streams")
public class KafkaStreamsApplicationSupportProperties {

    private TimeWindow timeWindow;

    public TimeWindow getTimeWindow() {
        return timeWindow;
    }

    public void setTimeWindow(TimeWindow timeWindow) {
        this.timeWindow = timeWindow;
    }

    public static class TimeWindow {

        private int length;

        private int advanceBy;

        public int getLength() {
            return length;
        }

        public void setLength(int length) {
            this.length = length;
        }

        public int getAdvanceBy() {
            return advanceBy;
        }

        public void setAdvanceBy(int advanceBy) {
            this.advanceBy = advanceBy;
        }
    }
}
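One plausible way for an application to turn these properties (for example, spring.cloud.stream.kafka.streams.timeWindow.length=5000 and spring.cloud.stream.kafka.streams.timeWindow.advanceBy=1000) into the TimeWindows bean that the word-count test further down autowires is sketched here; the property values and the @Bean wiring are assumptions, not something the binder mandates:

import org.apache.kafka.streams.kstream.TimeWindows;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class WindowConfig {

    // Hypothetical wiring: build a hopping window from the configured length and advance.
    @Bean
    public TimeWindows timeWindows(KafkaStreamsApplicationSupportProperties properties) {
        return TimeWindows.of(properties.getTimeWindow().getLength())
                .advanceBy(properties.getTimeWindow().getAdvanceBy());
    }
}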
@@ -0,0 +1,57 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;

/**
 * @author Soby Chacko
 */
public class KafkaStreamsBinderConfigurationProperties extends KafkaBinderConfigurationProperties {

    public enum SerdeError {
        logAndContinue,
        logAndFail,
        sendToDlq
    }

    private String applicationId = "default";

    public String getApplicationId() {
        return applicationId;
    }

    public void setApplicationId(String applicationId) {
        this.applicationId = applicationId;
    }

    /**
     * {@link org.apache.kafka.streams.errors.DeserializationExceptionHandler} to use
     * when there is a Serde error. {@link KafkaStreamsBinderConfigurationProperties.SerdeError}
     * values are used to provide the exception handler on the consumer binding.
     */
    private KafkaStreamsBinderConfigurationProperties.SerdeError serdeError;

    public KafkaStreamsBinderConfigurationProperties.SerdeError getSerdeError() {
        return serdeError;
    }

    public void setSerdeError(KafkaStreamsBinderConfigurationProperties.SerdeError serdeError) {
        this.serdeError = serdeError;
    }

}
@@ -14,30 +14,30 @@
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;
package org.springframework.cloud.stream.binder.kafka.streams.properties;

/**
 * @author Marius Bogoevici
 */
public class KStreamBindingProperties {
public class KafkaStreamsBindingProperties {

    private KStreamConsumerProperties consumer = new KStreamConsumerProperties();
    private KafkaStreamsConsumerProperties consumer = new KafkaStreamsConsumerProperties();

    private KStreamProducerProperties producer = new KStreamProducerProperties();
    private KafkaStreamsProducerProperties producer = new KafkaStreamsProducerProperties();

    public KStreamConsumerProperties getConsumer() {
    public KafkaStreamsConsumerProperties getConsumer() {
        return consumer;
    }

    public void setConsumer(KStreamConsumerProperties consumer) {
    public void setConsumer(KafkaStreamsConsumerProperties consumer) {
        this.consumer = consumer;
    }

    public KStreamProducerProperties getProducer() {
    public KafkaStreamsProducerProperties getProducer() {
        return producer;
    }

    public void setProducer(KStreamProducerProperties producer) {
    public void setProducer(KafkaStreamsProducerProperties producer) {
        this.producer = producer;
    }
}
@@ -0,0 +1,66 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KafkaStreamsConsumerProperties extends KafkaConsumerProperties {

    /**
     * Key serde specified per binding.
     */
    private String keySerde;

    /**
     * Value serde specified per binding.
     */
    private String valueSerde;

    /**
     * Materialized as a KeyValueStore.
     */
    private String materializedAs;

    public String getKeySerde() {
        return keySerde;
    }

    public void setKeySerde(String keySerde) {
        this.keySerde = keySerde;
    }

    public String getValueSerde() {
        return valueSerde;
    }

    public void setValueSerde(String valueSerde) {
        this.valueSerde = valueSerde;
    }

    public String getMaterializedAs() {
        return materializedAs;
    }

    public void setMaterializedAs(String materializedAs) {
        this.materializedAs = materializedAs;
    }

}
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;
package org.springframework.cloud.stream.binder.kafka.streams.properties;

import java.util.HashMap;
import java.util.Map;
@@ -25,37 +25,37 @@ import org.springframework.cloud.stream.binder.ExtendedBindingProperties;

/**
 * @author Marius Bogoevici
 */
@ConfigurationProperties("spring.cloud.stream.kstream")
public class KStreamExtendedBindingProperties
        implements ExtendedBindingProperties<KStreamConsumerProperties, KStreamProducerProperties> {
@ConfigurationProperties("spring.cloud.stream.kafka.streams")
public class KafkaStreamsExtendedBindingProperties
        implements ExtendedBindingProperties<KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {

    private Map<String, KStreamBindingProperties> bindings = new HashMap<>();
    private Map<String, KafkaStreamsBindingProperties> bindings = new HashMap<>();

    public Map<String, KStreamBindingProperties> getBindings() {
    public Map<String, KafkaStreamsBindingProperties> getBindings() {
        return this.bindings;
    }

    public void setBindings(Map<String, KStreamBindingProperties> bindings) {
    public void setBindings(Map<String, KafkaStreamsBindingProperties> bindings) {
        this.bindings = bindings;
    }

    @Override
    public KStreamConsumerProperties getExtendedConsumerProperties(String binding) {
    public KafkaStreamsConsumerProperties getExtendedConsumerProperties(String binding) {
        if (this.bindings.containsKey(binding) && this.bindings.get(binding).getConsumer() != null) {
            return this.bindings.get(binding).getConsumer();
        }
        else {
            return new KStreamConsumerProperties();
            return new KafkaStreamsConsumerProperties();
        }
    }

    @Override
    public KStreamProducerProperties getExtendedProducerProperties(String binding) {
    public KafkaStreamsProducerProperties getExtendedProducerProperties(String binding) {
        if (this.bindings.containsKey(binding) && this.bindings.get(binding).getProducer() != null) {
            return this.bindings.get(binding).getProducer();
        }
        else {
            return new KStreamProducerProperties();
            return new KafkaStreamsProducerProperties();
        }
    }
}
@@ -1,5 +1,5 @@
/*
 * Copyright 2017 the original author or authors.
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -14,15 +14,24 @@
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;
package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KStreamCommonProperties {
public class KafkaStreamsProducerProperties extends KafkaProducerProperties {

    /**
     * Key serde specified per binding.
     */
    private String keySerde;

    /**
     * Value serde specified per binding.
     */
    private String valueSerde;

    public String getKeySerde() {
@@ -0,0 +1,6 @@
kstream:\
org.springframework.cloud.stream.binder.kafka.streams.KStreamBinderConfiguration
ktable:\
org.springframework.cloud.stream.binder.kafka.streams.KTableBinderConfiguration
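With the kstream and ktable binder configurations registered above, an application that mixes KStream and KTable inputs can point each binding at the appropriate binder; the binding names below are illustrative:

spring.cloud.stream.bindings.input-1.binder=kstream
spring.cloud.stream.bindings.input-2.binder=ktable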
@@ -0,0 +1,5 @@
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsBinderSupportAutoConfiguration,\
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsApplicationSupportAutoConfiguration
@@ -0,0 +1,158 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Arrays;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.SpyBean;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.annotation.PropertySource;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

/**
 * @author Soby Chacko
 */
@RunWith(SpringRunner.class)
@ContextConfiguration
@DirtiesContext
public abstract class DeserializationErrorHandlerByKafkaTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts", "error.words.group");

    @SpyBean
    KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;

    private static Consumer<String, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers", embeddedKafka.getBrokersAsString());
        System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes", embeddedKafka.getZookeeperConnectionString());

        System.setProperty("server.port", "0");
        System.setProperty("spring.jmx.enabled", "false");

        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("fooc", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @SpringBootTest(properties = {
            "spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
            "spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
            "spring.cloud.stream.bindings.input.group=group",
            "spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=" +
                    "org.apache.kafka.common.serialization.Serdes$IntegerSerde"},
            webEnvironment = SpringBootTest.WebEnvironment.NONE
    )
    public static class DeserializationByKafkaAndDlqTests extends DeserializationErrorHandlerByKafkaTests {

        @Test
        @SuppressWarnings("unchecked")
        public void test() throws Exception {
            Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
            DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
            KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
            template.setDefaultTopic("words");
            template.sendDefault("foobar");

            Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar", "false", embeddedKafka);
            consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
            Consumer<String, String> consumer1 = cf.createConsumer();
            embeddedKafka.consumeFromAnEmbeddedTopic(consumer1, "error.words.group");

            ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1, "error.words.group");
            assertThat(cr.value().equals("foobar")).isTrue();

            // Ensure that the deserialization was indeed done by Kafka natively
            verify(KafkaStreamsMessageConversionDelegate, never()).deserializeOnInbound(any(Class.class), any(KStream.class));
            verify(KafkaStreamsMessageConversionDelegate, never()).serializeOnOutbound(any(KStream.class));
        }
    }

    @EnableBinding(KafkaStreamsProcessor.class)
    @EnableAutoConfiguration
    @PropertySource("classpath:/org/springframework/cloud/stream/binder/kstream/integTest-1.properties")
    @EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
    public static class WordCountProcessorApplication {

        @Autowired
        private TimeWindows timeWindows;

        @StreamListener("input")
        @SendTo("output")
        public KStream<?, String> process(KStream<Object, String> input) {

            return input
                    .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
                    .windowedBy(timeWindows)
                    .count(Materialized.as("foo-WordCounts-x"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(null, "Count for " + key.key() + " : " + value));
        }

    }
}
@@ -0,0 +1,160 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.SpyBean;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.serializer.JsonSerde;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.verify;

/**
 * @author Soby Chacko
 */
@RunWith(SpringRunner.class)
@ContextConfiguration
@DirtiesContext
public abstract class DeserializtionErrorHandlerByBinderTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id", "error.foos.foobar-group");

    @SpyBean
    KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;

    private static Consumer<Integer, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers", embeddedKafka.getBrokersAsString());
        System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes", embeddedKafka.getZookeeperConnectionString());

        System.setProperty("server.port", "0");
        System.setProperty("spring.jmx.enabled", "false");

        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foob", "false", embeddedKafka);
        //consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class.getName());
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @SpringBootTest(properties = {
            "spring.cloud.stream.bindings.input.destination=foos",
            "spring.cloud.stream.bindings.output.destination=counts-id",
            "spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
            "spring.cloud.stream.bindings.output.producer.headerMode=raw",
            "spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
            "spring.cloud.stream.bindings.input.consumer.headerMode=raw",
            "spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
            "spring.cloud.stream.bindings.input.group=foobar-group"},
            webEnvironment = SpringBootTest.WebEnvironment.NONE
    )
    public static class DeserializationByBinderAndDlqTests extends DeserializtionErrorHandlerByBinderTests {

        @Test
        @SuppressWarnings("unchecked")
        public void test() throws Exception {
            Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
            DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
            KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
            template.setDefaultTopic("foos");
            template.sendDefault("hello");

            Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar", "false", embeddedKafka);
            consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
            Consumer<String, String> consumer1 = cf.createConsumer();
            embeddedKafka.consumeFromAnEmbeddedTopic(consumer1, "error.foos.foobar-group");

            ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1, "error.foos.foobar-group");
            assertThat(cr.value().equals("hello")).isTrue();

            // Ensure that the deserialization was indeed done by the binder
            verify(KafkaStreamsMessageConversionDelegate).deserializeOnInbound(any(Class.class), any(KStream.class));
        }
    }

    @EnableBinding(KafkaStreamsProcessor.class)
    @EnableAutoConfiguration
    public static class ProductCountApplication {

        @StreamListener("input")
        @SendTo("output")
        public KStream<Integer, Long> process(KStream<Object, Product> input) {
            return input
                    .filter((key, product) -> product.getId() == 123)
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class)))
                    .windowedBy(TimeWindows.of(5000))
                    .count(Materialized.as("id-count-store-x"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(key.key().id, value));
        }
    }

    static class Product {

        Integer id;

        public Integer getId() {
            return id;
        }

        public void setId(Integer id) {
            this.id = id;
        }
    }
}
@@ -14,30 +14,30 @@
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;
package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KeyValueMapper;
import org.apache.kafka.streams.kstream.Predicate;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
@@ -50,22 +50,22 @@ import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 *
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {
public class KafkaStreamsBinderPojoInputAndPrimitiveTypeOutputTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

    private static Consumer<Integer, Long> consumer;
    private static Consumer<Integer, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        //consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class.getName());
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<Integer, Long> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
    }
@@ -78,37 +78,41 @@ public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {

    @Test
    public void testKstreamBinderWithPojoInputAndStringOuput() throws Exception {
        SpringApplication app = new SpringApplication(ProductCountApplication.class);
        app.setWebEnvironment(false);
        app.setWebApplicationType(WebApplicationType.NONE);
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.input.destination=foos",
|
||||
"--spring.cloud.stream.bindings.output.destination=counts-id",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
|
||||
"--spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
receiveAndValidateFoo(context);
|
||||
context.close();
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidateFoo(context);
|
||||
} finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception{
|
||||
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("foos");
|
||||
template.sendDefault("{\"id\":\"123\"}");
|
||||
ConsumerRecord<Integer, Long> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
|
||||
ConsumerRecord<Integer, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
|
||||
|
||||
assertThat(cr.key().equals(123));
|
||||
assertThat(cr.value().equals(1L));
|
||||
ObjectMapper om = new ObjectMapper();
|
||||
Long aLong = om.readValue(cr.value(), Long.class);
|
||||
assertThat(aLong.equals(1L));
|
||||
}
|
||||
|
||||
@EnableBinding(KStreamProcessor.class)
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class ProductCountApplication {
|
||||
|
||||
@@ -116,30 +120,13 @@ public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {
|
||||
@SendTo("output")
|
||||
public KStream<Integer, Long> process(KStream<Object, Product> input) {
|
||||
return input
|
||||
.filter(new Predicate<Object, Product>() {
|
||||
|
||||
@Override
|
||||
public boolean test(Object key, Product product) {
|
||||
return product.getId() == 123;
|
||||
}
|
||||
})
|
||||
.map(new KeyValueMapper<Object, Product, KeyValue<Product, Product>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<Product, Product> apply(Object key, Product value) {
|
||||
return new KeyValue<>(value, value);
|
||||
}
|
||||
})
|
||||
.groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
|
||||
.count(TimeWindows.of(5000), "id-count-store")
|
||||
.filter((key, product) -> product.getId() == 123)
|
||||
.map((key, value) -> new KeyValue<>(value, value))
|
||||
.groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class)))
|
||||
.windowedBy(TimeWindows.of(5000))
|
||||
.count(Materialized.as("id-count-store-x"))
|
||||
.toStream()
|
||||
.map(new KeyValueMapper<Windowed<Product>, Long, KeyValue<Integer, Long>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<Integer, Long> apply(Windowed<Product> key, Long value) {
|
||||
return new KeyValue<>(key.key().id, value);
|
||||
}
|
||||
});
|
||||
.map((key, value) -> new KeyValue<>(key.key().id, value));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,11 +14,10 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kstream;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
@@ -27,10 +26,9 @@ import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KeyValueMapper;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
import org.apache.kafka.streams.kstream.ValueMapper;
|
||||
import org.apache.kafka.streams.kstream.Windowed;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
@@ -38,16 +36,17 @@ import org.junit.Test;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KStreamBuilderFactoryBean;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
@@ -56,11 +55,11 @@ import org.springframework.messaging.handler.annotation.SendTo;
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
*
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public class KStreamBinderWordCountIntegrationTests {
|
||||
public class KafkaStreamsBinderWordCountIntegrationTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");
|
||||
@@ -84,25 +83,30 @@ public class KStreamBinderWordCountIntegrationTests {
|
||||
@Test
|
||||
public void testKstreamWordCountWithStringInputAndPojoOuput() throws Exception {
|
||||
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
|
||||
app.setWebEnvironment(false);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.input.destination=words",
|
||||
"--spring.cloud.stream.bindings.output.destination=counts",
|
||||
"--spring.cloud.stream.bindings.output.contentType=application/json",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
receiveAndValidate(context);
|
||||
context.close();
|
||||
"--spring.cloud.stream.kafka.streams.timeWindow.length=5000",
|
||||
"--spring.cloud.stream.kafka.streams.timeWindow.advanceBy=0",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidate(context);
|
||||
} finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception{
|
||||
private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
@@ -112,92 +116,33 @@ public class KStreamBinderWordCountIntegrationTests {
|
||||
assertThat(cr.value().contains("\"word\":\"foobar\",\"count\":1")).isTrue();
|
||||
}
|
||||
|
||||
@EnableBinding(KStreamProcessor.class)
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
@EnableConfigurationProperties(WordCountProcessorProperties.class)
|
||||
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
|
||||
public static class WordCountProcessorApplication {
|
||||
|
||||
@Autowired
|
||||
private WordCountProcessorProperties processorProperties;
|
||||
private TimeWindows timeWindows;
|
||||
|
||||
@Autowired
|
||||
private KStreamBuilderFactoryBean kafkaStreams;
|
||||
|
||||
@StreamListener("input")
|
||||
@StreamListener
|
||||
@SendTo("output")
|
||||
public KStream<?, WordCount> process(KStream<Object, String> input) {
|
||||
public KStream<?, WordCount> process(@Input("input") KStream<Object, String> input) {
|
||||
|
||||
input.map((k,v) -> {
|
||||
System.out.println(k);
|
||||
System.out.println(v);
|
||||
return new KeyValue<>(k,v);
|
||||
});
|
||||
return input
|
||||
.flatMapValues(new ValueMapper<String, Iterable<String>>() {
|
||||
|
||||
@Override
|
||||
public List<String> apply(String value) {
|
||||
return Arrays.asList(value.toLowerCase().split("\\W+"));
|
||||
}
|
||||
})
|
||||
.map(new KeyValueMapper<Object, String, KeyValue<String, String>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<String, String> apply(Object key, String value) {
|
||||
return new KeyValue<>(value, value);
|
||||
}
|
||||
})
|
||||
.groupByKey(Serdes.String(), Serdes.String())
|
||||
.count(configuredTimeWindow(), processorProperties.getStoreName())
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.map((key, value) -> new KeyValue<>(value, value))
|
||||
.groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
|
||||
.windowedBy(timeWindows)
|
||||
.count(Materialized.as("foo-WordCounts"))
|
||||
.toStream()
|
||||
.map(new KeyValueMapper<Windowed<String>, Long, KeyValue<Object, WordCount>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<Object, WordCount> apply(Windowed<String> key, Long value) {
|
||||
return new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end())));
|
||||
}
|
||||
});
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@link TimeWindows} property.
|
||||
*
|
||||
* @return
|
||||
*/
|
||||
private TimeWindows configuredTimeWindow() {
|
||||
return processorProperties.getAdvanceBy() > 0
|
||||
? TimeWindows.of(processorProperties.getWindowLength()).advanceBy(processorProperties.getAdvanceBy())
|
||||
: TimeWindows.of(processorProperties.getWindowLength());
|
||||
}
|
||||
}
|
||||
|
||||
@ConfigurationProperties(prefix = "kstream.word.count")
|
||||
static class WordCountProcessorProperties {
|
||||
|
||||
private int windowLength = 5000;
|
||||
|
||||
private int advanceBy = 0;
|
||||
|
||||
private String storeName = "WordCounts";
|
||||
|
||||
int getWindowLength() {
|
||||
return windowLength;
|
||||
}
|
||||
|
||||
public void setWindowLength(int windowLength) {
|
||||
this.windowLength = windowLength;
|
||||
}
|
||||
|
||||
int getAdvanceBy() {
|
||||
return advanceBy;
|
||||
}
|
||||
|
||||
public void setAdvanceBy(int advanceBy) {
|
||||
this.advanceBy = advanceBy;
|
||||
}
|
||||
|
||||
String getStoreName() {
|
||||
return storeName;
|
||||
}
|
||||
|
||||
public void setStoreName(String storeName) {
|
||||
this.storeName = storeName;
|
||||
}
|
||||
}
|
||||
|
||||
static class WordCount {
|
||||
@@ -13,7 +13,8 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.springframework.cloud.stream.binder.kstream;
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
@@ -21,11 +22,9 @@ import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KeyValueMapper;
|
||||
import org.apache.kafka.streams.kstream.Predicate;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.state.QueryableStoreTypes;
|
||||
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
|
||||
import org.junit.AfterClass;
|
||||
@@ -35,15 +34,15 @@ import org.junit.Test;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KStreamBuilderFactoryBean;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.support.serializer.JsonSerde;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
@@ -54,8 +53,9 @@ import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public class KStreamInteractiveQueryIntegrationTests {
|
||||
public class KafkaStreamsInteractiveQueryIntegrationTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");
|
||||
@@ -79,23 +79,26 @@ public class KStreamInteractiveQueryIntegrationTests {
|
||||
@Test
|
||||
public void testKstreamBinderWithPojoInputAndStringOuput() throws Exception {
|
||||
SpringApplication app = new SpringApplication(ProductCountApplication.class);
|
||||
app.setWebEnvironment(false);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.input.destination=foos",
|
||||
"--spring.cloud.stream.bindings.output.destination=counts-id",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
receiveAndValidateFoo(context);
|
||||
context.close();
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidateFoo(context);
|
||||
} finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception{
|
||||
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
|
||||
@@ -108,65 +111,45 @@ public class KStreamInteractiveQueryIntegrationTests {
|
||||
assertThat(foo.getProductStock(123).equals(1L));
|
||||
}
|
||||
|
||||
@EnableBinding(KStreamProcessor.class)
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class ProductCountApplication {
|
||||
|
||||
@Autowired
|
||||
private KStreamBuilderFactoryBean kStreamBuilderFactoryBean;
|
||||
private QueryableStoreRegistry queryableStoreRegistry;
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
@SuppressWarnings("deprecation")
|
||||
public KStream<?, String> process(KStream<Object, Product> input) {
|
||||
|
||||
return input
|
||||
.filter(new Predicate<Object, Product>() {
|
||||
|
||||
@Override
|
||||
public boolean test(Object key, Product product) {
|
||||
return product.getId() == 123;
|
||||
}
|
||||
})
|
||||
.map(new KeyValueMapper<Object, Product, KeyValue<Integer, Product>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<Integer, Product> apply(Object key, Product value) {
|
||||
return new KeyValue<>(value.id, value);
|
||||
}
|
||||
})
|
||||
.groupByKey(new Serdes.IntegerSerde(), new JsonSerde<>(Product.class))
|
||||
.filter((key, product) -> product.getId() == 123)
|
||||
.map((key, value) -> new KeyValue<>(value.id, value))
|
||||
.groupByKey(Serialized.with(new Serdes.IntegerSerde(), new JsonSerde<>(Product.class)))
|
||||
.count("prod-id-count-store")
|
||||
.toStream()
|
||||
.map(new KeyValueMapper<Integer, Long, KeyValue<Object, String>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<Object, String> apply(Integer key, Long value) {
|
||||
return new KeyValue<>(null, "Count for product with ID 123: " + value);
|
||||
}
|
||||
});
|
||||
.map((key, value) -> new KeyValue<>(null, "Count for product with ID 123: " + value));
|
||||
}
|
||||
|
||||
@Bean
|
||||
public Foo foo(KStreamBuilderFactoryBean kStreamBuilderFactoryBean) {
|
||||
return new Foo(kStreamBuilderFactoryBean);
|
||||
public Foo foo(QueryableStoreRegistry queryableStoreRegistry) {
|
||||
return new Foo(queryableStoreRegistry);
|
||||
}
|
||||
|
||||
|
||||
static class Foo {
|
||||
KStreamBuilderFactoryBean kStreamBuilderFactoryBean;
|
||||
QueryableStoreRegistry queryableStoreRegistry;
|
||||
|
||||
Foo(KStreamBuilderFactoryBean kStreamBuilderFactoryBean) {
|
||||
this.kStreamBuilderFactoryBean = kStreamBuilderFactoryBean;
|
||||
Foo(QueryableStoreRegistry queryableStoreRegistry) {
|
||||
this.queryableStoreRegistry = queryableStoreRegistry;
|
||||
}
|
||||
|
||||
public Long getProductStock(Integer id) {
|
||||
KafkaStreams streams = kStreamBuilderFactoryBean.getKafkaStreams();
|
||||
ReadOnlyKeyValueStore<Object, Object> keyValueStore =
|
||||
streams.store("prod-id-count-store", QueryableStoreTypes.keyValueStore());
|
||||
return (Long)keyValueStore.get(id);
|
||||
queryableStoreRegistry.getQueryableStoreType("prod-id-count-store", QueryableStoreTypes.keyValueStore());
|
||||
return (Long) keyValueStore.get(id);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class Product {
|
||||
@@ -0,0 +1,162 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.boot.test.mock.mockito.SpyBean;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.annotation.PropertySource;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
import org.springframework.test.annotation.DirtiesContext;
|
||||
import org.springframework.test.context.ContextConfiguration;
|
||||
import org.springframework.test.context.junit4.SpringRunner;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
@RunWith(SpringRunner.class)
|
||||
@ContextConfiguration
|
||||
@DirtiesContext
|
||||
public abstract class KafkaStreamsNativeEncodingDecodingTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");
|
||||
|
||||
@SpyBean
|
||||
KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;
|
||||
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() throws Exception {
|
||||
System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers", embeddedKafka.getBrokersAsString());
|
||||
System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes", embeddedKafka.getZookeeperConnectionString());
|
||||
|
||||
System.setProperty("server.port","0");
|
||||
System.setProperty("spring.jmx.enabled","false");
|
||||
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
}
|
||||
|
||||
	@SpringBootTest(properties = {
			"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
			"spring.cloud.stream.bindings.output.producer.useNativeEncoding=true"},
			webEnvironment = SpringBootTest.WebEnvironment.NONE
	)
	public static class NativeEncodingDecodingEnabledTests extends KafkaStreamsNativeEncodingDecodingTests {

		@Test
		public void test() throws Exception {
			Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
			DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
			KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
			template.setDefaultTopic("words");
			template.sendDefault("foobar");
			ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
			assertThat(cr.value().equals("Count for foobar : 1")).isTrue();

			verify(KafkaStreamsMessageConversionDelegate, never()).serializeOnOutbound(any(KStream.class));
			verify(KafkaStreamsMessageConversionDelegate, never()).deserializeOnInbound(any(Class.class), any(KStream.class));
		}
	}

	@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
	public static class NativeEncodingDecodingDisabledTests extends KafkaStreamsNativeEncodingDecodingTests {

		@Test
		public void test() throws Exception {
			Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
			DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
			KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
			template.setDefaultTopic("words");
			template.sendDefault("foobar");
			ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
			assertThat(cr.value().equals("Count for foobar : 1")).isTrue();

			verify(KafkaStreamsMessageConversionDelegate).serializeOnOutbound(any(KStream.class));
			verify(KafkaStreamsMessageConversionDelegate).deserializeOnInbound(any(Class.class), any(KStream.class));
		}
	}

	@EnableBinding(KafkaStreamsProcessor.class)
	@EnableAutoConfiguration
	@PropertySource("classpath:/org/springframework/cloud/stream/binder/kstream/integTest-1.properties")
	@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
	public static class WordCountProcessorApplication {

		@Autowired
		private TimeWindows timeWindows;

		@StreamListener("input")
		@SendTo("output")
		public KStream<?, String> process(KStream<Object, String> input) {

			return input
					.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
					.map((key, value) -> new KeyValue<>(value, value))
					.groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
					.windowedBy(timeWindows)
					.count(Materialized.as("foo-WordCounts-x"))
					.toStream()
					.map((key, value) -> new KeyValue<>(null, "Count for " + key.key() + " : " + value));
		}
	}

}
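The two subclasses above flip native encoding/decoding purely through test properties: when the flags are on, the binder delegates (de)serialization to the configured Kafka Streams Serdes and skips its own message conversion, which is what the verify(..., never()) assertions check. A minimal, hypothetical launcher sketch along the same lines follows; MyWordCountApp is a placeholder standing in for the WordCountProcessorApplication defined above, and only property names that appear in these tests are used.

```java
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;

// Hypothetical sketch only: MyWordCountApp stands in for the
// WordCountProcessorApplication defined in the tests above.
public class NativeSerdeConfigurationSketch {

	public static void main(String[] args) {
		SpringApplication app = new SpringApplication(MyWordCountApp.class);
		app.setWebApplicationType(WebApplicationType.NONE);
		app.run(
				"--spring.cloud.stream.bindings.input.destination=words",
				"--spring.cloud.stream.bindings.output.destination=counts",
				// let the Kafka Streams Serdes do the (de)serialization; the
				// binder's own message conversion is bypassed on both sides
				"--spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
				"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde");
	}
}
```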
@@ -14,7 +14,7 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kstream;
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
@@ -23,20 +23,20 @@ import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KeyValueMapper;
|
||||
import org.apache.kafka.streams.kstream.Predicate;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.apache.kafka.streams.kstream.TimeWindows;
|
||||
import org.apache.kafka.streams.kstream.Windowed;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
@@ -51,8 +51,9 @@ import static org.assertj.core.api.Assertions.assertThat;
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public class KstreamBinderPojoInputStringOutputIntegrationTests {
|
||||
public class KafkastreamsBinderPojoInputStringOutputIntegrationTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");
|
||||
@@ -76,20 +77,24 @@ public class KstreamBinderPojoInputStringOutputIntegrationTests {
|
||||
@Test
|
||||
public void testKstreamBinderWithPojoInputAndStringOuput() throws Exception {
|
||||
SpringApplication app = new SpringApplication(ProductCountApplication.class);
|
||||
app.setWebEnvironment(false);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.input.destination=foos",
|
||||
"--spring.cloud.stream.bindings.output.destination=counts-id",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
receiveAndValidateFoo(context);
|
||||
context.close();
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
receiveAndValidateFoo(context);
|
||||
} finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
|
||||
private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
|
||||
@@ -102,39 +107,22 @@ public class KstreamBinderPojoInputStringOutputIntegrationTests {
|
||||
assertThat(cr.value().contains("Count for product with ID 123: 1")).isTrue();
|
||||
}
|
||||
|
||||
@EnableBinding(KStreamProcessor.class)
|
||||
@EnableBinding(KafkaStreamsProcessor.class)
|
||||
@EnableAutoConfiguration
|
||||
public static class ProductCountApplication {
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
public KStream<?, String> process(KStream<Object, Product> input) {
|
||||
public KStream<Integer, String> process(KStream<Object, Product> input) {
|
||||
|
||||
return input
|
||||
.filter(new Predicate<Object, Product>() {
|
||||
|
||||
@Override
|
||||
public boolean test(Object key, Product product) {
|
||||
return product.getId() == 123;
|
||||
}
|
||||
})
|
||||
.map(new KeyValueMapper<Object, Product, KeyValue<Product, Product>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<Product, Product> apply(Object key, Product value) {
|
||||
return new KeyValue<>(value, value);
|
||||
}
|
||||
})
|
||||
.groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
|
||||
.count(TimeWindows.of(5000), "id-count-store")
|
||||
.filter((key, product) -> product.getId() == 123)
|
||||
.map((key, value) -> new KeyValue<>(value, value))
|
||||
.groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class)))
|
||||
.windowedBy(TimeWindows.of(5000))
|
||||
.count(Materialized.as("id-count-store"))
|
||||
.toStream()
|
||||
.map(new KeyValueMapper<Windowed<Product>, Long, KeyValue<Object, String>>() {
|
||||
|
||||
@Override
|
||||
public KeyValue<Object, String> apply(Windowed<Product> key, Long value) {
|
||||
return new KeyValue<>(null, "Count for product with ID 123: " + value);
|
||||
}
|
||||
});
|
||||
.map((key, value) -> new KeyValue<>(key.key().id, "Count for product with ID 123: " + value));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,247 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.common.serialization.LongDeserializer;
|
||||
import org.apache.kafka.common.serialization.LongSerializer;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.serialization.StringDeserializer;
|
||||
import org.apache.kafka.common.serialization.StringSerializer;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.Joined;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Serialized;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.WebApplicationType;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
*/
|
||||
public class StreamToTableJoinIntegrationTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "output-topic");
|
||||
|
||||
private static Consumer<String, Long> consumer;
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() throws Exception {
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
|
||||
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
|
||||
DefaultKafkaConsumerFactory<String, Long> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "output-topic");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
}
|
||||
|
||||
	@EnableBinding(KafkaStreamsProcessorX.class)
	@EnableAutoConfiguration
	@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
	public static class CountClicksPerRegionApplication {

		@StreamListener
		@SendTo("output")
		public KStream<String, Long> process(@Input("input") KStream<String, Long> userClicksStream,
				@Input("inputX") KTable<String, String> userRegionsTable) {

			return userClicksStream
					.leftJoin(userRegionsTable, (clicks, region) -> new RegionWithClicks(region == null ? "UNKNOWN" : region, clicks),
							Joined.with(Serdes.String(), Serdes.Long(), null))
					.map((user, regionWithClicks) -> new KeyValue<>(regionWithClicks.getRegion(), regionWithClicks.getClicks()))
					.groupByKey(Serialized.with(Serdes.String(), Serdes.Long()))
					.reduce((firstClicks, secondClicks) -> firstClicks + secondClicks)
					.toStream();
		}
	}

	interface KafkaStreamsProcessorX extends KafkaStreamsProcessor {

		@Input("inputX")
		KTable<?, ?> inputX();
	}
|
||||
@Test
|
||||
public void testStreamToTable() throws Exception {
|
||||
SpringApplication app = new SpringApplication(CountClicksPerRegionApplication.class);
|
||||
app.setWebApplicationType(WebApplicationType.NONE);
|
||||
|
||||
ConfigurableApplicationContext context = app.run("--server.port=0",
|
||||
"--spring.jmx.enabled=false",
|
||||
"--spring.cloud.stream.bindings.input.destination=user-clicks",
|
||||
"--spring.cloud.stream.bindings.inputX.destination=user-regions",
|
||||
"--spring.cloud.stream.bindings.output.destination=output-topic",
|
||||
"--spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.inputX.consumer.useNativeDecoding=true",
|
||||
"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.inputX.consumer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.inputX.consumer.valueSerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
|
||||
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=10000",
|
||||
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.bindings.inputX.consumer.headerMode=raw",
|
||||
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
|
||||
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
|
||||
try {
|
||||
// Input 1: Clicks per user (multiple records allowed per user).
|
||||
List<KeyValue<String, Long>> userClicks = Arrays.asList(
|
||||
new KeyValue<>("alice", 13L),
|
||||
new KeyValue<>("bob", 4L),
|
||||
new KeyValue<>("chao", 25L),
|
||||
new KeyValue<>("bob", 19L),
|
||||
new KeyValue<>("dave", 56L),
|
||||
new KeyValue<>("eve", 78L),
|
||||
new KeyValue<>("alice", 40L),
|
||||
new KeyValue<>("fang", 99L)
|
||||
);
|
||||
|
||||
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, LongSerializer.class);
|
||||
|
||||
DefaultKafkaProducerFactory<String, Long> pf = new DefaultKafkaProducerFactory<>(senderProps);
|
||||
KafkaTemplate<String, Long> template = new KafkaTemplate<>(pf, true);
|
||||
template.setDefaultTopic("user-clicks");
|
||||
|
||||
for (KeyValue<String,Long> keyValue : userClicks) {
|
||||
template.sendDefault(keyValue.key, keyValue.value);
|
||||
}
|
||||
|
||||
// Input 2: Region per user (multiple records allowed per user).
|
||||
List<KeyValue<String, String>> userRegions = Arrays.asList(
|
||||
new KeyValue<>("alice", "asia"), /* Alice lived in Asia originally... */
|
||||
new KeyValue<>("bob", "americas"),
|
||||
new KeyValue<>("chao", "asia"),
|
||||
new KeyValue<>("dave", "europe"),
|
||||
new KeyValue<>("alice", "europe"), /* ...but moved to Europe some time later. */
|
||||
new KeyValue<>("eve", "americas"),
|
||||
new KeyValue<>("fang", "asia")
|
||||
);
|
||||
|
||||
Map<String, Object> senderProps1 = KafkaTestUtils.producerProps(embeddedKafka);
|
||||
senderProps1.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
senderProps1.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
|
||||
|
||||
DefaultKafkaProducerFactory<String, String> pf1 = new DefaultKafkaProducerFactory<>(senderProps1);
|
||||
KafkaTemplate<String, String> template1 = new KafkaTemplate<>(pf1, true);
|
||||
template1.setDefaultTopic("user-regions");
|
||||
|
||||
for (KeyValue<String,String> keyValue : userRegions) {
|
||||
template1.sendDefault(keyValue.key, keyValue.value);
|
||||
}
|
||||
|
||||
List<KeyValue<String, Long>> expectedClicksPerRegion = Arrays.asList(
|
||||
new KeyValue<>("americas", 101L),
|
||||
new KeyValue<>("europe", 109L),
|
||||
new KeyValue<>("asia", 124L)
|
||||
);
|
||||
|
||||
//Verify that we receive the expected data
|
||||
int count = 0;
|
||||
long start = System.currentTimeMillis();
|
||||
List<KeyValue<String, Long>> actualClicksPerRegion = new ArrayList<>();
|
||||
do {
|
||||
ConsumerRecords<String, Long> records = KafkaTestUtils.getRecords(consumer);
|
||||
count = count + records.count();
|
||||
for (ConsumerRecord<String, Long> record : records) {
|
||||
actualClicksPerRegion.add(new KeyValue<>(record.key(), record.value()));
|
||||
}
|
||||
} while (count < expectedClicksPerRegion.size() && (System.currentTimeMillis() - start) < 30000 );
|
||||
|
||||
assertThat(count == expectedClicksPerRegion.size()).isTrue();
|
||||
assertThat(actualClicksPerRegion).hasSameElementsAs(expectedClicksPerRegion);
|
||||
}
|
||||
catch (Exception e){
|
||||
System.out.println(e);
|
||||
}
|
||||
finally {
|
||||
context.close();
|
||||
}
|
||||
}
|
||||
|
||||
	/**
	 * Tuple for a region and its associated number of clicks.
	 */
	private static final class RegionWithClicks {

		private final String region;
		private final long clicks;

		RegionWithClicks(String region, long clicks) {
			if (region == null || region.isEmpty()) {
				throw new IllegalArgumentException("region must be set");
			}
			if (clicks < 0) {
				throw new IllegalArgumentException("clicks must not be negative");
			}
			this.region = region;
			this.clicks = clicks;
		}

		public String getRegion() {
			return region;
		}

		public long getClicks() {
			return clicks;
		}

	}
}
@@ -0,0 +1,226 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Arrays;
import java.util.Date;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Predicate;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
* @author Marius Bogoevici
* @author Soby Chacko
* @author Gary Russell
*/
public class WordCountMultipleBranchesIntegrationTests {

@ClassRule
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts","foo","bar");

private static Consumer<String, String> consumer;

@BeforeClass
public static void setUp() throws Exception {
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("groupx", "false", embeddedKafka);
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
consumer = cf.createConsumer();
embeddedKafka.consumeFromEmbeddedTopics(consumer, "counts", "foo", "bar");
}

@AfterClass
public static void tearDown() {
consumer.close();
}

@EnableBinding(KStreamProcessorX.class)
@EnableAutoConfiguration
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
public static class WordCountProcessorApplication {

@Autowired
private TimeWindows timeWindows;

@StreamListener("input")
@SendTo({"output1","output2","output3"})
@SuppressWarnings("unchecked")
public KStream<?, WordCount>[] process(KStream<Object, String> input) {

Predicate<Object, WordCount> isEnglish = (k, v) -> v.word.equals("english");
Predicate<Object, WordCount> isFrench = (k, v) -> v.word.equals("french");
Predicate<Object, WordCount> isSpanish = (k, v) -> v.word.equals("spanish");

return input
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
.groupBy((key, value) -> value)
.windowedBy(timeWindows)
.count(Materialized.as("WordCounts-multi"))
.toStream()
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))))
.branch(isEnglish, isFrench, isSpanish);
}
}

interface KStreamProcessorX {

@Input("input")
KStream<?, ?> input();

@Output("output1")
KStream<?, ?> output1();

@Output("output2")
KStream<?, ?> output2();

@Output("output3")
KStream<?, ?> output3();
}

@Test
public void testKstreamWordCountWithStringInputAndPojoOuput() throws Exception {
SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
app.setWebApplicationType(WebApplicationType.NONE);

ConfigurableApplicationContext context = app.run("--server.port=0",
"--spring.jmx.enabled=false",
"--spring.cloud.stream.bindings.input.destination=words",
"--spring.cloud.stream.bindings.output1.destination=counts",
"--spring.cloud.stream.bindings.output1.contentType=application/json",
"--spring.cloud.stream.bindings.output2.destination=foo",
"--spring.cloud.stream.bindings.output2.contentType=application/json",
"--spring.cloud.stream.bindings.output3.destination=bar",
"--spring.cloud.stream.bindings.output3.contentType=application/json",
"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
"--spring.cloud.stream.kafka.streams.timeWindow.length=5000",
"--spring.cloud.stream.kafka.streams.timeWindow.advanceBy=0",
"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
try {
receiveAndValidate(context);
} finally {
context.close();
}
}

private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
template.setDefaultTopic("words");
template.sendDefault("english");
ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
assertThat(cr.value().contains("\"word\":\"english\",\"count\":1")).isTrue();

template.sendDefault("french");
template.sendDefault("french");
cr = KafkaTestUtils.getSingleRecord(consumer, "foo");
assertThat(cr.value().contains("\"word\":\"french\",\"count\":2")).isTrue();

template.sendDefault("spanish");
template.sendDefault("spanish");
template.sendDefault("spanish");
cr = KafkaTestUtils.getSingleRecord(consumer, "bar");
assertThat(cr.value().contains("\"word\":\"spanish\",\"count\":3")).isTrue();
}

static class WordCount {

private String word;

private long count;

private Date start;

private Date end;

WordCount(String word, long count, Date start, Date end) {
this.word = word;
this.count = count;
this.start = start;
this.end = end;
}

public String getWord() {
return word;
}

public void setWord(String word) {
this.word = word;
}

public long getCount() {
return count;
}

public void setCount(long count) {
this.count = count;
}

public Date getStart() {
return start;
}

public void setStart(Date start) {
this.start = start;
}

public Date getEnd() {
return end;
}

public void setEnd(Date end) {
this.end = end;
}
}

}
@@ -0,0 +1,10 @@
spring.cloud.stream.bindings.input.destination=words
spring.cloud.stream.bindings.output.destination=counts
spring.cloud.stream.bindings.output.contentType=application/json
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.bindings.output.producer.headerMode=raw
spring.cloud.stream.bindings.input.consumer.headerMode=raw
spring.cloud.stream.kafka.streams.timeWindow.length=5000
spring.cloud.stream.kafka.streams.timeWindow.advanceBy=0
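The timeWindow.length and timeWindow.advanceBy settings above are consumed by the binder's application-support properties and turned into the TimeWindows bean that the test autowires. As a rough, illustrative sketch only (not part of these commits; the actual KafkaStreamsApplicationSupportProperties wiring may differ, and the configuration class name here is hypothetical), the mapping amounts to something like:

import org.apache.kafka.streams.kstream.TimeWindows;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class TimeWindowsSketchConfiguration {

// Hypothetical illustration: build a TimeWindows bean from the two properties shown above.
@Bean
public TimeWindows timeWindows(
@Value("${spring.cloud.stream.kafka.streams.timeWindow.length:5000}") long lengthMs,
@Value("${spring.cloud.stream.kafka.streams.timeWindow.advanceBy:0}") long advanceByMs) {
TimeWindows windows = TimeWindows.of(lengthMs); // tumbling window of lengthMs milliseconds
// a positive advanceBy turns the tumbling window into a hopping window
return advanceByMs > 0 ? windows.advanceBy(advanceByMs) : windows;
}
}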
0
spring-cloud-stream-binder-kafka/.jdk8
Normal file
@@ -0,0 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99
@@ -10,7 +10,7 @@
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.3.0.M2</version>
<version>2.0.0.RC1</version>
</parent>

<dependencies>
@@ -27,10 +27,6 @@
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-codec</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
@@ -41,10 +37,6 @@
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
@@ -52,11 +44,6 @@
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>${spring-kafka.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
@@ -68,12 +55,6 @@
<artifactId>spring-kafka-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<classifier>test</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
@@ -81,20 +62,4 @@
</dependency>
</dependencies>

<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>3.0.2</version>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
@@ -19,6 +19,13 @@ package org.springframework.cloud.stream.binder.kafka;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
@@ -33,42 +40,91 @@ import org.springframework.kafka.core.ConsumerFactory;
* @author Ilayaperumal Gopinathan
* @author Marius Bogoevici
* @author Henryk Konsek
* @author Gary Russell
* @author Laur Aliste
*/
public class KafkaBinderHealthIndicator implements HealthIndicator {

private static final int DEFAULT_TIMEOUT = 60;

private final KafkaMessageChannelBinder binder;

private final ConsumerFactory<?, ?> consumerFactory;

public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
ConsumerFactory<?, ?> consumerFactory) {
private int timeout = DEFAULT_TIMEOUT;

private Consumer<?, ?> metadataConsumer;

public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder, ConsumerFactory<?, ?> consumerFactory) {
this.binder = binder;
this.consumerFactory = consumerFactory;
}

/**
* Set the timeout in seconds to retrieve health information.
*
* @param timeout the timeout - default 60.
*/
public void setTimeout(int timeout) {
this.timeout = timeout;
}

@Override
public Health health() {
try (Consumer<?, ?> metadataConsumer = consumerFactory.createConsumer()) {
Set<String> downMessages = new HashSet<>();
for (String topic : this.binder.getTopicsInUse().keySet()) {
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
for (PartitionInfo partitionInfo : partitionInfos) {
if (this.binder.getTopicsInUse().get(topic).getPartitionInfos().contains(partitionInfo)
&& partitionInfo.leader()
.id() == -1) {
downMessages.add(partitionInfo.toString());
ExecutorService exec = Executors.newSingleThreadExecutor();
Future<Health> future = exec.submit(new Callable<Health>() {

@Override
public Health call() {
try {
if (metadataConsumer == null) {
metadataConsumer = consumerFactory.createConsumer();
}
Set<String> downMessages = new HashSet<>();
for (String topic : KafkaBinderHealthIndicator.this.binder.getTopicsInUse().keySet()) {
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
for (PartitionInfo partitionInfo : partitionInfos) {
if (KafkaBinderHealthIndicator.this.binder.getTopicsInUse().get(topic).getPartitionInfos()
.contains(partitionInfo) && partitionInfo.leader().id() == -1) {
downMessages.add(partitionInfo.toString());
}
}
}
if (downMessages.isEmpty()) {
return Health.up().build();
}
else {
return Health.down()
.withDetail("Following partitions in use have no leaders: ", downMessages.toString())
.build();
}
}
catch (Exception e) {
return Health.down(e).build();
}
}
if (downMessages.isEmpty()) {
return Health.up().build();
}
return Health.down().withDetail("Following partitions in use have no leaders: ", downMessages.toString())

});
try {
return future.get(this.timeout, TimeUnit.SECONDS);
}
catch (InterruptedException e) {
Thread.currentThread().interrupt();
return Health.down()
.withDetail("Interrupted while waiting for partition information in", this.timeout + " seconds")
.build();
}
catch (Exception e) {
catch (ExecutionException e) {
return Health.down(e).build();
}
catch (TimeoutException e) {
return Health.down()
.withDetail("Failed to retrieve partition information in", this.timeout + " seconds")
.build();
}
finally {
exec.shutdownNow();
}
}

}
@@ -1,118 +0,0 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka;

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;

import org.apache.kafka.common.security.JaasUtils;

import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.util.Assert;

/**
* @author Marius Bogoevici
*/
public class KafkaBinderJaasInitializerListener implements ApplicationListener<ContextRefreshedEvent>,
ApplicationContextAware, DisposableBean {

public static final String DEFAULT_ZK_LOGIN_CONTEXT_NAME = "Client";

private ApplicationContext applicationContext;

private final boolean ignoreJavaLoginConfigParamSystemProperty;

private final File placeholderJaasConfiguration;

public KafkaBinderJaasInitializerListener() throws IOException {
// we ignore the system property if it wasn't originally set at launch
this.ignoreJavaLoginConfigParamSystemProperty =
(System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) == null);
this.placeholderJaasConfiguration = File.createTempFile("kafka-client-jaas-config-placeholder", "conf");
this.placeholderJaasConfiguration.deleteOnExit();
}

@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}

@Override
public void destroy() throws Exception {
if (this.ignoreJavaLoginConfigParamSystemProperty) {
System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
}
}

@Override
public void onApplicationEvent(ContextRefreshedEvent event) {
if (event.getSource() == this.applicationContext) {
KafkaBinderConfigurationProperties binderConfigurationProperties =
applicationContext.getBean(KafkaBinderConfigurationProperties.class);
// only use programmatic support if a file is not set via system property
if (ignoreJavaLoginConfigParamSystemProperty
&& binderConfigurationProperties.getJaas() != null) {
Map<String, AppConfigurationEntry[]> configurationEntries = new HashMap<>();
AppConfigurationEntry kafkaClientConfigurationEntry = new AppConfigurationEntry
(binderConfigurationProperties.getJaas().getLoginModule(),
binderConfigurationProperties.getJaas().getControlFlagValue(),
binderConfigurationProperties.getJaas().getOptions() != null ?
binderConfigurationProperties.getJaas().getOptions() :
Collections.<String, Object>emptyMap());
configurationEntries.put(JaasUtils.LOGIN_CONTEXT_CLIENT,
new AppConfigurationEntry[]{ kafkaClientConfigurationEntry });
Configuration.setConfiguration(new InternalConfiguration(configurationEntries));
// Workaround for a 0.9 client issue where even if the Configuration is set
// a system property check is performed.
// Since the Configuration already exists, this will be ignored.
if (this.placeholderJaasConfiguration != null) {
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, this.placeholderJaasConfiguration.getAbsolutePath());
}
}
}
}

/**
* A {@link Configuration} set up programmatically by the Kafka binder
*/
public static class InternalConfiguration extends Configuration {

private final Map<String, AppConfigurationEntry[]> configurationEntries;

public InternalConfiguration(Map<String, AppConfigurationEntry[]> configurationEntries) {
Assert.notNull(configurationEntries, " cannot be null");
Assert.notEmpty(configurationEntries, " cannot be empty");
this.configurationEntries = configurationEntries;
}

@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
return configurationEntries.get(name);
}
}
}
@@ -1,11 +1,11 @@
/*
* Copyright 2017 the original author or authors.
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -13,25 +13,25 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka;

import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.MeterBinder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.springframework.boot.actuate.endpoint.PublicMetrics;
import org.springframework.boot.actuate.metrics.Metric;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
@@ -42,9 +42,9 @@ import org.springframework.util.ObjectUtils;
*
* @author Henryk Konsek
*/
public class KafkaBinderMetrics implements PublicMetrics {
public class KafkaBinderMetrics implements MeterBinder {

private final static Logger LOG = LoggerFactory.getLogger(KafkaBinderMetrics.class);
private final static Log LOG = LogFactory.getLog(KafkaBinderMetrics.class);

static final String METRIC_PREFIX = "spring.cloud.stream.binder.kafka";

@@ -68,8 +68,7 @@ public class KafkaBinderMetrics implements PublicMetrics {
}

@Override
public Collection<Metric<?>> metrics() {
List<Metric<?>> metrics = new LinkedList<>();
public void bindTo(MeterRegistry registry) {
for (Map.Entry<String, KafkaMessageChannelBinder.TopicInformation> topicInfo : this.binder.getTopicsInUse()
.entrySet()) {
if (!topicInfo.getValue().isConsumerTopic()) {
@@ -96,13 +95,12 @@ public class KafkaBinderMetrics implements PublicMetrics {
lag += endOffset.getValue();
}
}
metrics.add(new Metric<>(String.format("%s.%s.%s.lag", METRIC_PREFIX, group, topic), lag));
registry.gauge(String.format("%s.%s.%s.lag", METRIC_PREFIX, group, topic), lag);
}
catch (Exception e) {
LOG.debug("Cannot generate metric for topic: " + topic, e);
}
}
return metrics;
}

private ConsumerFactory<?, ?> createConsumerFactory(String group) {
@@ -123,4 +121,4 @@ public class KafkaBinderMetrics implements PublicMetrics {
return new DefaultKafkaConsumerFactory<>(props);
}

}
}
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2014-2017 the original author or authors.
|
||||
* Copyright 2014-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -16,31 +16,47 @@
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.io.PrintWriter;
|
||||
import java.io.StringWriter;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.producer.Producer;
|
||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
import org.apache.kafka.common.header.internals.RecordHeader;
|
||||
import org.apache.kafka.common.header.internals.RecordHeaders;
|
||||
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
|
||||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
|
||||
import org.springframework.beans.factory.DisposableBean;
|
||||
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
|
||||
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.BinderHeaders;
|
||||
import org.springframework.cloud.stream.binder.DefaultPollableMessageSource;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.HeaderMode;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties.StandardHeaders;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
@@ -51,23 +67,34 @@ import org.springframework.expression.common.LiteralExpression;
|
||||
import org.springframework.expression.spel.standard.SpelExpressionParser;
|
||||
import org.springframework.integration.core.MessageProducer;
|
||||
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
|
||||
import org.springframework.integration.kafka.inbound.KafkaMessageSource;
|
||||
import org.springframework.integration.kafka.outbound.KafkaProducerMessageHandler;
|
||||
import org.springframework.integration.kafka.support.RawRecordHeaderErrorMessageStrategy;
|
||||
import org.springframework.integration.support.AcknowledgmentCallback;
|
||||
import org.springframework.integration.support.AcknowledgmentCallback.Status;
|
||||
import org.springframework.integration.support.ErrorMessageStrategy;
|
||||
import org.springframework.integration.support.StaticMessageHeaderAccessor;
|
||||
import org.springframework.kafka.core.ConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.ProducerFactory;
|
||||
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
|
||||
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
|
||||
import org.springframework.kafka.listener.config.ContainerProperties;
|
||||
import org.springframework.kafka.support.DefaultKafkaHeaderMapper;
|
||||
import org.springframework.kafka.support.KafkaHeaderMapper;
|
||||
import org.springframework.kafka.support.KafkaHeaders;
|
||||
import org.springframework.kafka.support.ProducerListener;
|
||||
import org.springframework.kafka.support.SendResult;
|
||||
import org.springframework.kafka.support.TopicPartitionInitialOffset;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.kafka.support.converter.MessagingMessageConverter;
|
||||
import org.springframework.kafka.transaction.KafkaTransactionManager;
|
||||
import org.springframework.messaging.MessageChannel;
|
||||
import org.springframework.messaging.MessageHandler;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.MessagingException;
|
||||
import org.springframework.messaging.support.ErrorMessage;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
@@ -76,7 +103,7 @@ import org.springframework.util.concurrent.ListenableFuture;
|
||||
import org.springframework.util.concurrent.ListenableFutureCallback;
|
||||
|
||||
/**
|
||||
* A {@link Binder} that uses Kafka as the underlying middleware.
|
||||
* A {@link org.springframework.cloud.stream.binder.Binder} that uses Kafka as the underlying middleware.
|
||||
*
|
||||
* @author Eric Bottard
|
||||
* @author Marius Bogoevici
|
||||
@@ -89,13 +116,21 @@ import org.springframework.util.concurrent.ListenableFutureCallback;
|
||||
* @author Doug Saus
|
||||
*/
|
||||
public class KafkaMessageChannelBinder extends
|
||||
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
|
||||
ExtendedProducerProperties<KafkaProducerProperties>, KafkaTopicProvisioner>
|
||||
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>, KafkaTopicProvisioner>
|
||||
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties> {
|
||||
|
||||
public static final String X_EXCEPTION_STACKTRACE = "x-exception-stacktrace";
|
||||
|
||||
public static final String X_EXCEPTION_MESSAGE = "x-exception-message";
|
||||
|
||||
public static final String X_ORIGINAL_TOPIC = "x-original-topic";
|
||||
|
||||
|
||||
private final KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
private final Map<String, TopicInformation> topicsInUse = new HashMap<>();
|
||||
private final Map<String, TopicInformation> topicsInUse = new ConcurrentHashMap<>();
|
||||
|
||||
private final KafkaTransactionManager<byte[], byte[]> transactionManager;
|
||||
|
||||
private ProducerListener<byte[], byte[]> producerListener;
|
||||
|
||||
@@ -103,8 +138,16 @@ public class KafkaMessageChannelBinder extends
|
||||
|
||||
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
|
||||
KafkaTopicProvisioner provisioningProvider) {
|
||||
super(false, headersToMap(configurationProperties), provisioningProvider);
|
||||
super(headersToMap(configurationProperties), provisioningProvider);
|
||||
this.configurationProperties = configurationProperties;
|
||||
if (StringUtils.hasText(configurationProperties.getTransaction().getTransactionIdPrefix())) {
|
||||
this.transactionManager = new KafkaTransactionManager<>(
|
||||
getProducerFactory(configurationProperties.getTransaction().getTransactionIdPrefix(),
|
||||
new ExtendedProducerProperties<>(configurationProperties.getTransaction().getProducer())));
|
||||
}
|
||||
else {
|
||||
this.transactionManager = null;
|
||||
}
|
||||
}
|
||||
|
||||
private static String[] headersToMap(KafkaBinderConfigurationProperties configurationProperties) {
|
||||
@@ -147,16 +190,25 @@ public class KafkaMessageChannelBinder extends
|
||||
|
||||
@Override
|
||||
protected MessageHandler createProducerMessageHandler(final ProducerDestination destination,
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
|
||||
final DefaultKafkaProducerFactory<byte[], byte[]> producerFB = getProducerFactory(producerProperties);
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties, MessageChannel errorChannel)
|
||||
throws Exception {
|
||||
/*
|
||||
* IMPORTANT: With a transactional binder, individual producer properties for Kafka are
|
||||
* ignored; the global binder (spring.cloud.stream.kafka.binder.transaction.producer.*)
|
||||
* properties are used instead, for all producers. A binder is transactional when
|
||||
* 'spring.cloud.stream.kafka.binder.transaction.transaction-id-prefix' has text.
|
||||
*/
|
||||
final ProducerFactory<byte[], byte[]> producerFB = this.transactionManager != null
|
||||
? this.transactionManager.getProducerFactory()
|
||||
: getProducerFactory(null, producerProperties);
|
||||
Collection<PartitionInfo> partitions = provisioningProvider.getPartitionsForTopic(
|
||||
producerProperties.getPartitionCount(),
|
||||
false,
|
||||
new Callable<Collection<PartitionInfo>>() {
|
||||
@Override
|
||||
public Collection<PartitionInfo> call() throws Exception {
|
||||
return producerFB.createProducer().partitionsFor(destination.getName());
|
||||
}
|
||||
producerProperties.getPartitionCount(), false,
|
||||
() -> {
|
||||
Producer<byte[], byte[]> producer = producerFB.createProducer();
|
||||
List<PartitionInfo> partitionsFor = producer.partitionsFor(destination.getName());
|
||||
producer.close();
|
||||
((DisposableBean) producerFB).destroy();
|
||||
return partitionsFor;
|
||||
});
|
||||
this.topicsInUse.put(destination.getName(), new TopicInformation(null, partitions));
|
||||
if (producerProperties.getPartitionCount() < partitions.size()) {
|
||||
@@ -165,17 +217,60 @@ public class KafkaMessageChannelBinder extends
|
||||
+ producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
|
||||
+ partitions.size() + " of the topic. The larger number will be used instead.");
|
||||
}
|
||||
/*
|
||||
* This is dirty; it relies on the fact that we, and the partition interceptor, share a
|
||||
* hard reference to the producer properties instance. But I don't see another way to fix
|
||||
* it since the interceptor has already been added to the channel, and we don't have
|
||||
* access to the channel here; if we did, we could inject the proper partition count
|
||||
* there. TODO: Consider this when doing the 2.0 binder restructuring.
|
||||
*/
|
||||
producerProperties.setPartitionCount(partitions.size());
|
||||
}
|
||||
|
||||
KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFB);
|
||||
if (this.producerListener != null) {
|
||||
kafkaTemplate.setProducerListener(this.producerListener);
|
||||
}
|
||||
return new ProducerConfigurationMessageHandler(kafkaTemplate, destination.getName(), producerProperties,
|
||||
producerFB);
|
||||
ProducerConfigurationMessageHandler handler = new ProducerConfigurationMessageHandler(kafkaTemplate,
|
||||
destination.getName(), producerProperties, producerFB);
|
||||
if (errorChannel != null) {
|
||||
handler.setSendFailureChannel(errorChannel);
|
||||
}
|
||||
KafkaHeaderMapper mapper = null;
|
||||
if (this.configurationProperties.getHeaderMapperBeanName() != null) {
|
||||
mapper = getApplicationContext().getBean(this.configurationProperties.getHeaderMapperBeanName(),
|
||||
KafkaHeaderMapper.class);
|
||||
}
|
||||
/*
|
||||
* Even if the user configures a bean, we must not use it if the header
|
||||
* mode is not the default (headers); setting the mapper to null
|
||||
* disables populating headers in the message handler.
|
||||
*/
|
||||
if (producerProperties.getHeaderMode() != null
|
||||
&& !HeaderMode.headers.equals(producerProperties.getHeaderMode())) {
|
||||
mapper = null;
|
||||
}
|
||||
else if (mapper == null) {
|
||||
String[] headerPatterns = producerProperties.getExtension().getHeaderPatterns();
|
||||
if (headerPatterns != null && headerPatterns.length > 0) {
|
||||
List<String> patterns = new LinkedList<>(Arrays.asList(headerPatterns));
|
||||
if (!patterns.contains("!" + MessageHeaders.TIMESTAMP)) {
|
||||
patterns.add(0, "!" + MessageHeaders.TIMESTAMP);
|
||||
}
|
||||
if (!patterns.contains("!" + MessageHeaders.ID)) {
|
||||
patterns.add(0, "!" + MessageHeaders.ID);
|
||||
}
|
||||
mapper = new DefaultKafkaHeaderMapper(patterns.toArray(new String[patterns.size()]));
|
||||
}
|
||||
else {
|
||||
mapper = new DefaultKafkaHeaderMapper();
|
||||
}
|
||||
}
|
||||
handler.setHeaderMapper(mapper);
|
||||
return handler;
|
||||
}
|
||||
|
||||
private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
|
||||
protected DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(String transactionIdPrefix,
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
props.put(ProducerConfig.RETRIES_CONFIG, 0);
|
||||
@@ -204,7 +299,11 @@ public class KafkaMessageChannelBinder extends
|
||||
if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
|
||||
props.putAll(producerProperties.getExtension().getConfiguration());
|
||||
}
|
||||
return new DefaultKafkaProducerFactory<>(props);
|
||||
DefaultKafkaProducerFactory<byte[], byte[]> producerFactory = new DefaultKafkaProducerFactory<>(props);
|
||||
if (transactionIdPrefix != null) {
|
||||
producerFactory.setTransactionIdPrefix(transactionIdPrefix);
|
||||
}
|
||||
return producerFactory;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -221,14 +320,8 @@ public class KafkaMessageChannelBinder extends
|
||||
int partitionCount = extendedConsumerProperties.getInstanceCount()
|
||||
* extendedConsumerProperties.getConcurrency();
|
||||
|
||||
Collection<PartitionInfo> allPartitions = provisioningProvider.getPartitionsForTopic(partitionCount,
|
||||
extendedConsumerProperties.getExtension().isAutoRebalanceEnabled(),
|
||||
new Callable<Collection<PartitionInfo>>() {
|
||||
@Override
|
||||
public Collection<PartitionInfo> call() throws Exception {
|
||||
return consumerFactory.createConsumer().partitionsFor(destination.getName());
|
||||
}
|
||||
});
|
||||
Collection<PartitionInfo> allPartitions = getPartitionInfo(destination, extendedConsumerProperties,
|
||||
consumerFactory, partitionCount);
|
||||
|
||||
Collection<PartitionInfo> listenedPartitions;
|
||||
|
||||
@@ -256,11 +349,14 @@ public class KafkaMessageChannelBinder extends
|
||||
|| extendedConsumerProperties.getExtension().isAutoRebalanceEnabled()
|
||||
? new ContainerProperties(destination.getName())
|
||||
: new ContainerProperties(topicPartitionInitialOffsets);
|
||||
if (this.transactionManager != null) {
|
||||
containerProperties.setTransactionManager(this.transactionManager);
|
||||
}
|
||||
containerProperties.setIdleEventInterval(extendedConsumerProperties.getExtension().getIdleEventInterval());
|
||||
int concurrency = Math.min(extendedConsumerProperties.getConcurrency(), listenedPartitions.size());
|
||||
@SuppressWarnings("rawtypes")
|
||||
final ConcurrentMessageListenerContainer<?, ?> messageListenerContainer =
|
||||
new ConcurrentMessageListenerContainer(
|
||||
consumerFactory, containerProperties) {
|
||||
new ConcurrentMessageListenerContainer(consumerFactory, containerProperties) {
|
||||
|
||||
@Override
|
||||
public void stop(Runnable callback) {
|
||||
@@ -269,6 +365,9 @@ public class KafkaMessageChannelBinder extends
|
||||
|
||||
};
|
||||
messageListenerContainer.setConcurrency(concurrency);
|
||||
// these won't be needed if the container is made a bean
|
||||
messageListenerContainer.setApplicationEventPublisher(getApplicationContext());
|
||||
messageListenerContainer.setBeanName(destination.getName() + ".container");
|
||||
if (!extendedConsumerProperties.getExtension().isAutoCommitOffset()) {
|
||||
messageListenerContainer.getContainerProperties()
|
||||
.setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
|
||||
@@ -282,8 +381,9 @@ public class KafkaMessageChannelBinder extends
|
||||
this.logger.debug(
|
||||
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
|
||||
}
|
||||
final KafkaMessageDrivenChannelAdapter<?, ?> kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter<>(
|
||||
messageListenerContainer);
|
||||
final KafkaMessageDrivenChannelAdapter<?, ?> kafkaMessageDrivenChannelAdapter =
|
||||
new KafkaMessageDrivenChannelAdapter<>(messageListenerContainer);
|
||||
kafkaMessageDrivenChannelAdapter.setMessageConverter(getMessageConverter(extendedConsumerProperties));
|
||||
kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
|
||||
ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, consumerGroup,
|
||||
extendedConsumerProperties);
|
||||
@@ -297,6 +397,119 @@ public class KafkaMessageChannelBinder extends
|
||||
return kafkaMessageDrivenChannelAdapter;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected PolledConsumerResources createPolledConsumerResources(String name, String group,
|
||||
ConsumerDestination destination, ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
|
||||
boolean anonymous = !StringUtils.hasText(group);
|
||||
Assert.isTrue(!anonymous || !consumerProperties.getExtension().isEnableDlq(),
|
||||
"DLQ support is not available for anonymous subscriptions");
|
||||
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
|
||||
final ConsumerFactory<?, ?> consumerFactory = createKafkaConsumerFactory(anonymous, consumerGroup,
|
||||
consumerProperties);
|
||||
KafkaMessageSource<?, ?> source = new KafkaMessageSource<>(consumerFactory, destination.getName());
|
||||
source.setMessageConverter(getMessageConverter(consumerProperties));
|
||||
source.setRawMessageHeader(consumerProperties.getExtension().isEnableDlq());
|
||||
|
||||
// I copied this from the regular consumer - it looks bogus to me - includes all partitions
|
||||
// not just the ones this binding is listening to; doesn't seem right for a health check.
|
||||
Collection<PartitionInfo> partitionInfos = getPartitionInfo(destination, consumerProperties, consumerFactory,
|
||||
-1);
|
||||
this.topicsInUse.put(destination.getName(), new TopicInformation(group, partitionInfos));
|
||||
|
||||
source.setRebalanceListener(new ConsumerRebalanceListener() {
|
||||
|
||||
@Override
|
||||
public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
|
||||
KafkaMessageChannelBinder.this.logger.info("Revoked: " + partitions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
|
||||
KafkaMessageChannelBinder.this.logger.info("Assigned: " + partitions);
|
||||
}
|
||||
|
||||
});
|
||||
return new PolledConsumerResources(source,
|
||||
registerErrorInfrastructure(destination, group, consumerProperties, true));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void postProcessPollableSource(DefaultPollableMessageSource bindingTarget) {
|
||||
bindingTarget.setAttributesProvider((accessor, message) -> {
|
||||
Object rawMessage = message.getHeaders().get(KafkaHeaders.RAW_DATA);
|
||||
if (rawMessage != null) {
|
||||
accessor.setAttribute(KafkaHeaders.RAW_DATA, rawMessage);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private MessagingMessageConverter getMessageConverter(
|
||||
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties) {
|
||||
MessagingMessageConverter messageConverter;
|
||||
if (extendedConsumerProperties.getExtension().getConverterBeanName() == null) {
|
||||
messageConverter = new MessagingMessageConverter();
|
||||
StandardHeaders standardHeaders = extendedConsumerProperties.getExtension().getStandardHeaders();
|
||||
messageConverter.setGenerateMessageId(StandardHeaders.id.equals(standardHeaders)
|
||||
|| StandardHeaders.both.equals(standardHeaders));
|
||||
messageConverter.setGenerateTimestamp(StandardHeaders.timestamp.equals(standardHeaders)
|
||||
|| StandardHeaders.both.equals(standardHeaders));
|
||||
}
|
||||
else {
|
||||
try {
|
||||
messageConverter = getApplicationContext().getBean(
|
||||
extendedConsumerProperties.getExtension().getConverterBeanName(),
|
||||
MessagingMessageConverter.class);
|
||||
}
|
||||
catch (NoSuchBeanDefinitionException e) {
|
||||
throw new IllegalStateException("Converter bean not present in application context", e);
|
||||
}
|
||||
}
|
||||
messageConverter.setHeaderMapper(getHeaderMapper(extendedConsumerProperties));
|
||||
return messageConverter;
|
||||
}
|
||||
|
||||
private KafkaHeaderMapper getHeaderMapper(
|
||||
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties) {
|
||||
KafkaHeaderMapper mapper = null;
|
||||
if (this.configurationProperties.getHeaderMapperBeanName() != null) {
|
||||
mapper = getApplicationContext().getBean(this.configurationProperties.getHeaderMapperBeanName(),
|
||||
KafkaHeaderMapper.class);
|
||||
}
|
||||
if (mapper == null) {
|
||||
DefaultKafkaHeaderMapper headerMapper = new DefaultKafkaHeaderMapper() {
|
||||
|
||||
@Override
|
||||
public void toHeaders(Headers source, Map<String, Object> headers) {
|
||||
super.toHeaders(source, headers);
|
||||
if (headers.size() > 0) {
|
||||
headers.put(BinderHeaders.NATIVE_HEADERS_PRESENT, Boolean.TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
String[] trustedPackages = extendedConsumerProperties.getExtension().getTrustedPackages();
|
||||
if (!StringUtils.isEmpty(trustedPackages)) {
|
||||
headerMapper.addTrustedPackages(trustedPackages);
|
||||
}
|
||||
mapper = headerMapper;
|
||||
}
|
||||
return mapper;
|
||||
}
|
||||
|
||||
private Collection<PartitionInfo> getPartitionInfo(final ConsumerDestination destination,
|
||||
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties,
|
||||
final ConsumerFactory<?, ?> consumerFactory, int partitionCount) {
|
||||
Collection<PartitionInfo> allPartitions = provisioningProvider.getPartitionsForTopic(partitionCount,
|
||||
extendedConsumerProperties.getExtension().isAutoRebalanceEnabled(),
|
||||
() -> {
|
||||
Consumer<?, ?> consumer = consumerFactory.createConsumer();
|
||||
List<PartitionInfo> partitionsFor = consumer.partitionsFor(destination.getName());
|
||||
consumer.close();
|
||||
return partitionsFor;
|
||||
});
|
||||
return allPartitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ErrorMessageStrategy getErrorMessageStrategy() {
|
||||
return new RawRecordHeaderErrorMessageStrategy();
|
||||
@@ -304,55 +517,106 @@ public class KafkaMessageChannelBinder extends
|
||||
|
||||
@Override
|
||||
protected MessageHandler getErrorMessageHandler(final ConsumerDestination destination, final String group,
|
||||
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties) {
|
||||
if (extendedConsumerProperties.getExtension().isEnableDlq()) {
|
||||
DefaultKafkaProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(
|
||||
new ExtendedProducerProperties<>(new KafkaProducerProperties()));
|
||||
final KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFactory);
|
||||
return new MessageHandler() {
|
||||
final ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
|
||||
KafkaConsumerProperties kafkaConsumerProperties = properties.getExtension();
|
||||
if (kafkaConsumerProperties.isEnableDlq()) {
|
||||
KafkaProducerProperties dlqProducerProperties = kafkaConsumerProperties.getDlqProducerProperties();
|
||||
ProducerFactory<?,?> producerFactory = this.transactionManager != null
|
||||
? this.transactionManager.getProducerFactory()
|
||||
: getProducerFactory(null,
|
||||
new ExtendedProducerProperties<>(dlqProducerProperties));
|
||||
final KafkaTemplate<?,?> kafkaTemplate = new KafkaTemplate<>(producerFactory);
|
||||
String dlqName = StringUtils.hasText(kafkaConsumerProperties.getDlqName())
|
||||
? kafkaConsumerProperties.getDlqName()
|
||||
: "error." + destination.getName() + "." + group;
|
||||
|
||||
@Override
|
||||
public void handleMessage(Message<?> message) throws MessagingException {
|
||||
final ConsumerRecord<?, ?> record = message.getHeaders()
|
||||
.get(KafkaMessageDrivenChannelAdapter.KAFKA_RAW_DATA, ConsumerRecord.class);
|
||||
final byte[] key = record.key() != null ? Utils.toArray(ByteBuffer.wrap((byte[]) record.key()))
|
||||
: null;
|
||||
final byte[] payload = record.value() != null
|
||||
? Utils.toArray(ByteBuffer.wrap((byte[]) record.value())) : null;
|
||||
String dlqName = StringUtils.hasText(extendedConsumerProperties.getExtension().getDlqName())
|
||||
? extendedConsumerProperties.getExtension().getDlqName()
|
||||
: "error." + destination.getName() + "." + group;
|
||||
ListenableFuture<SendResult<byte[], byte[]>> sentDlq = kafkaTemplate.send(dlqName,
|
||||
record.partition(), key, payload);
|
||||
sentDlq.addCallback(new ListenableFutureCallback<SendResult<byte[], byte[]>>() {
|
||||
StringBuilder sb = new StringBuilder().append(" a message with key='")
|
||||
.append(toDisplayString(ObjectUtils.nullSafeToString(key), 50)).append("'")
|
||||
.append(" and payload='")
|
||||
.append(toDisplayString(ObjectUtils.nullSafeToString(payload), 50))
|
||||
.append("'").append(" received from ")
|
||||
.append(record.partition());
|
||||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
DlqSender<?,?> dlqSender = new DlqSender(kafkaTemplate, dlqName);
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable ex) {
|
||||
KafkaMessageChannelBinder.this.logger.error(
|
||||
"Error sending to DLQ " + sb.toString(), ex);
|
||||
return message -> {
|
||||
final ConsumerRecord<?, ?> record = message.getHeaders()
|
||||
.get(KafkaHeaders.RAW_DATA, ConsumerRecord.class);
|
||||
|
||||
if (properties.isUseNativeDecoding()) {
|
||||
if (record != null) {
|
||||
Map<String, String> configuration = this.transactionManager == null ? dlqProducerProperties.getConfiguration()
|
||||
: this.configurationProperties.getTransaction().getProducer().getConfiguration();
|
||||
if (record.key() != null && !record.key().getClass().isInstance(byte[].class)) {
|
||||
ensureDlqMessageCanBeProperlySerialized(
|
||||
configuration,
|
||||
(Map<String, String> config) -> !config.containsKey("key.serializer"), "Key");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onSuccess(SendResult<byte[], byte[]> result) {
|
||||
if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
|
||||
KafkaMessageChannelBinder.this.logger.debug(
|
||||
"Sent to DLQ " + sb.toString());
|
||||
}
|
||||
if (record.value() != null && !record.value().getClass().isInstance(byte[].class)) {
|
||||
ensureDlqMessageCanBeProperlySerialized(configuration,
|
||||
(Map<String, String> config) -> !config.containsKey("value.serializer"), "Payload");
|
||||
}
|
||||
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (record == null) {
|
||||
this.logger.error("No raw record; cannot send to DLQ: " + message);
|
||||
return;
|
||||
}
|
||||
Headers kafkaHeaders = new RecordHeaders(record.headers().toArray());
|
||||
kafkaHeaders.add(new RecordHeader(X_ORIGINAL_TOPIC,
|
||||
record.topic().getBytes(StandardCharsets.UTF_8)));
|
||||
if (message.getPayload() instanceof Throwable) {
|
||||
Throwable throwable = (Throwable) message.getPayload();
|
||||
kafkaHeaders.add(new RecordHeader(X_EXCEPTION_MESSAGE,
|
||||
throwable.getMessage().getBytes(StandardCharsets.UTF_8)));
|
||||
kafkaHeaders.add(new RecordHeader(X_EXCEPTION_STACKTRACE,
|
||||
getStackTraceAsString(throwable).getBytes(StandardCharsets.UTF_8)));
|
||||
}
|
||||
dlqSender.sendToDlq(record, kafkaHeaders);
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected MessageHandler getPolledConsumerErrorMessageHandler(ConsumerDestination destination, String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
|
||||
if (properties.getExtension().isEnableDlq()) {
|
||||
return getErrorMessageHandler(destination, group, properties);
|
||||
}
|
||||
final MessageHandler superHandler = super.getErrorMessageHandler(destination, group, properties);
|
||||
return message -> {
|
||||
ConsumerRecord<?, ?> record = (ConsumerRecord<?, ?>) message.getHeaders().get(KafkaHeaders.RAW_DATA);
|
||||
if (!(message instanceof ErrorMessage)) {
|
||||
logger.error("Expected an ErrorMessage, not a " + message.getClass().toString() + " for: "
|
||||
+ message);
|
||||
}
|
||||
else if (record == null) {
|
||||
if (superHandler != null) {
|
||||
superHandler.handleMessage(message);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (message.getPayload() instanceof MessagingException) {
|
||||
AcknowledgmentCallback ack = StaticMessageHeaderAccessor.getAcknowledgmentCallback(
|
||||
((MessagingException) message.getPayload()).getFailedMessage());
|
||||
if (ack != null) {
|
||||
if (isAutoCommitOnError(properties)) {
|
||||
ack.acknowledge(Status.REJECT);
|
||||
}
|
||||
else {
|
||||
ack.acknowledge(Status.REQUEUE);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private static void ensureDlqMessageCanBeProperlySerialized(Map<String, String> configuration,
|
||||
Predicate<Map<String, String>> configPredicate,
|
||||
String dataType) {
|
||||
if (CollectionUtils.isEmpty(configuration) || configPredicate.test(configuration)) {
throw new IllegalArgumentException("Native decoding is used on the consumer. " +
dataType + " is not byte[] and no serializer is set on the DLQ producer.");
}
}

private ConsumerFactory<?, ?> createKafkaConsumerFactory(boolean anonymous, String consumerGroup,
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
Map<String, Object> props = new HashMap<>();
@@ -406,23 +670,30 @@ public class KafkaMessageChannelBinder extends
return original.substring(0, maxCharacters) + "...";
}

private String getStackTraceAsString(Throwable cause) {
StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter, true);
cause.printStackTrace(printWriter);
return stringWriter.getBuffer().toString();
}

private final class ProducerConfigurationMessageHandler extends KafkaProducerMessageHandler<byte[], byte[]>
implements Lifecycle {

private boolean running = true;

private final DefaultKafkaProducerFactory<byte[], byte[]> producerFactory;
private final ProducerFactory<byte[], byte[]> producerFactory;

private ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate, String topic,
ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate, String topic,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
DefaultKafkaProducerFactory<byte[], byte[]> producerFactory) {
ProducerFactory<byte[], byte[]> producerFactory) {
super(kafkaTemplate);
setTopicExpression(new LiteralExpression(topic));
setMessageKeyExpression(producerProperties.getExtension().getMessageKeyExpression());
setBeanFactory(KafkaMessageChannelBinder.this.getBeanFactory());
if (producerProperties.isPartitioned()) {
SpelExpressionParser parser = new SpelExpressionParser();
setPartitionIdExpression(parser.parseExpression("headers." + BinderHeaders.PARTITION_HEADER));
setPartitionIdExpression(parser.parseExpression("headers['" + BinderHeaders.PARTITION_HEADER + "']"));
}
if (producerProperties.getExtension().isSync()) {
setSync(true);
@@ -443,7 +714,9 @@ public class KafkaMessageChannelBinder extends

@Override
public void stop() {
producerFactory.stop();
if (this.producerFactory instanceof Lifecycle) {
((Lifecycle) producerFactory).stop();
}
this.running = false;
}

@@ -451,31 +724,84 @@ public class KafkaMessageChannelBinder extends
public boolean isRunning() {
return this.running;
}

}

public static class TopicInformation {
static class TopicInformation {

private final String consumerGroup;

private final Collection<PartitionInfo> partitionInfos;

public TopicInformation(String consumerGroup, Collection<PartitionInfo> partitionInfos) {
TopicInformation(String consumerGroup, Collection<PartitionInfo> partitionInfos) {
this.consumerGroup = consumerGroup;
this.partitionInfos = partitionInfos;
}

public String getConsumerGroup() {
String getConsumerGroup() {
return consumerGroup;
}

public boolean isConsumerTopic() {
boolean isConsumerTopic() {
return consumerGroup != null;
}

public Collection<PartitionInfo> getPartitionInfos() {
Collection<PartitionInfo> getPartitionInfos() {
return partitionInfos;
}

}

private final class DlqSender<K,V> {

private final KafkaTemplate<K,V> kafkaTemplate;
private final String dlqName;

DlqSender(KafkaTemplate<K, V> kafkaTemplate, String dlqName) {
this.kafkaTemplate = kafkaTemplate;
this.dlqName = dlqName;
}

@SuppressWarnings("unchecked")
void sendToDlq(ConsumerRecord<?, ?> consumerRecord, Headers headers) {
K key = (K)consumerRecord.key();
V value = (V)consumerRecord.value();
ProducerRecord<K,V> producerRecord = new ProducerRecord<>(this.dlqName, consumerRecord.partition(),
key, value, headers);

StringBuilder sb = new StringBuilder().append(" a message with key='")
.append(toDisplayString(ObjectUtils.nullSafeToString(key), 50)).append("'")
.append(" and payload='")
.append(toDisplayString(ObjectUtils.nullSafeToString(value), 50))
.append("'").append(" received from ")
.append(consumerRecord.partition());
ListenableFuture<SendResult<K, V>> sentDlq = null;
try {
sentDlq = this.kafkaTemplate.send(producerRecord);
sentDlq.addCallback(new ListenableFutureCallback<SendResult<K, V>>() {

@Override
public void onFailure(Throwable ex) {
KafkaMessageChannelBinder.this.logger.error(
"Error sending to DLQ " + sb.toString(), ex);
}

@Override
public void onSuccess(SendResult<K, V> result) {
if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
KafkaMessageChannelBinder.this.logger.debug(
"Sent to DLQ " + sb.toString());
}
}
});
}
catch (Exception ex) {
if (sentDlq == null) {
KafkaMessageChannelBinder.this.logger.error(
"Error sending to DLQ " + sb.toString(), ex);
}
}

}
}
}

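For orientation, the DlqSender above simply forwards the failed ConsumerRecord to the configured DLQ topic and logs the outcome through an asynchronous callback. A minimal standalone sketch of that pattern with spring-kafka follows; the broker address and the DLQ topic name are illustrative assumptions, not values taken from this diff.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFutureCallback;

public class DlqForwardingSketch {

    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

        KafkaTemplate<byte[], byte[]> template =
                new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(props));

        // Forward a failed payload to a DLQ topic and log the result asynchronously,
        // mirroring what DlqSender does with the original ConsumerRecord.
        template.send("error.my-topic.my-group", "failed-payload".getBytes()) // assumed DLQ topic name
                .addCallback(new ListenableFutureCallback<SendResult<byte[], byte[]>>() {

                    @Override
                    public void onSuccess(SendResult<byte[], byte[]> result) {
                        System.out.println("Sent to DLQ " + result.getRecordMetadata());
                    }

                    @Override
                    public void onFailure(Throwable ex) {
                        System.err.println("Error sending to DLQ " + ex);
                    }
                });
        template.flush();
    }

}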
@@ -20,43 +20,32 @@ import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import io.micrometer.core.instrument.binder.MeterBinder;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
|
||||
import org.apache.kafka.common.utils.AppInfoParser;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.actuate.endpoint.PublicMetrics;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
|
||||
import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.Binder;
|
||||
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
|
||||
import org.springframework.cloud.stream.binder.kafka.KafkaBinderJaasInitializerListener;
|
||||
import org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics;
|
||||
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
|
||||
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
|
||||
import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
|
||||
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleConfiguration;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationListener;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Condition;
|
||||
import org.springframework.context.annotation.ConditionContext;
|
||||
import org.springframework.context.annotation.Conditional;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Import;
|
||||
import org.springframework.core.type.AnnotatedTypeMetadata;
|
||||
import org.springframework.integration.codec.Codec;
|
||||
import org.springframework.kafka.core.ConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
|
||||
import org.springframework.kafka.support.LoggingProducerListener;
|
||||
import org.springframework.kafka.support.ProducerListener;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
@@ -68,21 +57,16 @@ import org.springframework.util.ObjectUtils;
|
||||
* @author Mark Fisher
|
||||
* @author Ilayaperumal Gopinathan
|
||||
* @author Henryk Konsek
|
||||
* @author Gary Russell
|
||||
*/
|
||||
@Configuration
|
||||
@ConditionalOnMissingBean(Binder.class)
|
||||
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class})
|
||||
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
|
||||
@Import({ PropertyPlaceholderAutoConfiguration.class})
|
||||
@EnableConfigurationProperties({ KafkaExtendedBindingProperties.class })
|
||||
public class KafkaBinderConfiguration {
|
||||
|
||||
protected static final Log logger = LogFactory.getLog(KafkaBinderConfiguration.class);
|
||||
|
||||
@Autowired
|
||||
private Codec codec;
|
||||
|
||||
@Autowired
|
||||
private KafkaBinderConfigurationProperties configurationProperties;
|
||||
|
||||
@Autowired
|
||||
private KafkaExtendedBindingProperties kafkaExtendedBindingProperties;
|
||||
|
||||
@@ -92,19 +76,24 @@ public class KafkaBinderConfiguration {
|
||||
@Autowired
|
||||
private ApplicationContext context;
|
||||
|
||||
@Autowired(required = false)
|
||||
private AdminUtilsOperation adminUtilsOperation;
|
||||
@Autowired
|
||||
private KafkaProperties kafkaProperties;
|
||||
|
||||
@Bean
|
||||
KafkaTopicProvisioner provisioningProvider() {
|
||||
return new KafkaTopicProvisioner(this.configurationProperties, this.adminUtilsOperation);
|
||||
KafkaBinderConfigurationProperties configurationProperties() {
|
||||
return new KafkaBinderConfigurationProperties();
|
||||
}
|
||||
|
||||
@Bean
|
||||
KafkaMessageChannelBinder kafkaMessageChannelBinder() {
|
||||
KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties configurationProperties) {
|
||||
return new KafkaTopicProvisioner(configurationProperties, this.kafkaProperties);
|
||||
}
|
||||
|
||||
@Bean
|
||||
KafkaMessageChannelBinder kafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
|
||||
KafkaTopicProvisioner provisioningProvider) {
|
||||
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
|
||||
this.configurationProperties, provisioningProvider());
|
||||
kafkaMessageChannelBinder.setCodec(this.codec);
|
||||
configurationProperties, provisioningProvider);
|
||||
kafkaMessageChannelBinder.setProducerListener(producerListener);
|
||||
kafkaMessageChannelBinder.setExtendedBindingProperties(this.kafkaExtendedBindingProperties);
|
||||
return kafkaMessageChannelBinder;
|
||||
@@ -117,7 +106,8 @@ public class KafkaBinderConfiguration {
|
||||
}
|
||||
|
||||
@Bean
|
||||
KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
|
||||
KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder,
|
||||
KafkaBinderConfigurationProperties configurationProperties) {
|
||||
Map<String, Object> props = new HashMap<>();
|
||||
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
|
||||
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
|
||||
@@ -125,52 +115,24 @@ public class KafkaBinderConfiguration {
|
||||
props.putAll(configurationProperties.getConsumerConfiguration());
|
||||
}
|
||||
if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
|
||||
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
|
||||
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
|
||||
}
|
||||
ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
|
||||
return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, consumerFactory);
|
||||
KafkaBinderHealthIndicator indicator = new KafkaBinderHealthIndicator(kafkaMessageChannelBinder,
|
||||
consumerFactory);
|
||||
indicator.setTimeout(configurationProperties.getHealthTimeout());
|
||||
return indicator;
|
||||
}
|
||||
|
||||
@Bean
|
||||
public PublicMetrics kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
|
||||
public MeterBinder kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder,
|
||||
KafkaBinderConfigurationProperties configurationProperties) {
|
||||
return new KafkaBinderMetrics(kafkaMessageChannelBinder, configurationProperties);
|
||||
}
|
||||
|
||||
@Bean(name = "adminUtilsOperation")
|
||||
@Conditional(Kafka09Present.class)
|
||||
@ConditionalOnClass(name = "kafka.admin.AdminUtils")
|
||||
public AdminUtilsOperation kafka09AdminUtilsOperation() {
|
||||
logger.info("AdminUtils selected: Kafka 0.9 AdminUtils");
|
||||
return new Kafka09AdminUtilsOperation();
|
||||
}
|
||||
|
||||
@Bean(name = "adminUtilsOperation")
|
||||
@Conditional(Kafka10Present.class)
|
||||
@ConditionalOnClass(name = "kafka.admin.AdminUtils")
|
||||
public AdminUtilsOperation kafka10AdminUtilsOperation() {
|
||||
logger.info("AdminUtils selected: Kafka 0.10 AdminUtils");
|
||||
return new Kafka10AdminUtilsOperation();
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ApplicationListener<?> jaasInitializer() throws IOException {
|
||||
return new KafkaBinderJaasInitializerListener();
|
||||
}
|
||||
|
||||
static class Kafka10Present implements Condition {
|
||||
|
||||
@Override
|
||||
public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
|
||||
return AppInfoParser.getVersion().startsWith("0.10");
|
||||
}
|
||||
}
|
||||
|
||||
static class Kafka09Present implements Condition {
|
||||
|
||||
@Override
|
||||
public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
|
||||
return AppInfoParser.getVersion().startsWith("0.9");
|
||||
}
|
||||
public KafkaJaasLoginModuleInitializer jaasInitializer() throws IOException {
|
||||
return new KafkaJaasLoginModuleInitializer();
|
||||
}
|
||||
|
||||
public static class JaasConfigurationProperties {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2014-2017 the original author or authors.
|
||||
* Copyright 2014-2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -13,61 +13,37 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractTestBinder;
|
||||
import org.springframework.cloud.stream.binder.AbstractPollableConsumerTestBinder;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.context.support.GenericApplicationContext;
|
||||
import org.springframework.integration.channel.PublishSubscribeChannel;
|
||||
import org.springframework.integration.codec.Codec;
|
||||
import org.springframework.integration.codec.kryo.KryoRegistrar;
|
||||
import org.springframework.integration.codec.kryo.PojoCodec;
|
||||
import org.springframework.integration.context.IntegrationContextUtils;
|
||||
import org.springframework.integration.tuple.TupleKryoRegistrar;
|
||||
|
||||
import com.esotericsoftware.kryo.Kryo;
|
||||
import com.esotericsoftware.kryo.Registration;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public abstract class AbstractKafkaTestBinder extends
|
||||
AbstractTestBinder<KafkaMessageChannelBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
|
||||
AbstractPollableConsumerTestBinder<KafkaMessageChannelBinder,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
|
||||
|
||||
private ApplicationContext applicationContext;
|
||||
|
||||
@Override
|
||||
public void cleanup() {
|
||||
// do nothing - the rule will take care of that
|
||||
}
|
||||
|
||||
protected void addErrorChannel(GenericApplicationContext context) {
|
||||
PublishSubscribeChannel errorChannel = new PublishSubscribeChannel();
|
||||
context.getBeanFactory().initializeBean(errorChannel, IntegrationContextUtils.ERROR_CHANNEL_BEAN_NAME);
|
||||
context.getBeanFactory().registerSingleton(IntegrationContextUtils.ERROR_CHANNEL_BEAN_NAME, errorChannel);
|
||||
protected final void setApplicationContext(ApplicationContext context) {
|
||||
this.applicationContext = context;
|
||||
}
|
||||
|
||||
protected static Codec getCodec() {
|
||||
return new PojoCodec(new TupleRegistrar());
|
||||
}
|
||||
|
||||
private static class TupleRegistrar implements KryoRegistrar {
|
||||
private final TupleKryoRegistrar delegate = new TupleKryoRegistrar();
|
||||
|
||||
@Override
|
||||
public void registerTypes(Kryo kryo) {
|
||||
this.delegate.registerTypes(kryo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Registration> getRegistrations() {
|
||||
return this.delegate.getRegistrations();
|
||||
}
|
||||
public ApplicationContext getApplicationContext() {
|
||||
return this.applicationContext;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
@@ -66,10 +67,10 @@ public class KafkaBinderAutoConfigurationPropertiesTest {
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(
|
||||
new KafkaProducerProperties());
|
||||
Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory",
|
||||
ExtendedProducerProperties.class);
|
||||
String.class, ExtendedProducerProperties.class);
|
||||
getProducerFactoryMethod.setAccessible(true);
|
||||
DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod
|
||||
.invoke(this.kafkaMessageChannelBinder, producerProperties);
|
||||
.invoke(this.kafkaMessageChannelBinder, "foo", producerProperties);
|
||||
Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs",
|
||||
Map.class);
|
||||
ReflectionUtils.makeAccessible(producerFactoryConfigField);
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
@@ -27,6 +28,7 @@ import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
@@ -46,7 +48,8 @@ import static org.junit.Assert.assertTrue;
|
||||
* @author Ilayaperumal Gopinathan
|
||||
*/
|
||||
@RunWith(SpringJUnit4ClassRunner.class)
|
||||
@SpringBootTest(classes = { KafkaBinderConfiguration.class, KafkaBinderConfigurationPropertiesTest.class })
|
||||
@SpringBootTest(classes = { KafkaBinderConfiguration.class, KafkaAutoConfiguration.class,
|
||||
KafkaBinderConfigurationPropertiesTest.class })
|
||||
@TestPropertySource(locations = "classpath:binder-config.properties")
|
||||
public class KafkaBinderConfigurationPropertiesTest {
|
||||
|
||||
@@ -63,10 +66,10 @@ public class KafkaBinderConfigurationPropertiesTest {
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(
|
||||
kafkaProducerProperties);
|
||||
Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory",
|
||||
ExtendedProducerProperties.class);
|
||||
String.class, ExtendedProducerProperties.class);
|
||||
getProducerFactoryMethod.setAccessible(true);
|
||||
DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod
|
||||
.invoke(this.kafkaMessageChannelBinder, producerProperties);
|
||||
.invoke(this.kafkaMessageChannelBinder, "bar", producerProperties);
|
||||
Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs",
|
||||
Map.class);
|
||||
ReflectionUtils.makeAccessible(producerFactoryConfigField);
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
@@ -21,6 +22,7 @@ import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
|
||||
import org.springframework.kafka.support.ProducerListener;
|
||||
@@ -33,7 +35,9 @@ import static org.junit.Assert.assertNotNull;
|
||||
* @author Ilayaperumal Gopinathan
|
||||
*/
|
||||
@RunWith(SpringJUnit4ClassRunner.class)
|
||||
@SpringBootTest(classes = { KafkaBinderConfiguration.class, KafkaBinderConfigurationTest.class })
|
||||
@SpringBootTest(classes = { KafkaBinderConfiguration.class,
|
||||
KafkaAutoConfiguration.class,
|
||||
KafkaBinderConfigurationTest.class })
|
||||
public class KafkaBinderConfigurationTest {
|
||||
|
||||
@Autowired
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.ArrayList;
|
||||
@@ -21,22 +22,27 @@ import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import org.springframework.boot.actuate.health.Health;
|
||||
import org.springframework.boot.actuate.health.Status;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.BDDMockito.given;
|
||||
|
||||
/**
|
||||
* @author Barry Commins
|
||||
* @author Gary Russell
|
||||
* @author Laur Aliste
|
||||
*/
|
||||
public class KafkaBinderHealthIndicatorTest {
|
||||
|
||||
@@ -53,21 +59,22 @@ public class KafkaBinderHealthIndicatorTest {
|
||||
@Mock
|
||||
private KafkaMessageChannelBinder binder;
|
||||
|
||||
private Map<String, KafkaMessageChannelBinder.TopicInformation> topicsInUse = new HashMap<>();
|
||||
private final Map<String, KafkaMessageChannelBinder.TopicInformation> topicsInUse = new HashMap<>();
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
MockitoAnnotations.initMocks(this);
|
||||
given(consumerFactory.createConsumer()).willReturn(consumer);
|
||||
given(binder.getTopicsInUse()).willReturn(topicsInUse);
|
||||
indicator = new KafkaBinderHealthIndicator(binder, consumerFactory);
|
||||
org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willReturn(consumer);
|
||||
org.mockito.BDDMockito.given(binder.getTopicsInUse()).willReturn(topicsInUse);
|
||||
this.indicator = new KafkaBinderHealthIndicator(binder, consumerFactory);
|
||||
this.indicator.setTimeout(10);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void kafkaBinderIsUp() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.UP);
|
||||
}
|
||||
@@ -76,11 +83,57 @@ public class KafkaBinderHealthIndicatorTest {
|
||||
public void kafkaBinderIsDown() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.DOWN);
|
||||
}
|
||||
|
||||
@Test(timeout = 5000)
|
||||
public void kafkaBinderDoesNotAnswer() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willAnswer(new Answer<Object>() {
|
||||
|
||||
@Override
|
||||
public Object answer(InvocationOnMock invocation) throws Throwable {
|
||||
final int fiveMinutes = 1000 * 60 * 5;
|
||||
Thread.sleep(fiveMinutes);
|
||||
return partitions;
|
||||
}
|
||||
|
||||
});
|
||||
this.indicator.setTimeout(1);
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.DOWN);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void createsConsumerOnceWhenInvokedMultipleTimes() {
|
||||
final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
|
||||
indicator.health();
|
||||
Health health = indicator.health();
|
||||
|
||||
assertThat(health.getStatus()).isEqualTo(Status.UP);
|
||||
org.mockito.Mockito.verify(this.consumerFactory).createConsumer();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void consumerCreationFailsFirstTime() {
|
||||
org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willThrow(KafkaException.class)
|
||||
.willReturn(consumer);
|
||||
|
||||
Health health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.DOWN);
|
||||
|
||||
health = indicator.health();
|
||||
assertThat(health.getStatus()).isEqualTo(Status.UP);
|
||||
|
||||
org.mockito.Mockito.verify(this.consumerFactory, Mockito.times(2)).createConsumer();
|
||||
}
|
||||
|
||||
private List<PartitionInfo> partitions(Node leader) {
|
||||
List<PartitionInfo> partitions = new ArrayList<>();
|
||||
partitions.add(new PartitionInfo(TEST_TOPIC, 0, leader, null, null));
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
/*
|
||||
* Copyright 2016 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import javax.security.auth.login.AppConfigurationEntry;
|
||||
|
||||
import com.sun.security.auth.login.ConfigFile;
|
||||
|
||||
import org.apache.kafka.common.security.JaasUtils;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.io.ClassPathResource;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
public class KafkaBinderJaasInitializerListenerTest {
|
||||
|
||||
@Test
|
||||
public void testConfigurationParsedCorrectlyWithKafkaClient() throws Exception {
|
||||
ConfigFile configFile = new ConfigFile(new ClassPathResource("jaas-sample-kafka-only.conf").getURI());
|
||||
final AppConfigurationEntry[] kafkaConfigurationArray = configFile.getAppConfigurationEntry(JaasUtils.LOGIN_CONTEXT_CLIENT);
|
||||
|
||||
final ConfigurableApplicationContext context =
|
||||
SpringApplication.run(SimpleApplication.class,
|
||||
"--spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true",
|
||||
"--spring.cloud.stream.kafka.binder.jaas.options.storeKey=true",
|
||||
"--spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab",
|
||||
"--spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM");
|
||||
javax.security.auth.login.Configuration configuration = javax.security.auth.login.Configuration.getConfiguration();
|
||||
|
||||
final AppConfigurationEntry[] kafkaConfiguration = configuration.getAppConfigurationEntry(JaasUtils.LOGIN_CONTEXT_CLIENT);
|
||||
assertThat(kafkaConfiguration).hasSize(1);
|
||||
assertThat(kafkaConfiguration[0].getOptions()).isEqualTo(kafkaConfigurationArray[0].getOptions());
|
||||
context.close();
|
||||
}
|
||||
|
||||
@SpringBootApplication
|
||||
public static class SimpleApplication {
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
* Copyright 2016-2017 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@@ -13,14 +13,17 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import io.micrometer.core.instrument.search.Search;
|
||||
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
||||
import org.apache.kafka.common.Node;
|
||||
@@ -31,17 +34,11 @@ import org.junit.Test;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.MockitoAnnotations;
|
||||
|
||||
import org.springframework.boot.actuate.metrics.Metric;
|
||||
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder.TopicInformation;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.mockito.BDDMockito.given;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Matchers.anyCollectionOf;
|
||||
import static org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics.METRIC_PREFIX;
|
||||
|
||||
/**
|
||||
* @author Henryk Konsek
|
||||
@@ -61,6 +58,8 @@ public class KafkaBinderMetricsTest {
|
||||
@Mock
|
||||
private KafkaMessageChannelBinder binder;
|
||||
|
||||
private MeterRegistry meterRegistry = new SimpleMeterRegistry();
|
||||
|
||||
private Map<String, TopicInformation> topicsInUse = new HashMap<>();
|
||||
|
||||
@Mock
|
||||
@@ -69,24 +68,23 @@ public class KafkaBinderMetricsTest {
|
||||
@Before
|
||||
public void setup() {
|
||||
MockitoAnnotations.initMocks(this);
|
||||
given(consumerFactory.createConsumer()).willReturn(consumer);
|
||||
given(binder.getTopicsInUse()).willReturn(topicsInUse);
|
||||
org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willReturn(consumer);
|
||||
org.mockito.BDDMockito.given(binder.getTopicsInUse()).willReturn(topicsInUse);
|
||||
metrics = new KafkaBinderMetrics(binder, kafkaBinderConfigurationProperties, consumerFactory);
|
||||
given(consumer.endOffsets(anyCollectionOf(TopicPartition.class)))
|
||||
.willReturn(singletonMap(new TopicPartition(TEST_TOPIC, 0), 1000L));
|
||||
org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class)))
|
||||
.willReturn(java.util.Collections.singletonMap(new TopicPartition(TEST_TOPIC, 0), 1000L));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldIndicateLag() {
|
||||
given(consumer.committed(any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
|
||||
org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
|
||||
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
|
||||
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
Collection<Metric<?>> collectedMetrics = metrics.metrics();
|
||||
assertThat(collectedMetrics).hasSize(1);
|
||||
assertThat(collectedMetrics.iterator().next().getName())
|
||||
.isEqualTo(String.format("%s.%s.%s.lag", METRIC_PREFIX, "group", TEST_TOPIC));
|
||||
assertThat(collectedMetrics.iterator().next().getValue()).isEqualTo(500L);
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
metrics.bindTo(meterRegistry);
|
||||
assertThat(meterRegistry.getMeters()).hasSize(1);
|
||||
Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
|
||||
assertThat(group.gauge().value()).isEqualTo(500.0);
|
||||
}
|
||||
|
||||
@Test
|
||||
@@ -94,36 +92,34 @@ public class KafkaBinderMetricsTest {
|
||||
Map<TopicPartition, Long> endOffsets = new HashMap<>();
|
||||
endOffsets.put(new TopicPartition(TEST_TOPIC, 0), 1000L);
|
||||
endOffsets.put(new TopicPartition(TEST_TOPIC, 1), 1000L);
|
||||
given(consumer.endOffsets(anyCollectionOf(TopicPartition.class))).willReturn(endOffsets);
|
||||
given(consumer.committed(any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
|
||||
org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class))).willReturn(endOffsets);
|
||||
org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
|
||||
List<PartitionInfo> partitions = partitions(new Node(0, null, 0), new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
|
||||
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
Collection<Metric<?>> collectedMetrics = metrics.metrics();
|
||||
assertThat(collectedMetrics).hasSize(1);
|
||||
assertThat(collectedMetrics.iterator().next().getName())
|
||||
.isEqualTo(String.format("%s.%s.%s.lag", METRIC_PREFIX, "group", TEST_TOPIC));
|
||||
assertThat(collectedMetrics.iterator().next().getValue()).isEqualTo(1000L);
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
metrics.bindTo(meterRegistry);
|
||||
assertThat(meterRegistry.getMeters()).hasSize(1);
|
||||
Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
|
||||
assertThat(group.gauge().value()).isEqualTo(1000.0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldIndicateFullLagForNotCommittedGroups() {
|
||||
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
|
||||
given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
Collection<Metric<?>> collectedMetrics = metrics.metrics();
|
||||
assertThat(collectedMetrics).hasSize(1);
|
||||
assertThat(collectedMetrics.iterator().next().getName())
|
||||
.isEqualTo(String.format("%s.%s.%s.lag", METRIC_PREFIX, "group", TEST_TOPIC));
|
||||
assertThat(collectedMetrics.iterator().next().getValue()).isEqualTo(1000L);
|
||||
org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
|
||||
metrics.bindTo(meterRegistry);
|
||||
assertThat(meterRegistry.getMeters()).hasSize(1);
|
||||
Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
|
||||
assertThat(group.gauge().value()).isEqualTo(1000.0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void shouldNotCalculateLagForProducerTopics() {
|
||||
List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
|
||||
topicsInUse.put(TEST_TOPIC, new TopicInformation(null, partitions));
|
||||
Collection<Metric<?>> collectedMetrics = metrics.metrics();
|
||||
assertThat(collectedMetrics).isEmpty();
|
||||
metrics.bindTo(meterRegistry);
|
||||
assertThat(meterRegistry.getMeters()).isEmpty();
|
||||
}
|
||||
|
||||
private List<PartitionInfo> partitions(Node... nodes) {
|
||||
|
||||
File diff suppressed because it is too large
@@ -23,15 +23,14 @@ import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.integration.test.util.TestUtils;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;

/**
* @author Gary Russell
@@ -43,9 +42,7 @@ public class KafkaBinderUnitTests {
@Test
public void testPropertyOverrides() throws Exception {
KafkaBinderConfigurationProperties binderConfigurationProperties = new KafkaBinderConfigurationProperties();
AdminUtilsOperation adminUtilsOperation = mock(AdminUtilsOperation.class);
KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(binderConfigurationProperties,
adminUtilsOperation);
KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(binderConfigurationProperties, new KafkaProperties());
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfigurationProperties,
provisioningProvider);
KafkaConsumerProperties consumerProps = new KafkaConsumerProperties();

@@ -1,5 +1,5 @@
/*
* Copyright 2015-2017 the original author or authors.
* Copyright 2015-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,8 +17,6 @@
package org.springframework.cloud.stream.binder.kafka;

import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
@@ -31,24 +29,20 @@ import org.springframework.kafka.support.ProducerListener;

/**
* Test support class for {@link KafkaMessageChannelBinder}.
*
* @author Eric Bottard
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Soby Chacko
*/
public class Kafka10TestBinder extends AbstractKafkaTestBinder {
public class KafkaTestBinder extends AbstractKafkaTestBinder {

@SuppressWarnings({ "rawtypes", "unchecked" })
public Kafka10TestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
@SuppressWarnings({"rawtypes", "unchecked"})
KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration, KafkaTopicProvisioner kafkaTopicProvisioner) {
try {
AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
KafkaTopicProvisioner provisioningProvider =
new KafkaTopicProvisioner(binderConfiguration, adminUtilsOperation);
provisioningProvider.afterPropertiesSet();

KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration,
provisioningProvider) {
kafkaTopicProvisioner) {

/*
* Some tests use multiple instance indexes for the same topic; we need to make
@@ -56,20 +50,20 @@ public class Kafka10TestBinder extends AbstractKafkaTestBinder {
*/
@Override
protected String errorsBaseName(ConsumerDestination destination, String group,
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
return super.errorsBaseName(destination, group, consumerProperties) + "-"
+ consumerProperties.getInstanceIndex();
}

};

binder.setCodec(AbstractKafkaTestBinder.getCodec());
ProducerListener producerListener = new LoggingProducerListener();
binder.setProducerListener(producerListener);
AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(Config.class);
setApplicationContext(context);
binder.setApplicationContext(context);
binder.afterPropertiesSet();
this.setBinder(binder);
this.setPollableConsumerBinder(binder);
}
catch (Exception e) {
throw new RuntimeException(e);
@@ -0,0 +1,97 @@
/*
* Copyright 2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka;

import java.util.Collections;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.junit.ClassRule;
import org.junit.Test;
import org.mockito.InOrder;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.retry.support.RetryTemplate;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.BDDMockito.willReturn;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;

/**
* @author Gary Russell
* @since 2.0
*
*/
public class KafkaTransactionTests {

@ClassRule
public static final KafkaEmbedded embeddedKafka = new KafkaEmbedded(1);

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testProducerRunsInTx() {
KafkaProperties kafkaProperties = new KafkaProperties();
kafkaProperties.setBootstrapServers(Collections.singletonList(embeddedKafka.getBrokersAsString()));
KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties();
configurationProperties.getTransaction().setTransactionIdPrefix("foo-");
KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(configurationProperties, kafkaProperties);
provisioningProvider.setMetadataRetryOperations(new RetryTemplate());
final Producer mockProducer = mock(Producer.class);
willReturn(Collections.singletonList(new TopicPartition("foo", 0))).given(mockProducer).partitionsFor(anyString());
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties, provisioningProvider) {

@Override
protected DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(String transactionIdPrefix,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) {
DefaultKafkaProducerFactory<byte[], byte[]> producerFactory =
spy(super.getProducerFactory(transactionIdPrefix, producerProperties));
willReturn(mockProducer).given(producerFactory).createProducer();
return producerFactory;
}

};
GenericApplicationContext applicationContext = new GenericApplicationContext();
applicationContext.refresh();
binder.setApplicationContext(applicationContext);
DirectChannel channel = new DirectChannel();
KafkaProducerProperties extension = new KafkaProducerProperties();
ExtendedProducerProperties<KafkaProducerProperties> properties = new ExtendedProducerProperties<>(extension);
binder.bindProducer("foo", channel, properties);
channel.send(new GenericMessage<>("foo".getBytes()));
InOrder inOrder = inOrder(mockProducer);
inOrder.verify(mockProducer).beginTransaction();
inOrder.verify(mockProducer).send(any(ProducerRecord.class), any(Callback.class));
inOrder.verify(mockProducer).commitTransaction();
inOrder.verify(mockProducer).close();
inOrder.verifyNoMoreInteractions();
}

}
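As background, the test above replaces the real producer with a mock so it can verify that a message sent through the bound channel is wrapped in beginTransaction()/commitTransaction(); the binder enables this behaviour when a transaction id prefix is set on its configuration properties, as the test does programmatically. A minimal sketch of the same transactional send done directly with spring-kafka follows; the prefix, topic name and broker address are illustrative assumptions.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaOperations;
import org.springframework.kafka.core.KafkaTemplate;

public class TransactionalSendSketch {

    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

        DefaultKafkaProducerFactory<byte[], byte[]> producerFactory = new DefaultKafkaProducerFactory<>(props);
        // A transaction id prefix makes the factory hand out transactional producers,
        // analogous to configurationProperties.getTransaction().setTransactionIdPrefix("foo-") above.
        producerFactory.setTransactionIdPrefix("foo-");

        KafkaTemplate<byte[], byte[]> template = new KafkaTemplate<>(producerFactory);
        // Everything sent inside the callback runs in one transaction: it is committed when
        // the callback returns normally and aborted if it throws.
        template.executeInTransaction(new KafkaOperations.OperationsCallback<byte[], byte[], Void>() {

            @Override
            public Void doInOperations(KafkaOperations<byte[], byte[]> operations) {
                operations.send("foo", "payload".getBytes()); // topic name taken from the test above
                return null;
            }
        });
    }

}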
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.bootstrap;

import org.junit.ClassRule;

@@ -4,6 +4,7 @@
<pattern>%d{ISO8601} %5p %t %c{2}:%L - %m%n</pattern>
</encoder>
</appender>
<logger name="org.apache.kafka" level="DEBUG"/>
<logger name="org.springframework.integration.kafka" level="INFO"/>
<logger name="org.springframework.kafka" level="INFO"/>
<logger name="org.springframework.cloud.stream" level="INFO" />

@@ -1,8 +0,0 @@
{"namespace": "org.springframework.cloud.stream.binder.kafka",
"type": "record",
"name": "User1",
"fields": [
{"name": "name", "type": "string"},
{"name": "favoriteColor", "type": "string"}
]
}
@@ -1,189 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kstream;
|
||||
|
||||
import org.apache.kafka.common.Configurable;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KeyValueMapper;
|
||||
|
||||
import org.springframework.cloud.stream.binder.AbstractBinder;
|
||||
import org.springframework.cloud.stream.binder.BinderHeaders;
|
||||
import org.springframework.cloud.stream.binder.Binding;
|
||||
import org.springframework.cloud.stream.binder.DefaultBinding;
|
||||
import org.springframework.cloud.stream.binder.EmbeddedHeaderUtils;
|
||||
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
|
||||
import org.springframework.cloud.stream.binder.HeaderMode;
|
||||
import org.springframework.cloud.stream.binder.MessageValues;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
|
||||
import org.springframework.cloud.stream.binder.kstream.config.KStreamConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kstream.config.KStreamExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kstream.config.KStreamProducerProperties;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.util.MimeType;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* @author Marius Bogoevici
|
||||
*/
|
||||
public class KStreamBinder extends
|
||||
AbstractBinder<KStream<Object, Object>, ExtendedConsumerProperties<KStreamConsumerProperties>, ExtendedProducerProperties<KStreamProducerProperties>>
|
||||
implements ExtendedPropertiesBinder<KStream<Object, Object>, KStreamConsumerProperties, KStreamProducerProperties> {
|
||||
|
||||
private String[] headers;
|
||||
|
||||
private final KafkaTopicProvisioner kafkaTopicProvisioner;
|
||||
|
||||
private final KStreamExtendedBindingProperties kStreamExtendedBindingProperties;
|
||||
|
||||
private final StreamsConfig streamsConfig;
|
||||
|
||||
public KStreamBinder(KafkaBinderConfigurationProperties binderConfigurationProperties, KafkaTopicProvisioner kafkaTopicProvisioner,
|
||||
KStreamExtendedBindingProperties kStreamExtendedBindingProperties, StreamsConfig streamsConfig) {
|
||||
this.headers = EmbeddedHeaderUtils.headersToEmbed(binderConfigurationProperties.getHeaders());
|
||||
this.kafkaTopicProvisioner = kafkaTopicProvisioner;
|
||||
this.kStreamExtendedBindingProperties = kStreamExtendedBindingProperties;
|
||||
this.streamsConfig = streamsConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binding<KStream<Object, Object>> doBindConsumer(String name, String group,
|
||||
KStream<Object, Object> inputTarget, ExtendedConsumerProperties<KStreamConsumerProperties> properties) {
|
||||
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<KafkaConsumerProperties>(
|
||||
new KafkaConsumerProperties());
|
||||
this.kafkaTopicProvisioner.provisionConsumerDestination(name, group, extendedConsumerProperties);
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected Binding<KStream<Object, Object>> doBindProducer(String name, KStream<Object, Object> outboundBindTarget,
|
||||
ExtendedProducerProperties<KStreamProducerProperties> properties) {
|
||||
ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties = new ExtendedProducerProperties<KafkaProducerProperties>(
|
||||
new KafkaProducerProperties());
|
||||
this.kafkaTopicProvisioner.provisionProducerDestination(name , extendedProducerProperties);
|
||||
if (HeaderMode.embeddedHeaders.equals(properties.getHeaderMode())) {
|
||||
outboundBindTarget = outboundBindTarget.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
|
||||
@Override
|
||||
public KeyValue<Object, Object> apply(Object k, Object v) {
|
||||
if (v instanceof Message) {
|
||||
try {
|
||||
return new KeyValue<>(k, (Object)KStreamBinder.this.serializeAndEmbedHeadersIfApplicable((Message<?>) v));
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new IllegalArgumentException(e);
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw new IllegalArgumentException("Wrong type of message " + v);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
if (!properties.isUseNativeEncoding()) {
|
||||
outboundBindTarget = outboundBindTarget
|
||||
.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
|
||||
@Override
|
||||
public KeyValue<Object, Object> apply(Object k, Object v) {
|
||||
return KeyValue.pair(k, (Object)KStreamBinder.this.serializePayloadIfNecessary((Message<?>) v));
|
||||
}
|
||||
});
|
||||
}
|
||||
else {
|
||||
outboundBindTarget = outboundBindTarget
|
||||
.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
|
||||
@Override
|
||||
public KeyValue<Object, Object> apply(Object k, Object v) {
|
||||
return KeyValue.pair(k, ((Message<?>) v).getPayload());
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
if (!properties.isUseNativeEncoding() || StringUtils.hasText(properties.getExtension().getKeySerde()) || StringUtils.hasText(properties.getExtension().getValueSerde())) {
|
||||
Serde<?> keySerde = Serdes.ByteArray();
|
||||
Serde<?> valueSerde = Serdes.ByteArray();
|
||||
try {
|
||||
if (StringUtils.hasText(properties.getExtension().getKeySerde())) {
|
||||
keySerde = Utils.newInstance(properties.getExtension().getKeySerde(), Serde.class);
|
||||
if (keySerde instanceof Configurable) {
|
||||
((Configurable) keySerde).configure(streamsConfig.originals());
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (ClassNotFoundException e) {
|
||||
throw new IllegalStateException("Serde class not found: ", e);
|
||||
}
|
||||
try {
|
||||
if (StringUtils.hasText(properties.getExtension().getValueSerde())) {
|
||||
valueSerde = Utils.newInstance(properties.getExtension().getValueSerde(), Serde.class);
|
||||
if (valueSerde instanceof Configurable) {
|
||||
((Configurable) valueSerde).configure(streamsConfig.originals());
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (ClassNotFoundException e) {
|
||||
throw new IllegalStateException("Serde class not found: ", e);
|
||||
}
|
||||
outboundBindTarget.to((Serde<Object>) keySerde, (Serde<Object>) valueSerde, name);
|
||||
}
|
||||
else {
|
||||
outboundBindTarget.to(name);
|
||||
}
|
||||
return new DefaultBinding<>(name, null, outboundBindTarget, null);
|
||||
}
|
||||
|
||||
private byte[] serializeAndEmbedHeadersIfApplicable(Message<?> message) throws Exception {
|
||||
MessageValues transformed = serializePayloadIfNecessary(message);
|
||||
byte[] payload;
|
||||
|
||||
Object contentType = transformed.get(MessageHeaders.CONTENT_TYPE);
|
||||
// transform content type headers to String, so that they can be properly embedded
|
||||
// in JSON
|
||||
if (contentType instanceof MimeType) {
|
||||
transformed.put(MessageHeaders.CONTENT_TYPE, contentType.toString());
|
||||
}
|
||||
Object originalContentType = transformed.get(BinderHeaders.BINDER_ORIGINAL_CONTENT_TYPE);
|
||||
if (originalContentType instanceof MimeType) {
|
||||
transformed.put(BinderHeaders.BINDER_ORIGINAL_CONTENT_TYPE, originalContentType.toString());
|
||||
}
|
||||
payload = EmbeddedHeaderUtils.embedHeaders(transformed, headers);
|
||||
return payload;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KStreamConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
return this.kStreamExtendedBindingProperties.getExtendedConsumerProperties(channelName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KStreamProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
return this.kStreamExtendedBindingProperties.getExtendedProducerProperties(channelName);
|
||||
}
|
||||
|
||||
}
@@ -1,167 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.streams.kstream.KeyValueMapper;

import org.springframework.aop.framework.ProxyFactory;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.EmbeddedHeaderUtils;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.MessageSerializationUtils;
import org.springframework.cloud.stream.binder.MessageValues;
import org.springframework.cloud.stream.binder.StringConvertingContentTypeResolver;
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.support.MutableMessageHeaders;
import org.springframework.messaging.Message;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
import org.springframework.util.MimeType;
import org.springframework.util.StringUtils;

/**
 * @author Marius Bogoevici
 */
public class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {

    private final KStreamBuilder kStreamBuilder;

    private final BindingServiceProperties bindingServiceProperties;

    private volatile Codec codec;

    private final StringConvertingContentTypeResolver contentTypeResolver = new StringConvertingContentTypeResolver();

    private volatile Map<String, Class<?>> payloadTypeCache = new ConcurrentHashMap<>();

    private CompositeMessageConverterFactory compositeMessageConverterFactory;

    public KStreamBoundElementFactory(KStreamBuilder streamBuilder, BindingServiceProperties bindingServiceProperties,
            Codec codec, CompositeMessageConverterFactory compositeMessageConverterFactory) {
        super(KStream.class);
        this.bindingServiceProperties = bindingServiceProperties;
        this.kStreamBuilder = streamBuilder;
        this.codec = codec;
        this.compositeMessageConverterFactory = compositeMessageConverterFactory;
    }

    @Override
    public KStream createInput(String name) {
        KStream<Object, Object> stream = kStreamBuilder.stream(bindingServiceProperties.getBindingDestination(name));
        ConsumerProperties properties = bindingServiceProperties.getConsumerProperties(name);
        if (HeaderMode.embeddedHeaders.equals(properties.getHeaderMode())) {

            stream = stream.map(new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
                @Override
                public KeyValue<Object, Object> apply(Object key, Object value) {
                    if (!(value instanceof byte[])) {
                        return new KeyValue<>(key, value);
                    }
                    try {
                        MessageValues messageValues = EmbeddedHeaderUtils
                                .extractHeaders(MessageBuilder.withPayload((byte[]) value).build(), true);
                        messageValues = deserializePayloadIfNecessary(messageValues);
                        return new KeyValue<Object, Object>(null, messageValues.toMessage());
                    }
                    catch (Exception e) {
                        throw new IllegalArgumentException(e);
                    }
                }
            });
        }
        return stream;
    }

    @Override
    @SuppressWarnings("unchecked")
    public KStream createOutput(final String name) {
        BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(name);
        String contentType = bindingProperties.getContentType();
        MessageConverter messageConverter = StringUtils.hasText(contentType) ? compositeMessageConverterFactory
                .getMessageConverterForType(MimeType.valueOf(contentType)) : null;
        KStreamWrapperHandler handler = new KStreamWrapperHandler(messageConverter);
        ProxyFactory proxyFactory = new ProxyFactory(KStreamWrapper.class, KStream.class);
        proxyFactory.addAdvice(handler);
        return (KStream) proxyFactory.getProxy();
    }

    private MessageValues deserializePayloadIfNecessary(MessageValues messageValues) {
        return MessageSerializationUtils.deserializePayload(messageValues, this.contentTypeResolver, this.codec);
    }

    interface KStreamWrapper {

        void wrap(KStream<Object, Object> delegate);
    }

    static class KStreamWrapperHandler implements KStreamWrapper, MethodInterceptor {

        private KStream<Object, Object> delegate;

        private final MessageConverter messageConverter;

        public KStreamWrapperHandler(MessageConverter messageConverter) {
            this.messageConverter = messageConverter;
        }

        public void wrap(KStream<Object, Object> delegate) {
            Assert.notNull(delegate, "delegate cannot be null");
            Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
            if (messageConverter != null) {
                KeyValueMapper<Object, Object, KeyValue<Object, Object>> keyValueMapper = new KeyValueMapper<Object, Object, KeyValue<Object, Object>>() {
                    @Override
                    public KeyValue<Object, Object> apply(Object k, Object v) {
                        Message<?> message = (Message<?>) v;
                        return new KeyValue<Object, Object>(k,
                                messageConverter.toMessage(message.getPayload(),
                                        new MutableMessageHeaders(((Message<?>) v).getHeaders())));
                    }
                };
                delegate = delegate.map(keyValueMapper);
            }
            this.delegate = delegate;
        }

        @Override
        public Object invoke(MethodInvocation methodInvocation) throws Throwable {
            if (methodInvocation.getMethod().getDeclaringClass().equals(KStream.class)) {
                Assert.notNull(delegate, "Trying to invoke " + methodInvocation
                        .getMethod() + " but no delegate has been set.");
                return methodInvocation.getMethod().invoke(delegate, methodInvocation.getArguments());
            }
            else if (methodInvocation.getMethod().getDeclaringClass().equals(KStreamWrapper.class)) {
                return methodInvocation.getMethod().invoke(this, methodInvocation.getArguments());
            }
            else {
                throw new IllegalStateException("Only KStream method invocations are permitted");
            }
        }
    }
}
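createOutput(...) above hands callers a proxy that implements both KStream and the internal KStreamWrapper interface, while KStreamWrapperHandler routes every KStream call to a delegate that is attached later. A stripped-down, standalone sketch of that late-binding proxy pattern (the Greeter/DelegateAware names are illustrative and not binder types; only ProxyFactory and the AOP Alliance interfaces come from the libraries already imported in this file):

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;

import org.springframework.aop.framework.ProxyFactory;

public class LateBoundProxyExample {

    interface Greeter {
        String greet(String name);
    }

    interface DelegateAware {
        void setDelegate(Greeter delegate);
    }

    static class Handler implements MethodInterceptor, DelegateAware {

        private Greeter delegate;

        @Override
        public void setDelegate(Greeter delegate) {
            this.delegate = delegate;
        }

        @Override
        public Object invoke(MethodInvocation invocation) throws Throwable {
            // Route marker-interface calls to this handler; everything else goes to the delegate.
            if (invocation.getMethod().getDeclaringClass().equals(DelegateAware.class)) {
                return invocation.getMethod().invoke(this, invocation.getArguments());
            }
            return invocation.getMethod().invoke(delegate, invocation.getArguments());
        }
    }

    public static void main(String[] args) {
        Handler handler = new Handler();
        ProxyFactory proxyFactory = new ProxyFactory(DelegateAware.class, Greeter.class);
        proxyFactory.addAdvice(handler);
        Object proxy = proxyFactory.getProxy();

        // The delegate is supplied after the proxy has already been handed out,
        // just as the binder wraps the outbound KStream after binding.
        ((DelegateAware) proxy).setDelegate(name -> "hello " + name);
        System.out.println(((Greeter) proxy).greet("kstream"));
    }
}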
@@ -1,34 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.annotations;

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;

/**
 * @author Marius Bogoevici
 */
public interface KStreamProcessor {

    @Input("input")
    KStream<?, ?> input();

    @Output("output")
    KStream<?, ?> output();
}
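KStreamProcessor is the bindable interface an application enables to obtain one inbound and one outbound KStream. A minimal sketch of a consuming application, assuming the standard spring-cloud-stream annotations (the class name, pass-through transformation, and generic types are illustrative, not part of this diff):

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
import org.springframework.messaging.handler.annotation.SendTo;

@SpringBootApplication
@EnableBinding(KStreamProcessor.class)
public class UppercaseStreamApplication {

    @StreamListener("input")
    @SendTo("output")
    public KStream<Object, String> process(KStream<Object, String> input) {
        // Trivial transformation for illustration; a real application would
        // typically map, group, or window the stream here.
        return input.mapValues(value -> value.toUpperCase());
    }

    public static void main(String[] args) {
        SpringApplication.run(UppercaseStreamApplication.class, args);
    }
}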
@@ -1,96 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.utils.AppInfoParser;
import org.apache.kafka.streams.StreamsConfig;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kstream.KStreamBinder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.type.AnnotatedTypeMetadata;

/**
 * @author Marius Bogoevici
 */
@Configuration
@EnableConfigurationProperties(KStreamExtendedBindingProperties.class)
public class KStreamBinderConfiguration {

    @Autowired(required = false)
    private AdminUtilsOperation adminUtilsOperation;

    private static final Log logger = LogFactory.getLog(KStreamBinderConfiguration.class);

    @Bean
    public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties) {
        return new KafkaTopicProvisioner(binderConfigurationProperties, adminUtilsOperation);
    }

    @Bean
    public KStreamBinder kStreamBinder(KafkaBinderConfigurationProperties binderConfigurationProperties,
            KafkaTopicProvisioner kafkaTopicProvisioner,
            KStreamExtendedBindingProperties kStreamExtendedBindingProperties, StreamsConfig streamsConfig) {
        return new KStreamBinder(binderConfigurationProperties, kafkaTopicProvisioner, kStreamExtendedBindingProperties,
                streamsConfig);
    }

    @Bean(name = "adminUtilsOperation")
    @Conditional(Kafka09Present.class)
    @ConditionalOnClass(name = "kafka.admin.AdminUtils")
    public AdminUtilsOperation kafka09AdminUtilsOperation() {
        logger.info("AdminUtils selected: Kafka 0.9 AdminUtils");
        return new Kafka09AdminUtilsOperation();
    }

    @Bean(name = "adminUtilsOperation")
    @Conditional(Kafka10Present.class)
    @ConditionalOnClass(name = "kafka.admin.AdminUtils")
    public AdminUtilsOperation kafka10AdminUtilsOperation() {
        logger.info("AdminUtils selected: Kafka 0.10 AdminUtils");
        return new Kafka10AdminUtilsOperation();
    }

    static class Kafka10Present implements Condition {

        @Override
        public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
            return AppInfoParser.getVersion().startsWith("0.10");
        }
    }

    static class Kafka09Present implements Condition {

        @Override
        public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
            return AppInfoParser.getVersion().startsWith("0.9");
        }
    }
}
@@ -1,103 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStreamBuilder;

import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.UnsatisfiedDependencyException;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kstream.KStreamBoundElementFactory;
import org.springframework.cloud.stream.binder.kstream.KStreamListenerParameterAdapter;
import org.springframework.cloud.stream.binder.kstream.KStreamStreamListenerResultAdapter;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.integration.codec.Codec;
import org.springframework.kafka.annotation.KafkaStreamsDefaultConfiguration;
import org.springframework.kafka.core.KStreamBuilderFactoryBean;
import org.springframework.util.ObjectUtils;

/**
 * @author Marius Bogoevici
 */
public class KStreamBinderSupportAutoConfiguration {

    @Bean
    @ConfigurationProperties(prefix = "spring.cloud.stream.kstream.binder")
    public KafkaBinderConfigurationProperties binderConfigurationProperties() {
        return new KafkaBinderConfigurationProperties();
    }

    @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_KSTREAM_BUILDER_BEAN_NAME)
    public KStreamBuilderFactoryBean defaultKStreamBuilder(
            @Qualifier(KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) ObjectProvider<StreamsConfig> streamsConfigProvider) {
        StreamsConfig streamsConfig = streamsConfigProvider.getIfAvailable();
        if (streamsConfig != null) {
            KStreamBuilderFactoryBean kStreamBuilderFactoryBean = new KStreamBuilderFactoryBean(streamsConfig);
            kStreamBuilderFactoryBean.setPhase(Integer.MAX_VALUE - 500);
            return kStreamBuilderFactoryBean;
        }
        else {
            throw new UnsatisfiedDependencyException(KafkaStreamsDefaultConfiguration.class.getName(),
                    KafkaStreamsDefaultConfiguration.DEFAULT_KSTREAM_BUILDER_BEAN_NAME, "streamsConfig",
                    "There is no '" + KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME
                            + "' StreamsConfig bean in the application context.\n");
        }
    }

    @Bean(KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
    public StreamsConfig streamsConfig(KafkaBinderConfigurationProperties binderConfigurationProperties) {
        Properties props = new Properties();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, binderConfigurationProperties.getKafkaConnectionString());
        props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "default");
        props.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, binderConfigurationProperties.getZkConnectionString());
        if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConfiguration())) {
            props.putAll(binderConfigurationProperties.getConfiguration());
        }
        return new StreamsConfig(props);
    }

    @Bean
    public KStreamStreamListenerResultAdapter kStreamStreamListenerResultAdapter() {
        return new KStreamStreamListenerResultAdapter();
    }

    @Bean
    public KStreamListenerParameterAdapter kStreamListenerParameterAdapter(
            CompositeMessageConverterFactory compositeMessageConverterFactory) {
        return new KStreamListenerParameterAdapter(
                compositeMessageConverterFactory.getMessageConverterForAllRegistered());
    }

    @Bean
    public KStreamBoundElementFactory kStreamBindableTargetFactory(KStreamBuilder kStreamBuilder,
            BindingServiceProperties bindingServiceProperties, Codec codec,
            CompositeMessageConverterFactory compositeMessageConverterFactory) {
        return new KStreamBoundElementFactory(kStreamBuilder, bindingServiceProperties, codec,
                compositeMessageConverterFactory);
    }

}
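binderConfigurationProperties() above binds settings under the spring.cloud.stream.kstream.binder prefix, and streamsConfig() copies any entries from the configuration map into the StreamsConfig it builds. A hedged sketch of supplying those settings programmatically, reusing the UppercaseStreamApplication from the earlier sketch (broker and ZooKeeper addresses, the application.id value, and the destination names are placeholders, not values from this changeset):

import org.springframework.boot.builder.SpringApplicationBuilder;

public class KStreamBinderLauncher {

    public static void main(String[] args) {
        new SpringApplicationBuilder(UppercaseStreamApplication.class)
                .properties(
                        // Bound by binderConfigurationProperties() via its prefix.
                        "spring.cloud.stream.kstream.binder.brokers=localhost:9092",
                        "spring.cloud.stream.kstream.binder.zkNodes=localhost:2181",
                        // Copied into StreamsConfig by streamsConfig() through getConfiguration().
                        "spring.cloud.stream.kstream.binder.configuration.application.id=uppercase-sample",
                        // Destinations for the "input"/"output" targets declared on KStreamProcessor.
                        "spring.cloud.stream.bindings.input.destination=words",
                        "spring.cloud.stream.bindings.output.destination=uppercased")
                .run(args);
    }
}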
@@ -1,24 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

/**
 * @author Marius Bogoevici
 */
public class KStreamConsumerProperties extends KStreamCommonProperties {

}
@@ -1,24 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

/**
 * @author Marius Bogoevici
 */
public class KStreamProducerProperties extends KStreamCommonProperties {

}
Some files were not shown because too many files have changed in this diff.