Compare commits

22 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 8c8245789c | |
| | 6867d26dc2 | |
| | 73536b16d4 | |
| | a3ec01ee9e | |
| | df0c94891e | |
| | a22150f273 | |
| | e940198e12 | |
| | bd10766503 | |
| | 1dae650c03 | |
| | e3687cc5ab | |
| | d9c40d823a | |
| | ca3b89b405 | |
| | ef220a551a | |
| | 585448dbad | |
| | 4093a24d3e | |
| | 7d4323cb2e | |
| | 76a53049c5 | |
| | b29498617d | |
| | 547ea77b2f | |
| | 699dc06bc5 | |
| | e822e59859 | |
| | cc630f2be0 | |
@@ -21,7 +21,7 @@
         <repository>
             <id>spring-snapshots</id>
             <name>Spring Snapshots</name>
-            <url>http://repo.spring.io/libs-snapshot-local</url>
+            <url>https://repo.spring.io/libs-snapshot-local</url>
             <snapshots>
                 <enabled>true</enabled>
             </snapshots>
@@ -29,7 +29,7 @@
         <repository>
             <id>spring-milestones</id>
             <name>Spring Milestones</name>
-            <url>http://repo.spring.io/libs-milestone-local</url>
+            <url>https://repo.spring.io/libs-milestone-local</url>
             <snapshots>
                 <enabled>false</enabled>
             </snapshots>
@@ -37,7 +37,7 @@
         <repository>
             <id>spring-releases</id>
             <name>Spring Releases</name>
-            <url>http://repo.spring.io/release</url>
+            <url>https://repo.spring.io/release</url>
             <snapshots>
                 <enabled>false</enabled>
             </snapshots>
@@ -47,7 +47,7 @@
         <pluginRepository>
             <id>spring-snapshots</id>
             <name>Spring Snapshots</name>
-            <url>http://repo.spring.io/libs-snapshot-local</url>
+            <url>https://repo.spring.io/libs-snapshot-local</url>
             <snapshots>
                 <enabled>true</enabled>
             </snapshots>
@@ -55,7 +55,7 @@
        <pluginRepository>
             <id>spring-milestones</id>
             <name>Spring Milestones</name>
-            <url>http://repo.spring.io/libs-milestone-local</url>
+            <url>https://repo.spring.io/libs-milestone-local</url>
             <snapshots>
                 <enabled>false</enabled>
             </snapshots>

LICENSE (4 changes)
@@ -1,6 +1,6 @@
                                  Apache License
                            Version 2.0, January 2004
-                        http://www.apache.org/licenses/
+                        https://www.apache.org/licenses/

    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

@@ -192,7 +192,7 @@
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at

-        http://www.apache.org/licenses/LICENSE-2.0
+        https://www.apache.org/licenses/LICENSE-2.0

     Unless required by applicable law or agreed to in writing, software
     distributed under the License is distributed on an "AS IS" BASIS,

mvnw (2 changes, vendored)
@@ -8,7 +8,7 @@
 # "License"); you may not use this file except in compliance
 # with the License. You may obtain a copy of the License at
 #
-#    http://www.apache.org/licenses/LICENSE-2.0
+#    https://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing,
 # software distributed under the License is distributed on an

mvnw.cmd (2 changes, vendored)
@@ -7,7 +7,7 @@
 @REM "License"); you may not use this file except in compliance
 @REM with the License. You may obtain a copy of the License at
 @REM
-@REM    http://www.apache.org/licenses/LICENSE-2.0
+@REM    https://www.apache.org/licenses/LICENSE-2.0
 @REM
 @REM Unless required by applicable law or agreed to in writing,
 @REM software distributed under the License is distributed on an

pom.xml (150 changes)
@@ -1,58 +1,60 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-    <version>2.1.0.M2</version>
+    <version>1.1.3.BUILD-SNAPSHOT</version>
     <packaging>pom</packaging>
     <parent>
         <groupId>org.springframework.cloud</groupId>
         <artifactId>spring-cloud-build</artifactId>
-        <version>2.1.0.M1</version>
+        <version>1.2.3.BUILD-SNAPSHOT</version>
         <relativePath />
     </parent>
     <properties>
-        <java.version>1.8</java.version>
-        <spring-kafka.version>2.2.0.M2</spring-kafka.version>
-        <spring-integration-kafka.version>3.1.0.M1</spring-integration-kafka.version>
-        <kafka.version>2.0.0</kafka.version>
-        <spring-cloud-stream.version>2.1.0.M2</spring-cloud-stream.version>
+        <java.version>1.7</java.version>
+        <kafka.version>0.9.0.1</kafka.version>
+        <spring-kafka.version>1.0.5.RELEASE</spring-kafka.version>
+        <spring-integration-kafka.version>2.0.1.RELEASE</spring-integration-kafka.version>
+        <spring-cloud-stream.version>1.1.3.BUILD-SNAPSHOT</spring-cloud-stream.version>
     </properties>
     <modules>
         <module>spring-cloud-stream-binder-kafka</module>
         <module>spring-cloud-starter-stream-kafka</module>
         <module>spring-cloud-stream-binder-kafka-docs</module>
-        <module>spring-cloud-stream-binder-kafka-core</module>
-        <module>spring-cloud-stream-binder-kafka-streams</module>
-    </modules>
+        <module>spring-cloud-stream-binder-kafka-0.10-test</module>
+        <module>spring-cloud-stream-binder-kafka-test-support</module>
+    </modules>
     <dependencyManagement>
         <dependencies>
             <dependency>
                 <groupId>org.springframework.cloud</groupId>
                 <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
                 <version>${project.version}</version>
             </dependency>
             <dependency>
                 <groupId>org.springframework.cloud</groupId>
                 <artifactId>spring-cloud-stream-binder-kafka</artifactId>
                 <version>${project.version}</version>
             </dependency>
             <dependency>
                 <groupId>org.springframework.cloud</groupId>
                 <artifactId>spring-cloud-stream</artifactId>
                 <version>${spring-cloud-stream.version}</version>
             </dependency>
             <dependency>
                 <groupId>org.springframework.cloud</groupId>
                 <artifactId>spring-cloud-stream-codec</artifactId>
                 <version>${spring-cloud-stream.version}</version>
             </dependency>
             <dependency>
                 <groupId>org.apache.kafka</groupId>
-                <artifactId>kafka-clients</artifactId>
+                <artifactId>kafka_2.11</artifactId>
                 <version>${kafka.version}</version>
                 <exclusions>
                     <exclusion>
                         <groupId>org.slf4j</groupId>
                         <artifactId>slf4j-log4j12</artifactId>
                     </exclusion>
                     <exclusion>
                         <groupId>log4j</groupId>
                         <artifactId>log4j</artifactId>
                     </exclusion>
                 </exclusions>
             </dependency>
             <dependency>
                 <groupId>org.apache.kafka</groupId>
                 <artifactId>kafka-clients</artifactId>
                 <version>${kafka.version}</version>
                 <classifier>test</classifier>
                 <scope>test</scope>
             </dependency>
             <dependency>
                 <groupId>org.springframework.kafka</groupId>
@@ -76,62 +78,15 @@
                 <scope>test</scope>
                 <version>${spring-kafka.version}</version>
             </dependency>
-            <dependency>
-                <groupId>org.apache.kafka</groupId>
-                <artifactId>kafka-streams</artifactId>
-                <version>${kafka.version}</version>
-                <exclusions>
-                    <exclusion>
-                        <groupId>org.slf4j</groupId>
-                        <artifactId>slf4j-log4j12</artifactId>
-                    </exclusion>
-                </exclusions>
-            </dependency>
-            <dependency>
-                <groupId>org.apache.kafka</groupId>
-                <artifactId>kafka_2.11</artifactId>
-                <classifier>test</classifier>
-                <scope>test</scope>
-                <version>${kafka.version}</version>
-                <exclusions>
-                    <exclusion>
-                        <groupId>jline</groupId>
-                        <artifactId>jline</artifactId>
-                    </exclusion>
-                    <exclusion>
-                        <groupId>org.slf4j</groupId>
-                        <artifactId>slf4j-log4j12</artifactId>
-                    </exclusion>
-                    <exclusion>
-                        <groupId>log4j</groupId>
-                        <artifactId>log4j</artifactId>
-                    </exclusion>
-                </exclusions>
-            </dependency>
         </dependencies>
     </dependencyManagement>

     <build>
         <pluginManagement>
             <plugins>
@@ -140,6 +95,18 @@
                 <artifactId>maven-antrun-plugin</artifactId>
                 <version>1.7</version>
             </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+                <version>2.17</version>
+                <dependencies>
+                    <dependency>
+                        <groupId>com.puppycrawl.tools</groupId>
+                        <artifactId>checkstyle</artifactId>
+                        <version>7.1</version>
+                    </dependency>
+                </dependencies>
+            </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-javadoc-plugin</artifactId>
@@ -163,15 +130,26 @@
                 <dependencies>
                     <dependency>
                         <groupId>org.springframework.cloud</groupId>
-                        <artifactId>spring-cloud-stream-tools</artifactId>
-                        <version>${spring-cloud-stream.version}</version>
+                        <artifactId>spring-cloud-build-tools</artifactId>
+                        <version>1.2.3.BUILD-SNAPSHOT</version>
                     </dependency>
                 </dependencies>
-                <configuration>
-                    <configLocation>checkstyle.xml</configLocation>
-                    <headerLocation>checkstyle-header.txt</headerLocation>
-                    <includeTestSourceDirectory>true</includeTestSourceDirectory>
-                </configuration>
+                <executions>
+                    <execution>
+                        <id>checkstyle-validation</id>
+                        <phase>validate</phase>
+                        <configuration>
+                            <configLocation>checkstyle.xml</configLocation>
+                            <encoding>UTF-8</encoding>
+                            <consoleOutput>true</consoleOutput>
+                            <failsOnError>true</failsOnError>
+                            <includeTestSourceDirectory>true</includeTestSourceDirectory>
+                        </configuration>
+                        <goals>
+                            <goal>check</goal>
+                        </goals>
+                    </execution>
+                </executions>
             </plugin>
         </plugins>
     </build>
@@ -183,7 +161,7 @@
         <repository>
             <id>spring-snapshots</id>
             <name>Spring Snapshots</name>
-            <url>http://repo.spring.io/libs-snapshot-local</url>
+            <url>https://repo.spring.io/libs-snapshot-local</url>
             <snapshots>
                 <enabled>true</enabled>
             </snapshots>
@@ -194,7 +172,7 @@
         <repository>
             <id>spring-milestones</id>
             <name>Spring Milestones</name>
-            <url>http://repo.spring.io/libs-milestone-local</url>
+            <url>https://repo.spring.io/libs-milestone-local</url>
             <snapshots>
                 <enabled>false</enabled>
             </snapshots>
@@ -202,7 +180,7 @@
         <repository>
             <id>spring-releases</id>
             <name>Spring Releases</name>
-            <url>http://repo.spring.io/release</url>
+            <url>https://repo.spring.io/release</url>
             <snapshots>
                 <enabled>false</enabled>
             </snapshots>
@@ -212,7 +190,7 @@
         <pluginRepository>
             <id>spring-snapshots</id>
             <name>Spring Snapshots</name>
-            <url>http://repo.spring.io/libs-snapshot-local</url>
+            <url>https://repo.spring.io/libs-snapshot-local</url>
             <snapshots>
                 <enabled>true</enabled>
             </snapshots>
@@ -223,7 +201,7 @@
         <pluginRepository>
             <id>spring-milestones</id>
             <name>Spring Milestones</name>
-            <url>http://repo.spring.io/libs-milestone-local</url>
+            <url>https://repo.spring.io/libs-milestone-local</url>
             <snapshots>
                 <enabled>false</enabled>
             </snapshots>
@@ -231,7 +209,7 @@
         <pluginRepository>
             <id>spring-releases</id>
             <name>Spring Releases</name>
-            <url>http://repo.spring.io/libs-release-local</url>
+            <url>https://repo.spring.io/libs-release-local</url>
             <snapshots>
                 <enabled>false</enabled>
             </snapshots>

spring-cloud-starter-stream-kafka/pom.xml

@@ -1,17 +1,17 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.springframework.cloud</groupId>
         <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-        <version>2.1.0.M2</version>
+        <version>1.1.3.BUILD-SNAPSHOT</version>
     </parent>
     <artifactId>spring-cloud-starter-stream-kafka</artifactId>
     <description>Spring Cloud Starter Stream Kafka</description>
-    <url>http://projects.spring.io/spring-cloud</url>
+    <url>https://projects.spring.io/spring-cloud</url>
     <organization>
         <name>Pivotal Software, Inc.</name>
-        <url>http://www.spring.io</url>
+        <url>https://www.spring.io</url>
     </organization>
     <properties>
         <main.basedir>${basedir}/../..</main.basedir>
@@ -20,7 +20,7 @@
         <dependency>
             <groupId>org.springframework.cloud</groupId>
             <artifactId>spring-cloud-stream-binder-kafka</artifactId>
-            <version>${project.version}</version>
+            <version>1.1.3.BUILD-SNAPSHOT</version>
         </dependency>
     </dependencies>
 </project>

spring-cloud-stream-binder-kafka-0.10-test/pom.xml (104 changes, new file)
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.springframework.cloud</groupId>
+        <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
+        <version>1.1.3.BUILD-SNAPSHOT</version>
+    </parent>
+    <artifactId>spring-cloud-stream-binder-kafka-0.10-test</artifactId>
+    <description>Spring Cloud Stream Kafka Binder 0.10 Tests</description>
+    <url>https://projects.spring.io/spring-cloud</url>
+    <organization>
+        <name>Pivotal Software, Inc.</name>
+        <url>https://www.spring.io</url>
+    </organization>
+    <properties>
+        <main.basedir>${basedir}/../..</main.basedir>
+        <!--
+        Override Kafka dependencies to Kafka 0.10 and supporting Spring Kafka and
+        Spring Integration Kafka versions
+        -->
+        <kafka.version>0.10.0.0</kafka.version>
+        <spring-kafka.version>1.1.1.RELEASE</spring-kafka.version>
+        <spring-integration-kafka.version>2.1.0.RELEASE</spring-integration-kafka.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.springframework.cloud</groupId>
+            <artifactId>spring-cloud-stream-binder-kafka</artifactId>
+            <version>${project.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.kafka</groupId>
+            <artifactId>spring-kafka</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka_2.11</artifactId>
+            <scope>test</scope>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.slf4j</groupId>
+                    <artifactId>slf4j-log4j12</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kafka</groupId>
+            <artifactId>kafka-clients</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.kafka</groupId>
+            <artifactId>spring-kafka-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.integration</groupId>
+            <artifactId>spring-integration-kafka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.cloud</groupId>
+            <artifactId>spring-cloud-stream-binder-kafka</artifactId>
+            <version>${project.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.cloud</groupId>
+            <artifactId>spring-cloud-stream-binder-test</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework.cloud</groupId>
+            <artifactId>spring-cloud-stream-schema</artifactId>
+            <version>${spring-cloud-stream.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.confluent</groupId>
+            <artifactId>kafka-avro-serializer</artifactId>
+            <version>3.0.1</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.confluent</groupId>
+            <artifactId>kafka-schema-registry</artifactId>
+            <version>3.0.1</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <repositories>
+        <repository>
+            <id>confluent</id>
+            <url>https://packages.confluent.io/maven/</url>
+        </repository>
+    </repositories>
+
+</project>

Kafka10BinderTests.java (245 changes, new file)

@@ -0,0 +1,245 @@
+/*
+ * Copyright 2014-2016 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.cloud.stream.binder.kafka;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+
+import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
+import io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication;
+import kafka.utils.ZKStringSerializer$;
+import kafka.utils.ZkUtils;
+import org.I0Itec.zkclient.ZkClient;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.common.serialization.ByteArrayDeserializer;
+import org.apache.kafka.common.serialization.Deserializer;
+import org.eclipse.jetty.server.Server;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+
+import org.springframework.cloud.stream.binder.Binder;
+import org.springframework.cloud.stream.binder.Binding;
+import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
+import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
+import org.springframework.cloud.stream.binder.Spy;
+import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
+import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
+import org.springframework.integration.channel.DirectChannel;
+import org.springframework.integration.channel.QueueChannel;
+import org.springframework.kafka.core.ConsumerFactory;
+import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
+import org.springframework.kafka.support.KafkaHeaders;
+import org.springframework.kafka.test.core.BrokerAddress;
+import org.springframework.kafka.test.rule.KafkaEmbedded;
+import org.springframework.messaging.Message;
+import org.springframework.messaging.MessageChannel;
+import org.springframework.messaging.SubscribableChannel;
+import org.springframework.messaging.support.MessageBuilder;
+import org.springframework.retry.RetryOperations;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.fail;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Integration tests for the {@link KafkaMessageChannelBinder}.
+ *
+ * @author Eric Bottard
+ * @author Marius Bogoevici
+ * @author Mark Fisher
+ * @author Ilayaperumal Gopinathan
+ */
+public class Kafka10BinderTests extends KafkaBinderTests {
+
+    private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
+
+    @ClassRule
+    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
+
+    private Kafka10TestBinder binder;
+
+    private Kafka10AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
+
+    @Override
+    protected void binderBindUnbindLatency() throws InterruptedException {
+        Thread.sleep(500);
+    }
+
+    @Override
+    protected Kafka10TestBinder getBinder() {
+        if (binder == null) {
+            KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
+            binder = new Kafka10TestBinder(binderConfiguration);
+        }
+        return binder;
+    }
+
+    protected KafkaBinderConfigurationProperties createConfigurationProperties() {
+        KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
+        BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
+        List<String> bAddresses = new ArrayList<>();
+        for (BrokerAddress bAddress : brokerAddresses) {
+            bAddresses.add(bAddress.toString());
+        }
+        String[] foo = new String[bAddresses.size()];
+        binderConfiguration.setBrokers(bAddresses.toArray(foo));
+        binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
+        return binderConfiguration;
+    }
+
+    @Override
+    protected int partitionSize(String topic) {
+        return consumerFactory().createConsumer().partitionsFor(topic).size();
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    protected void setMetadataRetryOperations(Binder binder, RetryOperations retryOperations) {
+        ((Kafka10TestBinder) binder).getBinder().setMetadataRetryOperations(retryOperations);
+    }
+
+    @Override
+    protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
+        final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
+                kafkaBinderConfigurationProperties.getZkSessionTimeout(), kafkaBinderConfigurationProperties.getZkConnectionTimeout(),
+                ZKStringSerializer$.MODULE$);
+        return new ZkUtils(zkClient, null, false);
+    }
+
+    @Override
+    protected void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions, int replicationFactor, Properties topicConfig) {
+        adminUtilsOperation.invokeCreateTopic(zkUtils, topic, partitions, replicationFactor, new Properties());
+    }
+
+    @Override
+    protected int invokePartitionSize(String topic, ZkUtils zkUtils) {
+        return adminUtilsOperation.partitionSize(topic, zkUtils);
+    }
+
+    @Override
+    public String getKafkaOffsetHeaderKey() {
+        return KafkaHeaders.OFFSET;
+    }
+
+    @Override
+    protected Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
+        return new Kafka10TestBinder(kafkaBinderConfigurationProperties);
+    }
+
+    @Before
+    public void init() {
+        String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
+        if (multiplier != null) {
+            timeoutMultiplier = Double.parseDouble(multiplier);
+        }
+    }
+
+    @Override
+    protected boolean usesExplicitRouting() {
+        return false;
+    }
+
+    @Override
+    protected String getClassUnderTestName() {
+        return CLASS_UNDER_TEST_NAME;
+    }
+
+    @Override
+    public Spy spyOn(final String name) {
+        throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
+    }
+
+    private ConsumerFactory<byte[], byte[]> consumerFactory() {
+        Map<String, Object> props = new HashMap<>();
+        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
+        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
+        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
+        props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
+        Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
+        Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
+        return new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
+    }
+
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testCustomAvroSerialization() throws Exception {
+        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
+        final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
+                configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
+                ZKStringSerializer$.MODULE$);
+        final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
+        Map<String, Object> schemaRegistryProps = new HashMap<>();
+        schemaRegistryProps.put("kafkastore.connection.url", configurationProperties.getZkConnectionString());
+        schemaRegistryProps.put("listeners", "https://0.0.0.0:8082");
+        schemaRegistryProps.put("port", "8082");
+        schemaRegistryProps.put("kafkastore.topic", "_schemas");
+        SchemaRegistryConfig config = new SchemaRegistryConfig(schemaRegistryProps);
+        SchemaRegistryRestApplication app = new SchemaRegistryRestApplication(config);
+        Server server = app.createServer();
+        server.start();
+        long endTime = System.currentTimeMillis() + 5000;
+        while (true) {
+            if (server.isRunning()) {
+                break;
+            }
+            else if (System.currentTimeMillis() > endTime) {
+                fail("Kafka Schema Registry Server failed to start");
+            }
+        }
+        User1 firstOutboundFoo = new User1();
+        String userName1 = "foo-name" + UUID.randomUUID().toString();
+        String favColor1 = "foo-color" + UUID.randomUUID().toString();
+        firstOutboundFoo.setName(userName1);
+        firstOutboundFoo.setFavoriteColor(favColor1);
+        Message<?> message = MessageBuilder.withPayload(firstOutboundFoo).build();
+        SubscribableChannel moduleOutputChannel = new DirectChannel();
+        String testTopicName = "existing" + System.currentTimeMillis();
+        invokeCreateTopic(zkUtils, testTopicName, 6, 1, new Properties());
+        configurationProperties.setAutoAddPartitions(true);
+        Binder binder = getBinder(configurationProperties);
+        QueueChannel moduleInputChannel = new QueueChannel();
+        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
+        producerProperties.getExtension().getConfiguration().put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
+        producerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
+        producerProperties.setUseNativeEncoding(true);
+        Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel, producerProperties);
+        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
+        consumerProperties.getExtension().setAutoRebalanceEnabled(false);
+        consumerProperties.getExtension().getConfiguration().put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
+        consumerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
+        Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "test", moduleInputChannel, consumerProperties);
+        // Let the consumer actually bind to the producer before sending a msg
+        binderBindUnbindLatency();
+        moduleOutputChannel.send(message);
+        Message<?> inbound = receive(moduleInputChannel);
+        assertThat(inbound).isNotNull();
+        assertTrue(inbound.getPayload() instanceof User1);
+        User1 receivedUser = (User1) inbound.getPayload();
+        assertThat(receivedUser.getName()).isEqualTo(userName1);
+        assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
+        producerBinding.unbind();
+        consumerBinding.unbind();
+    }
+}
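
A note on what the test above exercises: with useNativeEncoding(true), the binder skips its own payload conversion and hands the payload object directly to the Kafka producer's configured serializer. A minimal sketch of the equivalent raw producer configuration follows; the broker address and the wrapper class are hypothetical, while the serializer class and registry URL mirror the test:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;

public class NativeAvroProducerConfigSketch {

    public static Map<String, Object> producerConfig() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        // The Confluent serializer writes Avro and talks to the schema registry,
        // which is what the test wires in through the producer 'configuration' map.
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "io.confluent.kafka.serializers.KafkaAvroSerializer");
        props.put("schema.registry.url", "http://localhost:8082"); // matches the test
        return props;
    }
}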

Kafka10TestBinder.java (53 changes, new file)

@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015-2016 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.cloud.stream.binder.kafka;
+
+import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
+import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
+import org.springframework.context.support.GenericApplicationContext;
+import org.springframework.kafka.support.LoggingProducerListener;
+import org.springframework.kafka.support.ProducerListener;
+
+/**
+ * Test support class for {@link KafkaMessageChannelBinder}.
+ * @author Eric Bottard
+ * @author Marius Bogoevici
+ * @author David Turanski
+ * @author Gary Russell
+ * @author Soby Chacko
+ */
+public class Kafka10TestBinder extends AbstractKafkaTestBinder {
+
+    public Kafka10TestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
+        try {
+            KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
+            binder.setCodec(getCodec());
+            ProducerListener producerListener = new LoggingProducerListener();
+            binder.setProducerListener(producerListener);
+            GenericApplicationContext context = new GenericApplicationContext();
+            context.refresh();
+            binder.setApplicationContext(context);
+            binder.setAdminUtilsOperation(new Kafka10AdminUtilsOperation());
+            binder.afterPropertiesSet();
+            this.setBinder(binder);
+        }
+        catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+}

User1.java (85 changes, new file)

@@ -0,0 +1,85 @@
+/*
+ * Copyright 2016 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.springframework.cloud.stream.binder.kafka;
+
+import java.io.IOException;
+
+import org.apache.avro.Schema;
+import org.apache.avro.reflect.Nullable;
+import org.apache.avro.specific.SpecificRecordBase;
+
+import org.springframework.core.io.ClassPathResource;
+
+/**
+ * @author Marius Bogoevici
+ * @author Ilayaperumal Gopinathan
+ */
+public class User1 extends SpecificRecordBase {
+
+    @Nullable
+    private String name;
+
+    @Nullable
+    private String favoriteColor;
+
+    public String getName() {
+        return this.name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getFavoriteColor() {
+        return this.favoriteColor;
+    }
+
+    public void setFavoriteColor(String favoriteColor) {
+        this.favoriteColor = favoriteColor;
+    }
+
+    @Override
+    public Schema getSchema() {
+        try {
+            return new Schema.Parser().parse(new ClassPathResource("schemas/users_v1.schema").getInputStream());
+        }
+        catch (IOException e) {
+            throw new IllegalStateException(e);
+        }
+    }
+
+    @Override
+    public Object get(int i) {
+        if (i == 0) {
+            return getName().toString();
+        }
+        if (i == 1) {
+            return getFavoriteColor().toString();
+        }
+        return null;
+    }
+
+    @Override
+    public void put(int i, Object o) {
+        if (i == 0) {
+            setName((String) o);
+        }
+        if (i == 1) {
+            setFavoriteColor((String) o);
+        }
+    }
+}

schemas/users_v1.schema (8 changes, new file)

@@ -0,0 +1,8 @@
+{"namespace": "org.springframework.cloud.stream.binder.kafka",
+ "type": "record",
+ "name": "User1",
+ "fields": [
+     {"name": "name", "type": "string"},
+     {"name": "favoriteColor", "type": "string"}
+ ]
+}
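
The positional get/put contract that User1 implements above follows this schema's field order (index 0 = name, index 1 = favoriteColor). A minimal, hypothetical check; the class name and values are invented:

public class User1PositionalSketch {

    public static void main(String[] args) {
        User1 user = new User1();
        user.put(0, "alice"); // index 0 maps to "name" in users_v1.schema
        user.put(1, "blue");  // index 1 maps to "favoriteColor"
        // get() mirrors the same ordering.
        System.out.println(user.get(0)); // alice
        System.out.println(user.get(1)); // blue
    }
}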

Eclipse JDT UI preferences (5 changes, deleted file)

@@ -1,5 +0,0 @@
-eclipse.preferences.version=1
-org.eclipse.jdt.ui.ignorelowercasenames=true
-org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
-org.eclipse.jdt.ui.ondemandthreshold=99
-org.eclipse.jdt.ui.staticondemandthreshold=99

spring-cloud-stream-binder-kafka-core/pom.xml (49 changes, deleted file)

@@ -1,49 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.springframework.cloud</groupId>
-        <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-        <version>2.1.0.M2</version>
-    </parent>
-    <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
-    <description>Spring Cloud Stream Kafka Binder Core</description>
-    <url>http://projects.spring.io/spring-cloud</url>
-    <organization>
-        <name>Pivotal Software, Inc.</name>
-        <url>http://www.spring.io</url>
-    </organization>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.springframework.cloud</groupId>
-            <artifactId>spring-cloud-stream</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework.integration</groupId>
-            <artifactId>spring-integration-kafka</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework.boot</groupId>
-            <artifactId>spring-boot-configuration-processor</artifactId>
-            <optional>true</optional>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework.boot</groupId>
-            <artifactId>spring-boot-test</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework.kafka</groupId>
-            <artifactId>spring-kafka-test</artifactId>
-            <scope>test</scope>
-        </dependency>
-        <dependency>
-            <groupId>org.springframework.cloud</groupId>
-            <artifactId>spring-cloud-stream-binder-test</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-</project>

KafkaAdminProperties.java (62 changes, deleted file)

@@ -1,62 +0,0 @@
-/*
- * Copyright 2018 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.springframework.cloud.stream.binder.kafka.properties;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Properties for configuring topics.
- *
- * @author Gary Russell
- * @since 2.0
- *
- */
-public class KafkaAdminProperties {
-
-    private Short replicationFactor;
-
-    private Map<Integer, List<Integer>> replicasAssignments = new HashMap<>();
-
-    private Map<String, String> configuration = new HashMap<>();
-
-    public Short getReplicationFactor() {
-        return this.replicationFactor;
-    }
-
-    public void setReplicationFactor(Short replicationFactor) {
-        this.replicationFactor = replicationFactor;
-    }
-
-    public Map<Integer, List<Integer>> getReplicasAssignments() {
-        return this.replicasAssignments;
-    }
-
-    public void setReplicasAssignments(Map<Integer, List<Integer>> replicasAssignments) {
-        this.replicasAssignments = replicasAssignments;
-    }
-
-    public Map<String, String> getConfiguration() {
-        return this.configuration;
-    }
-
-    public void setConfiguration(Map<String, String> configuration) {
-        this.configuration = configuration;
-    }
-
-}
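
For orientation, a minimal sketch of how the removed KafkaAdminProperties shape would be populated; the values and the wrapper class are hypothetical, only the setters come from the class above:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class KafkaAdminPropertiesSketch {

    public static KafkaAdminProperties topicSettings() {
        KafkaAdminProperties admin = new KafkaAdminProperties();
        // Two replicas per partition unless replicasAssignments overrides it.
        admin.setReplicationFactor((short) 2);
        // Pin partition 0 to brokers 0 and 1, and partition 1 to brokers 1 and 2.
        Map<Integer, List<Integer>> assignments = new HashMap<>();
        assignments.put(0, Arrays.asList(0, 1));
        assignments.put(1, Arrays.asList(1, 2));
        admin.setReplicasAssignments(assignments);
        // Arbitrary topic-level configs pass through as strings.
        admin.getConfiguration().put("retention.ms", "86400000");
        return admin;
    }
}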

KafkaBinderConfigurationProperties.java (606 changes, deleted file)

@@ -1,606 +0,0 @@
-/*
- * Copyright 2015-2018 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.springframework.cloud.stream.binder.kafka.properties;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.producer.ProducerConfig;
-
-import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
-import org.springframework.boot.context.properties.ConfigurationProperties;
-import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
-import org.springframework.util.Assert;
-import org.springframework.util.ObjectUtils;
-import org.springframework.util.StringUtils;
-
-/**
- * @author David Turanski
- * @author Ilayaperumal Gopinathan
- * @author Marius Bogoevici
- * @author Soby Chacko
- * @author Gary Russell
- * @author Rafal Zukowski
- */
-@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
-public class KafkaBinderConfigurationProperties {
-
-    private static final String DEFAULT_KAFKA_CONNECTION_STRING = "localhost:9092";
-
-    private final Transaction transaction = new Transaction();
-
-    private final KafkaProperties kafkaProperties;
-
-    private String[] zkNodes = new String[] { "localhost" };
-
-    /**
-     * Arbitrary kafka properties that apply to both producers and consumers.
-     */
-    private Map<String, String> configuration = new HashMap<>();
-
-    /**
-     * Arbitrary kafka consumer properties.
-     */
-    private Map<String, String> consumerProperties = new HashMap<>();
-
-    /**
-     * Arbitrary kafka producer properties.
-     */
-    private Map<String, String> producerProperties = new HashMap<>();
-
-    private String defaultZkPort = "2181";
-
-    private String[] brokers = new String[] { "localhost" };
-
-    private String defaultBrokerPort = "9092";
-
-    private String[] headers = new String[] {};
-
-    private int offsetUpdateTimeWindow = 10000;
-
-    private int offsetUpdateCount;
-
-    private int offsetUpdateShutdownTimeout = 2000;
-
-    private int maxWait = 100;
-
-    private boolean autoCreateTopics = true;
-
-    private boolean autoAddPartitions;
-
-    private int socketBufferSize = 2097152;
-
-    /**
-     * ZK session timeout in milliseconds.
-     */
-    private int zkSessionTimeout = 10000;
-
-    /**
-     * ZK Connection timeout in milliseconds.
-     */
-    private int zkConnectionTimeout = 10000;
-
-    private String requiredAcks = "1";
-
-    private short replicationFactor = 1;
-
-    private int fetchSize = 1024 * 1024;
-
-    private int minPartitionCount = 1;
-
-    private int queueSize = 8192;
-
-    /**
-     * Time to wait to get partition information in seconds; default 60.
-     */
-    private int healthTimeout = 60;
-
-    private JaasLoginModuleConfiguration jaas;
-
-    /**
-     * The bean name of a custom header mapper to use instead of a {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
-     */
-    private String headerMapperBeanName;
-
-    public KafkaBinderConfigurationProperties(KafkaProperties kafkaProperties) {
-        Assert.notNull(kafkaProperties, "'kafkaProperties' cannot be null");
-        this.kafkaProperties = kafkaProperties;
-    }
-
-    public Transaction getTransaction() {
-        return this.transaction;
-    }
-
-    /**
-     * No longer used.
-     * @return the connection String
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    @Deprecated
-    public String getZkConnectionString() {
-        return toConnectionString(this.zkNodes, this.defaultZkPort);
-    }
-
-    public String getKafkaConnectionString() {
-        return toConnectionString(this.brokers, this.defaultBrokerPort);
-    }
-
-    public String getDefaultKafkaConnectionString() {
-        return DEFAULT_KAFKA_CONNECTION_STRING;
-    }
-
-    public String[] getHeaders() {
-        return this.headers;
-    }
-
-    /**
-     * No longer used.
-     * @return the window.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public int getOffsetUpdateTimeWindow() {
-        return this.offsetUpdateTimeWindow;
-    }
-
-    /**
-     * No longer used.
-     * @return the count.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public int getOffsetUpdateCount() {
-        return this.offsetUpdateCount;
-    }
-
-    /**
-     * No longer used.
-     * @return the timeout.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public int getOffsetUpdateShutdownTimeout() {
-        return this.offsetUpdateShutdownTimeout;
-    }
-
-    /**
-     * Zookeeper nodes.
-     * @return the nodes.
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
-    public String[] getZkNodes() {
-        return this.zkNodes;
-    }
-
-    /**
-     * Zookeeper nodes.
-     * @param zkNodes the nodes.
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
-    public void setZkNodes(String... zkNodes) {
-        this.zkNodes = zkNodes;
-    }
-
-    /**
-     * Zookeeper port.
-     * @param defaultZkPort the port.
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
-    public void setDefaultZkPort(String defaultZkPort) {
-        this.defaultZkPort = defaultZkPort;
-    }
-
-    public String[] getBrokers() {
-        return this.brokers;
-    }
-
-    public void setBrokers(String... brokers) {
-        this.brokers = brokers;
-    }
-
-    public void setDefaultBrokerPort(String defaultBrokerPort) {
-        this.defaultBrokerPort = defaultBrokerPort;
-    }
-
-    public void setHeaders(String... headers) {
-        this.headers = headers;
-    }
-
-    /**
-     * No longer used.
-     * @param offsetUpdateTimeWindow the window.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public void setOffsetUpdateTimeWindow(int offsetUpdateTimeWindow) {
-        this.offsetUpdateTimeWindow = offsetUpdateTimeWindow;
-    }
-
-    /**
-     * No longer used.
-     * @param offsetUpdateCount the count.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public void setOffsetUpdateCount(int offsetUpdateCount) {
-        this.offsetUpdateCount = offsetUpdateCount;
-    }
-
-    /**
-     * No longer used.
-     * @param offsetUpdateShutdownTimeout the timeout.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public void setOffsetUpdateShutdownTimeout(int offsetUpdateShutdownTimeout) {
-        this.offsetUpdateShutdownTimeout = offsetUpdateShutdownTimeout;
-    }
-
-    /**
-     * Zookeeper session timeout.
-     * @return the timeout.
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
-    public int getZkSessionTimeout() {
-        return this.zkSessionTimeout;
-    }
-
-    /**
-     * Zookeeper session timeout.
-     * @param zkSessionTimeout the timout
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
-    public void setZkSessionTimeout(int zkSessionTimeout) {
-        this.zkSessionTimeout = zkSessionTimeout;
-    }
-
-    /**
-     * Zookeeper connection timeout.
-     * @return the timout.
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
-    public int getZkConnectionTimeout() {
-        return this.zkConnectionTimeout;
-    }
-
-    /**
-     * Zookeeper connection timeout.
-     * @param zkConnectionTimeout the timeout.
-     * @deprecated connection to zookeeper is no longer necessary
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "No longer necessary since 2.0")
-    public void setZkConnectionTimeout(int zkConnectionTimeout) {
-        this.zkConnectionTimeout = zkConnectionTimeout;
-    }
-
-    /**
-     * Converts an array of host values to a comma-separated String.
-     *
-     * It will append the default port value, if not already specified.
-     */
-    private String toConnectionString(String[] hosts, String defaultPort) {
-        String[] fullyFormattedHosts = new String[hosts.length];
-        for (int i = 0; i < hosts.length; i++) {
-            if (hosts[i].contains(":") || StringUtils.isEmpty(defaultPort)) {
-                fullyFormattedHosts[i] = hosts[i];
-            }
-            else {
-                fullyFormattedHosts[i] = hosts[i] + ":" + defaultPort;
-            }
-        }
-        return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
-    }
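
// Illustration (hypothetical hosts, not part of the diff): with
// hosts = {"kafka1", "kafka2:9093"} and defaultPort = "9092",
// toConnectionString returns "kafka1:9092,kafka2:9093"; the default
// port is appended only to entries that do not already carry one.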
-    /**
-     * No longer used.
-     * @return the wait.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public int getMaxWait() {
-        return this.maxWait;
-    }
-
-    /**
-     * No longer user.
-     * @param maxWait the wait.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public void setMaxWait(int maxWait) {
-        this.maxWait = maxWait;
-    }
-
-    public String getRequiredAcks() {
-        return this.requiredAcks;
-    }
-
-    public void setRequiredAcks(int requiredAcks) {
-        this.requiredAcks = String.valueOf(requiredAcks);
-    }
-
-    public void setRequiredAcks(String requiredAcks) {
-        this.requiredAcks = requiredAcks;
-    }
-
-    public short getReplicationFactor() {
-        return this.replicationFactor;
-    }
-
-    public void setReplicationFactor(short replicationFactor) {
-        this.replicationFactor = replicationFactor;
-    }
-
-    /**
-     * No longer used.
-     * @return the size.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public int getFetchSize() {
-        return this.fetchSize;
-    }
-
-    /**
-     * No longer used.
-     * @param fetchSize the size.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public void setFetchSize(int fetchSize) {
-        this.fetchSize = fetchSize;
-    }
-
-    public int getMinPartitionCount() {
-        return this.minPartitionCount;
-    }
-
-    public void setMinPartitionCount(int minPartitionCount) {
-        this.minPartitionCount = minPartitionCount;
-    }
-
-    public int getHealthTimeout() {
-        return this.healthTimeout;
-    }
-
-    public void setHealthTimeout(int healthTimeout) {
-        this.healthTimeout = healthTimeout;
-    }
-
-    /**
-     * No longer used.
-     * @return the queue size.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public int getQueueSize() {
-        return this.queueSize;
-    }
-
-    /**
-     * No longer used.
-     * @param queueSize the queue size.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0")
-    public void setQueueSize(int queueSize) {
-        this.queueSize = queueSize;
-    }
-
-    public boolean isAutoCreateTopics() {
-        return this.autoCreateTopics;
-    }
-
-    public void setAutoCreateTopics(boolean autoCreateTopics) {
-        this.autoCreateTopics = autoCreateTopics;
-    }
-
-    public boolean isAutoAddPartitions() {
-        return this.autoAddPartitions;
-    }
-
-    public void setAutoAddPartitions(boolean autoAddPartitions) {
-        this.autoAddPartitions = autoAddPartitions;
-    }
-
-    /**
-     * No longer used; set properties such as this via {@link #getConfiguration()
-     * configuration}.
-     * @return the size.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
-    public int getSocketBufferSize() {
-        return this.socketBufferSize;
-    }
-
-    /**
-     * No longer used; set properties such as this via {@link #getConfiguration()
-     * configuration}.
-     * @param socketBufferSize the size.
-     * @deprecated
-     */
-    @Deprecated
-    @DeprecatedConfigurationProperty(reason = "Not used since 2.0, set properties such as this via 'configuration'")
-    public void setSocketBufferSize(int socketBufferSize) {
-        this.socketBufferSize = socketBufferSize;
-    }
-
-    public Map<String, String> getConfiguration() {
-        return configuration;
-    }
-
-    public void setConfiguration(Map<String, String> configuration) {
-        this.configuration = configuration;
-    }
-
-    public Map<String, String> getConsumerProperties() {
-        return this.consumerProperties;
-    }
-
-    public void setConsumerProperties(Map<String, String> consumerProperties) {
-        Assert.notNull(consumerProperties, "'consumerProperties' cannot be null");
-        this.consumerProperties = consumerProperties;
-    }
-
-    public Map<String, String> getProducerProperties() {
-        return this.producerProperties;
-    }
-
-    public void setProducerProperties(Map<String, String> producerProperties) {
-        Assert.notNull(producerProperties, "'producerProperties' cannot be null");
-        this.producerProperties = producerProperties;
-    }
-
-    /**
-     * Merge boot consumer properties, general properties from
-     * {@link #setConfiguration(Map)} that apply to consumers, properties from
-     * {@link #setConsumerProperties(Map)}, in that order.
-     * @return the merged properties.
-     */
-    public Map<String, Object> mergedConsumerConfiguration() {
-        Map<String, Object> consumerConfiguration = new HashMap<>();
-        consumerConfiguration.putAll(this.kafkaProperties.buildConsumerProperties());
-        // Copy configured binder properties that apply to consumers
-        for (Map.Entry<String, String> configurationEntry : this.configuration.entrySet()) {
-            if (ConsumerConfig.configNames().contains(configurationEntry.getKey())) {
-                consumerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
-            }
-        }
-        consumerConfiguration.putAll(this.consumerProperties);
-        // Override Spring Boot bootstrap server setting if left to default with the value
-        // configured in the binder
-        if (ObjectUtils.isEmpty(consumerConfiguration.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
-            consumerConfiguration.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
-        }
-        else {
-            Object boostrapServersConfig = consumerConfiguration.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
-            if (boostrapServersConfig instanceof List) {
-                @SuppressWarnings("unchecked")
-                List<String> bootStrapServers = (List<String>) consumerConfiguration
-                        .get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
-                if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
-                    consumerConfiguration.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
-                }
-            }
-        }
-        return Collections.unmodifiableMap(consumerConfiguration);
-    }
|
||||
|
||||
/**
|
||||
* Merge boot producer properties, general properties from
|
||||
* {@link #setConfiguration(Map)} that apply to producers, properties from
|
||||
* {@link #setProducerProperties(Map)}, in that order.
|
||||
* @return the merged properties.
|
||||
*/
|
||||
public Map<String, Object> mergedProducerConfiguration() {
|
||||
Map<String, Object> producerConfiguration = new HashMap<>();
|
||||
producerConfiguration.putAll(this.kafkaProperties.buildProducerProperties());
|
||||
// Copy configured binder properties that apply to producers
|
||||
for (Map.Entry<String, String> configurationEntry : configuration.entrySet()) {
|
||||
if (ProducerConfig.configNames().contains(configurationEntry.getKey())) {
|
||||
producerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
|
||||
}
|
||||
}
|
||||
producerConfiguration.putAll(this.producerProperties);
|
||||
// Override Spring Boot bootstrap server setting if left to default with the value
|
||||
// configured in the binder
|
||||
if (ObjectUtils.isEmpty(producerConfiguration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
|
||||
producerConfiguration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
|
||||
}
|
||||
else {
|
||||
Object boostrapServersConfig = producerConfiguration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
if (boostrapServersConfig instanceof List) {
|
||||
@SuppressWarnings("unchecked")
|
||||
List<String> bootStrapServers = (List<String>) producerConfiguration
|
||||
.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
|
||||
if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
|
||||
producerConfiguration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
|
||||
}
|
||||
}
|
||||
}
|
||||
return Collections.unmodifiableMap(producerConfiguration);
|
||||
}
|
||||
|
||||
public JaasLoginModuleConfiguration getJaas() {
|
||||
return this.jaas;
|
||||
}
|
||||
|
||||
public void setJaas(JaasLoginModuleConfiguration jaas) {
|
||||
this.jaas = jaas;
|
||||
}
|
||||
|
||||
public String getHeaderMapperBeanName() {
|
||||
return this.headerMapperBeanName;
|
||||
}
|
||||
|
||||
public void setHeaderMapperBeanName(String headerMapperBeanName) {
|
||||
this.headerMapperBeanName = headerMapperBeanName;
|
||||
}
|
||||
|
||||
public static class Transaction {
|
||||
|
||||
private final KafkaProducerProperties producer = new KafkaProducerProperties();
|
||||
|
||||
private String transactionIdPrefix;
|
||||
|
||||
public String getTransactionIdPrefix() {
|
||||
return this.transactionIdPrefix;
|
||||
}
|
||||
|
||||
public void setTransactionIdPrefix(String transactionIdPrefix) {
|
||||
this.transactionIdPrefix = transactionIdPrefix;
|
||||
}
|
||||
|
||||
public KafkaProducerProperties getProducer() {
|
||||
return this.producer;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,237 +0,0 @@
/*
 * Copyright 2016-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.HashMap;
import java.util.Map;

/**
 * @author Marius Bogoevici
 * @author Ilayaperumal Gopinathan
 * @author Soby Chacko
 * @author Gary Russell
 *
 * <p>
 * Thanks to Laszlo Szabo for providing the initial patch for generic property support.
 * </p>
 */
public class KafkaConsumerProperties {

	public enum StartOffset {
		earliest(-2L),
		latest(-1L);

		private final long referencePoint;

		StartOffset(long referencePoint) {
			this.referencePoint = referencePoint;
		}

		public long getReferencePoint() {
			return this.referencePoint;
		}
	}

	public enum StandardHeaders {
		none,
		id,
		timestamp,
		both
	}

	private boolean ackEachRecord;

	private boolean autoRebalanceEnabled = true;

	private boolean autoCommitOffset = true;

	private Boolean autoCommitOnError;

	private StartOffset startOffset;

	private boolean resetOffsets;

	private boolean enableDlq;

	private String dlqName;

	private KafkaProducerProperties dlqProducerProperties = new KafkaProducerProperties();

	private int recoveryInterval = 5000;

	private String[] trustedPackages;

	private StandardHeaders standardHeaders = StandardHeaders.none;

	private String converterBeanName;

	private long idleEventInterval = 30_000;

	private boolean destinationIsPattern;

	private Map<String, String> configuration = new HashMap<>();

	private KafkaAdminProperties admin = new KafkaAdminProperties();

	public boolean isAckEachRecord() {
		return this.ackEachRecord;
	}

	public void setAckEachRecord(boolean ackEachRecord) {
		this.ackEachRecord = ackEachRecord;
	}

	public boolean isAutoCommitOffset() {
		return this.autoCommitOffset;
	}

	public void setAutoCommitOffset(boolean autoCommitOffset) {
		this.autoCommitOffset = autoCommitOffset;
	}

	public StartOffset getStartOffset() {
		return this.startOffset;
	}

	public void setStartOffset(StartOffset startOffset) {
		this.startOffset = startOffset;
	}

	public boolean isResetOffsets() {
		return this.resetOffsets;
	}

	public void setResetOffsets(boolean resetOffsets) {
		this.resetOffsets = resetOffsets;
	}

	public boolean isEnableDlq() {
		return this.enableDlq;
	}

	public void setEnableDlq(boolean enableDlq) {
		this.enableDlq = enableDlq;
	}

	public Boolean getAutoCommitOnError() {
		return this.autoCommitOnError;
	}

	public void setAutoCommitOnError(Boolean autoCommitOnError) {
		this.autoCommitOnError = autoCommitOnError;
	}

	/**
	 * No longer used.
	 * @return the interval.
	 * @deprecated
	 */
	@Deprecated
	public int getRecoveryInterval() {
		return this.recoveryInterval;
	}

	/**
	 * No longer used.
	 * @param recoveryInterval the interval.
	 * @deprecated
	 */
	@Deprecated
	public void setRecoveryInterval(int recoveryInterval) {
		this.recoveryInterval = recoveryInterval;
	}

	public boolean isAutoRebalanceEnabled() {
		return this.autoRebalanceEnabled;
	}

	public void setAutoRebalanceEnabled(boolean autoRebalanceEnabled) {
		this.autoRebalanceEnabled = autoRebalanceEnabled;
	}

	public Map<String, String> getConfiguration() {
		return this.configuration;
	}

	public void setConfiguration(Map<String, String> configuration) {
		this.configuration = configuration;
	}

	public String getDlqName() {
		return this.dlqName;
	}

	public void setDlqName(String dlqName) {
		this.dlqName = dlqName;
	}

	public String[] getTrustedPackages() {
		return this.trustedPackages;
	}

	public void setTrustedPackages(String[] trustedPackages) {
		this.trustedPackages = trustedPackages;
	}

	public KafkaProducerProperties getDlqProducerProperties() {
		return this.dlqProducerProperties;
	}

	public void setDlqProducerProperties(KafkaProducerProperties dlqProducerProperties) {
		this.dlqProducerProperties = dlqProducerProperties;
	}

	public StandardHeaders getStandardHeaders() {
		return this.standardHeaders;
	}

	public void setStandardHeaders(StandardHeaders standardHeaders) {
		this.standardHeaders = standardHeaders;
	}

	public String getConverterBeanName() {
		return this.converterBeanName;
	}

	public void setConverterBeanName(String converterBeanName) {
		this.converterBeanName = converterBeanName;
	}

	public long getIdleEventInterval() {
		return this.idleEventInterval;
	}

	public void setIdleEventInterval(long idleEventInterval) {
		this.idleEventInterval = idleEventInterval;
	}

	public boolean isDestinationIsPattern() {
		return this.destinationIsPattern;
	}

	public void setDestinationIsPattern(boolean destinationIsPattern) {
		this.destinationIsPattern = destinationIsPattern;
	}

	public KafkaAdminProperties getAdmin() {
		return this.admin;
	}

	public void setAdmin(KafkaAdminProperties admin) {
		this.admin = admin;
	}

}
@@ -1,85 +0,0 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.HashMap;
import java.util.Map;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;

/**
 * @author Marius Bogoevici
 * @author Gary Russell
 */
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties
		implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {

	private Map<String, KafkaBindingProperties> bindings = new HashMap<>();

	public Map<String, KafkaBindingProperties> getBindings() {
		return this.bindings;
	}

	public void setBindings(Map<String, KafkaBindingProperties> bindings) {
		this.bindings = bindings;
	}

	@Override
	public synchronized KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
		if (this.bindings.containsKey(channelName)) {
			if (this.bindings.get(channelName).getConsumer() != null) {
				return this.bindings.get(channelName).getConsumer();
			}
			else {
				KafkaConsumerProperties properties = new KafkaConsumerProperties();
				this.bindings.get(channelName).setConsumer(properties);
				return properties;
			}
		}
		else {
			KafkaConsumerProperties properties = new KafkaConsumerProperties();
			KafkaBindingProperties rbp = new KafkaBindingProperties();
			rbp.setConsumer(properties);
			this.bindings.put(channelName, rbp);
			return properties;
		}
	}

	@Override
	public synchronized KafkaProducerProperties getExtendedProducerProperties(String channelName) {
		if (this.bindings.containsKey(channelName)) {
			if (this.bindings.get(channelName).getProducer() != null) {
				return this.bindings.get(channelName).getProducer();
			}
			else {
				KafkaProducerProperties properties = new KafkaProducerProperties();
				this.bindings.get(channelName).setProducer(properties);
				return properties;
			}
		}
		else {
			KafkaProducerProperties properties = new KafkaProducerProperties();
			KafkaBindingProperties rbp = new KafkaBindingProperties();
			rbp.setProducer(properties);
			this.bindings.put(channelName, rbp);
			return properties;
		}
	}

}
@@ -1,484 +0,0 @@
/*
 * Copyright 2014-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.provisioning;

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.ListTopicsResult;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.PartitionInfo;

import org.springframework.beans.factory.InitializingBean;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaAdminProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
import org.springframework.cloud.stream.provisioning.ProducerDestination;
import org.springframework.cloud.stream.provisioning.ProvisioningException;
import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

/**
 * Kafka implementation for {@link ProvisioningProvider}.
 *
 * @author Soby Chacko
 * @author Gary Russell
 * @author Ilayaperumal Gopinathan
 * @author Simon Flandergan
 * @author Oleg Zhurakousky
 */
public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
		ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {

	private static final int DEFAULT_OPERATION_TIMEOUT = 30;

	private final Log logger = LogFactory.getLog(getClass());

	private final KafkaBinderConfigurationProperties configurationProperties;

	private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;

	private final Map<String, Object> adminClientProperties;

	private RetryOperations metadataRetryOperations;

	public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
			KafkaProperties kafkaProperties) {
		Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
		this.adminClientProperties = kafkaProperties.buildAdminProperties();
		this.configurationProperties = kafkaBinderConfigurationProperties;
		normalizeBootPropsWithBinder(this.adminClientProperties, kafkaProperties, kafkaBinderConfigurationProperties);
	}

	/**
	 * @param metadataRetryOperations the retry configuration
	 */
	public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
		this.metadataRetryOperations = metadataRetryOperations;
	}

	@Override
	public void afterPropertiesSet() throws Exception {
		if (this.metadataRetryOperations == null) {
			RetryTemplate retryTemplate = new RetryTemplate();

			SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
			simpleRetryPolicy.setMaxAttempts(10);
			retryTemplate.setRetryPolicy(simpleRetryPolicy);

			ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
			backOffPolicy.setInitialInterval(100);
			backOffPolicy.setMultiplier(2);
			backOffPolicy.setMaxInterval(1000);
			retryTemplate.setBackOffPolicy(backOffPolicy);
			this.metadataRetryOperations = retryTemplate;
		}
	}

	@Override
	public ProducerDestination provisionProducerDestination(final String name,
			ExtendedProducerProperties<KafkaProducerProperties> properties) {

		if (this.logger.isInfoEnabled()) {
			this.logger.info("Using kafka topic for outbound: " + name);
		}
		KafkaTopicUtils.validateTopicName(name);
		try (AdminClient adminClient = AdminClient.create(this.adminClientProperties)) {
			createTopic(adminClient, name, properties.getPartitionCount(), false, properties.getExtension().getAdmin());
			int partitions = 0;
			if (this.configurationProperties.isAutoCreateTopics()) {
				DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
				KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();

				Map<String, TopicDescription> topicDescriptions = null;
				try {
					topicDescriptions = all.get(this.operationTimeout, TimeUnit.SECONDS);
				}
				catch (Exception e) {
					throw new ProvisioningException("Problems encountered while finding partitions", e);
				}
				TopicDescription topicDescription = topicDescriptions.get(name);
				partitions = topicDescription.partitions().size();
			}
			return new KafkaProducerDestination(name, partitions);
		}
	}

	@Override
	public ConsumerDestination provisionConsumerDestination(final String name, final String group,
			ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
		if (!properties.isMultiplex()) {
			return doProvisionConsumerDestination(name, group, properties);
		}
		else {
			String[] destinations = StringUtils.commaDelimitedListToStringArray(name);
			for (String destination : destinations) {
				doProvisionConsumerDestination(destination.trim(), group, properties);
			}
			return new KafkaConsumerDestination(name);
		}
	}

	private ConsumerDestination doProvisionConsumerDestination(final String name, final String group,
			ExtendedConsumerProperties<KafkaConsumerProperties> properties) {

		if (properties.getExtension().isDestinationIsPattern()) {
			Assert.isTrue(!properties.getExtension().isEnableDlq(),
					"enableDLQ is not allowed when listening to topic patterns");
			if (this.logger.isDebugEnabled()) {
				this.logger.debug("Listening to a topic pattern - " + name
						+ " - no provisioning performed");
			}
			return new KafkaConsumerDestination(name);
		}
		KafkaTopicUtils.validateTopicName(name);
		boolean anonymous = !StringUtils.hasText(group);
		Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
				"DLQ support is not available for anonymous subscriptions");
		if (properties.getInstanceCount() == 0) {
			throw new IllegalArgumentException("Instance count cannot be zero");
		}
		int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
		ConsumerDestination consumerDestination = new KafkaConsumerDestination(name);
		try (AdminClient adminClient = createAdminClient()) {
			createTopic(adminClient, name, partitionCount, properties.getExtension().isAutoRebalanceEnabled(),
					properties.getExtension().getAdmin());
			if (this.configurationProperties.isAutoCreateTopics()) {
				DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
				KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
				try {
					Map<String, TopicDescription> topicDescriptions = all.get(this.operationTimeout, TimeUnit.SECONDS);
					TopicDescription topicDescription = topicDescriptions.get(name);
					int partitions = topicDescription.partitions().size();
					consumerDestination = createDlqIfNeedBe(adminClient, name, group, properties, anonymous, partitions);
					if (consumerDestination == null) {
						consumerDestination = new KafkaConsumerDestination(name, partitions);
					}
				}
				catch (Exception e) {
					throw new ProvisioningException("provisioning exception", e);
				}
			}
		}
		return consumerDestination;
	}

	AdminClient createAdminClient() {
		return AdminClient.create(this.adminClientProperties);
	}

	/**
	 * In general, binder properties supersede boot kafka properties.
	 * The one exception is the bootstrap servers. In that case, we should only override
	 * the boot properties if (there is a binder property AND it is a non-default value)
	 * OR (if there is no boot property); this is needed because the binder property
	 * never returns a null value.
	 * @param adminProps the admin properties to normalize.
	 * @param bootProps the boot kafka properties.
	 * @param binderProps the binder kafka properties.
	 */
	private void normalizeBootPropsWithBinder(Map<String, Object> adminProps, KafkaProperties bootProps,
			KafkaBinderConfigurationProperties binderProps) {
		// First deal with the outlier
		String kafkaConnectionString = binderProps.getKafkaConnectionString();
		if (ObjectUtils.isEmpty(adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
				|| !kafkaConnectionString.equals(binderProps.getDefaultKafkaConnectionString())) {
			adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectionString);
		}
		// Now override any boot values with binder values
		Map<String, String> binderProperties = binderProps.getConfiguration();
		Set<String> adminConfigNames = AdminClientConfig.configNames();
		binderProperties.forEach((key, value) -> {
			if (key.equals(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)) {
				throw new IllegalStateException(
						"Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
			}
			if (adminConfigNames.contains(key)) {
				Object replaced = adminProps.put(key, value);
				if (replaced != null && this.logger.isDebugEnabled()) {
					this.logger.debug("Overrode boot property: [" + key + "], from: [" + replaced + "] to: [" + value + "]");
				}
			}
		});
	}

	private ConsumerDestination createDlqIfNeedBe(AdminClient adminClient, String name, String group,
			ExtendedConsumerProperties<KafkaConsumerProperties> properties,
			boolean anonymous, int partitions) {
		if (properties.getExtension().isEnableDlq() && !anonymous) {
			String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName())
					? properties.getExtension().getDlqName() : "error." + name + "." + group;
			try {
				createTopicAndPartitions(adminClient, dlqTopic, partitions,
						properties.getExtension().isAutoRebalanceEnabled(), properties.getExtension().getAdmin());
			}
			catch (Throwable throwable) {
				if (throwable instanceof Error) {
					throw (Error) throwable;
				}
				else {
					throw new ProvisioningException("provisioning exception", throwable);
				}
			}
			return new KafkaConsumerDestination(name, partitions, dlqTopic);
		}
		return null;
	}

	private void createTopic(AdminClient adminClient, String name, int partitionCount, boolean tolerateLowerPartitionsOnBroker,
			KafkaAdminProperties properties) {
		try {
			createTopicIfNecessary(adminClient, name, partitionCount, tolerateLowerPartitionsOnBroker, properties);
		}
		catch (Throwable throwable) {
			if (throwable instanceof Error) {
				throw (Error) throwable;
			}
			else {
				throw new ProvisioningException("provisioning exception", throwable);
			}
		}
	}

	private void createTopicIfNecessary(AdminClient adminClient, final String topicName, final int partitionCount,
			boolean tolerateLowerPartitionsOnBroker, KafkaAdminProperties properties) throws Throwable {

		if (this.configurationProperties.isAutoCreateTopics()) {
			createTopicAndPartitions(adminClient, topicName, partitionCount, tolerateLowerPartitionsOnBroker,
					properties);
		}
		else {
			this.logger.info("Auto creation of topics is disabled.");
		}
	}

	/**
	 * Creates a Kafka topic if needed, or tries to increase its partition count to the
	 * desired number.
	 * @param adminClient the admin client to use.
	 * @param topicName the topic to create or resize.
	 * @param partitionCount the desired partition count.
	 * @param tolerateLowerPartitionsOnBroker whether a lower partition count on the broker is acceptable.
	 * @param adminProperties the admin properties for the topic.
	 */
	private void createTopicAndPartitions(AdminClient adminClient, final String topicName, final int partitionCount,
			boolean tolerateLowerPartitionsOnBroker, KafkaAdminProperties adminProperties) throws Throwable {

		ListTopicsResult listTopicsResult = adminClient.listTopics();
		KafkaFuture<Set<String>> namesFutures = listTopicsResult.names();

		Set<String> names = namesFutures.get(this.operationTimeout, TimeUnit.SECONDS);
		if (names.contains(topicName)) {
			// only consider minPartitionCount for resizing if autoAddPartitions is true
			int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
					? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
					: partitionCount;
			DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(topicName));
			KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult.all();
			Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture.get(this.operationTimeout, TimeUnit.SECONDS);
			TopicDescription topicDescription = topicDescriptions.get(topicName);
			int partitionSize = topicDescription.partitions().size();
			if (partitionSize < effectivePartitionCount) {
				if (this.configurationProperties.isAutoAddPartitions()) {
					CreatePartitionsResult partitions = adminClient.createPartitions(
							Collections.singletonMap(topicName, NewPartitions.increaseTo(effectivePartitionCount)));
					partitions.all().get(this.operationTimeout, TimeUnit.SECONDS);
				}
				else if (tolerateLowerPartitionsOnBroker) {
					this.logger.warn("The number of expected partitions was: " + partitionCount + ", but "
							+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead. "
							+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
				}
				else {
					throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
							+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead. "
							+ "Consider either increasing the partition count of the topic or enabling "
							+ "`autoAddPartitions`");
				}
			}
		}
		else {
			// always consider minPartitionCount for topic creation
			final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
					partitionCount);
			this.metadataRetryOperations.execute(context -> {

				NewTopic newTopic;
				Map<Integer, List<Integer>> replicasAssignments = adminProperties.getReplicasAssignments();
				if (replicasAssignments != null && replicasAssignments.size() > 0) {
					newTopic = new NewTopic(topicName, adminProperties.getReplicasAssignments());
				}
				else {
					newTopic = new NewTopic(topicName, effectivePartitionCount,
							adminProperties.getReplicationFactor() != null
									? adminProperties.getReplicationFactor()
									: this.configurationProperties.getReplicationFactor());
				}
				if (adminProperties.getConfiguration().size() > 0) {
					newTopic.configs(adminProperties.getConfiguration());
				}
				CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singletonList(newTopic));
				try {
					createTopicsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
				}
				catch (Exception e) {
					if (e instanceof ExecutionException) {
						String exceptionMessage = e.getMessage();
						if (exceptionMessage.contains("org.apache.kafka.common.errors.TopicExistsException")) {
							if (this.logger.isWarnEnabled()) {
								this.logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
							}
						}
						else {
							this.logger.error("Failed to create topics", e.getCause());
							throw e.getCause();
						}
					}
					else {
						this.logger.error("Failed to create topics", e.getCause());
						throw e.getCause();
					}
				}
				return null;
			});
		}
	}

	public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
			final boolean tolerateLowerPartitionsOnBroker,
			final Callable<Collection<PartitionInfo>> callable) {
		try {
			return this.metadataRetryOperations
					.execute(context -> {
						Collection<PartitionInfo> partitions = callable.call();
						// do a sanity check on the partition set
						int partitionSize = partitions.size();
						if (partitionSize < partitionCount) {
							if (tolerateLowerPartitionsOnBroker) {
								this.logger.warn("The number of expected partitions was: " + partitionCount + ", but "
										+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead. "
										+ "There will be " + (partitionCount - partitionSize) + " idle consumers");
							}
							else {
								throw new IllegalStateException("The number of expected partitions was: "
										+ partitionCount + ", but " + partitionSize
										+ (partitionSize > 1 ? " have " : " has ") + "been found instead");
							}
						}
						return partitions;
					});
		}
		catch (Exception e) {
			this.logger.error("Cannot initialize Binder", e);
			throw new BinderException("Cannot initialize binder:", e);
		}
	}

	private static final class KafkaProducerDestination implements ProducerDestination {

		private final String producerDestinationName;

		private final int partitions;

		KafkaProducerDestination(String destinationName, Integer partitions) {
			this.producerDestinationName = destinationName;
			this.partitions = partitions;
		}

		@Override
		public String getName() {
			return this.producerDestinationName;
		}

		@Override
		public String getNameForPartition(int partition) {
			return this.producerDestinationName;
		}

		@Override
		public String toString() {
			return "KafkaProducerDestination{" +
					"producerDestinationName='" + this.producerDestinationName + '\'' +
					", partitions=" + this.partitions +
					'}';
		}
	}

	private static final class KafkaConsumerDestination implements ConsumerDestination {

		private final String consumerDestinationName;

		private final int partitions;

		private final String dlqName;

		KafkaConsumerDestination(String consumerDestinationName) {
			this(consumerDestinationName, 0, null);
		}

		KafkaConsumerDestination(String consumerDestinationName, int partitions) {
			this(consumerDestinationName, partitions, null);
		}

		KafkaConsumerDestination(String consumerDestinationName, Integer partitions, String dlqName) {
			this.consumerDestinationName = consumerDestinationName;
			this.partitions = partitions;
			this.dlqName = dlqName;
		}

		@Override
		public String getName() {
			return this.consumerDestinationName;
		}

		@Override
		public String toString() {
			return "KafkaConsumerDestination{" +
					"consumerDestinationName='" + this.consumerDestinationName + '\'' +
					", partitions=" + this.partitions +
					", dlqName='" + this.dlqName + '\'' +
					'}';
		}
	}
}
@@ -1,99 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.provisioning;

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.network.SslChannelBuilder;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.core.io.ClassPathResource;
import org.springframework.kafka.test.utils.KafkaTestUtils;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;

/**
 * @author Gary Russell
 * @since 2.0
 */
public class KafkaTopicProvisionerTests {

	@SuppressWarnings("rawtypes")
	@Test
	public void bootPropertiesOverriddenExceptServers() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
		bootConfig.setBootstrapServers(Collections.singletonList("localhost:1234"));
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
		binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
		ClassPathResource ts = new ClassPathResource("test.truststore.ks");
		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
		binderConfig.setBrokers("localhost:9092");
		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
		AdminClient adminClient = provisioner.createAdminClient();
		assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
		Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
		assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
		adminClient.close();
	}

	@SuppressWarnings("rawtypes")
	@Test
	public void bootPropertiesOverriddenIncludingServers() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "PLAINTEXT");
		bootConfig.setBootstrapServers(Collections.singletonList("localhost:9092"));
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
		binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SSL");
		ClassPathResource ts = new ClassPathResource("test.truststore.ks");
		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, ts.getFile().getAbsolutePath());
		binderConfig.setBrokers("localhost:1234");
		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig, bootConfig);
		AdminClient adminClient = provisioner.createAdminClient();
		assertThat(KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
		Map configs = KafkaTestUtils.getPropertyValue(adminClient, "client.selector.channelBuilder.configs", Map.class);
		assertThat(((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0)).isEqualTo("localhost:1234");
		adminClient.close();
	}

	@Test
	public void brokersInvalid() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(bootConfig);
		binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:1234");
		try {
			new KafkaTopicProvisioner(binderConfig, bootConfig);
			fail("Expected illegal state");
		}
		catch (IllegalStateException e) {
			assertThat(e.getMessage())
					.isEqualTo("Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
		}
	}

}
Binary file not shown.
@@ -1,11 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>2.1.0.M2</version>
		<version>1.1.3.BUILD-SNAPSHOT</version>
	</parent>

	<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
@@ -71,8 +71,8 @@
<quiet>true</quiet>
<stylesheetfile>${basedir}/src/main/javadoc/spring-javadoc.css</stylesheetfile>
<links>
	<link>http://docs.spring.io/spring-framework/docs/${spring.version}/javadoc-api/</link>
	<link>http://docs.spring.io/spring-shell/docs/current/api/</link>
	<link>https://docs.spring.io/spring-framework/docs/${spring.version}/javadoc-api/</link>
	<link>https://docs.spring.io/spring-shell/docs/current/api/</link>
</links>
</configuration>
</execution>

@@ -34,7 +34,7 @@ source control.

The projects that require middleware generally include a
`docker-compose.yml`, so consider using
http://compose.docker.io/[Docker Compose] to run the middleware servers
https://compose.docker.io/[Docker Compose] to run the middleware servers
in Docker containers.

=== Documentation
@@ -43,13 +43,13 @@ There is a "full" profile that will generate documentation.

=== Working with the code
If you don't have an IDE preference we would recommend that you use
http://www.springsource.com/developer/sts[Spring Tools Suite] or
http://eclipse.org[Eclipse] when working with the code. We use the
http://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
https://www.springsource.com/developer/sts[Spring Tools Suite] or
https://eclipse.org[Eclipse] when working with the code. We use the
https://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
should also work without issue.

==== Importing into eclipse with m2eclipse
We recommend the http://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
We recommend the https://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
marketplace".

@@ -24,7 +24,7 @@ added after the original pull request but before a merge.
`eclipse-code-formatter.xml` file from the
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
Cloud Build] project. If using IntelliJ, you can use the
http://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
Plugin] to import the same file.
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
`@author` tag identifying you, and preferably at least a paragraph on what the class is
@@ -37,6 +37,6 @@ added after the original pull request but before a merge.
* A few unit tests would help a lot as well -- someone has to do it.
* If no-one else is using your branch, please rebase it against the current master (or
other target branch in the main project).
* When writing a commit message please follow http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
* When writing a commit message please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
message (where XXXX is the issue number).
@@ -1,107 +0,0 @@
[[kafka-dlq-processing]]
== Dead-Letter Topic Processing

Because you cannot anticipate how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
However, if the problem is a permanent issue, that could cause an infinite loop.
The sample Spring Boot application within this topic is an example of how to route those messages back to the original topic, but it moves them to a "`parking lot`" topic after three attempts.
The application is another spring-cloud-stream application that reads from the dead-letter topic.
It terminates when no messages are received for 5 seconds.

The examples assume the original destination is `so8400out` and the consumer group is `so8400`.

There are a couple of strategies to consider:

* Consider running the rerouting only when the main application is not running.
Otherwise, the retries for transient errors are used up very quickly.
* Alternatively, use a two-stage approach: Use this application to route to a third topic and another to route from there back to the main topic.

The following code listings show the sample application:

.application.properties
[source]
----
spring.cloud.stream.bindings.input.group=so8400replay
spring.cloud.stream.bindings.input.destination=error.so8400out.so8400

spring.cloud.stream.bindings.output.destination=so8400out
spring.cloud.stream.bindings.output.producer.partitioned=true

spring.cloud.stream.bindings.parkingLot.destination=so8400in.parkingLot
spring.cloud.stream.bindings.parkingLot.producer.partitioned=true

spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=earliest

spring.cloud.stream.kafka.binder.headers=x-retries
----

.Application
[source, java]
----
@SpringBootApplication
@EnableBinding(TwoOutputProcessor.class)
public class ReRouteDlqKApplication implements CommandLineRunner {

	private static final String X_RETRIES_HEADER = "x-retries";

	public static void main(String[] args) {
		SpringApplication.run(ReRouteDlqKApplication.class, args).close();
	}

	private final AtomicInteger processed = new AtomicInteger();

	@Autowired
	private MessageChannel parkingLot;

	@StreamListener(Processor.INPUT)
	@SendTo(Processor.OUTPUT)
	public Message<?> reRoute(Message<?> failed) {
		processed.incrementAndGet();
		Integer retries = failed.getHeaders().get(X_RETRIES_HEADER, Integer.class);
		if (retries == null) {
			System.out.println("First retry for " + failed);
			return MessageBuilder.fromMessage(failed)
					.setHeader(X_RETRIES_HEADER, new Integer(1))
					.setHeader(BinderHeaders.PARTITION_OVERRIDE,
							failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
					.build();
		}
		else if (retries.intValue() < 3) {
			System.out.println("Another retry for " + failed);
			return MessageBuilder.fromMessage(failed)
					.setHeader(X_RETRIES_HEADER, new Integer(retries.intValue() + 1))
					.setHeader(BinderHeaders.PARTITION_OVERRIDE,
							failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
					.build();
		}
		else {
			System.out.println("Retries exhausted for " + failed);
			parkingLot.send(MessageBuilder.fromMessage(failed)
					.setHeader(BinderHeaders.PARTITION_OVERRIDE,
							failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
					.build());
		}
		return null;
	}

	@Override
	public void run(String... args) throws Exception {
		while (true) {
			int count = this.processed.get();
			Thread.sleep(5000);
			if (count == this.processed.get()) {
				System.out.println("Idle, terminating");
				return;
			}
		}
	}

	public interface TwoOutputProcessor extends Processor {

		@Output("parkingLot")
		MessageChannel parkingLot();

	}

}
----
Binary file not shown.
Before Width: | Height: | Size: 119 KiB |
@@ -1,6 +1,6 @@
[[spring-cloud-stream-binder-kafka-reference]]
= Spring Cloud Stream Kafka Binder Reference Guide
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein
:doctype: book
:toc:
:toclevels: 4
@@ -11,27 +11,23 @@ Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinat
:spring-cloud-stream-binder-kafka-repo: snapshot
:github-tag: master
:spring-cloud-stream-binder-kafka-docs-version: current
:spring-cloud-stream-binder-kafka-docs: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
:spring-cloud-stream-binder-kafka-docs-current: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
:spring-cloud-stream-binder-kafka-docs: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
:spring-cloud-stream-binder-kafka-docs-current: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
:github-raw: http://raw.github.com/{github-repo}/{github-tag}
:github-code: http://github.com/{github-repo}/tree/{github-tag}
:github-wiki: http://github.com/{github-repo}/wiki
:github-master-code: http://github.com/{github-repo}/tree/master
:github-raw: https://raw.github.com/{github-repo}/{github-tag}
:github-code: https://github.com/{github-repo}/tree/{github-tag}
:github-wiki: https://github.com/{github-repo}/wiki
:github-master-code: https://github.com/{github-repo}/tree/master
:sc-ext: java
// ======================================================================================

= Reference Guide
include::overview.adoc[]

include::dlq.adoc[]

include::partitions.adoc[]

= Appendices
[appendix]
include::building.adoc[]

include::contributing.adoc[]

// ======================================================================================

@@ -1,674 +0,0 @@
|
||||
== Usage
|
||||
|
||||
For using the Kafka Streams binder, you just need to add it to your Spring Cloud Stream application, using the following
|
||||
Maven coordinates:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
== Kafka Streams Binder Overview
|
||||
|
||||
Spring Cloud Stream's Apache Kafka support also includes a binder implementation designed explicitly for Apache Kafka
|
||||
Streams binding. With this native integration, a Spring Cloud Stream "processor" application can directly use the
|
||||
https://kafka.apache.org/documentation/streams/developer-guide[Apache Kafka Streams] APIs in the core business logic.
|
||||
|
||||
Kafka Streams binder implementation builds on the foundation provided by the http://docs.spring.io/spring-kafka/reference/html/_reference.html#kafka-streams[Kafka Streams in Spring Kafka]
|
||||
project.
|
||||
|
||||
As part of this native integration, the high-level https://docs.confluent.io/current/streams/developer-guide/dsl-api.html[Streams DSL]
|
||||
provided by the Kafka Streams API is available for use in the business logic, too.
|
||||
|
||||
An early version of the https://docs.confluent.io/current/streams/developer-guide/processor-api.html[Processor API]
|
||||
support is available as well.
|
||||
|
||||
As noted early-on, Kafka Streams support in Spring Cloud Stream strictly only available for use in the Processor model.
|
||||
A model in which the messages read from an inbound topic, business processing can be applied, and the transformed messages
|
||||
can be written to an outbound topic. It can also be used in Processor applications with a no-outbound destination.
|
||||
|
||||
=== Streams DSL
|
||||
|
||||
This application consumes data from a Kafka topic (e.g., `words`), computes word count for each unique word in a 5 seconds
|
||||
time window, and the computed results are sent to a downstream topic (e.g., `counts`) for further processing.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(KStreamProcessor.class)
|
||||
public class WordCountProcessorApplication {
|
||||
|
||||
@StreamListener("input")
|
||||
@SendTo("output")
|
||||
public KStream<?, WordCount> process(KStream<?, String> input) {
|
||||
return input
|
||||
.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
|
||||
.groupBy((key, value) -> value)
|
||||
.windowedBy(TimeWindows.of(5000))
|
||||
.count(Materialized.as("WordCounts-multi"))
|
||||
.toStream()
|
||||
.map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
SpringApplication.run(WordCountProcessorApplication.class, args);
|
||||
}
|
||||
----
|
||||
|
||||
Once built as a uber-jar (e.g., `wordcount-processor.jar`), you can run the above example like the following.
|
||||
|
||||
[source]
|
||||
----
|
||||
java -jar wordcount-processor.jar --spring.cloud.stream.bindings.input.destination=words --spring.cloud.stream.bindings.output.destination=counts
|
||||
----
|
||||
|
||||
This application will consume messages from the Kafka topic `words` and the computed results are published to an output
|
||||
topic `counts`.
|
||||
|
||||
Spring Cloud Stream will ensure that the messages from both the incoming and outgoing topics are automatically bound as
|
||||
KStream objects. As a developer, you can exclusively focus on the business aspects of the code, i.e. writing the logic
|
||||
required in the processor. Setting up the Streams DSL specific configuration required by the Kafka Streams infrastructure
|
||||
is automatically handled by the framework.
|
||||
|
||||
== Configuration Options
|
||||
|
||||
This section contains the configuration options used by the Kafka Streams binder.
|
||||
|
||||
For common configuration options and properties pertaining to binder, refer to the <<binding-properties,core documentation>>.
|
||||
|
||||
=== Kafka Streams Properties
|
||||
|
||||
The following properties are available at the binder level and must be prefixed with `spring.cloud.stream.kafka.streams.binder.`
|
||||
literal.
|
||||
|
||||
configuration::
|
||||
Map with a key/value pair containing properties pertaining to Apache Kafka Streams API.
|
||||
This property must be prefixed with `spring.cloud.stream.kafka.streams.binder.`.
|
||||
Following are some examples of using this property.
|
||||
|
||||
[source]
----
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000
----

For more information about all the properties that may go into the streams configuration, see the `StreamsConfig` JavaDocs in
the Apache Kafka Streams documentation.

brokers::
Broker URL.
+
Default: `localhost`
zkNodes::
Zookeeper URL.
+
Default: `localhost`
serdeError::
Deserialization error handler type.
Possible values are `logAndContinue`, `logAndFail`, or `sendToDlq`.
+
Default: `logAndFail`
applicationId::
Application ID for all the stream configurations in the current application context.
You can override the application id for an individual `StreamListener` method by using the `group` property on the binding, as shown in the example after this list.
You have to ensure that you are using the same group name for all input bindings in the case of multiple inputs on the same method.
+
Default: `default`

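For example, a minimal sketch of overriding the application ID for the `StreamListener` method bound to `input` (the binding name and the group value here are illustrative):

[source]
----
spring.cloud.stream.bindings.input.group: my-word-count
----
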
The following properties are _only_ available for Kafka Streams producers and must be prefixed with the `spring.cloud.stream.kafka.streams.bindings.<binding name>.producer.`
literal.

keySerde::
Key serde to use.
+
Default: `none`.
valueSerde::
Value serde to use.
+
Default: `none`.
useNativeEncoding::
Flag to enable native encoding.
+
Default: `false`.

The following properties are _only_ available for Kafka Streams consumers and must be prefixed with the `spring.cloud.stream.kafka.streams.bindings.<binding name>.consumer.`
literal.

keySerde::
Key serde to use.
+
Default: `none`.
valueSerde::
Value serde to use.
+
Default: `none`.
materializedAs::
State store to materialize when using incoming KTable types.
+
Default: `none`.
useNativeDecoding::
Flag to enable native decoding.
+
Default: `false`.
dlqName::
DLQ topic name.
+
Default: `none`.

=== TimeWindow Properties

Windowing is an important concept in stream processing applications. The following properties are available to configure
time-window computations; see the sketch after this list.

spring.cloud.stream.kafka.streams.timeWindow.length::
When this property is given, you can autowire a `TimeWindows` bean into the application.
The value is expressed in milliseconds.
+
Default: `none`.
spring.cloud.stream.kafka.streams.timeWindow.advanceBy::
Value is given in milliseconds.
+
Default: `none`.

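For example, the following sketch (the values are illustrative) configures a five-second window that advances by one second; the resulting `TimeWindows` bean can then be autowired, as the branching example later in this section shows:

[source]
----
spring.cloud.stream.kafka.streams.timeWindow.length: 5000
spring.cloud.stream.kafka.streams.timeWindow.advanceBy: 1000
----
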
== Multiple Input Bindings

For use cases that require multiple incoming KStream objects or a combination of KStream and KTable objects, the Kafka
Streams binder provides multiple bindings support.

Let's see it in action.

=== Multiple Input Bindings as a Sink

[source]
----
@EnableBinding(KStreamKTableBinding.class)
.....
.....
@StreamListener
public void process(@Input("inputStream") KStream<String, PlayEvent> playEvents,
                    @Input("inputTable") KTable<Long, Song> songTable) {
    ....
    ....
}

interface KStreamKTableBinding {

    @Input("inputStream")
    KStream<?, ?> inputStream();

    @Input("inputTable")
    KTable<?, ?> inputTable();
}
----

In the above example, the application is written as a sink, i.e. there are no output bindings, and the application has to
decide concerning downstream processing. When you write applications in this style, you might want to send the information
downstream or store it in a state store (see below for Queryable State Stores).

In the case of an incoming KTable, if you want to materialize the computations to a state store, you have to express it
through the following property.

[source]
----
spring.cloud.stream.kafka.streams.bindings.inputTable.consumer.materializedAs: all-songs
----

=== Multiple Input Bindings as a Processor

[source]
----
@EnableBinding(KStreamKTableBinding.class)
....
....

@StreamListener
@SendTo("output")
public KStream<String, Long> process(@Input("input") KStream<String, Long> userClicksStream,
                                     @Input("inputTable") KTable<String, String> userRegionsTable) {
    ....
    ....
}

interface KStreamKTableBinding extends KafkaStreamsProcessor {

    @Input("inputTable")
    KTable<?, ?> inputTable();
}
----

== Multiple Output Bindings (aka Branching)

Kafka Streams allows outbound data to be split into multiple topics based on some predicates. The Kafka Streams binder provides
support for this feature without compromising the programming model exposed through `StreamListener` in the end user application.

You can write the application in the usual way, as demonstrated above in the word count example. However, when using the
branching feature, you are required to do a few things. First, you need to make sure that your return type is `KStream[]`
instead of a regular `KStream`. Second, you need to use the `SendTo` annotation containing the output bindings in order
(see the example below). For each of these output bindings, you need to configure destination, content-type, etc., complying with
the standard Spring Cloud Stream expectations.

Here is an example:

[source]
----
@EnableBinding(KStreamProcessorWithBranches.class)
@EnableAutoConfiguration
public static class WordCountProcessorApplication {

    @Autowired
    private TimeWindows timeWindows;

    @StreamListener("input")
    @SendTo({"output1", "output2", "output3"})
    public KStream<?, WordCount>[] process(KStream<Object, String> input) {

        Predicate<Object, WordCount> isEnglish = (k, v) -> v.word.equals("english");
        Predicate<Object, WordCount> isFrench = (k, v) -> v.word.equals("french");
        Predicate<Object, WordCount> isSpanish = (k, v) -> v.word.equals("spanish");

        return input
                .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                .groupBy((key, value) -> value)
                .windowedBy(timeWindows)
                .count(Materialized.as("WordCounts-1"))
                .toStream()
                .map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))))
                .branch(isEnglish, isFrench, isSpanish);
    }

    interface KStreamProcessorWithBranches {

        @Input("input")
        KStream<?, ?> input();

        @Output("output1")
        KStream<?, ?> output1();

        @Output("output2")
        KStream<?, ?> output2();

        @Output("output3")
        KStream<?, ?> output3();
    }
}
----

Properties:

[source]
----
spring.cloud.stream.bindings.output1.contentType: application/json
spring.cloud.stream.bindings.output2.contentType: application/json
spring.cloud.stream.bindings.output3.contentType: application/json
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms: 1000
spring.cloud.stream.kafka.streams.binder.configuration:
  default.key.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
  default.value.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.bindings.output1:
  destination: foo
  producer:
    headerMode: raw
spring.cloud.stream.bindings.output2:
  destination: bar
  producer:
    headerMode: raw
spring.cloud.stream.bindings.output3:
  destination: fox
  producer:
    headerMode: raw
spring.cloud.stream.bindings.input:
  destination: words
  consumer:
    headerMode: raw
----

== Message Conversion

Similar to message-channel based binder applications, the Kafka Streams binder adapts to the out-of-the-box content-type
conversions without any compromise.

It is typical for Kafka Streams operations to know the type of SerDes used to transform the key and value correctly.
Therefore, it may be more natural to rely on the SerDe facilities provided by the Apache Kafka Streams library itself for
the inbound and outbound conversions rather than using the content-type conversions offered by the framework.
On the other hand, you might already be familiar with the content-type conversion patterns provided by the framework and
would like to continue using them for inbound and outbound conversions.

Both options are supported in the Kafka Streams binder implementation.

==== Outbound serialization

If native encoding is disabled (which is the default), then the framework converts the message using the contentType
set by the user (otherwise, the default `application/json` is applied). In this case, it ignores any SerDe set on the outbound
for outbound serialization.

Here is the property to set the contentType on the outbound.

[source]
----
spring.cloud.stream.bindings.output.contentType: application/json
----

Here is the property to enable native encoding.

[source]
----
spring.cloud.stream.bindings.output.nativeEncoding: true
----

If native encoding is enabled on the output binding (the user has to enable it explicitly, as above), then the framework
skips any form of automatic message conversion on the outbound. In that case, it switches to the SerDe set by the user.
The `valueSerde` property set on the actual output binding is used. Here is an example.

[source]
----
spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde: org.apache.kafka.common.serialization.Serdes$StringSerde
----
If this property is not set, then it uses the "default" SerDe: `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.

It is worth mentioning that the Kafka Streams binder does not serialize the keys on the outbound - it simply relies on Kafka itself.
Therefore, you either have to specify the `keySerde` property on the binding or it defaults to the application-wide common
`keySerde`.

Binding level key serde:

[source]
----
spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde
----

Common key serde:

[source]
----
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde
----

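For instance, a sketch that pins the outbound key serde at the binding level (the serde class shown is illustrative):

[source]
----
spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde: org.apache.kafka.common.serialization.Serdes$StringSerde
----
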
If branching is used, then you need to use multiple output bindings. For example,

[source]
----
interface KStreamProcessorWithBranches {

    @Input("input")
    KStream<?, ?> input();

    @Output("output1")
    KStream<?, ?> output1();

    @Output("output2")
    KStream<?, ?> output2();

    @Output("output3")
    KStream<?, ?> output3();
}
----

If `nativeEncoding` is set, then you can set different SerDes on individual output bindings as below.

[source]
----
spring.cloud.stream.kafka.streams.bindings.output1.producer.valueSerde=IntegerSerde
spring.cloud.stream.kafka.streams.bindings.output2.producer.valueSerde=StringSerde
spring.cloud.stream.kafka.streams.bindings.output3.producer.valueSerde=JsonSerde
----

Then, if you have `SendTo` set like this, `@SendTo({"output1", "output2", "output3"})`, the `KStream[]` from the branches is
applied with the proper SerDe objects as defined above. If you are not enabling `nativeEncoding`, you can then set different
contentType values on the output bindings as below. In that case, the framework uses the appropriate message converter
to convert the messages before sending them to Kafka.

[source]
----
spring.cloud.stream.bindings.output1.contentType: application/json
spring.cloud.stream.bindings.output2.contentType: application/java-serialized-object
spring.cloud.stream.bindings.output3.contentType: application/octet-stream
----

==== Inbound Deserialization

Similar rules apply to data deserialization on the inbound.

If native decoding is disabled (which is the default), then the framework converts the message using the contentType
set by the user (otherwise, the default `application/json` is applied). In this case, it ignores any SerDe set on the inbound
for inbound deserialization.

Here is the property to set the contentType on the inbound.

[source]
----
spring.cloud.stream.bindings.input.contentType: application/json
----

Here is the property to enable native decoding.

[source]
----
spring.cloud.stream.bindings.input.nativeDecoding: true
----

If native decoding is enabled on the input binding (the user has to enable it explicitly, as above), then the framework
skips doing any message conversion on the inbound. In that case, it switches to the SerDe set by the user. The `valueSerde`
property set on the actual input binding is used. Here is an example.

[source]
----
spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde: org.apache.kafka.common.serialization.Serdes$StringSerde
----

If this property is not set, it uses the default SerDe: `spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde`.

It is worth mentioning that the Kafka Streams binder does not deserialize the keys on the inbound - it simply relies on Kafka itself.
Therefore, you either have to specify the `keySerde` property on the binding or it defaults to the application-wide common
`keySerde`.

Binding level key serde:

[source]
----
spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde
----

Common key serde:

[source]
----
spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde
----

As in the case of KStream branching on the outbound, the benefit of setting value SerDes per binding is that if you have
multiple input bindings (multiple KStream objects) and they all require separate value SerDes, then you can configure
them individually. If you use the common configuration approach, then this feature won't be applicable.

== Error Handling

Apache Kafka Streams provides the capability for natively handling exceptions from deserialization errors.
For details on this support, please see https://cwiki.apache.org/confluence/display/KAFKA/KIP-161%3A+streams+deserialization+exception+handlers[this KIP].
Out of the box, Apache Kafka Streams provides two kinds of deserialization exception handlers - `logAndContinue` and `logAndFail`.
As the name indicates, the former logs the error and continues processing the next records, and the latter logs the
error and fails. `logAndFail` is the default deserialization exception handler.

=== Handling Deserialization Exceptions

The Kafka Streams binder supports a selection of exception handlers through the following properties.

[source]
----
spring.cloud.stream.kafka.streams.binder.serdeError: logAndContinue
----

In addition to the above two deserialization exception handlers, the binder also provides a third one for sending the erroneous
records (poison pills) to a DLQ topic. Here is how you enable this DLQ exception handler.

[source]
----
spring.cloud.stream.kafka.streams.binder.serdeError: sendToDlq
----
When the above property is set, all the deserialization error records are automatically sent to the DLQ topic.

[source]
----
spring.cloud.stream.kafka.streams.bindings.input.consumer.dlqName: foo-dlq
----

If this is set, then the error records are sent to the topic `foo-dlq`. If this is not set, then a DLQ
topic is created with the name `error.<input-topic-name>.<group-name>`.

A couple of things to keep in mind when using the exception handling feature in the Kafka Streams binder:

* The property `spring.cloud.stream.kafka.streams.binder.serdeError` is applicable for the entire application. This implies
that if there are multiple `StreamListener` methods in the same application, this property is applied to all of them.
* The exception handling for deserialization works consistently with native deserialization and framework-provided message
conversion.

=== Handling Non-Deserialization Exceptions

For general error handling in the Kafka Streams binder, it is up to the end user applications to handle application level errors.
As a side effect of providing a DLQ for deserialization exception handlers, the Kafka Streams binder provides a way to get
access to the DLQ sending bean directly from your application.
Once you get access to that bean, you can programmatically send any exception records from your application to the DLQ.

It continues to remain hard to do robust error handling using the high-level DSL; Kafka Streams doesn't natively support error
handling yet.

However, when you use the low-level Processor API in your application, there are options to control this behavior. See
below.

[source]
----
@Autowired
private SendToDlqAndContinue dlqHandler;

@StreamListener("input")
@SendTo("output")
public KStream<?, WordCount> process(KStream<Object, String> input) {

    input.process(() -> new Processor() {
        ProcessorContext context;

        @Override
        public void init(ProcessorContext context) {
            this.context = context;
        }

        @Override
        public void process(Object o, Object o2) {

            try {
                .....
                .....
            }
            catch(Exception e) {
                //explicitly provide the kafka topic corresponding to the input binding as the first argument.
                //DLQ handler will correctly map to the dlq topic from the actual incoming destination.
                dlqHandler.sendToDlq("topic-name", (byte[]) o, (byte[]) o2, context.partition());
            }
        }

        .....
        .....
    });
}
----

== State Store

A state store is created automatically by Kafka Streams when the DSL is used.
When the processor API is used, you need to register a state store manually. In order to do so, you can use the `KafkaStreamsStateStore` annotation.
You can specify the name and type of the store, along with flags to control logging and disable caching, etc.
Once the store is created by the binder during the bootstrapping phase, you can access this state store through the processor API.
Below are some primitives for doing this.

Creating a state store:
[source]
----
@KafkaStreamsStateStore(name="mystate", type= KafkaStreamsStateStoreProperties.StoreType.WINDOW, lengthMs=300000)
public void process(KStream<Object, Product> input) {
    ...
}
----

Accessing the state store:
[source]
----
Processor<Object, Product>() {

    WindowStore<Object, String> state;

    @Override
    public void init(ProcessorContext processorContext) {
        state = (WindowStore) processorContext.getStateStore("mystate");
    }
    ...
}
----

== Interactive Queries

As part of the public Kafka Streams binder API, we expose a class called `InteractiveQueryService`.
You can access this as a Spring bean in your application. An easy way to get access to this bean from your application is to "autowire" the bean.

[source]
----
@Autowired
private InteractiveQueryService interactiveQueryService;
----

Once you gain access to this bean, you can query the particular state store that you are interested in. See below.

[source]
----
ReadOnlyKeyValueStore<Object, Object> keyValueStore =
    interactiveQueryService.getQueryableStoreType("my-store", QueryableStoreTypes.keyValueStore());
----
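
For example, assuming the store above holds word counts keyed by word (the key and value types here are illustrative), a point lookup is a simple `get` on the returned store:

[source]
----
// fetch the current count for the word "kafka" from the queryable store
Long count = (Long) keyValueStore.get("kafka");
----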

If there are multiple instances of the Kafka Streams application running, then before you can query them interactively, you need to identify which application instance hosts the key.
The `InteractiveQueryService` API provides methods for identifying the host information.

In order for this to work, you must configure the property `application.server` as below:

[source]
----
spring.cloud.stream.kafka.streams.binder.configuration.application.server: <server>:<port>
----

Here are some code snippets:

[source]
----
org.apache.kafka.streams.state.HostInfo hostInfo = interactiveQueryService.getHostInfo("store-name",
        key, keySerializer);

if (interactiveQueryService.getCurrentHostInfo().equals(hostInfo)) {

    //query from the store that is locally available
}
else {
    //query from the remote host
}
----

== Accessing the underlying KafkaStreams object

The `StreamsBuilderFactoryBean` from spring-kafka that is responsible for constructing the `KafkaStreams` object can be accessed programmatically.
Each `StreamsBuilderFactoryBean` is registered as `stream-builder`, appended with the `StreamListener` method name.
If your `StreamListener` method is named `process`, for example, the stream builder bean is named `stream-builder-process`.
Since this is a factory bean, it should be accessed by prepending an ampersand (`&`) when accessing it programmatically.
The following is an example, and it assumes the `StreamListener` method is named `process`:

[source]
----
StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
----

== State Cleanup

By default, the `KafkaStreams.cleanUp()` method is called when the binding is stopped.
See https://docs.spring.io/spring-kafka/reference/html/_reference.html#_configuration[the Spring Kafka documentation].
To modify this behavior, simply add a single `CleanupConfig` `@Bean` (configured to clean up on start, stop, or neither) to the application context; the bean will be detected and wired into the factory bean.
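
As a sketch, assuming spring-kafka's `CleanupConfig(boolean cleanupOnStart, boolean cleanupOnStop)` constructor, a bean like the following switches the cleanup to happen on start only:

[source]
----
@Bean
public CleanupConfig cleanupConfig() {
    // clean up local state when the binding starts, but not when it stops
    return new CleanupConfig(true, false);
}
----
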
@@ -1,13 +1,12 @@
[partintro]
--
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
It contains information about its design, usage and configuration options, as well as information on how the Stream Cloud Stream concepts map into Apache Kafka specific constructs.
--

== Usage

To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
For using the Apache Kafka binder, you just need to add it to your Spring Cloud Stream application, using the following Maven coordinates:

[source,xml]
----
@@ -17,7 +16,7 @@ To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` a
</dependency>
----

Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
Alternatively, you can also use the Spring Cloud Stream Kafka Starter.

[source,xml]
----
@@ -29,295 +28,186 @@ Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown

== Apache Kafka Binder Overview

The following image shows a simplified diagram of how the Apache Kafka binder operates:
A simplified diagram of how the Apache Kafka binder operates can be seen below.

.Kafka Binder
image::images/kafka-binder.png[width=300,scaledwidth="50%"]
image::kafka-binder.png[width=300,scaledwidth="50%"]

The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
The consumer group maps directly to the same Apache Kafka concept.
Partitioning also maps directly to Apache Kafka partitions as well.

The binder currently uses the Apache Kafka `kafka-clients` 1.0.0 jar and is designed to be used with a broker of at least that version.
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
For example, with versions earlier than 0.11.x.x, native headers are not supported.
Also, 0.11.x.x does not support the `autoAddPartitions` property.

== Configuration Options

This section contains the configuration options used by the Apache Kafka binder.

For common configuration options and properties pertaining to binder, see the <<binding-properties,core documentation>>.
For common configuration options and properties pertaining to binder, refer to the https://github.com/spring-cloud/spring-cloud-stream/blob/master/spring-cloud-stream-docs/src/main/asciidoc/spring-cloud-stream-overview.adoc#configuration-options[core docs].

=== Kafka Binder Properties

spring.cloud.stream.kafka.binder.brokers::
A list of brokers to which the Kafka binder connects.
A list of brokers to which the Kafka binder will connect.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultBrokerPort::
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
`brokers` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the broker list.
+
Default: `9092`.
spring.cloud.stream.kafka.binder.configuration::
Key/Value map of client properties (both producers and consumer) passed to all clients created by the binder.
Due to the fact that these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
Properties here supersede any properties set in boot.
spring.cloud.stream.kafka.binder.zkNodes::
A list of ZooKeeper nodes to which the Kafka binder can connect.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.consumerProperties::
Key/Value map of arbitrary Kafka client consumer properties.
Properties here supersede any properties set in boot and in the `configuration` property above.
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultZkPort::
`zkNodes` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the node list.
+
Default: `2181`.
spring.cloud.stream.kafka.binder.configuration::
Key/Value map of client properties (both producers and consumer) passed to all clients created by the binder.
Due to the fact that these properties will be used by both producers and consumers, usage should be restricted to common properties, especially security settings.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.headers::
The list of custom headers that are transported by the binder.
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
The list of custom headers that will be transported by the binder.
+
Default: empty.
spring.cloud.stream.kafka.binder.healthTimeout::
The time to wait to get partition information, in seconds.
Health reports as down if this timer expires.
spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
The frequency, in milliseconds, with which offsets are saved.
Ignored if `0`.
+
Default: 10.
Default: `10000`.
spring.cloud.stream.kafka.binder.offsetUpdateCount::
The frequency, in number of updates, with which consumed offsets are persisted.
Ignored if `0`.
Mutually exclusive with `offsetUpdateTimeWindow`.
+
Default: `0`.
spring.cloud.stream.kafka.binder.requiredAcks::
The number of required acks on the broker.
See the Kafka documentation for the producer `acks` property.
The number of required acks on the broker.
+
Default: `1`.
spring.cloud.stream.kafka.binder.minPartitionCount::
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
The global minimum number of partitions that the binder will configure on topics on which it produces/consumes data.
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount` * `concurrency` settings of the producer (if either is larger).
+
Default: `1`.
spring.cloud.stream.kafka.binder.producerProperties::
Key/Value map of arbitrary Kafka client producer properties.
Properties here supersede any properties set in boot and in the `configuration` property above.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.replicationFactor::
The replication factor of auto-created topics if `autoCreateTopics` is active.
Can be overridden on each binding.
The replication factor of auto-created topics if `autoCreateTopics` is active.
+
Default: `1`.
spring.cloud.stream.kafka.binder.autoCreateTopics::
If set to `true`, the binder creates new topics automatically.
If set to `false`, the binder relies on the topics being already configured.
In the latter case, if the topics do not exist, the binder fails to start.
+
NOTE: This setting is independent of the `auto.topic.create.enable` setting of the broker and does not influence it.
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
If set to `true`, the binder will create new topics automatically.
If set to `false`, the binder will rely on the topics being already configured.
In the latter case, if the topics do not exist, the binder will fail to start.
Of note, this setting is independent of the `auto.topic.create.enable` setting of the broker and it does not influence it: if the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
+
Default: `true`.
spring.cloud.stream.kafka.binder.autoAddPartitions::
If set to `true`, the binder creates new partitions if required.
If set to `false`, the binder relies on the partition size of the topic being already configured.
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
If set to `true`, the binder will create add new partitions if required.
If set to `false`, the binder will rely on the partition size of the topic being already configured.
If the partition count of the target topic is smaller than the expected value, the binder will fail to start.
+
Default: `false`.
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
spring.cloud.stream.kafka.binder.socketBufferSize::
Size (in bytes) of the socket buffer to be used by the Kafka consumers.
+
Default `null` (no transactions)
spring.cloud.stream.kafka.binder.transaction.producer.*::
Global producer properties for producers in a transactional binder.
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
+
Default: See individual producer properties.
Default: `2097152`.

spring.cloud.stream.kafka.binder.headerMapperBeanName::
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
Use this, for example, if you wish to customize the trusted packages in a `DefaultKafkaHeaderMapper` that uses JSON deserialization for the headers.
+
Default: none.

[[kafka-consumer-properties]]
=== Kafka Consumer Properties

The following properties are available for Kafka consumers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.

admin.configuration::
A `Map` of Kafka topic properties used when provisioning topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0`
+
Default: none.

admin.replicas-assignment::
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
Used when provisioning new topics.
See the `NewTopic` Javadocs in the `kafka-clients` jar.
+
Default: none.

admin.replication-factor::
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
Ignored if `replicas-assignments` is present.
+
Default: none (the binder-wide default of 1 is used).

autoRebalanceEnabled::
When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
When `true`, topic partitions will be automatically rebalanced between the members of a consumer group.
When `false`, each consumer will be assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
This requires both `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
The property `spring.cloud.stream.instanceCount` must typically be greater than 1 in this case.
+
Default: `true`.
ackEachRecord::
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
+
Default: `false`.
autoCommitOffset::
Whether to autocommit offsets when a message has been processed.
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header is present in the inbound message.
Whether to autocommit offsets when a message has been processed.
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header will be present in the inbound message.
Applications may use this header for acknowledging messages.
See the examples section for details.
When this property is set to `false`, Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
Also see `ackEachRecord`.
When this property is set to `false`, Kafka binder will set the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL`.
+
Default: `true`.
autoCommitOnError::
Effective only if `autoCommitOffset` is set to `true`.
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
If set to `true`, it always auto-commits (if auto-commit is enabled).
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
Effective only if `autoCommitOffset` is set to `true`.
If set to `false` it suppresses auto-commits for messages that result in errors, and will commit only for successful messages, allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
If set to `true`, it will always auto-commit (if auto-commit is enabled).
If not set (default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ, and not committing them otherwise.
+
Default: not set.
recoveryInterval::
The interval between connection recovery attempts, in milliseconds.
+
Default: `5000`.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by startOffset.
Whether to reset offsets on the consumer to the value provided by `startOffset`.
+
Default: `false`.
startOffset::
The starting offset for new groups.
Allowed values: `earliest` and `latest`.
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
Also see `resetOffsets` (earlier in this list).
The starting offset for new groups, or when `resetOffsets` is `true`.
Allowed values: `earliest`, `latest`.
If the consumer group is set explicitly for the consumer 'binding' (via `spring.cloud.stream.bindings.<channelName>.group`), then 'startOffset' is set to `earliest`; otherwise it is set to `latest` for the `anonymous` consumer group.
+
Default: null (equivalent to `earliest`).
enableDlq::
When set to true, it enables DLQ behavior for the consumer.
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
The DLQ topic name can be configured by setting the `dlqName` property.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
See <<kafka-dlq-processing>> for more information.
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
**Not allowed when `destinationIsPattern` is `true`.**
When set to true, it will send enable DLQ behavior for the consumer.
Messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
+
Default: `false`.
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
Map with a key/value pair containing generic Kafka consumer properties.
+
Default: Empty map.
dlqName::
The name of the DLQ topic to receive the error messages.
+
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
dlqProducerProperties::
Using this, DLQ-specific producer properties can be set.
All the properties available through kafka producer properties can be set through this property.
+
Default: Default Kafka producer properties.
standardHeaders::
Indicates which standard headers are populated by the inbound channel adapter.
Allowed values: `none`, `id`, `timestamp`, or `both`.
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
+
Default: `none`
converterBeanName::
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
+
Default: `null`
idleEventInterval::
The interval, in milliseconds, between events indicating that no messages have recently been received.
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
See <<pause-resume>> for a usage example.
+
Default: `30000`
destinationIsPattern::
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
This can be configured using the `configuration` property above.
+
Default: `false`

[[kafka-producer-properties]]
=== Kafka Producer Properties

The following properties are available for Kafka producers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.

admin.configuration::
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.admin.configuration.message.format.version=0.9.0.0`
+
Default: none.

admin.replicas-assignment::
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
Used when provisioning new topics.
See `NewTopic` javadocs in the `kafka-clients` jar.
+
Default: none.

admin.replication-factor::
The replication factor to use when provisioning new topics. Overrides the binder-wide setting.
Ignored if `replicas-assignments` is present.
+
Default: none (the binder-wide default of 1 is used).

bufferSize::
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
Upper limit, in bytes, of how much data the Kafka producer will attempt to batch before sending.
+
Default: `16384`.
sync::
Whether the producer is synchronous.
Whether the producer is synchronous.
+
Default: `false`.
batchTimeout::
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
How long the producer will wait before sending in order to allow more messages to accumulate in the same batch.
(Normally the producer does not wait at all, and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
messageKeyExpression::
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
The payload cannot be used because, by the time this expression is evaluated, the payload is already in the form of a `byte[]`.
+
Default: `none`.
headerPatterns::
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
Patterns can begin or end with the wildcard character (asterisk).
Patterns can be negated by prefixing with `!`.
Matching stops after the first match (positive or negative).
For example `!ask,as*` will pass `ash` but not `ask`.
`id` and `timestamp` are never mapped.
+
Default: `*` (all headers - except the `id` and `timestamp`)
configuration::
Map with a key/value pair containing generic Kafka producer properties.
Map with a key/value pair containing generic Kafka producer properties.
+
Default: Empty map.

NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.
[NOTE]
====
The Kafka binder will use the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value will be used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), then the binder will fail to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions will be added.
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` and `partitionCount`), the existing partition count will be used.
====

=== Usage examples

In this section, we show the use of the preceding properties for specific scenarios.
In this section, we illustrate the use of the above properties for specific scenarios.

==== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking
==== Example: Setting `autoCommitOffset` false and relying on manual acking.

This example illustrates how one may manually acknowledge offsets in a consumer application.

This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` is set to false.
Use the corresponding input channel name for your example.

[source]
@@ -341,13 +231,13 @@ public class ManuallyAcknowdledgingConsumer {
}
----

==== Example: Security Configuration
==== Example: security configuration

Apache Kafka 0.9 supports secure connections between client and brokers.
To take advantage of this feature, follow the guidelines in the http://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 http://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
To take advantage of this feature, follow the guidelines in the https://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 https://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.

For example, to set `security.protocol` to `SASL_SSL`, set the following property:
For example, for setting `security.protocol` to `SASL_SSL`, set:

[source]
----
@@ -356,47 +246,49 @@ spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL

All the other security properties can be set in a similar manner.

When using Kerberos, follow the instructions in the http://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
When using Kerberos, follow the instructions in the https://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.

Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.
Spring Cloud Stream supports passing JAAS configuration information to the application using a JAAS configuration file and using Spring Boot properties.

===== Using JAAS Configuration Files
===== Using JAAS configuration files

The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:
The JAAS, and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos using a JAAS configuration file:

[source,bash]
[source]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----

===== Using Spring Boot Properties
===== Using Spring Boot properties

As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.
As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications using Spring Boot properties.

The following properties can be used to configure the login context of the Kafka client:
The following properties can be used for configuring the login context of the Kafka client.

spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. Not necessary to be set in normal cases.
The login module name. Not necessary to be set in normal cases.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
Map with a key/value pair containing the login module options.
Map with a key/value pair containing the login module options.
+
Default: Empty map.

The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:
Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos using Spring Boot configuration properties:

[source,bash]
[source]
----
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.autoCreateTopics=false \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
@@ -406,7 +298,7 @@ The following example shows how to launch a Spring Cloud Stream application with
   --spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----

The preceding example represents the equivalent of the following JAAS file:
This represents the equivalent of the following JAAS file:

[source]
----
@@ -419,75 +311,86 @@ KafkaClient {
};
----

If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent.
If the topics required already exist on the broker, or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent. As an alternative to setting `spring.cloud.stream.kafka.binder.autoCreateTopics` you can simply remove the broker dependency from the application. See <<exclude-admin-utils>> for details.

NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.
[NOTE]
====
Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream will ignore the Spring Boot properties.

NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` with Kerberos.
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.
====

[[pause-resume]]
==== Example: Pausing and Resuming the Consumer
[NOTE]
====
Exercise caution when using the `autoCreateTopics` and `autoAddPartitions` if using Kerberos.
Usually applications may use principals that do not have administrative rights in Kafka and Zookeeper, and relying on Spring Cloud Stream to create/modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively using Kafka tooling.
====

If you wish to suspend consumption without causing a partition rebalance, you can pause and resume the consumer.
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
The frequency at which events are published is controlled by the `idleEventInterval` property.
Since the consumer is not thread-safe, you must call these methods on the calling thread.

The following simple application shows how to pause and resume:

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
        System.out.println(in);
        // Pause the single partition of myTopic; this does not trigger a rebalance.
        consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
    }

    @Bean
    public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
        return event -> {
            System.out.println(event);
            // Resume all currently paused partitions once the container goes idle.
            if (event.getConsumer().paused().size() > 0) {
                event.getConsumer().resume(event.getConsumer().paused());
            }
        };
    }

}
----
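
The `idleEventInterval` mentioned above is a Kafka consumer binding property. As a sketch of setting it (the binding name `input` matches the example above; the property path follows the binder's consumer property conventions):

[source,yaml]
----
spring:
  cloud:
    stream:
      kafka:
        bindings:
          input:
            consumer:
              # Publish a ListenerContainerIdleEvent after every 30 seconds of inactivity.
              idle-event-interval: 30000
----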

==== Using the binder with Apache Kafka 0.10

The binder also supports connecting to Kafka 0.10 brokers.
To do so, when you create the project that contains your application, include `spring-cloud-starter-stream-kafka` as you normally would for 0.9-based applications.
Then add the following dependencies at the top of the `<dependencies>` section in the pom.xml file to override the Apache Kafka, Spring Kafka, and Spring Integration Kafka dependencies with 0.10-compatible versions, as in the following example:

[source,xml]
----
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.1.1.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.springframework.integration</groupId>
    <artifactId>spring-integration-kafka</artifactId>
    <version>2.1.0.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.10.0.0</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
----

[NOTE]
====
The versions above are provided only for the sake of the example.
For best results, we recommend using the most recent 0.10-compatible versions of the projects.
====

[[exclude-admin-utils]]
==== Excluding the Kafka broker jar from the classpath of the binder-based application

The Apache Kafka binder uses the administrative utilities that are part of the Apache Kafka server library to create and reconfigure topics.
If the inclusion of the Apache Kafka server library and its dependencies is not necessary at runtime (because the application relies on the topics being configured administratively), the Kafka binder allows the Apache Kafka server dependency to be excluded from the application.

If you use the Kafka 0.10 dependencies as advised above, all you have to do is not include the Kafka broker dependency.
If you use Kafka 0.9, ensure that you exclude the Kafka broker jar from the `spring-cloud-starter-stream-kafka` dependency, as follows:

[source,xml]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-stream-kafka</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
        </exclusion>
    </exclusions>
</dependency>
----

If you exclude the Apache Kafka server dependency and the topic is not present on the server, the Apache Kafka broker creates the topic if auto topic creation is enabled on the server.
Keep in mind that, if you rely on this, the Kafka server uses the default number of partitions and replication factor.
On the other hand, if auto topic creation is disabled on the server, care must be taken before running the application to create the topic with the desired number of partitions.

If you want full control over how partitions are allocated, leave the default settings as they are: do not exclude the Kafka broker jar, and ensure that `spring.cloud.stream.kafka.binder.autoCreateTopics` is set to `true` (the default).

[[kafka-error-channels]]
== Error Channels

Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
See <<spring-cloud-stream-overview-error-handling>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with the following properties:

* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`.

There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
You can consume these exceptions with your own Spring Integration flow.
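
For example, a minimal sketch of such a flow, written as a service activator (this assumes an output destination named `myTopic`, with producer error channels following the `<destination>.errors` naming convention; adjust both to your configuration):

[source, java]
----
@ServiceActivator(inputChannel = "myTopic.errors")
public void handleSendFailure(ErrorMessage message) {
    KafkaSendFailureException exception = (KafkaSendFailureException) message.getPayload();
    // Both the failed Spring Messaging message and the raw ProducerRecord are available.
    System.out.println("Failed message: " + exception.getFailedMessage());
    System.out.println("Failed record: " + exception.getRecord());
}
----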

[[kafka-metrics]]
== Kafka Metrics

The Kafka binder module exposes the following metric:

`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
The metrics provided are based on the Micrometer metrics library.
The metric contains the consumer group information, the topic, and the actual lag of the committed offset behind the latest offset on the topic.
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.
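
As a sketch of how this metric could be read programmatically, assuming a Micrometer `MeterRegistry` is available in the application context (the class and method names here are illustrative):

[source, java]
----
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;

public class OffsetLagLogger {

    private final MeterRegistry registry;

    public OffsetLagLogger(MeterRegistry registry) {
        this.registry = registry;
    }

    public void logLag() {
        // Look up the binder's offset-lag gauge by name and print its current value.
        Gauge gauge = this.registry.find("spring.cloud.stream.binder.kafka.offset").gauge();
        if (gauge != null) {
            System.out.println("Consumer lag: " + gauge.value());
        }
    }
}
----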

@@ -1,104 +0,0 @@
== Partitioning with the Kafka Binder

Apache Kafka supports topic partitioning natively.

Sometimes it is advantageous to send data to specific partitions -- for example, when you want to strictly order message processing (all messages for a particular customer should go to the same partition).

The following example shows how to configure the producer and consumer side:

[source, java]
----
@SpringBootApplication
@EnableBinding(Source.class)
public class KafkaPartitionProducerApplication {

    private static final Random RANDOM = new Random(System.currentTimeMillis());

    private static final String[] data = new String[] {
            "foo1", "bar1", "qux1",
            "foo2", "bar2", "qux2",
            "foo3", "bar3", "qux3",
            "foo4", "bar4", "qux4",
    };

    public static void main(String[] args) {
        new SpringApplicationBuilder(KafkaPartitionProducerApplication.class)
            .web(false)
            .run(args);
    }

    @InboundChannelAdapter(channel = Source.OUTPUT, poller = @Poller(fixedRate = "5000"))
    public Message<?> generate() {
        String value = data[RANDOM.nextInt(data.length)];
        System.out.println("Sending: " + value);
        return MessageBuilder.withPayload(value)
                .setHeader("partitionKey", value)
                .build();
    }

}
----

.application.yml
[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        output:
          destination: partitioned.topic
          producer:
            partitioned: true
            partition-key-expression: headers['partitionKey']
            partition-count: 12
----

IMPORTANT: The topic must be provisioned to have enough partitions to achieve the desired concurrency for all consumer groups.
The above configuration supports up to 12 consumer instances (6 if their `concurrency` is 2, 4 if their `concurrency` is 3, and so on).
It is generally best to "`over-provision`" the partitions to allow for future increases in consumers or concurrency.

NOTE: The preceding configuration uses the default partitioning (`key.hashCode() % partitionCount`).
This may or may not provide a suitably balanced algorithm, depending on the key values.
You can override this default by using the `partitionSelectorExpression` or `partitionSelectorClass` properties.
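
As a quick illustration of that default algorithm, the following sketch computes the partition that would be selected for a few of the sample keys above (purely illustrative; it just evaluates `key.hashCode() % partitionCount` with the 12 partitions configured above):

[source, java]
----
public class DefaultPartitioningExample {

    public static void main(String[] args) {
        int partitionCount = 12;
        for (String key : new String[] { "foo1", "bar1", "qux1" }) {
            // The same key always hashes to the same partition, preserving per-key ordering.
            System.out.println(key + " -> partition " + (key.hashCode() % partitionCount));
        }
    }
}
----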
Since partitions are natively handled by Kafka, no special configuration is needed on the consumer side.
Kafka allocates partitions across the instances.

The following Spring Boot application listens to a Kafka stream and prints (to the console) the partition ID to which each message goes:

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class KafkaPartitionConsumerApplication {

    public static void main(String[] args) {
        new SpringApplicationBuilder(KafkaPartitionConsumerApplication.class)
            .web(false)
            .run(args);
    }

    @StreamListener(Sink.INPUT)
    public void listen(@Payload String in, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
        System.out.println(in + " received from partition " + partition);
    }

}
----

.application.yml
[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        input:
          destination: partitioned.topic
          group: myGroup
----

You can add instances as needed.
Kafka rebalances the partition allocations.
If the instance count (or `instance count * concurrency`) exceeds the number of partitions, some consumers are idle.
@@ -1,3 +0,0 @@
include::overview.adoc[leveloffset=+1]
include::dlq.adoc[leveloffset=+1]
include::partitions.adoc[leveloffset=+1]
@@ -9,7 +9,7 @@
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

   https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
@@ -20,7 +20,7 @@
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:xslthl="http://xslthl.sourceforge.net/"
                xmlns:d="http://docbook.org/ns/docbook"
                exclude-result-prefixes="xslthl d"
                version='1.0'>

@@ -9,7 +9,7 @@ to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

   https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
@@ -20,7 +20,7 @@ under the License.
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:xslthl="http://xslthl.sourceforge.net/"
                xmlns:d="http://docbook.org/ns/docbook"
                exclude-result-prefixes="xslthl d"
                version='1.0'>

@@ -9,7 +9,7 @@ to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

   https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an

@@ -9,7 +9,7 @@ to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

   https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an

@@ -9,7 +9,7 @@ to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

   https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
@@ -20,7 +20,7 @@ under the License.
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:xslthl="http://xslthl.sourceforge.net/"
                xmlns:d="http://docbook.org/ns/docbook"
                exclude-result-prefixes="xslthl"
                version='1.0'>

@@ -9,7 +9,7 @@ to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

   https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
@@ -22,7 +22,7 @@ under the License.
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:d="http://docbook.org/ns/docbook"
                xmlns:fo="http://www.w3.org/1999/XSL/Format"
                xmlns:xslthl="http://xslthl.sourceforge.net/"
                xmlns:xlink='http://www.w3.org/1999/xlink'
                xmlns:exsl="http://exslt.org/common"
                exclude-result-prefixes="exsl xslthl d xlink"

@@ -19,5 +19,5 @@
    <highlighter id="properties" file="./xslthl/properties-hl.xml" />
    <highlighter id="json" file="./xslthl/json-hl.xml" />
    <highlighter id="yaml" file="./xslthl/yaml-hl.xml" />
    <namespace prefix="xslthl" uri="http://xslthl.sourceforge.net/" />
</xslthl-config>
@@ -4,7 +4,7 @@
Syntax highlighting definition for SH

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2010 Mathieu Malaterre

This software is provided 'as-is', without any express or implied

@@ -3,7 +3,7 @@
Syntax highlighting definition for C

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for C++

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for C#

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for CSS files

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2011-2012 Martin Hujer, Michiel Hendriks

This software is provided 'as-is', without any express or implied
@@ -26,7 +26,7 @@ freely, subject to the following restrictions:
Martin Hujer <mhujer at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

Reference: https://www.w3.org/TR/CSS21/propidx.html

-->
<highlighters>

@@ -7,7 +7,7 @@
myxml-hl.xml - configuration of the XML highlighter, which highlights
HTML elements and XSL elements separately

This file has been customized for the Asciidoctor project (https://asciidoctor.org).
-->
<highlighters>
<highlighter type="xml">

@@ -4,7 +4,7 @@
Syntax highlighting definition for ini files

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for Java

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for JavaScript

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for Perl

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for PHP

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for Java

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for Python

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for Ruby

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied

@@ -4,7 +4,7 @@
Syntax highlighting definition for SQL:1999

xslthl - XSLT Syntax Highlighting
https://sourceforge.net/projects/xslthl/
Copyright (C) 2012 Michiel Hendriks, Martin Hujer, k42b3

This software is provided 'as-is', without any express or implied
@@ -1,80 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <artifactId>spring-cloud-stream-binder-kafka-streams</artifactId>
    <packaging>jar</packaging>
    <name>spring-cloud-stream-binder-kafka-streams</name>
    <description>Kafka Streams Binder Implementation</description>

    <parent>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
        <version>2.1.0.M2</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-configuration-processor</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-autoconfigure</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-streams</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka-test</artifactId>
        </dependency>
        <!-- Added back since Kafka still depends on it, but it has been removed by Boot due to EOL -->
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream-binder-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-autoconfigure-processor</artifactId>
            <optional>true</optional>
        </dependency>
        <!-- The following dependencies are needed to support the Kafka 1.1.0 client -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>${kafka.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>${kafka.version}</version>
            <classifier>test</classifier>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
@@ -1,124 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Map;
import java.util.Optional;

import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreType;
import org.apache.kafka.streams.state.StreamsMetadata;

import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.util.StringUtils;

/**
 * Services pertinent to the interactive query capabilities of Kafka Streams. This class provides
 * services such as querying for a particular store, finding which instance is hosting a particular store, and so on.
 * It is part of the public API of the Kafka Streams binder, and users can inject this service in their
 * applications to make use of it.
 *
 * @author Soby Chacko
 * @author Renwei Han
 * @since 2.1.0
 */
public class InteractiveQueryService {

    private final KafkaStreamsRegistry kafkaStreamsRegistry;

    private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

    /**
     * @param kafkaStreamsRegistry holding {@link KafkaStreamsRegistry}
     * @param binderConfigurationProperties Kafka Streams binder configuration properties
     */
    public InteractiveQueryService(KafkaStreamsRegistry kafkaStreamsRegistry,
                                KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        this.kafkaStreamsRegistry = kafkaStreamsRegistry;
        this.binderConfigurationProperties = binderConfigurationProperties;
    }

    /**
     * Retrieve and return a queryable store by name created in the application.
     *
     * @param storeName name of the queryable store
     * @param storeType type of the queryable store
     * @param <T> generic queryable store
     * @return queryable store.
     */
    public <T> T getQueryableStore(String storeName, QueryableStoreType<T> storeType) {
        for (KafkaStreams kafkaStream : this.kafkaStreamsRegistry.getKafkaStreams()) {
            try {
                T store = kafkaStream.store(storeName, storeType);
                if (store != null) {
                    return store;
                }
            }
            catch (InvalidStateStoreException ignored) {
                // pass through: the store may be hosted by another KafkaStreams instance
            }
        }
        return null;
    }

    /**
     * Gets the current {@link HostInfo} that the calling Kafka Streams application is running on.
     *
     * Note that end user applications must provide `application.server` as a configuration property
     * when calling this method. If this is not available, then null is returned.
     *
     * @return the current {@link HostInfo}
     */
    public HostInfo getCurrentHostInfo() {
        Map<String, String> configuration = this.binderConfigurationProperties.getConfiguration();
        if (configuration.containsKey("application.server")) {

            String applicationServer = configuration.get("application.server");
            String[] splits = StringUtils.split(applicationServer, ":");

            return new HostInfo(splits[0], Integer.valueOf(splits[1]));
        }
        return null;
    }

    /**
     * Gets the {@link HostInfo} where the provided store and key are hosted. This may not be the
     * current host that is running the application. Kafka Streams looks through all the consumer instances
     * under the same application id and retrieves the proper host.
     *
     * Note that end user applications must provide `application.server` as a configuration property
     * for all the application instances when calling this method. If this is not available, then null may be returned.
     *
     * @param store store name
     * @param key key to look for
     * @param serializer {@link Serializer} for the key
     * @param <K> the type of the key
     * @return the {@link HostInfo} where the key for the provided store is currently hosted
     */
    public <K> HostInfo getHostInfo(String store, K key, Serializer<K> serializer) {
        StreamsMetadata streamsMetadata = this.kafkaStreamsRegistry.getKafkaStreams()
                .stream()
                .map(k -> Optional.ofNullable(k.metadataForKey(store, key, serializer)))
                .filter(Optional::isPresent)
                .map(Optional::get)
                .findFirst()
                .orElse(null);
        return streamsMetadata != null ? streamsMetadata.hostInfo() : null;
    }
}
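
The Javadoc above notes that applications can inject this service to use the interactive query capabilities. A minimal sketch of such usage (the store name `my-counts` and the key/value types are illustrative; `QueryableStoreTypes` is the standard Kafka Streams factory for store types):

[source, java]
----
// Hypothetical caller: injects the service and looks up a count from a state store.
@Autowired
private InteractiveQueryService interactiveQueryService;

public Long countForKey(String key) {
    ReadOnlyKeyValueStore<String, Long> store = this.interactiveQueryService
            .getQueryableStore("my-counts", QueryableStoreTypes.<String, Long>keyValueStore());
    return store.get(key);
}
----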
@@ -1,165 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Produced;

import org.springframework.cloud.stream.binder.AbstractBinder;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.DefaultBinding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
import org.springframework.util.StringUtils;

/**
 * {@link org.springframework.cloud.stream.binder.Binder} implementation for {@link KStream}.
 * This implementation extends from the {@link AbstractBinder} directly.
 * <p>
 * Provides both producer and consumer bindings for the bound KStream.
 *
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
class KStreamBinder extends
        AbstractBinder<KStream<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
        implements ExtendedPropertiesBinder<KStream<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {

    private final static Log LOG = LogFactory.getLog(KStreamBinder.class);

    private final KafkaTopicProvisioner kafkaTopicProvisioner;

    private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();

    private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

    private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;

    private final KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue;

    private final KeyValueSerdeResolver keyValueSerdeResolver;

    KStreamBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
                KafkaTopicProvisioner kafkaTopicProvisioner,
                KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
                KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
                KeyValueSerdeResolver keyValueSerdeResolver) {
        this.binderConfigurationProperties = binderConfigurationProperties;
        this.kafkaTopicProvisioner = kafkaTopicProvisioner;
        this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
        this.KafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
        this.keyValueSerdeResolver = keyValueSerdeResolver;
    }

    @Override
    @SuppressWarnings("unchecked")
    protected Binding<KStream<Object, Object>> doBindConsumer(String name, String group,
            KStream<Object, Object> inputTarget,
            ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
        this.KafkaStreamsBindingInformationCatalogue.registerConsumerProperties(inputTarget, properties.getExtension());
        ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<>(
                properties.getExtension());
        if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
            extendedConsumerProperties.getExtension().setEnableDlq(true);
        }
        if (!StringUtils.hasText(group)) {
            group = binderConfigurationProperties.getApplicationId();
        }

        String[] inputTopics = StringUtils.commaDelimitedListToStringArray(name);
        for (String inputTopic : inputTopics) {
            this.kafkaTopicProvisioner.provisionConsumerDestination(inputTopic, group, extendedConsumerProperties);
        }

        if (extendedConsumerProperties.getExtension().isEnableDlq()) {
            StreamsConfig streamsConfig = this.KafkaStreamsBindingInformationCatalogue.getStreamsConfig(inputTarget);

            KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = !StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
                    new KafkaStreamsDlqDispatch(extendedConsumerProperties.getExtension().getDlqName(), binderConfigurationProperties,
                            extendedConsumerProperties.getExtension()) : null;
            for (String inputTopic : inputTopics) {
                if (StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName())) {
                    String dlqName = "error." + inputTopic + "." + group;
                    kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
                            extendedConsumerProperties.getExtension());
                }
                SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
                sendToDlqAndContinue.addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);

                DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
                if (deserializationExceptionHandler instanceof SendToDlqAndContinue) {
                    ((SendToDlqAndContinue) deserializationExceptionHandler).addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);
                }
            }
        }

        return new DefaultBinding<>(name, group, inputTarget, null);
    }

    @Override
    @SuppressWarnings("unchecked")
    protected Binding<KStream<Object, Object>> doBindProducer(String name, KStream<Object, Object> outboundBindTarget,
            ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
        ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties = new ExtendedProducerProperties<>(
                new KafkaProducerProperties());
        this.kafkaTopicProvisioner.provisionProducerDestination(name, extendedProducerProperties);
        Serde<?> keySerde = this.keyValueSerdeResolver.getOuboundKeySerde(properties.getExtension());
        Serde<?> valueSerde = this.keyValueSerdeResolver.getOutboundValueSerde(properties, properties.getExtension());
        to(properties.isUseNativeEncoding(), name, outboundBindTarget, (Serde<Object>) keySerde, (Serde<Object>) valueSerde);
        return new DefaultBinding<>(name, null, outboundBindTarget, null);
    }

    @SuppressWarnings("unchecked")
    private void to(boolean isNativeEncoding, String name, KStream<Object, Object> outboundBindTarget,
            Serde<Object> keySerde, Serde<Object> valueSerde) {
        if (!isNativeEncoding) {
            LOG.info("Native encoding is disabled for " + name + ". Outbound message conversion done by Spring Cloud Stream.");
            kafkaStreamsMessageConversionDelegate.serializeOnOutbound(outboundBindTarget)
                    .to(name, Produced.with(keySerde, valueSerde));
        } else {
            LOG.info("Native encoding is enabled for " + name + ". Outbound serialization done at the broker.");
            outboundBindTarget.to(name, Produced.with(keySerde, valueSerde));
        }
    }

    @Override
    public KafkaStreamsConsumerProperties getExtendedConsumerProperties(String channelName) {
        return this.kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(channelName);
    }

    @Override
    public KafkaStreamsProducerProperties getExtendedProducerProperties(String channelName) {
        return this.kafkaStreamsExtendedBindingProperties.getExtendedProducerProperties(channelName);
    }

    public void setKafkaStreamsExtendedBindingProperties(KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
        this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
    }
}
@@ -1,79 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;

/**
 * @author Marius Bogoevici
 * @author Gary Russell
 * @author Soby Chacko
 */
@Configuration
@Import({KafkaAutoConfiguration.class})
public class KStreamBinderConfiguration {

    @Bean
    @ConditionalOnBean(name = "outerContext")
    public BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
        return beanFactory -> {
            ApplicationContext outerContext = (ApplicationContext) beanFactory.getBean("outerContext");
            beanFactory.registerSingleton(KafkaStreamsBinderConfigurationProperties.class.getSimpleName(), outerContext
                    .getBean(KafkaStreamsBinderConfigurationProperties.class));
            beanFactory.registerSingleton(KafkaStreamsMessageConversionDelegate.class.getSimpleName(), outerContext
                    .getBean(KafkaStreamsMessageConversionDelegate.class));
            beanFactory.registerSingleton(KafkaStreamsBindingInformationCatalogue.class.getSimpleName(), outerContext
                    .getBean(KafkaStreamsBindingInformationCatalogue.class));
            beanFactory.registerSingleton(KeyValueSerdeResolver.class.getSimpleName(), outerContext
                    .getBean(KeyValueSerdeResolver.class));
            beanFactory.registerSingleton(KafkaStreamsExtendedBindingProperties.class.getSimpleName(), outerContext
                    .getBean(KafkaStreamsExtendedBindingProperties.class));
        };
    }

    @Bean
    public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties,
            KafkaProperties kafkaProperties) {
        return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
    }

    @Bean
    public KStreamBinder kStreamBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
            KafkaTopicProvisioner kafkaTopicProvisioner,
            KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate,
            KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue,
            KeyValueSerdeResolver keyValueSerdeResolver,
            KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties) {
        KStreamBinder kStreamBinder = new KStreamBinder(binderConfigurationProperties, kafkaTopicProvisioner,
                KafkaStreamsMessageConversionDelegate, KafkaStreamsBindingInformationCatalogue,
                keyValueSerdeResolver);
        kStreamBinder.setKafkaStreamsExtendedBindingProperties(kafkaStreamsExtendedBindingProperties);
        return kStreamBinder;
    }

}
@@ -1,110 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.kafka.streams.kstream.KStream;

import org.springframework.aop.framework.ProxyFactory;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.util.Assert;

/**
 * {@link org.springframework.cloud.stream.binding.BindingTargetFactory} for {@link KStream}.
 *
 * The implementation creates proxies for both input and output bindings.
 * The actual target is created downstream through the further binding process.
 *
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {

    private final BindingServiceProperties bindingServiceProperties;

    private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;

    KStreamBoundElementFactory(BindingServiceProperties bindingServiceProperties,
            KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
        super(KStream.class);
        this.bindingServiceProperties = bindingServiceProperties;
        this.kafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
    }

    @Override
    public KStream createInput(String name) {
        ConsumerProperties consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
        // Always set multiplex to true in the Kafka Streams binder.
        consumerProperties.setMultiplex(true);
        return createProxyForKStream(name);
    }

    @Override
    @SuppressWarnings("unchecked")
    public KStream createOutput(final String name) {
        return createProxyForKStream(name);
    }

    private KStream createProxyForKStream(String name) {
        KStreamWrapperHandler wrapper = new KStreamWrapperHandler();
        ProxyFactory proxyFactory = new ProxyFactory(KStreamWrapper.class, KStream.class);
        proxyFactory.addAdvice(wrapper);

        KStream proxy = (KStream) proxyFactory.getProxy();

        // Add the binding properties to the catalogue for later retrieval during further binding steps downstream.
        BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(name);
        this.kafkaStreamsBindingInformationCatalogue.registerBindingProperties(proxy, bindingProperties);
        return proxy;
    }

    public interface KStreamWrapper {

        void wrap(KStream<Object, Object> delegate);

    }

    private static class KStreamWrapperHandler implements KStreamWrapper, MethodInterceptor {

        private KStream<Object, Object> delegate;

        public void wrap(KStream<Object, Object> delegate) {
            Assert.notNull(delegate, "delegate cannot be null");
            Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
            this.delegate = delegate;
        }

        @Override
        public Object invoke(MethodInvocation methodInvocation) throws Throwable {
            if (methodInvocation.getMethod().getDeclaringClass().equals(KStream.class)) {
                Assert.notNull(delegate, "Trying to invoke " + methodInvocation
                        .getMethod() + " but no delegate has been set.");
                return methodInvocation.getMethod().invoke(delegate, methodInvocation.getArguments());
            }
            else if (methodInvocation.getMethod().getDeclaringClass().equals(KStreamWrapper.class)) {
                return methodInvocation.getMethod().invoke(this, methodInvocation.getArguments());
            }
            else {
                throw new IllegalStateException("Only KStream method invocations are permitted");
            }
        }
    }
}
@@ -1,61 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KeyValueMapper;

import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
import org.springframework.core.MethodParameter;
import org.springframework.core.ResolvableType;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
class KStreamStreamListenerParameterAdapter implements StreamListenerParameterAdapter<KStream<?, ?>, KStream<?, ?>> {

    private final KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate;

    private final KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue;

    KStreamStreamListenerParameterAdapter(KafkaStreamsMessageConversionDelegate kafkaStreamsMessageConversionDelegate,
            KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue) {
        this.kafkaStreamsMessageConversionDelegate = kafkaStreamsMessageConversionDelegate;
        this.KafkaStreamsBindingInformationCatalogue = KafkaStreamsBindingInformationCatalogue;
    }

    @Override
    public boolean supports(Class bindingTargetType, MethodParameter methodParameter) {
        return KStream.class.isAssignableFrom(bindingTargetType)
                && KStream.class.isAssignableFrom(methodParameter.getParameterType());
    }

    @Override
    @SuppressWarnings("unchecked")
    public KStream adapt(KStream<?, ?> bindingTarget, MethodParameter parameter) {
        ResolvableType resolvableType = ResolvableType.forMethodParameter(parameter);
        final Class<?> valueClass = (resolvableType.getGeneric(1).getRawClass() != null)
                ? (resolvableType.getGeneric(1).getRawClass()) : Object.class;
        if (this.KafkaStreamsBindingInformationCatalogue.isUseNativeDecoding(bindingTarget)) {
            return bindingTarget.map((KeyValueMapper) KeyValue::new);
        }
        else {
            return kafkaStreamsMessageConversionDelegate.deserializeOnInbound(valueClass, bindingTarget);
        }
    }
}
@@ -1,53 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.io.Closeable;
import java.io.IOException;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
class KStreamStreamListenerResultAdapter implements StreamListenerResultAdapter<KStream, KStreamBoundElementFactory.KStreamWrapper> {

    @Override
    public boolean supports(Class<?> resultType, Class<?> boundElement) {
        return KStream.class.isAssignableFrom(resultType) && KStream.class.isAssignableFrom(boundElement);
    }

    @Override
    @SuppressWarnings("unchecked")
    public Closeable adapt(KStream streamListenerResult, KStreamBoundElementFactory.KStreamWrapper boundElement) {
        boundElement.wrap(streamListenerResult.map(KeyValue::new));
        return new NoOpCloseable();
    }

    private static final class NoOpCloseable implements Closeable {

        @Override
        public void close() throws IOException {

        }

    }
}
@@ -1,121 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
import org.apache.kafka.streams.kstream.KTable;

import org.springframework.cloud.stream.binder.AbstractBinder;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.DefaultBinding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
import org.springframework.util.StringUtils;

/**
 * {@link org.springframework.cloud.stream.binder.Binder} implementation for {@link KTable}.
 * This implementation extends from the {@link AbstractBinder} directly.
 *
 * Provides only a consumer binding for the bound KTable, as output bindings are not allowed on it.
 *
 * @author Soby Chacko
 */
class KTableBinder extends
        AbstractBinder<KTable<Object, Object>, ExtendedConsumerProperties<KafkaStreamsConsumerProperties>, ExtendedProducerProperties<KafkaStreamsProducerProperties>>
        implements ExtendedPropertiesBinder<KTable<Object, Object>, KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {

    private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

    private final KafkaTopicProvisioner kafkaTopicProvisioner;

    private final KafkaStreamsBindingInformationCatalogue KafkaStreamsBindingInformationCatalogue;

    private KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties = new KafkaStreamsExtendedBindingProperties();

    KTableBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties, KafkaTopicProvisioner kafkaTopicProvisioner,
            KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
        this.binderConfigurationProperties = binderConfigurationProperties;
        this.kafkaTopicProvisioner = kafkaTopicProvisioner;
        this.KafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
    }

    @Override
    @SuppressWarnings("unchecked")
    protected Binding<KTable<Object, Object>> doBindConsumer(String name, String group, KTable<Object, Object> inputTarget,
            ExtendedConsumerProperties<KafkaStreamsConsumerProperties> properties) {
        ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<>(
                properties.getExtension());
        if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
            extendedConsumerProperties.getExtension().setEnableDlq(true);
        }
        if (!StringUtils.hasText(group)) {
            group = binderConfigurationProperties.getApplicationId();
        }

        String[] inputTopics = StringUtils.commaDelimitedListToStringArray(name);
        for (String inputTopic : inputTopics) {
            this.kafkaTopicProvisioner.provisionConsumerDestination(inputTopic, group, extendedConsumerProperties);
        }

        if (extendedConsumerProperties.getExtension().isEnableDlq()) {
            StreamsConfig streamsConfig = this.KafkaStreamsBindingInformationCatalogue.getStreamsConfig(inputTarget);

            KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = !StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName()) ?
                    new KafkaStreamsDlqDispatch(extendedConsumerProperties.getExtension().getDlqName(), binderConfigurationProperties,
                            extendedConsumerProperties.getExtension()) : null;
            for (String inputTopic : inputTopics) {
                if (StringUtils.isEmpty(extendedConsumerProperties.getExtension().getDlqName())) {
                    String dlqName = "error." + inputTopic + "." + group;
|
||||
kafkaStreamsDlqDispatch = new KafkaStreamsDlqDispatch(dlqName, binderConfigurationProperties,
|
||||
extendedConsumerProperties.getExtension());
|
||||
}
|
||||
SendToDlqAndContinue sendToDlqAndContinue = this.getApplicationContext().getBean(SendToDlqAndContinue.class);
|
||||
sendToDlqAndContinue.addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);
|
||||
|
||||
DeserializationExceptionHandler deserializationExceptionHandler = streamsConfig.defaultDeserializationExceptionHandler();
|
||||
if (deserializationExceptionHandler instanceof SendToDlqAndContinue) {
|
||||
((SendToDlqAndContinue) deserializationExceptionHandler).addKStreamDlqDispatch(inputTopic, kafkaStreamsDlqDispatch);
|
||||
}
|
||||
}
|
||||
}
|
||||
return new DefaultBinding<>(name, group, inputTarget, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Binding<KTable<Object, Object>> doBindProducer(String name, KTable<Object, Object> outboundBindTarget,
|
||||
ExtendedProducerProperties<KafkaStreamsProducerProperties> properties) {
|
||||
throw new UnsupportedOperationException("No producer level binding is allowed for KTable");
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsConsumerProperties getExtendedConsumerProperties(String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(channelName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public KafkaStreamsProducerProperties getExtendedProducerProperties(String channelName) {
|
||||
return this.kafkaStreamsExtendedBindingProperties.getExtendedProducerProperties(channelName);
|
||||
}
|
||||
}
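The DLQ wiring above is driven entirely by configuration. A hedged sketch of properties that exercise it (the binding name "input" and the topic/group values are illustrative; with serdeError=sendToDlq and no explicit dlqName, failed records land on "error.<topic>.<group>"):

# Illustrative application.properties
spring.cloud.stream.bindings.input.destination=words
spring.cloud.stream.bindings.input.group=wordcount-group
# sendToDlq makes the binder enable DLQ on the consumer binding above
spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq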
@@ -1,61 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * @author Soby Chacko
 */
@Configuration
public class KTableBinderConfiguration {

    @Bean
    @ConditionalOnBean(name = "outerContext")
    public BeanFactoryPostProcessor outerContextBeanFactoryPostProcessor() {
        return beanFactory -> {
            ApplicationContext outerContext = (ApplicationContext) beanFactory.getBean("outerContext");
            beanFactory.registerSingleton(KafkaStreamsBinderConfigurationProperties.class.getSimpleName(), outerContext
                    .getBean(KafkaStreamsBinderConfigurationProperties.class));
            beanFactory.registerSingleton(KafkaStreamsBindingInformationCatalogue.class.getSimpleName(), outerContext
                    .getBean(KafkaStreamsBindingInformationCatalogue.class));
        };
    }

    @Bean
    public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties,
            KafkaProperties kafkaProperties) {
        return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
    }

    @Bean
    public KTableBinder kTableBinder(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
            KafkaTopicProvisioner kafkaTopicProvisioner,
            KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
        KTableBinder kTableBinder = new KTableBinder(binderConfigurationProperties, kafkaTopicProvisioner,
                kafkaStreamsBindingInformationCatalogue);
        return kTableBinder;
    }
}
@@ -1,93 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.kafka.streams.kstream.KTable;

import org.springframework.aop.framework.ProxyFactory;
import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.util.Assert;

/**
 * {@link org.springframework.cloud.stream.binding.BindingTargetFactory} for {@link KTable}.
 *
 * Only input bindings are created, since output bindings are not allowed on KTable.
 *
 * @author Soby Chacko
 */
class KTableBoundElementFactory extends AbstractBindingTargetFactory<KTable> {

    private final BindingServiceProperties bindingServiceProperties;

    KTableBoundElementFactory(BindingServiceProperties bindingServiceProperties) {
        super(KTable.class);
        this.bindingServiceProperties = bindingServiceProperties;
    }

    @Override
    public KTable createInput(String name) {
        ConsumerProperties consumerProperties = this.bindingServiceProperties.getConsumerProperties(name);
        // Always set multiplex to true in the Kafka Streams binder
        consumerProperties.setMultiplex(true);

        KTableBoundElementFactory.KTableWrapperHandler wrapper = new KTableBoundElementFactory.KTableWrapperHandler();
        ProxyFactory proxyFactory = new ProxyFactory(KTableBoundElementFactory.KTableWrapper.class, KTable.class);
        proxyFactory.addAdvice(wrapper);

        return (KTable) proxyFactory.getProxy();
    }

    @Override
    @SuppressWarnings("unchecked")
    public KTable createOutput(final String name) {
        throw new UnsupportedOperationException("Outbound operations are not allowed on target type KTable");
    }

    public interface KTableWrapper {
        void wrap(KTable<Object, Object> delegate);
    }

    private static class KTableWrapperHandler implements KTableBoundElementFactory.KTableWrapper, MethodInterceptor {

        private KTable<Object, Object> delegate;

        public void wrap(KTable<Object, Object> delegate) {
            Assert.notNull(delegate, "delegate cannot be null");
            Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
            this.delegate = delegate;
        }

        @Override
        public Object invoke(MethodInvocation methodInvocation) throws Throwable {
            if (methodInvocation.getMethod().getDeclaringClass().equals(KTable.class)) {
                Assert.notNull(delegate, "Trying to invoke " + methodInvocation
                        .getMethod() + " but no delegate has been set.");
                return methodInvocation.getMethod().invoke(delegate, methodInvocation.getArguments());
            }
            else if (methodInvocation.getMethod().getDeclaringClass().equals(KTableBoundElementFactory.KTableWrapper.class)) {
                return methodInvocation.getMethod().invoke(this, methodInvocation.getArguments());
            }
            else {
                throw new IllegalStateException("Only KTable method invocations are permitted");
            }
        }
    }
}
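The proxy returned by createInput() is what a bindable interface hands to a StreamListener method. A minimal hypothetical declaration that would go through this factory; the interface and binding name are illustrative, not part of this changeset:

import org.apache.kafka.streams.kstream.KTable;

import org.springframework.cloud.stream.annotation.Input;

// Declaring a KTable input causes KTableBoundElementFactory.createInput()
// to supply the KTableWrapper proxy; the real KTable is set later via wrap().
public interface KTableSink {

    @Input("ktable-input")
    KTable<?, ?> input();
}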
@@ -1,41 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.apache.kafka.streams.kstream.TimeWindows;

import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * @author Soby Chacko
 */
@Configuration
@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
public class KafkaStreamsApplicationSupportAutoConfiguration {

    @Bean
    @ConditionalOnProperty("spring.cloud.stream.kafka.streams.timeWindow.length")
    public TimeWindows configuredTimeWindow(KafkaStreamsApplicationSupportProperties processorProperties) {
        return processorProperties.getTimeWindow().getAdvanceBy() > 0
                ? TimeWindows.of(processorProperties.getTimeWindow().getLength()).advanceBy(processorProperties.getTimeWindow().getAdvanceBy())
                : TimeWindows.of(processorProperties.getTimeWindow().getLength());
    }
}
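The conditional bean above keys off the timeWindow.length property. A sketch of settings that activate it: the length key comes verbatim from the @ConditionalOnProperty value, while the advanceBy key is inferred from the getters, so treat it as an assumption; values are in milliseconds:

# Illustrative application.properties: publish a TimeWindows bean of 30s
# windows that advance every 10s (a hopping window).
spring.cloud.stream.kafka.streams.timeWindow.length=30000
spring.cloud.stream.kafka.streams.timeWindow.advanceBy=10000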
@@ -1,175 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;

import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
import org.springframework.cloud.stream.binding.BindingService;
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
import org.springframework.cloud.stream.config.BindingServiceConfiguration;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.CleanupConfig;
import org.springframework.util.ObjectUtils;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 */
@EnableConfigurationProperties(KafkaStreamsExtendedBindingProperties.class)
@ConditionalOnBean(BindingService.class)
@AutoConfigureAfter(BindingServiceConfiguration.class)
public class KafkaStreamsBinderSupportAutoConfiguration {

    @Bean
    @ConfigurationProperties(prefix = "spring.cloud.stream.kafka.streams.binder")
    public KafkaStreamsBinderConfigurationProperties binderConfigurationProperties(KafkaProperties kafkaProperties) {
        return new KafkaStreamsBinderConfigurationProperties(kafkaProperties);
    }

    @Bean("streamConfigGlobalProperties")
    public Map<String, Object> streamConfigGlobalProperties(KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        Map<String, Object> props = new HashMap<>();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, binderConfigurationProperties.getKafkaConnectionString());
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, binderConfigurationProperties.getApplicationId());

        if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndContinue) {
            props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                    LogAndContinueExceptionHandler.class);
        }
        else if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndFail) {
            props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                    LogAndFailExceptionHandler.class);
        }
        else if (binderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.sendToDlq) {
            props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
                    SendToDlqAndContinue.class);
        }

        if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConfiguration())) {
            props.putAll(binderConfigurationProperties.getConfiguration());
        }

        return props;
    }

    @Bean
    public KStreamStreamListenerResultAdapter kstreamStreamListenerResultAdapter() {
        return new KStreamStreamListenerResultAdapter();
    }

    @Bean
    public KStreamStreamListenerParameterAdapter kstreamStreamListenerParameterAdapter(
            KafkaStreamsMessageConversionDelegate kstreamBoundMessageConversionDelegate, KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
        return new KStreamStreamListenerParameterAdapter(kstreamBoundMessageConversionDelegate, kafkaStreamsBindingInformationCatalogue);
    }

    @Bean
    public KafkaStreamsStreamListenerSetupMethodOrchestrator kafkaStreamsStreamListenerSetupMethodOrchestrator(
            BindingServiceProperties bindingServiceProperties,
            KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
            KeyValueSerdeResolver keyValueSerdeResolver,
            KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
            KStreamStreamListenerParameterAdapter kafkaStreamListenerParameterAdapter,
            Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
            ObjectProvider<CleanupConfig> cleanupConfig) {
        return new KafkaStreamsStreamListenerSetupMethodOrchestrator(bindingServiceProperties,
                kafkaStreamsExtendedBindingProperties, keyValueSerdeResolver, kafkaStreamsBindingInformationCatalogue,
                kafkaStreamListenerParameterAdapter, streamListenerResultAdapters, binderConfigurationProperties,
                cleanupConfig.getIfUnique());
    }

    @Bean
    public KafkaStreamsMessageConversionDelegate messageConversionDelegate(CompositeMessageConverterFactory compositeMessageConverterFactory,
            SendToDlqAndContinue sendToDlqAndContinue,
            KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        return new KafkaStreamsMessageConversionDelegate(compositeMessageConverterFactory, sendToDlqAndContinue,
                kafkaStreamsBindingInformationCatalogue, binderConfigurationProperties);
    }

    @Bean
    public KStreamBoundElementFactory kStreamBoundElementFactory(BindingServiceProperties bindingServiceProperties,
            KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue) {
        return new KStreamBoundElementFactory(bindingServiceProperties,
                kafkaStreamsBindingInformationCatalogue);
    }

    @Bean
    public KTableBoundElementFactory kTableBoundElementFactory(BindingServiceProperties bindingServiceProperties) {
        return new KTableBoundElementFactory(bindingServiceProperties);
    }

    @Bean
    public SendToDlqAndContinue sendToDlqAndContinue() {
        return new SendToDlqAndContinue();
    }

    @Bean
    public KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue() {
        return new KafkaStreamsBindingInformationCatalogue();
    }

    @Bean
    @SuppressWarnings("unchecked")
    public KeyValueSerdeResolver keyValueSerdeResolver(@Qualifier("streamConfigGlobalProperties") Object streamConfigGlobalProperties,
            KafkaStreamsBinderConfigurationProperties kafkaStreamsBinderConfigurationProperties) {
        return new KeyValueSerdeResolver((Map<String, Object>) streamConfigGlobalProperties, kafkaStreamsBinderConfigurationProperties);
    }

    @Bean
    public QueryableStoreRegistry queryableStoreTypeRegistry(KafkaStreamsRegistry kafkaStreamsRegistry) {
        return new QueryableStoreRegistry(kafkaStreamsRegistry);
    }

    @Bean
    public InteractiveQueryService interactiveQueryServices(KafkaStreamsRegistry kafkaStreamsRegistry,
            KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
        return new InteractiveQueryService(kafkaStreamsRegistry, binderConfigurationProperties);
    }

    @Bean
    public KafkaStreamsRegistry kafkaStreamsRegistry() {
        return new KafkaStreamsRegistry();
    }

    @Bean
    public StreamsBuilderFactoryManager streamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
            KafkaStreamsRegistry kafkaStreamsRegistry) {
        return new StreamsBuilderFactoryManager(kafkaStreamsBindingInformationCatalogue, kafkaStreamsRegistry);
    }

}
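Everything the binderConfigurationProperties bean exposes is bound from the "spring.cloud.stream.kafka.streams.binder" prefix declared above. A hedged sketch: the prefix, applicationId, serdeError values, and the pass-through configuration map come from this class, while the concrete values and the brokers key (inherited from the core Kafka binder properties) are assumptions:

# Illustrative application.properties
spring.cloud.stream.kafka.streams.binder.brokers=localhost:9092
spring.cloud.stream.kafka.streams.binder.applicationId=my-streams-app
spring.cloud.stream.kafka.streams.binder.serdeError=logAndContinue
# Arbitrary Kafka Streams settings pass straight into streamConfigGlobalProperties:
spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000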
@@ -1,143 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;

/**
 * A catalogue that provides binding information for Kafka Streams target types such as KStream.
 * It also keeps a catalogue for the underlying {@link StreamsBuilderFactoryBean} and
 * {@link StreamsConfig} associated with various {@link org.springframework.cloud.stream.annotation.StreamListener}
 * methods in the {@link org.springframework.context.ApplicationContext}.
 *
 * @author Soby Chacko
 */
class KafkaStreamsBindingInformationCatalogue {

    private final Map<KStream<?, ?>, BindingProperties> bindingProperties = new ConcurrentHashMap<>();

    private final Map<KStream<?, ?>, KafkaStreamsConsumerProperties> consumerProperties = new ConcurrentHashMap<>();

    private final Map<Object, StreamsConfig> streamsConfigs = new ConcurrentHashMap<>();

    private final Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = new HashSet<>();

    /**
     * For a given bound {@link KStream}, retrieve its corresponding destination
     * on the broker.
     *
     * @param bindingTarget KStream binding target
     * @return destination topic on Kafka
     */
    String getDestination(KStream<?, ?> bindingTarget) {
        BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
        return bindingProperties.getDestination();
    }

    /**
     * Is native decoding enabled on this {@link KStream}?
     *
     * @param bindingTarget KStream binding target
     * @return true if native decoding is enabled, false otherwise
     */
    boolean isUseNativeDecoding(KStream<?, ?> bindingTarget) {
        BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
        if (bindingProperties.getConsumer() == null) {
            bindingProperties.setConsumer(new ConsumerProperties());
        }
        return bindingProperties.getConsumer().isUseNativeDecoding();
    }

    /**
     * Is DLQ enabled for this {@link KStream}?
     *
     * @param bindingTarget KStream binding target
     * @return true if DLQ is enabled, false otherwise
     */
    boolean isDlqEnabled(KStream<?, ?> bindingTarget) {
        return consumerProperties.get(bindingTarget).isEnableDlq();
    }

    /**
     * Retrieve the content type associated with a given {@link KStream}.
     *
     * @param bindingTarget KStream binding target
     * @return the associated content type
     */
    String getContentType(KStream<?, ?> bindingTarget) {
        BindingProperties bindingProperties = this.bindingProperties.get(bindingTarget);
        return bindingProperties.getContentType();
    }

    /**
     * Retrieve the registered {@link StreamsConfig} for the given binding target.
     *
     * @param bindingTarget KStream binding target
     * @return corresponding {@link StreamsConfig}
     */
    StreamsConfig getStreamsConfig(Object bindingTarget) {
        return streamsConfigs.get(bindingTarget);
    }

    /**
     * Register a cache for bound KStream -> {@link BindingProperties}.
     *
     * @param bindingTarget KStream binding target
     * @param bindingProperties {@link BindingProperties} for this KStream
     */
    void registerBindingProperties(KStream<?, ?> bindingTarget, BindingProperties bindingProperties) {
        this.bindingProperties.put(bindingTarget, bindingProperties);
    }

    /**
     * Register a cache for bound KStream -> {@link KafkaStreamsConsumerProperties}.
     *
     * @param bindingTarget KStream binding target
     * @param kafkaStreamsConsumerProperties consumer properties for this KStream
     */
    void registerConsumerProperties(KStream<?, ?> bindingTarget, KafkaStreamsConsumerProperties kafkaStreamsConsumerProperties) {
        this.consumerProperties.put(bindingTarget, kafkaStreamsConsumerProperties);
    }

    /**
     * Register a {@link StreamsBuilderFactoryBean} with the catalogue.
     *
     * @param streamsBuilderFactoryBean the {@link StreamsBuilderFactoryBean} to register
     */
    void addStreamBuilderFactory(StreamsBuilderFactoryBean streamsBuilderFactoryBean) {
        this.streamsBuilderFactoryBeans.add(streamsBuilderFactoryBean);
    }

    void addStreamsConfigs(Object bindingTarget, StreamsConfig streamsConfig) {
        this.streamsConfigs.put(bindingTarget, streamsConfig);
    }

    Set<StreamsBuilderFactoryBean> getStreamsBuilderFactoryBeans() {
        return streamsBuilderFactoryBeans;
    }
}
@@ -1,144 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.ObjectUtils;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

/**
 * @author Soby Chacko
 * @author Rafal Zukowski
 * @author Gary Russell
 */
class KafkaStreamsDlqDispatch {

    private final Log logger = LogFactory.getLog(getClass());

    private final KafkaTemplate<byte[], byte[]> kafkaTemplate;

    private final String dlqName;

    KafkaStreamsDlqDispatch(String dlqName,
            KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
            KafkaConsumerProperties kafkaConsumerProperties) {
        ProducerFactory<byte[], byte[]> producerFactory = getProducerFactory(
                new ExtendedProducerProperties<>(kafkaConsumerProperties.getDlqProducerProperties()),
                kafkaBinderConfigurationProperties);

        this.kafkaTemplate = new KafkaTemplate<>(producerFactory);
        this.dlqName = dlqName;
    }

    @SuppressWarnings("unchecked")
    public void sendToDlq(byte[] key, byte[] value, int partition) {
        ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(this.dlqName, partition,
                key, value, null);

        StringBuilder sb = new StringBuilder().append(" a message with key='")
                .append(toDisplayString(ObjectUtils.nullSafeToString(key))).append("'")
                .append(" and payload='")
                .append(toDisplayString(ObjectUtils.nullSafeToString(value)))
                .append("'").append(" received from ")
                .append(partition);
        ListenableFuture<SendResult<byte[], byte[]>> sentDlq = null;
        try {
            sentDlq = this.kafkaTemplate.send(producerRecord);
            sentDlq.addCallback(new ListenableFutureCallback<SendResult<byte[], byte[]>>() {

                @Override
                public void onFailure(Throwable ex) {
                    KafkaStreamsDlqDispatch.this.logger.error(
                            "Error sending to DLQ " + sb.toString(), ex);
                }

                @Override
                public void onSuccess(SendResult<byte[], byte[]> result) {
                    if (KafkaStreamsDlqDispatch.this.logger.isDebugEnabled()) {
                        KafkaStreamsDlqDispatch.this.logger.debug(
                                "Sent to DLQ " + sb.toString());
                    }
                }
            });
        }
        catch (Exception ex) {
            if (sentDlq == null) {
                KafkaStreamsDlqDispatch.this.logger.error(
                        "Error sending to DLQ " + sb.toString(), ex);
            }
        }
    }

    private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
            KafkaBinderConfigurationProperties configurationProperties) {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        props.put(ProducerConfig.ACKS_CONFIG, configurationProperties.getRequiredAcks());
        Map<String, Object> mergedConfig = configurationProperties.mergedProducerConfiguration();
        if (!ObjectUtils.isEmpty(mergedConfig)) {
            props.putAll(mergedConfig);
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.BATCH_SIZE_CONFIG))) {
            props.put(ProducerConfig.BATCH_SIZE_CONFIG,
                    String.valueOf(producerProperties.getExtension().getBufferSize()));
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.LINGER_MS_CONFIG))) {
            props.put(ProducerConfig.LINGER_MS_CONFIG,
                    String.valueOf(producerProperties.getExtension().getBatchTimeout()));
        }
        if (ObjectUtils.isEmpty(props.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))) {
            props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
                    producerProperties.getExtension().getCompressionType().toString());
        }
        if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
            props.putAll(producerProperties.getExtension().getConfiguration());
        }
        // Always send as byte[] on the DLQ (the same byte[] that the consumer received)
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

        return new DefaultKafkaProducerFactory<>(props);
    }

    private String toDisplayString(String original) {
        if (original.length() <= 50) {
            return original;
        }
        return original.substring(0, 50) + "...";
    }
}
@@ -1,191 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;

import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.StringUtils;

/**
 * Delegate for handling all framework-level message conversions inbound and outbound on {@link KStream}.
 * If native encoding is not enabled, then serialization will be performed on outbound messages based
 * on a contentType. Similarly, if native decoding is not enabled, deserialization will be performed on
 * inbound messages based on a contentType. Based on the contentType, a {@link MessageConverter} will
 * be resolved.
 *
 * @author Soby Chacko
 */
public class KafkaStreamsMessageConversionDelegate {

    private static final ThreadLocal<KeyValue<Object, Object>> keyValueThreadLocal = new ThreadLocal<>();

    private final CompositeMessageConverterFactory compositeMessageConverterFactory;

    private final SendToDlqAndContinue sendToDlqAndContinue;

    private final KafkaStreamsBindingInformationCatalogue kstreamBindingInformationCatalogue;

    private final KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties;

    KafkaStreamsMessageConversionDelegate(CompositeMessageConverterFactory compositeMessageConverterFactory,
            SendToDlqAndContinue sendToDlqAndContinue,
            KafkaStreamsBindingInformationCatalogue kstreamBindingInformationCatalogue,
            KafkaStreamsBinderConfigurationProperties kstreamBinderConfigurationProperties) {
        this.compositeMessageConverterFactory = compositeMessageConverterFactory;
        this.sendToDlqAndContinue = sendToDlqAndContinue;
        this.kstreamBindingInformationCatalogue = kstreamBindingInformationCatalogue;
        this.kstreamBinderConfigurationProperties = kstreamBinderConfigurationProperties;
    }

    /**
     * Serialize {@link KStream} records on the outbound based on contentType.
     *
     * @param outboundBindTarget outbound KStream target
     * @return serialized KStream
     */
    public KStream serializeOnOutbound(KStream<?, ?> outboundBindTarget) {
        String contentType = this.kstreamBindingInformationCatalogue.getContentType(outboundBindTarget);
        MessageConverter messageConverter = compositeMessageConverterFactory.getMessageConverterForAllRegistered();

        return outboundBindTarget.mapValues((v) -> {
            Message<?> message = v instanceof Message<?> ? (Message<?>) v :
                    MessageBuilder.withPayload(v).build();
            Map<String, Object> headers = new HashMap<>(message.getHeaders());
            if (!StringUtils.isEmpty(contentType)) {
                headers.put(MessageHeaders.CONTENT_TYPE, contentType);
            }
            MessageHeaders messageHeaders = new MessageHeaders(headers);
            return
                    messageConverter.toMessage(message.getPayload(),
                            messageHeaders).getPayload();
        });
    }

    /**
     * Deserialize incoming {@link KStream} records based on contentType.
     *
     * @param valueClass target type of the KStream value
     * @param bindingTarget inbound KStream target
     * @return deserialized KStream
     */
    @SuppressWarnings("unchecked")
    public KStream deserializeOnInbound(Class<?> valueClass, KStream<?, ?> bindingTarget) {
        MessageConverter messageConverter = compositeMessageConverterFactory.getMessageConverterForAllRegistered();

        // Deserialize using a branching strategy
        KStream<?, ?>[] branch = bindingTarget.branch(
                // First predicate: convert the record and return true if conversion succeeded, false otherwise.
                (o, o2) -> {
                    boolean isValidRecord = false;

                    try {
                        if (valueClass.isAssignableFrom(o2.getClass())) {
                            keyValueThreadLocal.set(new KeyValue<>(o, o2));
                        }
                        else if (o2 instanceof Message) {
                            if (valueClass.isAssignableFrom(((Message) o2).getPayload().getClass())) {
                                keyValueThreadLocal.set(new KeyValue<>(o, ((Message) o2).getPayload()));
                            }
                            else {
                                convertAndSetMessage(o, valueClass, messageConverter, (Message) o2);
                            }
                        }
                        else if (o2 instanceof String || o2 instanceof byte[]) {
                            Message<?> message = MessageBuilder.withPayload(o2).build();
                            convertAndSetMessage(o, valueClass, messageConverter, message);
                        }
                        else {
                            keyValueThreadLocal.set(new KeyValue<>(o, o2));
                        }
                        isValidRecord = true;
                    }
                    catch (Exception ignored) {
                        // pass through
                    }
                    return isValidRecord;
                },
                // Second predicate: catches any records for which an exception was thrown in the first predicate above.
                (k, v) -> true
        );
        // Process errors from the second branch above.
        processErrorFromDeserialization(bindingTarget, branch[1]);

        // The first branch above holds the converted records; let it go through further processing.
        return branch[0].mapValues((o2) -> {
            Object objectValue = keyValueThreadLocal.get().value;
            keyValueThreadLocal.remove();
            return objectValue;
        });
    }

    private void convertAndSetMessage(Object o, Class<?> valueClass, MessageConverter messageConverter, Message<?> msg) {
        Object messageConverted = messageConverter.fromMessage(msg, valueClass);
        if (messageConverted == null) {
            throw new IllegalStateException("Inbound data conversion failed.");
        }
        keyValueThreadLocal.set(new KeyValue<>(o, messageConverted));
    }

    @SuppressWarnings("unchecked")
    private void processErrorFromDeserialization(KStream<?, ?> bindingTarget, KStream<?, ?> branch) {
        branch.process(() -> new Processor() {
            ProcessorContext context;

            @Override
            public void init(ProcessorContext context) {
                this.context = context;
            }

            @Override
            public void process(Object o, Object o2) {
                if (kstreamBindingInformationCatalogue.isDlqEnabled(bindingTarget)) {
                    String destination = context.topic();
                    if (o2 instanceof Message) {
                        Message message = (Message) o2;
                        sendToDlqAndContinue.sendToDlq(destination, (byte[]) o, (byte[]) message.getPayload(), context.partition());
                    }
                    else {
                        sendToDlqAndContinue.sendToDlq(destination, (byte[]) o, (byte[]) o2, context.partition());
                    }
                }
                else if (kstreamBinderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndFail) {
                    throw new IllegalStateException("Inbound deserialization failed.");
                }
                else if (kstreamBinderConfigurationProperties.getSerdeError() == KafkaStreamsBinderConfigurationProperties.SerdeError.logAndContinue) {
                    // Quietly pass through. No action needed; this is log-and-continue.
                }
            }

            @Override
            public void close() {

            }
        });
    }
}
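A sketch of binding configuration that drives the conversion delegate above; the keys are the standard Spring Cloud Stream binding properties, and the binding name "input" is illustrative:

# Illustrative application.properties.
# With native decoding off (the default), deserializeOnInbound converts the
# record payload using the binding's contentType:
spring.cloud.stream.bindings.input.contentType=application/json
# Setting this to true instead bypasses the delegate in favor of Kafka Serdes:
# spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true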
@@ -1,46 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.HashSet;
import java.util.Set;

import org.apache.kafka.streams.KafkaStreams;

/**
 * An internal registry for holding {@link KafkaStreams} objects maintained through
 * {@link StreamsBuilderFactoryManager}.
 *
 * @author Soby Chacko
 */
class KafkaStreamsRegistry {

    private final Set<KafkaStreams> kafkaStreams = new HashSet<>();

    Set<KafkaStreams> getKafkaStreams() {
        return kafkaStreams;
    }

    /**
     * Register the {@link KafkaStreams} object created in the application.
     *
     * @param kafkaStreams {@link KafkaStreams} object created in the application
     */
    void registerKafkaStreams(KafkaStreams kafkaStreams) {
        this.kafkaStreams.add(kafkaStreams);
    }
}
@@ -1,514 +0,0 @@
|
||||
/*
|
||||
* Copyright 2018 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kafka.streams;
|
||||
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.kafka.common.serialization.Serde;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.apache.kafka.streams.StreamsBuilder;
|
||||
import org.apache.kafka.streams.StreamsConfig;
|
||||
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
|
||||
import org.apache.kafka.streams.kstream.Consumed;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.kstream.KTable;
|
||||
import org.apache.kafka.streams.kstream.Materialized;
|
||||
import org.apache.kafka.streams.state.KeyValueStore;
|
||||
import org.apache.kafka.streams.state.StoreBuilder;
|
||||
import org.apache.kafka.streams.state.Stores;
|
||||
|
||||
import org.springframework.beans.BeansException;
|
||||
import org.springframework.beans.factory.BeanInitializationException;
|
||||
import org.springframework.beans.factory.config.BeanDefinition;
|
||||
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
|
||||
import org.springframework.beans.factory.support.BeanDefinitionRegistry;
|
||||
import org.springframework.cloud.stream.annotation.Input;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.ConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsStateStore;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsExtendedBindingProperties;
|
||||
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerErrorMessages;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
|
||||
import org.springframework.cloud.stream.binding.StreamListenerSetupMethodOrchestrator;
|
||||
import org.springframework.cloud.stream.config.BindingProperties;
|
||||
import org.springframework.cloud.stream.config.BindingServiceProperties;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.context.ApplicationContextAware;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.core.MethodParameter;
|
||||
import org.springframework.core.annotation.AnnotationUtils;
|
||||
import org.springframework.kafka.core.CleanupConfig;
|
||||
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
|
||||
import org.springframework.messaging.Message;
|
||||
import org.springframework.messaging.MessageHeaders;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
import org.springframework.messaging.support.MessageBuilder;
|
||||
import org.springframework.util.Assert;
|
||||
import org.springframework.util.ObjectUtils;
|
||||
import org.springframework.util.ReflectionUtils;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* Kafka Streams specific implementation for {@link StreamListenerSetupMethodOrchestrator}
|
||||
* that overrides the default mechanisms for invoking StreamListener adapters.
|
||||
*
|
||||
* The orchestration primarily focus on the following areas:
|
||||
*
|
||||
* 1. Allow multiple KStream output bindings (KStream branching) by allowing more than one output values on {@link SendTo}
|
||||
* 2. Allow multiple inbound bindings for multiple KStream and or KTable types.
|
||||
* 3. Each StreamListener method that it orchestrates gets its own {@link StreamsBuilderFactoryBean} and {@link StreamsConfig}
|
||||
*
|
||||
* @author Soby Chacko
|
||||
* @author Lei Chen
|
||||
* @author Gary Russell
|
||||
*/
|
||||
class KafkaStreamsStreamListenerSetupMethodOrchestrator implements StreamListenerSetupMethodOrchestrator, ApplicationContextAware {
|
||||
|
||||
private final static Log LOG = LogFactory.getLog(KafkaStreamsStreamListenerSetupMethodOrchestrator.class);
|
||||
|
||||
private final StreamListenerParameterAdapter streamListenerParameterAdapter;
|
||||
|
||||
private final Collection<StreamListenerResultAdapter> streamListenerResultAdapters;
|
||||
|
||||
private final BindingServiceProperties bindingServiceProperties;
|
||||
|
||||
private final KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties;
|
||||
|
||||
private final KeyValueSerdeResolver keyValueSerdeResolver;
|
||||
|
||||
private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;
|
||||
|
||||
private final Map<Method, StreamsBuilderFactoryBean> methodStreamsBuilderFactoryBeanMap = new HashMap<>();
|
||||
|
||||
private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;
|
||||
|
||||
private final CleanupConfig cleanupConfig;
|
||||
|
||||
private ConfigurableApplicationContext applicationContext;
|
||||
|
||||
KafkaStreamsStreamListenerSetupMethodOrchestrator(BindingServiceProperties bindingServiceProperties,
|
||||
KafkaStreamsExtendedBindingProperties kafkaStreamsExtendedBindingProperties,
|
||||
KeyValueSerdeResolver keyValueSerdeResolver,
|
||||
KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
|
||||
StreamListenerParameterAdapter streamListenerParameterAdapter,
|
||||
Collection<StreamListenerResultAdapter> streamListenerResultAdapters,
|
||||
KafkaStreamsBinderConfigurationProperties binderConfigurationProperties,
|
||||
CleanupConfig cleanupConfig) {
|
||||
this.bindingServiceProperties = bindingServiceProperties;
|
||||
this.kafkaStreamsExtendedBindingProperties = kafkaStreamsExtendedBindingProperties;
|
||||
this.keyValueSerdeResolver = keyValueSerdeResolver;
|
||||
this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
|
||||
this.streamListenerParameterAdapter = streamListenerParameterAdapter;
|
||||
this.streamListenerResultAdapters = streamListenerResultAdapters;
|
||||
this.binderConfigurationProperties = binderConfigurationProperties;
|
||||
this.cleanupConfig = cleanupConfig;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supports(Method method) {
|
||||
return methodParameterSupports(method) &&
|
||||
(methodReturnTypeSuppports(method) || Void.TYPE.equals(method.getReturnType()));
|
||||
}
|
||||
|
||||
private boolean methodReturnTypeSuppports(Method method) {
|
||||
Class<?> returnType = method.getReturnType();
|
||||
if (returnType.equals(KStream.class) ||
|
||||
(returnType.isArray() && returnType.getComponentType().equals(KStream.class))) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean methodParameterSupports(Method method) {
|
||||
boolean supports = false;
|
||||
for (int i = 0; i < method.getParameterCount(); i++) {
|
||||
MethodParameter methodParameter = MethodParameter.forExecutable(method, i);
|
||||
Class<?> parameterType = methodParameter.getParameterType();
|
||||
if (parameterType.equals(KStream.class) || parameterType.equals(KTable.class)) {
|
||||
supports = true;
|
||||
}
|
||||
}
|
||||
return supports;
|
||||
}

	@Override
	@SuppressWarnings({"rawtypes", "unchecked"})
	public void orchestrateStreamListenerSetupMethod(StreamListener streamListener, Method method, Object bean) {
		String[] methodAnnotatedOutboundNames = getOutboundBindingTargetNames(method);
		validateStreamListenerMethod(streamListener, method, methodAnnotatedOutboundNames);
		String methodAnnotatedInboundName = streamListener.value();
		Object[] adaptedInboundArguments = adaptAndRetrieveInboundArguments(method, methodAnnotatedInboundName,
				this.applicationContext,
				this.streamListenerParameterAdapter);
		try {
			ReflectionUtils.makeAccessible(method);
			if (Void.TYPE.equals(method.getReturnType())) {
				method.invoke(bean, adaptedInboundArguments);
			}
			else {
				Object result = method.invoke(bean, adaptedInboundArguments);

				if (result.getClass().isArray()) {
					Assert.isTrue(methodAnnotatedOutboundNames.length == ((Object[]) result).length,
							"Result does not match the number of declared outbounds");
					Object[] outboundKStreams = (Object[]) result;
					int i = 0;
					for (Object outboundKStream : outboundKStreams) {
						Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[i++]);
						for (StreamListenerResultAdapter streamListenerResultAdapter : streamListenerResultAdapters) {
							if (streamListenerResultAdapter.supports(outboundKStream.getClass(), targetBean.getClass())) {
								streamListenerResultAdapter.adapt(outboundKStream, targetBean);
								break;
							}
						}
					}
				}
				else {
					Assert.isTrue(methodAnnotatedOutboundNames.length == 1,
							"Result does not match the number of declared outbounds");
					Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[0]);
					for (StreamListenerResultAdapter streamListenerResultAdapter : streamListenerResultAdapters) {
						if (streamListenerResultAdapter.supports(result.getClass(), targetBean.getClass())) {
							streamListenerResultAdapter.adapt(result, targetBean);
							break;
						}
					}
				}
			}
		}
		catch (Exception e) {
			throw new BeanInitializationException("Cannot setup StreamListener for " + method, e);
		}
	}

	@Override
	@SuppressWarnings({"unchecked"})
	public Object[] adaptAndRetrieveInboundArguments(Method method, String inboundName,
													ApplicationContext applicationContext,
													StreamListenerParameterAdapter... streamListenerParameterAdapters) {
		Object[] arguments = new Object[method.getParameterTypes().length];
		for (int parameterIndex = 0; parameterIndex < arguments.length; parameterIndex++) {
			MethodParameter methodParameter = MethodParameter.forExecutable(method, parameterIndex);
			Class<?> parameterType = methodParameter.getParameterType();
			Object targetReferenceValue = null;
			if (methodParameter.hasParameterAnnotation(Input.class)) {
				targetReferenceValue = AnnotationUtils.getValue(methodParameter.getParameterAnnotation(Input.class));
				Input methodAnnotation = methodParameter.getParameterAnnotation(Input.class);
				inboundName = methodAnnotation.value();
			}
			else if (arguments.length == 1 && StringUtils.hasText(inboundName)) {
				targetReferenceValue = inboundName;
			}
			if (targetReferenceValue != null) {
				Assert.isInstanceOf(String.class, targetReferenceValue, "Annotation value must be a String");
				Object targetBean = applicationContext.getBean((String) targetReferenceValue);
				BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(inboundName);
				enableNativeDecodingForKTableAlways(parameterType, bindingProperties);
				StreamsConfig streamsConfig = null;
				//Retrieve the StreamsConfig created for this method if available.
				//Otherwise, create the StreamsBuilderFactory and get the underlying config.
				if (!methodStreamsBuilderFactoryBeanMap.containsKey(method)) {
					streamsConfig = buildStreamsBuilderAndRetrieveConfig(method, applicationContext, bindingProperties);
				}
				try {
					StreamsBuilderFactoryBean streamsBuilderFactoryBean = methodStreamsBuilderFactoryBeanMap.get(method);
					StreamsBuilder streamsBuilder = streamsBuilderFactoryBean.getObject();
					KafkaStreamsConsumerProperties extendedConsumerProperties = kafkaStreamsExtendedBindingProperties.getExtendedConsumerProperties(inboundName);
					//get the state store spec, if any
					KafkaStreamsStateStoreProperties spec = buildStateStoreSpec(method);
					Serde<?> keySerde = this.keyValueSerdeResolver.getInboundKeySerde(extendedConsumerProperties);
					Serde<?> valueSerde = this.keyValueSerdeResolver.getInboundValueSerde(bindingProperties.getConsumer(), extendedConsumerProperties);
					if (parameterType.isAssignableFrom(KStream.class)) {
						KStream<?, ?> stream = getkStream(inboundName, spec, bindingProperties, streamsBuilder, keySerde, valueSerde);
						KStreamBoundElementFactory.KStreamWrapper kStreamWrapper = (KStreamBoundElementFactory.KStreamWrapper) targetBean;
						//wrap the proxy created during the initial target type binding with the real object (KStream)
						kStreamWrapper.wrap((KStream<Object, Object>) stream);
						kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
						if (streamsConfig != null) {
							kafkaStreamsBindingInformationCatalogue.addStreamsConfigs(kStreamWrapper, streamsConfig);
						}
						for (StreamListenerParameterAdapter streamListenerParameterAdapter : streamListenerParameterAdapters) {
							if (streamListenerParameterAdapter.supports(stream.getClass(), methodParameter)) {
								arguments[parameterIndex] = streamListenerParameterAdapter.adapt(kStreamWrapper, methodParameter);
								break;
							}
						}
						if (arguments[parameterIndex] == null && parameterType.isAssignableFrom(stream.getClass())) {
							arguments[parameterIndex] = stream;
						}
						Assert.notNull(arguments[parameterIndex], "Cannot convert argument " + parameterIndex + " of " + method
								+ " from " + stream.getClass() + " to " + parameterType);
					}
					else if (parameterType.isAssignableFrom(KTable.class)) {
						String materializedAs = extendedConsumerProperties.getMaterializedAs();
						String bindingDestination = bindingServiceProperties.getBindingDestination(inboundName);
						KTable<?, ?> table = materializedAs != null ?
								materializedAs(streamsBuilder, bindingDestination, materializedAs, keySerde, valueSerde) :
								streamsBuilder.table(bindingDestination,
										Consumed.with(keySerde, valueSerde));
						KTableBoundElementFactory.KTableWrapper kTableWrapper = (KTableBoundElementFactory.KTableWrapper) targetBean;
						//wrap the proxy created during the initial target type binding with the real object (KTable)
						kTableWrapper.wrap((KTable<Object, Object>) table);
						kafkaStreamsBindingInformationCatalogue.addStreamBuilderFactory(streamsBuilderFactoryBean);
						if (streamsConfig != null) {
							kafkaStreamsBindingInformationCatalogue.addStreamsConfigs(kTableWrapper, streamsConfig);
						}
						arguments[parameterIndex] = table;
					}
				}
				catch (Exception e) {
					throw new IllegalStateException(e);
				}
			}
			else {
				throw new IllegalStateException(StreamListenerErrorMessages.INVALID_DECLARATIVE_METHOD_PARAMETERS);
			}
		}
		return arguments;
	}
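
	/*
	 * Illustrative multi-input method this adapter can wire (binding names are
	 * hypothetical, not from the original source):
	 *
	 *   @StreamListener
	 *   public void process(@Input("words") KStream<Object, String> words,
	 *           @Input("counts") KTable<String, Long> counts) { ... }
	 */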

	private <K, V> KTable<K, V> materializedAs(StreamsBuilder streamsBuilder, String destination, String storeName, Serde<K> k, Serde<V> v) {
		return streamsBuilder.table(bindingServiceProperties.getBindingDestination(destination),
				Materialized.<K, V, KeyValueStore<Bytes, byte[]>>as(storeName)
						.withKeySerde(k)
						.withValueSerde(v));
	}

	private StoreBuilder buildStateStore(KafkaStreamsStateStoreProperties spec) {
		try {
			Serde<?> keySerde = this.keyValueSerdeResolver.getStateStoreKeySerde(spec.getKeySerdeString());
			Serde<?> valueSerde = this.keyValueSerdeResolver.getStateStoreValueSerde(spec.getValueSerdeString());
			StoreBuilder builder;
			switch (spec.getType()) {
				case KEYVALUE:
					builder = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore(spec.getName()), keySerde, valueSerde);
					break;
				case WINDOW:
					//3 is the number of segments used by the persistent window store
					builder = Stores.windowStoreBuilder(Stores.persistentWindowStore(spec.getName(), spec.getRetention(), 3, spec.getLength(), false),
							keySerde,
							valueSerde);
					break;
				case SESSION:
					builder = Stores.sessionStoreBuilder(Stores.persistentSessionStore(spec.getName(), spec.getRetention()), keySerde, valueSerde);
					break;
				default:
					throw new UnsupportedOperationException("state store type (" + spec.getType() + ") is not supported");
			}
			if (spec.isCacheEnabled()) {
				builder = builder.withCachingEnabled();
			}
			if (spec.isLoggingDisabled()) {
				builder = builder.withLoggingDisabled();
			}
			return builder;
		}
		catch (Exception e) {
			LOG.error("Failed to build state store", e);
			throw e;
		}
	}


	private KStream<?, ?> getkStream(String inboundName, KafkaStreamsStateStoreProperties storeSpec,
									BindingProperties bindingProperties, StreamsBuilder streamsBuilder,
									Serde<?> keySerde, Serde<?> valueSerde) {
		if (storeSpec != null) {
			StoreBuilder storeBuilder = buildStateStore(storeSpec);
			streamsBuilder.addStateStore(storeBuilder);
			if (LOG.isInfoEnabled()) {
				LOG.info("state store " + storeBuilder.name() + " added to topology");
			}
		}
		String[] bindingTargets = StringUtils
				.commaDelimitedListToStringArray(bindingServiceProperties.getBindingDestination(inboundName));
		KStream<?, ?> stream = streamsBuilder.stream(Arrays.asList(bindingTargets),
				Consumed.with(keySerde, valueSerde));
		final boolean nativeDecoding = bindingServiceProperties.getConsumerProperties(inboundName).isUseNativeDecoding();
		if (nativeDecoding) {
			LOG.info("Native decoding is enabled for " + inboundName + ". Inbound deserialization is done natively by Kafka.");
		}
		else {
			LOG.info("Native decoding is disabled for " + inboundName + ". Inbound message conversion is done by Spring Cloud Stream.");
		}

		//When the binder does the conversion, wrap each value in a Message carrying the
		//configured content type so downstream converters can pick it up.
		stream = stream.mapValues(value -> {
			Object returnValue;
			String contentType = bindingProperties.getContentType();
			if (!StringUtils.isEmpty(contentType) && !nativeDecoding) {
				Message<?> message = MessageBuilder.withPayload(value)
						.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
				returnValue = message;
			}
			else {
				returnValue = value;
			}
			return returnValue;
		});
		return stream;
	}

	private void enableNativeDecodingForKTableAlways(Class<?> parameterType, BindingProperties bindingProperties) {
		if (parameterType.isAssignableFrom(KTable.class)) {
			if (bindingProperties.getConsumer() == null) {
				bindingProperties.setConsumer(new ConsumerProperties());
			}
			//No framework-level message conversion is provided for KTable; it is done natively by Kafka.
			bindingProperties.getConsumer().setUseNativeDecoding(true);
		}
	}

	@SuppressWarnings({"unchecked"})
	private StreamsConfig buildStreamsBuilderAndRetrieveConfig(Method method, ApplicationContext applicationContext,
															BindingProperties bindingProperties) {
		ConfigurableListableBeanFactory beanFactory = this.applicationContext.getBeanFactory();
		String group = bindingProperties.getGroup();
		if (!StringUtils.hasText(group)) {
			group = binderConfigurationProperties.getApplicationId();
		}
		Map<String, Object> streamConfigGlobalProperties = applicationContext.getBean("streamConfigGlobalProperties", Map.class);
		streamConfigGlobalProperties.put(StreamsConfig.APPLICATION_ID_CONFIG, group);

		//Custom StreamsConfig implementation that overrides getConfiguredInstance to guarantee
		//that the deserialization exception handler is created only once and then cached.
		StreamsConfig streamsConfig = new StreamsConfig(streamConfigGlobalProperties) {

			DeserializationExceptionHandler deserializationExceptionHandler;

			@Override
			@SuppressWarnings("unchecked")
			public <T> T getConfiguredInstance(String key, Class<T> clazz) {
				if (key.equals(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG)) {
					if (deserializationExceptionHandler != null) {
						return (T) deserializationExceptionHandler;
					}
					else {
						T t = super.getConfiguredInstance(key, clazz);
						deserializationExceptionHandler = (DeserializationExceptionHandler) t;
						return t;
					}
				}
				return super.getConfiguredInstance(key, clazz);
			}
		};
		StreamsBuilderFactoryBean streamsBuilder = this.cleanupConfig == null
				? new StreamsBuilderFactoryBean(streamsConfig)
				: new StreamsBuilderFactoryBean(streamsConfig, this.cleanupConfig);
		streamsBuilder.setAutoStartup(false);
		BeanDefinition streamsBuilderBeanDefinition =
				BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsBuilderFactoryBean>) streamsBuilder.getClass(), () -> streamsBuilder)
						.getRawBeanDefinition();
		((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("stream-builder-" + method.getName(), streamsBuilderBeanDefinition);
		//The '&' prefix retrieves the StreamsBuilderFactoryBean itself rather than the StreamsBuilder it produces.
		StreamsBuilderFactoryBean streamsBuilderX = applicationContext.getBean("&stream-builder-" + method.getName(), StreamsBuilderFactoryBean.class);
		BeanDefinition streamsConfigBeanDefinition =
				BeanDefinitionBuilder.genericBeanDefinition((Class<StreamsConfig>) streamsConfig.getClass(), () -> streamsConfig)
						.getRawBeanDefinition();
		((BeanDefinitionRegistry) beanFactory).registerBeanDefinition("streamsConfig-" + method.getName(), streamsConfigBeanDefinition);

		methodStreamsBuilderFactoryBeanMap.put(method, streamsBuilderX);
		return streamsConfig;
	}
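
	/*
	 * Resolution of the Kafka Streams application.id above, sketched with assumed
	 * property names: if spring.cloud.stream.bindings.<binding>.group is set, that
	 * group becomes the application.id for this method's topology; otherwise the
	 * binder-level applicationId (default "default") is used.
	 */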

	@Override
	public final void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
		this.applicationContext = (ConfigurableApplicationContext) applicationContext;
	}

	private void validateStreamListenerMethod(StreamListener streamListener, Method method, String[] methodAnnotatedOutboundNames) {
		String methodAnnotatedInboundName = streamListener.value();
		if (methodAnnotatedOutboundNames != null) {
			for (String s : methodAnnotatedOutboundNames) {
				if (StringUtils.hasText(s)) {
					Assert.isTrue(isDeclarativeOutput(method, s), "Method must be declarative");
				}
			}
		}
		if (StringUtils.hasText(methodAnnotatedInboundName)) {
			int methodArgumentsLength = method.getParameterTypes().length;

			for (int parameterIndex = 0; parameterIndex < methodArgumentsLength; parameterIndex++) {
				MethodParameter methodParameter = MethodParameter.forExecutable(method, parameterIndex);
				Assert.isTrue(isDeclarativeInput(methodAnnotatedInboundName, methodParameter), "Method must be declarative");
			}
		}
	}

	@SuppressWarnings("unchecked")
	private boolean isDeclarativeOutput(Method m, String targetBeanName) {
		Class<?> returnType = m.getReturnType();
		//For array returns (branching), check the component type against the adapters.
		Class<?> candidateType = returnType.isArray() ? returnType.getComponentType() : returnType;
		Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
		return this.streamListenerResultAdapters.stream()
				.anyMatch(slra -> slra.supports(candidateType, targetBeanClass));
	}

	@SuppressWarnings("unchecked")
	private boolean isDeclarativeInput(String targetBeanName, MethodParameter methodParameter) {
		if (!methodParameter.getParameterType().isAssignableFrom(Object.class) && this.applicationContext.containsBean(targetBeanName)) {
			Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
			return this.streamListenerParameterAdapter.supports(targetBeanClass, methodParameter);
		}
		return false;
	}

	private static String[] getOutboundBindingTargetNames(Method method) {
		SendTo sendTo = AnnotationUtils.findAnnotation(method, SendTo.class);
		if (sendTo != null) {
			Assert.isTrue(!ObjectUtils.isEmpty(sendTo.value()), StreamListenerErrorMessages.ATLEAST_ONE_OUTPUT);
			Assert.isTrue(sendTo.value().length >= 1, "At least one outbound destination needs to be provided.");
			return sendTo.value();
		}
		return null;
	}

	@SuppressWarnings({"unchecked"})
	private static KafkaStreamsStateStoreProperties buildStateStoreSpec(Method method) {
		KafkaStreamsStateStore spec = AnnotationUtils.findAnnotation(method, KafkaStreamsStateStore.class);
		if (spec != null) {
			Assert.isTrue(!ObjectUtils.isEmpty(spec.name()), "name cannot be empty");
			Assert.isTrue(spec.name().length() >= 1, "name cannot be empty");
			KafkaStreamsStateStoreProperties props = new KafkaStreamsStateStoreProperties();
			props.setName(spec.name());
			props.setType(spec.type());
			props.setLength(spec.lengthMs());
			props.setKeySerdeString(spec.keySerde());
			props.setRetention(spec.retentionMs());
			props.setValueSerdeString(spec.valueSerde());
			props.setCacheEnabled(spec.cache());
			props.setLoggingDisabled(!spec.logging());
			return props;
		}
		return null;
	}
}
@@ -1,192 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Map;

import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Utils;

import org.springframework.cloud.stream.binder.ConsumerProperties;
import org.springframework.cloud.stream.binder.ProducerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsProducerProperties;
import org.springframework.util.StringUtils;

/**
 * Resolver for key and value Serdes.
 *
 * On the inbound, if native decoding is enabled, any deserialization of the value is handled natively by Kafka.
 * First, any key/value Serde set on the binding itself is used; if that is not available, the
 * common Serde set at the global level is used; failing that, it falls back to byte[].
 * If native decoding is disabled, the binder performs the value deserialization itself, ignoring any value Serde
 * and relying on the configured contentType instead. Keys are always deserialized natively by Kafka.
 *
 * The same rules apply on the outbound. If native encoding is enabled, value serialization is done natively by Kafka,
 * using the binding level value Serde if set, otherwise the common Serde, otherwise byte[].
 * If native encoding is disabled, the binder performs the serialization using the configured contentType. Keys are
 * always serialized natively by Kafka.
 *
 * For state stores, the Serde is created from the serde class specified in {@link KafkaStreamsStateStore}.
 *
 * @author Soby Chacko
 * @author Lei Chen
 */
class KeyValueSerdeResolver {

	private final Map<String, Object> streamConfigGlobalProperties;

	private final KafkaStreamsBinderConfigurationProperties binderConfigurationProperties;

	KeyValueSerdeResolver(Map<String, Object> streamConfigGlobalProperties,
						KafkaStreamsBinderConfigurationProperties binderConfigurationProperties) {
		this.streamConfigGlobalProperties = streamConfigGlobalProperties;
		this.binderConfigurationProperties = binderConfigurationProperties;
	}
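
	/*
	 * Resolution order sketch for the value Serde (property names assumed for
	 * illustration, binding name "input" is hypothetical):
	 *
	 *   1. binding level:  spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde
	 *   2. common level:   "default.value.serde" in the binder configuration
	 *   3. final fallback: Serdes.ByteArray()
	 */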

	/**
	 * Provide the {@link Serde} for the inbound key.
	 *
	 * @param extendedConsumerProperties binding level extended {@link KafkaStreamsConsumerProperties}
	 * @return configured {@link Serde} for the inbound key.
	 */
	public Serde<?> getInboundKeySerde(KafkaStreamsConsumerProperties extendedConsumerProperties) {
		String keySerdeString = extendedConsumerProperties.getKeySerde();
		return getKeySerde(keySerdeString);
	}

	/**
	 * Provide the {@link Serde} for the inbound value.
	 *
	 * @param consumerProperties {@link ConsumerProperties} on the binding
	 * @param extendedConsumerProperties binding level extended {@link KafkaStreamsConsumerProperties}
	 * @return configured {@link Serde} for the inbound value.
	 */
	public Serde<?> getInboundValueSerde(ConsumerProperties consumerProperties, KafkaStreamsConsumerProperties extendedConsumerProperties) {
		Serde<?> valueSerde;
		String valueSerdeString = extendedConsumerProperties.getValueSerde();
		try {
			if (consumerProperties != null &&
					consumerProperties.isUseNativeDecoding()) {
				valueSerde = getValueSerde(valueSerdeString);
			}
			else {
				valueSerde = Serdes.ByteArray();
			}
			valueSerde.configure(streamConfigGlobalProperties, false);
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("Serde class not found: ", e);
		}
		return valueSerde;
	}

	/**
	 * Provide the {@link Serde} for the outbound key.
	 *
	 * @param properties binding level extended {@link KafkaStreamsProducerProperties}
	 * @return configured {@link Serde} for the outbound key.
	 */
	public Serde<?> getOuboundKeySerde(KafkaStreamsProducerProperties properties) {
		return getKeySerde(properties.getKeySerde());
	}

	/**
	 * Provide the {@link Serde} for the outbound value.
	 *
	 * @param producerProperties {@link ProducerProperties} on the binding
	 * @param kafkaStreamsProducerProperties binding level extended {@link KafkaStreamsProducerProperties}
	 * @return configured {@link Serde} for the outbound value.
	 */
	public Serde<?> getOutboundValueSerde(ProducerProperties producerProperties, KafkaStreamsProducerProperties kafkaStreamsProducerProperties) {
		Serde<?> valueSerde;
		try {
			if (producerProperties.isUseNativeEncoding()) {
				valueSerde = getValueSerde(kafkaStreamsProducerProperties.getValueSerde());
			}
			else {
				valueSerde = Serdes.ByteArray();
			}
			valueSerde.configure(streamConfigGlobalProperties, false);
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("Serde class not found: ", e);
		}
		return valueSerde;
	}

	/**
	 * Provide the {@link Serde} for the state store key.
	 *
	 * @param keySerdeString serde class used for the key
	 * @return {@link Serde} for the state store key.
	 */
	public Serde<?> getStateStoreKeySerde(String keySerdeString) {
		return getKeySerde(keySerdeString);
	}

	/**
	 * Provide the {@link Serde} for the state store value.
	 *
	 * @param valueSerdeString serde class used for the value
	 * @return {@link Serde} for the state store value.
	 */
	public Serde<?> getStateStoreValueSerde(String valueSerdeString) {
		try {
			return getValueSerde(valueSerdeString);
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("Serde class not found: ", e);
		}
	}

	private Serde<?> getKeySerde(String keySerdeString) {
		Serde<?> keySerde;
		try {
			if (StringUtils.hasText(keySerdeString)) {
				keySerde = Utils.newInstance(keySerdeString, Serde.class);
			}
			else {
				keySerde = this.binderConfigurationProperties.getConfiguration().containsKey("default.key.serde") ?
						Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("default.key.serde"), Serde.class) : Serdes.ByteArray();
			}
			keySerde.configure(streamConfigGlobalProperties, true);
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("Serde class not found: ", e);
		}
		return keySerde;
	}

	private Serde<?> getValueSerde(String valueSerdeString) throws ClassNotFoundException {
		Serde<?> valueSerde;
		if (StringUtils.hasText(valueSerdeString)) {
			valueSerde = Utils.newInstance(valueSerdeString, Serde.class);
		}
		else {
			valueSerde = this.binderConfigurationProperties.getConfiguration().containsKey("default.value.serde") ?
					Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("default.value.serde"), Serde.class) : Serdes.ByteArray();
		}
		return valueSerde;
	}
}
@@ -1,65 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.InvalidStateStoreException;
import org.apache.kafka.streams.state.QueryableStoreType;

/**
 * Registry for querying state stores created in user applications through their
 * {@link QueryableStoreType}s.
 *
 * @author Soby Chacko
 * @author Renwei Han
 * @since 2.0.0
 * @deprecated in favor of {@link InteractiveQueryService}
 */
public class QueryableStoreRegistry {

	private final KafkaStreamsRegistry kafkaStreamsRegistry;

	public QueryableStoreRegistry(KafkaStreamsRegistry kafkaStreamsRegistry) {
		this.kafkaStreamsRegistry = kafkaStreamsRegistry;
	}
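
	/*
	 * Minimal usage sketch (store name is hypothetical; QueryableStoreTypes is the
	 * standard Kafka Streams factory for store types):
	 *
	 *   ReadOnlyKeyValueStore<Object, Object> store =
	 *           queryableStoreRegistry.getQueryableStoreType("my-store", QueryableStoreTypes.keyValueStore());
	 */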

	/**
	 * Retrieve and return a queryable store by name created in the application.
	 *
	 * @param storeName name of the queryable store
	 * @param storeType type of the queryable store
	 * @param <T> generic queryable store type
	 * @return the queryable store.
	 * @deprecated in favor of {@link InteractiveQueryService#getQueryableStore(String, QueryableStoreType)}
	 */
	public <T> T getQueryableStoreType(String storeName, QueryableStoreType<T> storeType) {
		for (KafkaStreams kafkaStream : this.kafkaStreamsRegistry.getKafkaStreams()) {
			try {
				T store = kafkaStream.store(storeName, storeType);
				if (store != null) {
					return store;
				}
			}
			catch (InvalidStateStoreException ignored) {
				//The store is not available on this instance; try the next KafkaStreams.
			}
		}
		return null;
	}

}
@@ -1,108 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.errors.DeserializationExceptionHandler;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.internals.ProcessorContextImpl;
import org.apache.kafka.streams.processor.internals.StreamTask;

import org.springframework.util.ReflectionUtils;

/**
 * Custom implementation of {@link DeserializationExceptionHandler} that sends the records
 * in error to a DLQ topic and then continues stream processing with new records.
 *
 * @since 2.0.0
 *
 * @author Soby Chacko
 */
public class SendToDlqAndContinue implements DeserializationExceptionHandler {

	/**
	 * DLQ dispatchers per topic in the application context. The key here is not the actual DLQ topic
	 * but the incoming topic that caused the error.
	 */
	private Map<String, KafkaStreamsDlqDispatch> dlqDispatchers = new HashMap<>();

	/**
	 * For a given topic, send the key/value record to the DLQ topic.
	 *
	 * @param topic incoming topic that caused the error
	 * @param key to send
	 * @param value to send
	 * @param partition for the topic where this record should be sent
	 */
	public void sendToDlq(String topic, byte[] key, byte[] value, int partition) {
		KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = dlqDispatchers.get(topic);
		kafkaStreamsDlqDispatch.sendToDlq(key, value, partition);
	}
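
	/*
	 * This handler is selected when the binder is configured to route Serde errors to
	 * a DLQ, e.g. (property path assumed from the SerdeError enum in this module):
	 *
	 *   spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq
	 */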

	@Override
	@SuppressWarnings("unchecked")
	public DeserializationHandlerResponse handle(ProcessorContext context, ConsumerRecord<byte[], byte[]> record, Exception exception) {
		KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch = dlqDispatchers.get(record.topic());
		kafkaStreamsDlqDispatch.sendToDlq(record.key(), record.value(), record.partition());
		context.commit();

		// The following conditional block should be reconsidered when we have a solution for this SO problem:
		// https://stackoverflow.com/questions/48470899/kafka-streams-deserialization-handler
		// Currently, when a deserialization error happens, no commit takes place, so the
		// following code uses reflection to get access to the underlying KafkaConsumer and
		// commit the offset manually. It works with Kafka 1.0.0, but since it accesses
		// private fields by name, there is no guarantee it will keep working in future
		// versions of Kafka; it is a temporary fix.
		if (context instanceof ProcessorContextImpl) {
			ProcessorContextImpl processorContextImpl = (ProcessorContextImpl) context;
			Field task = ReflectionUtils.findField(ProcessorContextImpl.class, "task");
			ReflectionUtils.makeAccessible(task);
			Object taskField = ReflectionUtils.getField(task, processorContextImpl);

			if (taskField.getClass().isAssignableFrom(StreamTask.class)) {
				StreamTask streamTask = (StreamTask) taskField;
				Field consumer = ReflectionUtils.findField(StreamTask.class, "consumer");
				ReflectionUtils.makeAccessible(consumer);
				Object kafkaConsumerField = ReflectionUtils.getField(consumer, streamTask);
				if (kafkaConsumerField.getClass().isAssignableFrom(KafkaConsumer.class)) {
					KafkaConsumer kafkaConsumer = (KafkaConsumer) kafkaConsumerField;
					final Map<TopicPartition, OffsetAndMetadata> consumedOffsetsAndMetadata = new HashMap<>();
					TopicPartition tp = new TopicPartition(record.topic(), record.partition());
					//Commit the offset just past the failed record so processing can move on.
					OffsetAndMetadata oam = new OffsetAndMetadata(record.offset() + 1);
					consumedOffsetsAndMetadata.put(tp, oam);
					kafkaConsumer.commitSync(consumedOffsetsAndMetadata);
				}
			}
		}
		return DeserializationHandlerResponse.CONTINUE;
	}

	@Override
	public void configure(Map<String, ?> configs) {
	}

	void addKStreamDlqDispatch(String topic, KafkaStreamsDlqDispatch kafkaStreamsDlqDispatch) {
		dlqDispatchers.put(topic, kafkaStreamsDlqDispatch);
	}
}
@@ -1,108 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams;

import java.util.Set;

import org.springframework.context.SmartLifecycle;
import org.springframework.kafka.KafkaException;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;

/**
 * Iterates through all {@link StreamsBuilderFactoryBean}s in the application context
 * and starts them. As each one completes starting, the associated KafkaStreams
 * object is registered into the {@link KafkaStreamsRegistry}.
 *
 * This {@link SmartLifecycle} class ensures that the bean created from it is started very late
 * in the bootstrap process, by setting the phase value close to Integer.MAX_VALUE.
 * This guarantees that a {@link StreamsBuilderFactoryBean} on a
 * {@link org.springframework.cloud.stream.annotation.StreamListener} method with multiple
 * bindings is only started after all the binding phases have completed successfully.
 *
 * @author Soby Chacko
 */
class StreamsBuilderFactoryManager implements SmartLifecycle {

	private final KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue;

	private final KafkaStreamsRegistry kafkaStreamsRegistry;

	private volatile boolean running;

	StreamsBuilderFactoryManager(KafkaStreamsBindingInformationCatalogue kafkaStreamsBindingInformationCatalogue,
								KafkaStreamsRegistry kafkaStreamsRegistry) {
		this.kafkaStreamsBindingInformationCatalogue = kafkaStreamsBindingInformationCatalogue;
		this.kafkaStreamsRegistry = kafkaStreamsRegistry;
	}

	@Override
	public boolean isAutoStartup() {
		return true;
	}

	@Override
	public void stop(Runnable callback) {
		stop();
		if (callback != null) {
			callback.run();
		}
	}

	@Override
	public synchronized void start() {
		if (!this.running) {
			try {
				Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeans();
				for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
					streamsBuilderFactoryBean.start();
					kafkaStreamsRegistry.registerKafkaStreams(streamsBuilderFactoryBean.getKafkaStreams());
				}
				this.running = true;
			}
			catch (Exception e) {
				throw new KafkaException("Could not start stream: ", e);
			}
		}
	}

	@Override
	public synchronized void stop() {
		if (this.running) {
			try {
				Set<StreamsBuilderFactoryBean> streamsBuilderFactoryBeans = this.kafkaStreamsBindingInformationCatalogue.getStreamsBuilderFactoryBeans();
				for (StreamsBuilderFactoryBean streamsBuilderFactoryBean : streamsBuilderFactoryBeans) {
					streamsBuilderFactoryBean.stop();
				}
			}
			catch (Exception e) {
				throw new IllegalStateException(e);
			}
			finally {
				this.running = false;
			}
		}
	}

	@Override
	public synchronized boolean isRunning() {
		return this.running;
	}

	@Override
	public int getPhase() {
		return Integer.MAX_VALUE - 100;
	}

}
@@ -1,85 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.annotations;

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;

/**
 * Bindable interface for {@link KStream} input and output.
 *
 * This interface can be used as a bindable interface with {@link org.springframework.cloud.stream.annotation.EnableBinding}
 * when both the input and the output are single KStreams. In other scenarios, where multiple types are required,
 * similar bindable interfaces can be created and used: for example, multiple KStreams may be required on the
 * outbound for KStream branching, or multiple input types may be required, either as multiple KStreams or as a
 * combination of KStreams and KTables. In those cases, new bindable interfaces matching the requirements
 * must be created. Here are some examples.
 *
 * <pre class="code">
 * interface KStreamBranchProcessor {
 *     @Input("input")
 *     KStream<?, ?> input();
 *
 *     @Output("output-1")
 *     KStream<?, ?> output1();
 *
 *     @Output("output-2")
 *     KStream<?, ?> output2();
 *
 *     @Output("output-3")
 *     KStream<?, ?> output3();
 *
 *     ......
 * }
 * </pre>
 *
 * <pre class="code">
 * interface KStreamKtableProcessor {
 *     @Input("input-1")
 *     KStream<?, ?> input1();
 *
 *     @Input("input-2")
 *     KTable<?, ?> input2();
 *
 *     @Output("output")
 *     KStream<?, ?> output();
 *
 *     ......
 * }
 * </pre>
 *
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public interface KafkaStreamsProcessor {

	/**
	 * @return {@link Input} binding for {@link KStream} type.
	 */
	@Input("input")
	KStream<?, ?> input();

	/**
	 * @return {@link Output} binding for {@link KStream} type.
	 */
	@Output("output")
	KStream<?, ?> output();
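
	/*
	 * Typical activation (illustrative; the application class name is hypothetical):
	 *
	 *   @SpringBootApplication
	 *   @EnableBinding(KafkaStreamsProcessor.class)
	 *   public class WordCountApplication { ... }
	 */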
}
@@ -1,105 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.annotations;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;

/**
 * Annotation for specifying a Kafka Streams state store.
 *
 * This annotation can be used to inject a state store specification into the KStream building process so
 * that the desired store can be built by the StreamsBuilder and added to the topology for later use by processors.
 * This is particularly useful when the streams DSL needs to be combined with the low-level Processor API. In those
 * cases, if a writable state store is desired in the processors, it needs to be created using this annotation.
 * Here is an example.
 *
 * <pre class="code">
 * @StreamListener("input")
 * @KafkaStreamsStateStore(name = "mystate", type = KafkaStreamsStateStoreProperties.StoreType.WINDOW, lengthMs = 300000)
 * public void process(KStream<Object, Product> input) {
 *     ......
 * }
 * </pre>
 *
 * With that, you should be able to read/write this state store in your processor/transformer code.
 *
 * <pre class="code">
 * new Processor<Object, Product>() {
 *     WindowStore<Object, String> state;
 *
 *     @Override
 *     public void init(ProcessorContext processorContext) {
 *         state = (WindowStore) processorContext.getStateStore("mystate");
 *         ......
 *     }
 * }
 * </pre>
 *
 * @author Lei Chen
 */
@Target({ ElementType.TYPE, ElementType.METHOD, ElementType.ANNOTATION_TYPE })
@Retention(RetentionPolicy.RUNTIME)
public @interface KafkaStreamsStateStore {

	/**
	 * @return name of the state store.
	 */
	String name() default "";

	/**
	 * @return {@link KafkaStreamsStateStoreProperties.StoreType} of the state store.
	 */
	KafkaStreamsStateStoreProperties.StoreType type() default KafkaStreamsStateStoreProperties.StoreType.KEYVALUE;

	/**
	 * @return key serde of the state store.
	 */
	String keySerde() default "org.apache.kafka.common.serialization.Serdes$StringSerde";

	/**
	 * @return value serde of the state store.
	 */
	String valueSerde() default "org.apache.kafka.common.serialization.Serdes$StringSerde";

	/**
	 * @return length of the window in milliseconds (for windowed stores).
	 */
	long lengthMs() default 0;

	/**
	 * @return the maximum period of time in milliseconds to keep each window in this store (for windowed stores).
	 */
	long retentionMs() default 0;

	/**
	 * @return whether caching should be enabled on the created store.
	 */
	boolean cache() default false;

	/**
	 * @return whether logging should be enabled on the created store.
	 */
	boolean logging() default true;
}
@@ -1,64 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * {@link ConfigurationProperties} that can be used by end-user Kafka Streams applications. This class provides
 * convenient access to commonly used Kafka Streams properties from the user application. For example, windowing
 * operations are a common use case in stream processing; window-specific properties can be provided at runtime
 * and accessed in the application through this class.
 *
 * @author Soby Chacko
 */
@ConfigurationProperties("spring.cloud.stream.kafka.streams")
public class KafkaStreamsApplicationSupportProperties {

	private TimeWindow timeWindow;

	public TimeWindow getTimeWindow() {
		return timeWindow;
	}

	public void setTimeWindow(TimeWindow timeWindow) {
		this.timeWindow = timeWindow;
	}
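
	/*
	 * Illustrative configuration bound to this class (property names follow directly
	 * from the prefix and field names above; the values are examples only):
	 *
	 *   spring.cloud.stream.kafka.streams.timeWindow.length=30000
	 *   spring.cloud.stream.kafka.streams.timeWindow.advanceBy=10000
	 */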

	public static class TimeWindow {

		private int length;

		private int advanceBy;

		public int getLength() {
			return length;
		}

		public void setLength(int length) {
			this.length = length;
		}

		public int getAdvanceBy() {
			return advanceBy;
		}

		public void setAdvanceBy(int advanceBy) {
			this.advanceBy = advanceBy;
		}
	}
}
@@ -1,63 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;

/**
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KafkaStreamsBinderConfigurationProperties extends KafkaBinderConfigurationProperties {

	public KafkaStreamsBinderConfigurationProperties(KafkaProperties kafkaProperties) {
		super(kafkaProperties);
	}

	public enum SerdeError {
		logAndContinue,
		logAndFail,
		sendToDlq
	}

	private String applicationId = "default";

	public String getApplicationId() {
		return applicationId;
	}

	public void setApplicationId(String applicationId) {
		this.applicationId = applicationId;
	}

	/**
	 * {@link org.apache.kafka.streams.errors.DeserializationExceptionHandler} to use
	 * when there is a Serde error. The {@link KafkaStreamsBinderConfigurationProperties.SerdeError}
	 * values are used to select the exception handler on the consumer binding.
	 */
	private KafkaStreamsBinderConfigurationProperties.SerdeError serdeError;

	public KafkaStreamsBinderConfigurationProperties.SerdeError getSerdeError() {
		return serdeError;
	}

	public void setSerdeError(KafkaStreamsBinderConfigurationProperties.SerdeError serdeError) {
		this.serdeError = serdeError;
	}

}
@@ -1,43 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

/**
 * @author Marius Bogoevici
 */
public class KafkaStreamsBindingProperties {

	private KafkaStreamsConsumerProperties consumer = new KafkaStreamsConsumerProperties();

	private KafkaStreamsProducerProperties producer = new KafkaStreamsProducerProperties();

	public KafkaStreamsConsumerProperties getConsumer() {
		return consumer;
	}

	public void setConsumer(KafkaStreamsConsumerProperties consumer) {
		this.consumer = consumer;
	}

	public KafkaStreamsProducerProperties getProducer() {
		return producer;
	}

	public void setProducer(KafkaStreamsProducerProperties producer) {
		this.producer = producer;
	}
}
@@ -1,66 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KafkaStreamsConsumerProperties extends KafkaConsumerProperties {

	/**
	 * Key serde specified per binding.
	 */
	private String keySerde;

	/**
	 * Value serde specified per binding.
	 */
	private String valueSerde;

	/**
	 * Name of the state store to materialize the incoming KTable as (a KeyValueStore).
	 */
	private String materializedAs;
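
	/*
	 * Example per-binding configuration (the binding name "input" and the values are
	 * illustrative):
	 *
	 *   spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde
	 *   spring.cloud.stream.kafka.streams.bindings.input.consumer.materializedAs=words-count-store
	 */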

	public String getKeySerde() {
		return keySerde;
	}

	public void setKeySerde(String keySerde) {
		this.keySerde = keySerde;
	}

	public String getValueSerde() {
		return valueSerde;
	}

	public void setValueSerde(String valueSerde) {
		this.valueSerde = valueSerde;
	}

	public String getMaterializedAs() {
		return materializedAs;
	}

	public void setMaterializedAs(String materializedAs) {
		this.materializedAs = materializedAs;
	}

}
@@ -1,61 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import java.util.HashMap;
import java.util.Map;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;

/**
 * @author Marius Bogoevici
 */
@ConfigurationProperties("spring.cloud.stream.kafka.streams")
public class KafkaStreamsExtendedBindingProperties
		implements ExtendedBindingProperties<KafkaStreamsConsumerProperties, KafkaStreamsProducerProperties> {

	private Map<String, KafkaStreamsBindingProperties> bindings = new HashMap<>();

	public Map<String, KafkaStreamsBindingProperties> getBindings() {
		return this.bindings;
	}

	public void setBindings(Map<String, KafkaStreamsBindingProperties> bindings) {
		this.bindings = bindings;
	}

	@Override
	public KafkaStreamsConsumerProperties getExtendedConsumerProperties(String binding) {
		if (this.bindings.containsKey(binding) && this.bindings.get(binding).getConsumer() != null) {
			return this.bindings.get(binding).getConsumer();
		}
		else {
			return new KafkaStreamsConsumerProperties();
		}
	}

	@Override
	public KafkaStreamsProducerProperties getExtendedProducerProperties(String binding) {
		if (this.bindings.containsKey(binding) && this.bindings.get(binding).getProducer() != null) {
			return this.bindings.get(binding).getProducer();
		}
		else {
			return new KafkaStreamsProducerProperties();
		}
	}
}
@@ -1,52 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KafkaStreamsProducerProperties extends KafkaProducerProperties {

	/**
	 * Key serde specified per binding.
	 */
	private String keySerde;

	/**
	 * Value serde specified per binding.
	 */
	private String valueSerde;

	public String getKeySerde() {
		return keySerde;
	}

	public void setKeySerde(String keySerde) {
		this.keySerde = keySerde;
	}

	public String getValueSerde() {
		return valueSerde;
	}

	public void setValueSerde(String valueSerde) {
		this.valueSerde = valueSerde;
	}
}
@@ -1,151 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.properties;

/**
 * @author Lei Chen
 */
public class KafkaStreamsStateStoreProperties {

	public enum StoreType {

		KEYVALUE("keyvalue"),
		WINDOW("window"),
		SESSION("session");

		private final String type;

		/**
		 * @param type textual representation of the store type
		 */
		StoreType(final String type) {
			this.type = type;
		}

		@Override
		public String toString() {
			return type;
		}
	}
/**
|
||||
* name for this state store
|
||||
*/
|
||||
private String name;
|
||||
|
||||
/**
|
||||
* type for this state store
|
||||
*/
|
||||
private StoreType type;
|
||||
|
||||
/**
|
||||
* Size/length of this state store in ms. Only applicable for window store.
|
||||
*/
|
||||
private long length;
|
||||
|
||||
/**
|
||||
* Retention period for this state store in ms.
|
||||
*/
|
||||
private long retention;
|
||||
|
||||
/**
|
||||
* Key serde class specified per state store.
|
||||
*/
|
||||
private String keySerdeString;
|
||||
|
||||
/**
|
||||
* Value serde class specified per state store.
|
||||
*/
|
||||
private String valueSerdeString;
|
||||
|
||||
/**
|
||||
* Whether enable cache in this state store.
|
||||
*/
|
||||
private boolean cacheEnabled;
|
||||
|
||||
/**
|
||||
* Whether enable logging in this state store.
|
||||
*/
|
||||
private boolean loggingDisabled;
|
||||
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public void setName(String name) {
|
||||
this.name = name;
|
||||
}
|
||||
|
||||
public StoreType getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public void setType(StoreType type) {
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
public long getLength() {
|
||||
return length;
|
||||
}
|
||||
|
||||
public void setLength(long length) {
|
||||
this.length = length;
|
||||
}
|
||||
|
||||
public long getRetention() {
|
||||
return retention;
|
||||
}
|
||||
|
||||
public void setRetention(long retention) {
|
||||
this.retention = retention;
|
||||
}
|
||||
|
||||
public String getKeySerdeString() {
|
||||
return keySerdeString;
|
||||
}
|
||||
|
||||
public void setKeySerdeString(String keySerdeString) {
|
||||
this.keySerdeString = keySerdeString;
|
||||
}
|
||||
|
||||
public String getValueSerdeString() {
|
||||
return valueSerdeString;
|
||||
}
|
||||
|
||||
public void setValueSerdeString(String valueSerdeString) {
|
||||
this.valueSerdeString = valueSerdeString;
|
||||
}
|
||||
|
||||
public boolean isCacheEnabled() {
|
||||
return cacheEnabled;
|
||||
}
|
||||
|
||||
public void setCacheEnabled(boolean cacheEnabled) {
|
||||
this.cacheEnabled = cacheEnabled;
|
||||
}
|
||||
|
||||
public boolean isLoggingDisabled() {
|
||||
return loggingDisabled;
|
||||
}
|
||||
|
||||
public void setLoggingDisabled(boolean loggingDisabled) {
|
||||
this.loggingDisabled = loggingDisabled;
|
||||
}
|
||||
}
|
||||
@@ -1,6 +0,0 @@
kstream:\
org.springframework.cloud.stream.binder.kafka.streams.KStreamBinderConfiguration
ktable:\
org.springframework.cloud.stream.binder.kafka.streams.KTableBinderConfiguration
@@ -1,5 +0,0 @@
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsBinderSupportAutoConfiguration,\
org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsApplicationSupportAutoConfiguration
@@ -1,80 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.bootstrap;

import org.apache.kafka.streams.kstream.KStream;
import org.junit.ClassRule;
import org.junit.Ignore;
import org.junit.Test;

import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.test.rule.KafkaEmbedded;

/**
 * @author Soby Chacko
 */
@Ignore("Temporarily disabling the test as builds are getting slower due to this.")
public class KafkaStreamsBinderBootstrapTest {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);

    @Test
    public void testKafkaStreamsBinderWithCustomEnvironmentCanStart() {
        ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
                .web(WebApplicationType.NONE)
                .run("--spring.cloud.stream.bindings.input.destination=foo",
                        "--spring.cloud.stream.bindings.input.binder=kBind1",
                        "--spring.cloud.stream.binders.kBind1.type=kstream",
                        "--spring.cloud.stream.binders.kBind1.environment.spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                        "--spring.cloud.stream.binders.kBind1.environment.spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());

        applicationContext.close();
    }

    @Test
    public void testKafkaStreamsBinderWithStandardConfigurationCanStart() {
        ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
                .web(WebApplicationType.NONE)
                .run("--spring.cloud.stream.bindings.input.destination=foo",
                        "--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                        "--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());

        applicationContext.close();
    }

    @SpringBootApplication
    @EnableBinding(StreamSourceProcessor.class)
    static class SimpleApplication {

        @StreamListener
        public void handle(@Input("input") KStream<Object, String> stream) {

        }
    }

    interface StreamSourceProcessor {

        @Input("input")
        KStream<?, ?> inputStream();
    }
}
@@ -1,200 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Arrays;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.SpyBean;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.annotation.PropertySource;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

/**
 * @author Soby Chacko
 */
@RunWith(SpringRunner.class)
@ContextConfiguration
@DirtiesContext
public abstract class DeserializationErrorHandlerByKafkaTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts", "error.words.group",
            "error.word1.groupx", "error.word2.groupx");

    @SpyBean
    org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;

    private static Consumer<String, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers", embeddedKafka.getBrokersAsString());
        System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes", embeddedKafka.getZookeeperConnectionString());

        System.setProperty("server.port", "0");
        System.setProperty("spring.jmx.enabled", "false");

        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("fooc", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @SpringBootTest(properties = {
            "spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
            "spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
            "spring.cloud.stream.bindings.input.group=group",
            "spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=" +
                    "org.apache.kafka.common.serialization.Serdes$IntegerSerde"},
            webEnvironment = SpringBootTest.WebEnvironment.NONE
    )
    public static class DeserializationByKafkaAndDlqTests extends DeserializationErrorHandlerByKafkaTests {

        @Test
        @SuppressWarnings("unchecked")
        public void test() throws Exception {
            Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
            DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
            KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
            template.setDefaultTopic("words");
            template.sendDefault("foobar");

            Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar", "false", embeddedKafka);
            consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
            Consumer<String, String> consumer1 = cf.createConsumer();
            embeddedKafka.consumeFromAnEmbeddedTopic(consumer1, "error.words.group");

            ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1, "error.words.group");
            assertThat(cr.value().equals("foobar")).isTrue();

            // Ensuring that the deserialization was indeed done by Kafka natively
            verify(KafkaStreamsMessageConversionDelegate, never()).deserializeOnInbound(any(Class.class), any(KStream.class));
            verify(KafkaStreamsMessageConversionDelegate, never()).serializeOnOutbound(any(KStream.class));
        }
    }

    @SpringBootTest(properties = {
            "spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
            "spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
            "spring.cloud.stream.bindings.input.destination=word1,word2",
            "spring.cloud.stream.bindings.input.group=groupx",
            "spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=" +
                    "org.apache.kafka.common.serialization.Serdes$IntegerSerde"},
            webEnvironment = SpringBootTest.WebEnvironment.NONE
    )
    public static class DeserializationByKafkaAndDlqTestsWithMultipleInputs extends DeserializationErrorHandlerByKafkaTests {

        @Test
        @SuppressWarnings("unchecked")
        public void test() throws Exception {
            Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
            DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
            KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
            template.setDefaultTopic("word1");
            template.sendDefault("foobar");

            template.setDefaultTopic("word2");
            template.sendDefault("foobar");

            Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobarx", "false", embeddedKafka);
            consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
            Consumer<String, String> consumer1 = cf.createConsumer();
            embeddedKafka.consumeFromEmbeddedTopics(consumer1, "error.word1.groupx", "error.word2.groupx");

            // TODO: Investigate why the ordering matters below: i.e. if we consume from error.word1.groupx first, an exception is thrown.
            ConsumerRecord<String, String> cr1 = KafkaTestUtils.getSingleRecord(consumer1, "error.word2.groupx");
            assertThat(cr1.value().equals("foobar")).isTrue();
            ConsumerRecord<String, String> cr2 = KafkaTestUtils.getSingleRecord(consumer1, "error.word1.groupx");
            assertThat(cr2.value().equals("foobar")).isTrue();

            // Ensuring that the deserialization was indeed done by Kafka natively
            verify(KafkaStreamsMessageConversionDelegate, never()).deserializeOnInbound(any(Class.class), any(KStream.class));
            verify(KafkaStreamsMessageConversionDelegate, never()).serializeOnOutbound(any(KStream.class));
        }
    }

    @EnableBinding(KafkaStreamsProcessor.class)
    @EnableAutoConfiguration
    @PropertySource("classpath:/org/springframework/cloud/stream/binder/kstream/integTest-1.properties")
    @EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
    public static class WordCountProcessorApplication {

        @Autowired
        private TimeWindows timeWindows;

        @StreamListener("input")
        @SendTo("output")
        public KStream<?, String> process(KStream<Object, String> input) {

            return input
                    .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
                    .windowedBy(timeWindows)
                    .count(Materialized.as("foo-WordCounts-x"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(null, "Count for " + key.key() + " : " + value));
        }

    }
}
@@ -1,205 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.SpyBean;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.serializer.JsonSerde;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.verify;

/**
 * @author Soby Chacko
 */
@RunWith(SpringRunner.class)
@ContextConfiguration
@DirtiesContext
public abstract class DeserializtionErrorHandlerByBinderTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id", "error.foos.foobar-group",
            "error.foos1.fooz-group", "error.foos2.fooz-group");

    @SpyBean
    org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;

    private static Consumer<Integer, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers", embeddedKafka.getBrokersAsString());
        System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes", embeddedKafka.getZookeeperConnectionString());

        System.setProperty("server.port", "0");
        System.setProperty("spring.jmx.enabled", "false");

        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foob", "false", embeddedKafka);
        //consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class.getName());
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @SpringBootTest(properties = {
            "spring.cloud.stream.bindings.input.destination=foos",
            "spring.cloud.stream.bindings.output.destination=counts-id",
            "spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
            "spring.cloud.stream.bindings.output.producer.headerMode=raw",
            "spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
            "spring.cloud.stream.bindings.input.consumer.headerMode=raw",
            "spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
            "spring.cloud.stream.bindings.input.group=foobar-group"},
            webEnvironment = SpringBootTest.WebEnvironment.NONE
    )
    public static class DeserializationByBinderAndDlqTests extends DeserializtionErrorHandlerByBinderTests {

        @Test
        @SuppressWarnings("unchecked")
        public void test() throws Exception {
            Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
            DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
            KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
            template.setDefaultTopic("foos");
            template.sendDefault("hello");

            Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar", "false", embeddedKafka);
            consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
            Consumer<String, String> consumer1 = cf.createConsumer();
            embeddedKafka.consumeFromAnEmbeddedTopic(consumer1, "error.foos.foobar-group");

            ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer1, "error.foos.foobar-group");
            assertThat(cr.value().equals("hello")).isTrue();

            // Ensuring that the deserialization was indeed done by the binder
            verify(KafkaStreamsMessageConversionDelegate).deserializeOnInbound(any(Class.class), any(KStream.class));
        }
    }

    @SpringBootTest(properties = {
            "spring.cloud.stream.bindings.input.destination=foos1,foos2",
            "spring.cloud.stream.bindings.output.destination=counts-id",
            "spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
            "spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
            "spring.cloud.stream.bindings.output.producer.headerMode=raw",
            "spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
            "spring.cloud.stream.bindings.input.consumer.headerMode=raw",
            "spring.cloud.stream.kafka.streams.binder.serdeError=sendToDlq",
            "spring.cloud.stream.bindings.input.group=fooz-group"},
            webEnvironment = SpringBootTest.WebEnvironment.NONE
    )
    public static class DeserializationByBinderAndDlqTestsWithMultipleInputs extends DeserializtionErrorHandlerByBinderTests {

        @Test
        @SuppressWarnings("unchecked")
        public void test() throws Exception {
            Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
            DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
            KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
            template.setDefaultTopic("foos1");
            template.sendDefault("hello");

            template.setDefaultTopic("foos2");
            template.sendDefault("hello");

            Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("foobar1", "false", embeddedKafka);
            consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
            Consumer<String, String> consumer1 = cf.createConsumer();
            embeddedKafka.consumeFromEmbeddedTopics(consumer1, "error.foos1.fooz-group", "error.foos2.fooz-group");

            ConsumerRecord<String, String> cr1 = KafkaTestUtils.getSingleRecord(consumer1, "error.foos1.fooz-group");
            assertThat(cr1.value().equals("hello")).isTrue();

            ConsumerRecord<String, String> cr2 = KafkaTestUtils.getSingleRecord(consumer1, "error.foos2.fooz-group");
            assertThat(cr2.value().equals("hello")).isTrue();

            // Ensuring that the deserialization was indeed done by the binder
            verify(KafkaStreamsMessageConversionDelegate).deserializeOnInbound(any(Class.class), any(KStream.class));
        }
    }

    @EnableBinding(KafkaStreamsProcessor.class)
    @EnableAutoConfiguration
    public static class ProductCountApplication {

        @StreamListener("input")
        @SendTo("output")
        public KStream<Integer, Long> process(KStream<Object, Product> input) {
            return input
                    .filter((key, product) -> product.getId() == 123)
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class)))
                    .windowedBy(TimeWindows.of(5000))
                    .count(Materialized.as("id-count-store-x"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(key.key().id, value));
        }
    }

    static class Product {

        Integer id;

        public Integer getId() {
            return id;
        }

        public void setId(Integer id) {
            this.id = id;
        }
    }
}
@@ -1,195 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * This test case demonstrates a Kafka Streams topology that consumes messages from
 * multiple Kafka topics (destinations).
 * See {@link KafkaStreamsBinderMultipleInputTopicsTest#testKstreamWordCountWithStringInputAndPojoOuput} where
 * the input topic names are specified as comma-separated String values for
 * the property spring.cloud.stream.bindings.input.destination.
 *
 * @author Sarath Shyam
 */
public class KafkaStreamsBinderMultipleInputTopicsTest {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");

    private static Consumer<String, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @Test
    public void testKstreamWordCountWithStringInputAndPojoOuput() throws Exception {
        SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
        app.setWebApplicationType(WebApplicationType.NONE);

        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.jmx.enabled=false",
                "--spring.cloud.stream.bindings.input.destination=words1,words2",
                "--spring.cloud.stream.bindings.output.destination=counts",
                "--spring.cloud.stream.bindings.output.contentType=application/json",
                "--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kafka.streams.timeWindow.length=5000",
                "--spring.cloud.stream.kafka.streams.timeWindow.advanceBy=0",
                "--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidate(context);
        } finally {
            context.close();
        }
    }

    private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("words1");
        template.sendDefault("foobar1");
        template.setDefaultTopic("words2");
        template.sendDefault("foobar2");

        // Sleep a bit so that both the messages are processed before reading from the output topic.
        // Else assertions might fail arbitrarily.
        Thread.sleep(5000);

        ConsumerRecords<String, String> received = KafkaTestUtils.getRecords(consumer);

        List<String> wordCounts = new ArrayList<>(2);

        received.records("counts").forEach((consumerRecord) -> {
            wordCounts.add((consumerRecord.value()));
        });
        System.out.println(wordCounts);
        assertThat(wordCounts.contains("{\"word\":\"foobar1\",\"count\":1}")).isTrue();
        assertThat(wordCounts.contains("{\"word\":\"foobar2\",\"count\":1}")).isTrue();
    }

    @EnableBinding(KafkaStreamsProcessor.class)
    @EnableAutoConfiguration
    @EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
    static class WordCountProcessorApplication {

        @StreamListener
        @SendTo("output")
        public KStream<?, WordCount> process(@Input("input") KStream<Object, String> input) {

            input.map((k, v) -> {
                System.out.println(k);
                System.out.println(v);
                return new KeyValue<>(k, v);
            });
            return input
                    .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
                    .count(Materialized.as("WordCounts"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(null, new WordCount(key, value)));
        }

    }

    static class WordCount {

        private String word;

        private long count;

        WordCount(String word, long count) {
            this.word = word;
            this.count = count;
        }

        public String getWord() {
            return word;
        }

        public void setWord(String word) {
            this.word = word;
        }

        public long getCount() {
            return count;
        }

        public void setCount(long count) {
            this.count = count;
        }

    }

}
@@ -1,145 +0,0 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.serializer.JsonSerde;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KafkaStreamsBinderPojoInputAndPrimitiveTypeOutputTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

    private static Consumer<Integer, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
        //consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class.getName());
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @Test
    public void testKstreamBinderWithPojoInputAndStringOuput() throws Exception {
        SpringApplication app = new SpringApplication(ProductCountApplication.class);
        app.setWebApplicationType(WebApplicationType.NONE);
        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.jmx.enabled=false",
                "--spring.cloud.stream.bindings.input.destination=foos",
                "--spring.cloud.stream.bindings.output.destination=counts-id",
                "--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidateFoo(context);
        } finally {
            context.close();
        }
    }

    private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("foos");
        template.sendDefault("{\"id\":\"123\"}");
        ConsumerRecord<Integer, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");

        assertThat(cr.key()).isEqualTo(123);
        ObjectMapper om = new ObjectMapper();
        Long aLong = om.readValue(cr.value(), Long.class);
        assertThat(aLong).isEqualTo(1L);
    }

    @EnableBinding(KafkaStreamsProcessor.class)
    @EnableAutoConfiguration
    public static class ProductCountApplication {

        @StreamListener("input")
        @SendTo("output")
        public KStream<Integer, Long> process(KStream<Object, Product> input) {
            return input
                    .filter((key, product) -> product.getId() == 123)
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class)))
                    .windowedBy(TimeWindows.of(5000))
                    .count(Materialized.as("id-count-store-x"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(key.key().id, value));
        }
    }

    static class Product {

        Integer id;

        public Integer getId() {
            return id;
        }

        public void setId(Integer id) {
            this.id = id;
        }
    }
}
@@ -1,218 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Arrays;
import java.util.Date;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyWindowStore;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.integration.test.util.TestUtils;
import org.springframework.kafka.core.CleanupConfig;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KafkaStreamsBinderWordCountIntegrationTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");

    private static Consumer<String, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @Test
    public void testKstreamWordCountWithStringInputAndPojoOuput() throws Exception {
        SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
        app.setWebApplicationType(WebApplicationType.NONE);

        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.jmx.enabled=false",
                "--spring.cloud.stream.bindings.input.destination=words",
                "--spring.cloud.stream.bindings.output.destination=counts",
                "--spring.cloud.stream.bindings.output.contentType=application/json",
                "--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kafka.streams.timeWindow.length=5000",
                "--spring.cloud.stream.kafka.streams.timeWindow.advanceBy=0",
                "--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidate(context);
            // Assertions on StreamsBuilderFactoryBean
            StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process", StreamsBuilderFactoryBean.class);
            KafkaStreams kafkaStreams = streamsBuilderFactoryBean.getKafkaStreams();
            ReadOnlyWindowStore<Object, Object> store = kafkaStreams.store("foo-WordCounts", QueryableStoreTypes.windowStore());
            assertThat(store).isNotNull();
            CleanupConfig cleanup = TestUtils.getPropertyValue(streamsBuilderFactoryBean, "cleanupConfig",
                    CleanupConfig.class);
            assertThat(cleanup.cleanupOnStart()).isTrue();
            assertThat(cleanup.cleanupOnStop()).isFalse();
        }
        finally {
            context.close();
        }
    }

    private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("words");
        template.sendDefault("foobar");
        ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
        assertThat(cr.value().contains("\"word\":\"foobar\",\"count\":1")).isTrue();
    }

    @EnableBinding(KafkaStreamsProcessor.class)
    @EnableAutoConfiguration
    @EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
    static class WordCountProcessorApplication {

        @Autowired
        private TimeWindows timeWindows;

        @StreamListener
        @SendTo("output")
        public KStream<?, WordCount> process(@Input("input") KStream<Object, String> input) {

            input.map((k, v) -> {
                return new KeyValue<>(k, v);
            });
            return input
                    .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
                    .windowedBy(timeWindows)
                    .count(Materialized.as("foo-WordCounts"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
        }

        @Bean
        public CleanupConfig cleanupConfig() {
            return new CleanupConfig(true, false);
        }

    }

    static class WordCount {

        private String word;

        private long count;

        private Date start;

        private Date end;

        WordCount(String word, long count, Date start, Date end) {
            this.word = word;
            this.count = count;
            this.start = start;
            this.end = end;
        }

        public String getWord() {
            return word;
        }

        public void setWord(String word) {
            this.word = word;
        }

        public long getCount() {
            return count;
        }

        public void setCount(long count) {
            this.count = count;
        }

        public Date getStart() {
            return start;
        }

        public void setStart(Date start) {
            this.start = start;
        }

        public Date getEnd() {
            return end;
        }

        public void setEnd(Date end) {
            this.end = end;
        }
    }

}
@@ -1,180 +0,0 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.QueryableStoreTypes;
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.InteractiveQueryService;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.serializer.JsonSerde;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KafkaStreamsInteractiveQueryIntegrationTests {

	@ClassRule
	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

	private static Consumer<String, String> consumer;

	@BeforeClass
	public static void setUp() throws Exception {
		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
		consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
		consumer = cf.createConsumer();
		embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
	}

	@AfterClass
	public static void tearDown() {
		consumer.close();
	}

	@Test
	public void testKstreamBinderWithPojoInputAndStringOuput() throws Exception {
		SpringApplication app = new SpringApplication(ProductCountApplication.class);
		app.setWebApplicationType(WebApplicationType.NONE);
		ConfigurableApplicationContext context = app.run("--server.port=0",
				"--spring.jmx.enabled=false",
				"--spring.cloud.stream.bindings.input.destination=foos",
				"--spring.cloud.stream.bindings.output.destination=counts-id",
				"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
				"--spring.cloud.stream.kafka.streams.binder.configuration.application.server=" + embeddedKafka.getBrokersAsString(),
				"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
				"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
		try {
			receiveAndValidateFoo(context);
		} finally {
			context.close();
		}
	}

	private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
		Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
		DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
		KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
		template.setDefaultTopic("foos");
		template.sendDefault("{\"id\":\"123\"}");
		ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
		assertThat(cr.value().contains("Count for product with ID 123: 1")).isTrue();

		ProductCountApplication.Foo foo = context.getBean(ProductCountApplication.Foo.class);
		// Fixed: the original assertThat(foo.getProductStock(123).equals(1L)) built an
		// assertion object but never asserted anything.
		assertThat(foo.getProductStock(123)).isEqualTo(1L);

		// perform assertions on HostInfo related methods in InteractiveQueryService
		InteractiveQueryService interactiveQueryService = context.getBean(InteractiveQueryService.class);
		HostInfo currentHostInfo = interactiveQueryService.getCurrentHostInfo();
		assertThat(currentHostInfo.host() + ":" + currentHostInfo.port()).isEqualTo(embeddedKafka.getBrokersAsString());

		HostInfo hostInfo = interactiveQueryService.getHostInfo("prod-id-count-store", 123, new IntegerSerializer());
		assertThat(hostInfo.host() + ":" + hostInfo.port()).isEqualTo(embeddedKafka.getBrokersAsString());

		HostInfo hostInfoFoo = interactiveQueryService.getHostInfo("prod-id-count-store-foo", 123, new IntegerSerializer());
		assertThat(hostInfoFoo).isNull();
	}

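	// With a single application instance, both getCurrentHostInfo() and the key-based
	// getHostInfo() lookup resolve to the configured application.server value, while a
	// lookup against a store that was never materialized ("prod-id-count-store-foo")
	// returns null.
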
	@EnableBinding(KafkaStreamsProcessor.class)
	@EnableAutoConfiguration
	public static class ProductCountApplication {

		@StreamListener("input")
		@SendTo("output")
		@SuppressWarnings("deprecation")
		public KStream<?, String> process(KStream<Object, Product> input) {

			return input
					.filter((key, product) -> product.getId() == 123)
					.map((key, value) -> new KeyValue<>(value.id, value))
					.groupByKey(Serialized.with(new Serdes.IntegerSerde(), new JsonSerde<>(Product.class)))
					.count(Materialized.as("prod-id-count-store"))
					.toStream()
					.map((key, value) -> new KeyValue<>(null, "Count for product with ID 123: " + value));
		}

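		// Topology above: keep only products with id 123, re-key each record by the
		// product id, group with an Integer key serde and a JSON value serde, maintain
		// a running count in the "prod-id-count-store" state store, and emit the count
		// as a human-readable message.
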
		@Bean
		public Foo foo(InteractiveQueryService interactiveQueryService) {
			return new Foo(interactiveQueryService);
		}

		static class Foo {
			InteractiveQueryService interactiveQueryService;

			Foo(InteractiveQueryService interactiveQueryService) {
				this.interactiveQueryService = interactiveQueryService;
			}

			public Long getProductStock(Integer id) {
				ReadOnlyKeyValueStore<Object, Object> keyValueStore =
						interactiveQueryService.getQueryableStore("prod-id-count-store", QueryableStoreTypes.keyValueStore());

				return (Long) keyValueStore.get(id);
			}
		}
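
		// Foo is a thin client over InteractiveQueryService: getQueryableStore(...)
		// resolves the named state store from the running Kafka Streams instance.
		// Note (assumption): the lookup is only expected to succeed once the streams
		// instance is up and the store is queryable, so non-test callers would
		// typically guard or retry it.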
	}

	static class Product {

		Integer id;

		public Integer getId() {
			return id;
		}

		public void setId(Integer id) {
			this.id = id;
		}
	}
}
@@ -1,162 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Arrays;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.SpyBean;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.annotation.PropertySource;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.test.annotation.DirtiesContext;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

/**
 * @author Soby Chacko
 */
@RunWith(SpringRunner.class)
@ContextConfiguration
@DirtiesContext
public abstract class KafkaStreamsNativeEncodingDecodingTests {

	@ClassRule
	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");

	@SpyBean
	org.springframework.cloud.stream.binder.kafka.streams.KafkaStreamsMessageConversionDelegate KafkaStreamsMessageConversionDelegate;

	private static Consumer<String, String> consumer;

	@BeforeClass
	public static void setUp() throws Exception {
		System.setProperty("spring.cloud.stream.kafka.streams.binder.brokers", embeddedKafka.getBrokersAsString());
		System.setProperty("spring.cloud.stream.kafka.streams.binder.zkNodes", embeddedKafka.getZookeeperConnectionString());

		System.setProperty("server.port", "0");
		System.setProperty("spring.jmx.enabled", "false");

		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
		consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
		consumer = cf.createConsumer();
		embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
	}

	@AfterClass
	public static void tearDown() {
		consumer.close();
	}

	@SpringBootTest(properties = {
			"spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
			"spring.cloud.stream.bindings.output.producer.useNativeEncoding=true" },
			webEnvironment = SpringBootTest.WebEnvironment.NONE)
	public static class NativeEncodingDecodingEnabledTests extends KafkaStreamsNativeEncodingDecodingTests {

		@Test
		public void test() throws Exception {
			Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
			DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
			KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
			template.setDefaultTopic("words");
			template.sendDefault("foobar");
			ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
			assertThat(cr.value()).isEqualTo("Count for foobar : 1");

			verify(KafkaStreamsMessageConversionDelegate, never()).serializeOnOutbound(any(KStream.class));
			verify(KafkaStreamsMessageConversionDelegate, never()).deserializeOnInbound(any(Class.class), any(KStream.class));
		}
	}

	@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.NONE)
	public static class NativeEncodingDecodingDisabledTests extends KafkaStreamsNativeEncodingDecodingTests {

		@Test
		public void test() throws Exception {
			Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
			DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
			KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
			template.setDefaultTopic("words");
			template.sendDefault("foobar");
			ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
			assertThat(cr.value()).isEqualTo("Count for foobar : 1");

			verify(KafkaStreamsMessageConversionDelegate).serializeOnOutbound(any(KStream.class));
			verify(KafkaStreamsMessageConversionDelegate).deserializeOnInbound(any(Class.class), any(KStream.class));
		}
	}

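	// The two subclasses differ only in their binding properties: with native
	// encoding/decoding enabled, (de)serialization is left entirely to the configured
	// Kafka serdes and the spied conversion delegate is never invoked; with the
	// defaults, the binder routes records through serializeOnOutbound and
	// deserializeOnInbound on that delegate.
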
	@EnableBinding(KafkaStreamsProcessor.class)
	@EnableAutoConfiguration
	@PropertySource("classpath:/org/springframework/cloud/stream/binder/kstream/integTest-1.properties")
	@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
	public static class WordCountProcessorApplication {

		@Autowired
		private TimeWindows timeWindows;

		@StreamListener("input")
		@SendTo("output")
		public KStream<?, String> process(KStream<Object, String> input) {

			return input
					.flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
					.map((key, value) -> new KeyValue<>(value, value))
					.groupByKey(Serialized.with(Serdes.String(), Serdes.String()))
					.windowedBy(timeWindows)
					.count(Materialized.as("foo-WordCounts-x"))
					.toStream()
					.map((key, value) -> new KeyValue<>(null, "Count for " + key.key() + " : " + value));
		}
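
		// Word-count topology: split each value into lower-case words, re-key each
		// record by the word, group with String serdes, count per injected time
		// window into the "foo-WordCounts-x" store, and emit a readable
		// "Count for <word> : <count>" message.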
	}

}
@@ -1,147 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Map;

import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.WindowStore;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsStateStore;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsStateStoreProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Lei Chen
 * @author Soby Chacko
 */
public class KafkaStreamsStateStoreIntegrationTests {

	@ClassRule
	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

	@Test
	public void testKstreamStateStore() throws Exception {
		SpringApplication app = new SpringApplication(ProductCountApplication.class);
		app.setWebApplicationType(WebApplicationType.NONE);
		ConfigurableApplicationContext context = app.run("--server.port=0",
				"--spring.jmx.enabled=false",
				"--spring.cloud.stream.bindings.input.destination=foobar",
				"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
				"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
				"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
		// The original try block wrapped these calls in a catch (Exception e) { throw e; }
		// that only rethrew; it is dropped here as a no-op.
		try {
			Thread.sleep(2000);
			receiveAndValidateFoo(context);
		} finally {
			context.close();
		}
	}

	private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
		Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
		DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
		KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
		template.setDefaultTopic("foobar");
		template.sendDefault("{\"id\":\"123\"}");
		Thread.sleep(1000);

		// assertions
		ProductCountApplication productCount = context.getBean(ProductCountApplication.class);
		WindowStore<Object, String> state = productCount.state;
		assertThat(state).isNotNull();
		assertThat(state.name()).isEqualTo("mystate");
		assertThat(state.persistent()).isTrue();
		assertThat(productCount.processed).isTrue();
	}

	@EnableBinding(KafkaStreamsProcessorX.class)
	@EnableAutoConfiguration
	public static class ProductCountApplication {

		WindowStore<Object, String> state;
		boolean processed;

		@StreamListener("input")
		@KafkaStreamsStateStore(name = "mystate", type = KafkaStreamsStateStoreProperties.StoreType.WINDOW, lengthMs = 300000)
		@SuppressWarnings({"deprecation", "unchecked"})
		public void process(KStream<Object, Product> input) {

			input
					.process(() -> new Processor<Object, Product>() {

						@Override
						public void init(ProcessorContext processorContext) {
							state = (WindowStore) processorContext.getStateStore("mystate");
						}

						@Override
						public void process(Object s, Product product) {
							processed = true;
						}

						@Override
						public void close() {
							if (state != null) {
								state.close();
							}
						}
					}, "mystate");
		}
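
		// @KafkaStreamsStateStore directs the binder to create a persistent window
		// store named "mystate" (retention lengthMs = 300000) and attach it to this
		// processor, which is why init() can look it up via
		// processorContext.getStateStore("mystate").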
	}

	public static class Product {

		Integer id;

		public Integer getId() {
			return id;
		}

		public void setId(Integer id) {
			this.id = id;
		}
	}

	interface KafkaStreamsProcessorX {

		@Input("input")
		KStream<?, ?> input();
	}
}
@@ -1,152 +0,0 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Serialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.integration.test.util.TestUtils;
import org.springframework.kafka.core.CleanupConfig;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
import org.springframework.kafka.support.serializer.JsonSerde;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KafkastreamsBinderPojoInputStringOutputIntegrationTests {

	@ClassRule
	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

	private static Consumer<String, String> consumer;

	@BeforeClass
	public static void setUp() throws Exception {
		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
		consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
		consumer = cf.createConsumer();
		embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
	}

	@AfterClass
	public static void tearDown() {
		consumer.close();
	}

	@Test
	public void testKstreamBinderWithPojoInputAndStringOuput() throws Exception {
		SpringApplication app = new SpringApplication(ProductCountApplication.class);
		app.setWebApplicationType(WebApplicationType.NONE);
		ConfigurableApplicationContext context = app.run("--server.port=0",
				"--spring.jmx.enabled=false",
				"--spring.cloud.stream.bindings.input.destination=foos",
				"--spring.cloud.stream.bindings.output.destination=counts-id",
				"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=1000",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
				"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
				"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
				"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
		try {
			receiveAndValidateFoo(context);
			// Assertions on StreamsBuilderFactoryBean
			StreamsBuilderFactoryBean streamsBuilderFactoryBean = context.getBean("&stream-builder-process",
					StreamsBuilderFactoryBean.class);
			CleanupConfig cleanup = TestUtils.getPropertyValue(streamsBuilderFactoryBean, "cleanupConfig",
					CleanupConfig.class);
			assertThat(cleanup.cleanupOnStart()).isFalse();
			assertThat(cleanup.cleanupOnStop()).isTrue();
		}
		finally {
			context.close();
		}
	}

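	// The "&" prefix on "&stream-builder-process" dereferences the FactoryBean itself
	// rather than the StreamsBuilder it produces; the two CleanupConfig assertions pin
	// the binder's default local-state cleanup behavior: no cleanup on start, cleanup
	// on stop.
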
	private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
		Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
		DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
		KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
		template.setDefaultTopic("foos");
		template.sendDefault("{\"id\":\"123\"}");
		ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
		assertThat(cr.value().contains("Count for product with ID 123: 1")).isTrue();
	}

	@EnableBinding(KafkaStreamsProcessor.class)
	@EnableAutoConfiguration
	public static class ProductCountApplication {

		@StreamListener("input")
		@SendTo("output")
		public KStream<Integer, String> process(KStream<Object, Product> input) {

			return input
					.filter((key, product) -> product.getId() == 123)
					.map((key, value) -> new KeyValue<>(value, value))
					.groupByKey(Serialized.with(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class)))
					.windowedBy(TimeWindows.of(5000))
					.count(Materialized.as("id-count-store"))
					.toStream()
					.map((key, value) -> new KeyValue<>(key.key().id, "Count for product with ID 123: " + value));
		}
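
		// Unlike the interactive-query variant above, this topology groups by the
		// whole Product (JSON serdes for key and value), counts in 5-second windows,
		// and re-keys the output by product id to match the Integer key serde
		// configured on the output binding.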
	}

	static class Product {

		Integer id;

		public Integer getId() {
			return id;
		}

		public void setId(Integer id) {
			this.id = id;
		}
	}
}
@@ -1,247 +0,0 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.streams.integration;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.Joined;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Serialized;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kafka.streams.annotations.KafkaStreamsProcessor;
import org.springframework.cloud.stream.binder.kafka.streams.properties.KafkaStreamsApplicationSupportProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Soby Chacko
 */
public class StreamToTableJoinIntegrationTests {

	@ClassRule
	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "output-topic");

	private static Consumer<String, Long> consumer;

	@BeforeClass
	public static void setUp() throws Exception {
		Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
		consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
		consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class);
		DefaultKafkaConsumerFactory<String, Long> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
		consumer = cf.createConsumer();
		embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "output-topic");
	}

	@AfterClass
	public static void tearDown() {
		consumer.close();
	}

	@EnableBinding(KafkaStreamsProcessorX.class)
	@EnableAutoConfiguration
	@EnableConfigurationProperties(KafkaStreamsApplicationSupportProperties.class)
	public static class CountClicksPerRegionApplication {

		@StreamListener
		@SendTo("output")
		public KStream<String, Long> process(@Input("input") KStream<String, Long> userClicksStream,
				@Input("inputX") KTable<String, String> userRegionsTable) {

			return userClicksStream
					.leftJoin(userRegionsTable,
							(clicks, region) -> new RegionWithClicks(region == null ? "UNKNOWN" : region, clicks),
							Joined.with(Serdes.String(), Serdes.Long(), null))
					.map((user, regionWithClicks) -> new KeyValue<>(regionWithClicks.getRegion(), regionWithClicks.getClicks()))
					.groupByKey(Serialized.with(Serdes.String(), Serdes.Long()))
					.reduce((firstClicks, secondClicks) -> firstClicks + secondClicks)
					.toStream();
		}
	}

	interface KafkaStreamsProcessorX extends KafkaStreamsProcessor {

		@Input("inputX")
		KTable<?, ?> inputX();
	}

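	// Stream-table join: each click event is left-joined against the latest known
	// region for that user (falling back to "UNKNOWN" when none has arrived yet),
	// re-keyed by region, and reduced to a running per-region click total.
	// KafkaStreamsProcessorX adds the extra KTable input binding ("inputX") on top of
	// the standard single-input/single-output processor contract.
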
	@Test
	public void testStreamToTable() throws Exception {
		SpringApplication app = new SpringApplication(CountClicksPerRegionApplication.class);
		app.setWebApplicationType(WebApplicationType.NONE);

		ConfigurableApplicationContext context = app.run("--server.port=0",
				"--spring.jmx.enabled=false",
				"--spring.cloud.stream.bindings.input.destination=user-clicks",
				"--spring.cloud.stream.bindings.inputX.destination=user-regions",
				"--spring.cloud.stream.bindings.output.destination=output-topic",
				"--spring.cloud.stream.bindings.input.consumer.useNativeDecoding=true",
				"--spring.cloud.stream.bindings.inputX.consumer.useNativeDecoding=true",
				"--spring.cloud.stream.bindings.output.producer.useNativeEncoding=true",
				"--spring.cloud.stream.kafka.streams.bindings.input.consumer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.bindings.input.consumer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde",
				"--spring.cloud.stream.kafka.streams.bindings.inputX.consumer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.bindings.inputX.consumer.valueSerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.binder.configuration.default.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
				"--spring.cloud.stream.kafka.streams.binder.configuration.commit.interval.ms=10000",
				"--spring.cloud.stream.bindings.output.producer.headerMode=raw",
				"--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
				"--spring.cloud.stream.bindings.inputX.consumer.headerMode=raw",
				"--spring.cloud.stream.kafka.streams.binder.brokers=" + embeddedKafka.getBrokersAsString(),
				"--spring.cloud.stream.kafka.streams.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
		try {
			// Input 1: Clicks per user (multiple records allowed per user).
			List<KeyValue<String, Long>> userClicks = Arrays.asList(
					new KeyValue<>("alice", 13L),
					new KeyValue<>("bob", 4L),
					new KeyValue<>("chao", 25L),
					new KeyValue<>("bob", 19L),
					new KeyValue<>("dave", 56L),
					new KeyValue<>("eve", 78L),
					new KeyValue<>("alice", 40L),
					new KeyValue<>("fang", 99L)
			);

			Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
			senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
			senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, LongSerializer.class);

			DefaultKafkaProducerFactory<String, Long> pf = new DefaultKafkaProducerFactory<>(senderProps);
			KafkaTemplate<String, Long> template = new KafkaTemplate<>(pf, true);
			template.setDefaultTopic("user-clicks");

			for (KeyValue<String, Long> keyValue : userClicks) {
				template.sendDefault(keyValue.key, keyValue.value);
			}

			// Input 2: Region per user (multiple records allowed per user).
			List<KeyValue<String, String>> userRegions = Arrays.asList(
					new KeyValue<>("alice", "asia"), /* Alice lived in Asia originally... */
					new KeyValue<>("bob", "americas"),
					new KeyValue<>("chao", "asia"),
					new KeyValue<>("dave", "europe"),
					new KeyValue<>("alice", "europe"), /* ...but moved to Europe some time later. */
					new KeyValue<>("eve", "americas"),
					new KeyValue<>("fang", "asia")
			);

			Map<String, Object> senderProps1 = KafkaTestUtils.producerProps(embeddedKafka);
			senderProps1.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
			senderProps1.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

			DefaultKafkaProducerFactory<String, String> pf1 = new DefaultKafkaProducerFactory<>(senderProps1);
			KafkaTemplate<String, String> template1 = new KafkaTemplate<>(pf1, true);
			template1.setDefaultTopic("user-regions");

			for (KeyValue<String, String> keyValue : userRegions) {
				template1.sendDefault(keyValue.key, keyValue.value);
			}

			List<KeyValue<String, Long>> expectedClicksPerRegion = Arrays.asList(
					new KeyValue<>("americas", 101L),
					new KeyValue<>("europe", 109L),
					new KeyValue<>("asia", 124L)
			);

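			// Expected totals (assuming alice's clicks are joined against her final
			// region, europe): americas = 4 + 19 + 78 = 101,
			// europe = 13 + 40 + 56 = 109, asia = 25 + 99 = 124.
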
			// Verify that we receive the expected data. Poll until the expected number
			// of records arrives or 30 seconds elapse.
			int count = 0;
			long start = System.currentTimeMillis();
			List<KeyValue<String, Long>> actualClicksPerRegion = new ArrayList<>();
			do {
				ConsumerRecords<String, Long> records = KafkaTestUtils.getRecords(consumer);
				count = count + records.count();
				for (ConsumerRecord<String, Long> record : records) {
					actualClicksPerRegion.add(new KeyValue<>(record.key(), record.value()));
				}
			} while (count < expectedClicksPerRegion.size() && (System.currentTimeMillis() - start) < 30000);

			assertThat(count).isEqualTo(expectedClicksPerRegion.size());
			assertThat(actualClicksPerRegion).hasSameElementsAs(expectedClicksPerRegion);
		}
		// The original catch (Exception e) { System.out.println(e); } swallowed
		// failures and could let a broken run pass silently; exceptions now propagate.
		finally {
			context.close();
		}
	}

	/**
	 * Tuple for a region and its associated number of clicks.
	 */
	private static final class RegionWithClicks {

		private final String region;
		private final long clicks;

		RegionWithClicks(String region, long clicks) {
			if (region == null || region.isEmpty()) {
				throw new IllegalArgumentException("region must be set");
			}
			if (clicks < 0) {
				throw new IllegalArgumentException("clicks must not be negative");
			}
			this.region = region;
			this.clicks = clicks;
		}

		public String getRegion() {
			return region;
		}

		public long getClicks() {
			return clicks;
		}

	}
}
Some files were not shown because too many files have changed in this diff.