Porting Kafka binder-specific code from Spring Cloud Stream

Soby Chacko
2016-06-09 17:51:39 -04:00
parent 8f4528b587
commit 8362cc3569
29 changed files with 3979 additions and 0 deletions

24
.gitignore vendored Normal file

@@ -0,0 +1,24 @@
/application.yml
/application.properties
asciidoctor.css
*~
.#*
*#
target/
build/
bin/
_site/
.classpath
.project
.settings
.springBeans
.DS_Store
*.sw*
*.iml
*.ipr
*.iws
.idea/*
.factorypath
dump.rdb
.apt_generated
artifacts

1
.mvn/jvm.config Normal file

@@ -0,0 +1 @@
-Xmx1024m -XX:CICompilerCount=1 -XX:TieredStopAtLevel=1 -Djava.security.egd=file:/dev/./urandom

1
.mvn/maven.config Normal file

@@ -0,0 +1 @@
-DaltSnapshotDeploymentRepository=repo.spring.io::default::https://repo.spring.io/libs-snapshot-local -P spring

BIN
.mvn/wrapper/maven-wrapper.jar vendored Normal file

Binary file not shown.

1
.mvn/wrapper/maven-wrapper.properties vendored Normal file

@@ -0,0 +1 @@
distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.3.3/apache-maven-3.3.3-bin.zip

66
.settings.xml Normal file

@@ -0,0 +1,66 @@
<?xml version="1.0" encoding="UTF-8"?>
<settings>
<servers>
<server>
<id>repo.spring.io</id>
<username>${env.CI_DEPLOY_USERNAME}</username>
<password>${env.CI_DEPLOY_PASSWORD}</password>
</server>
</servers>
<profiles>
<profile>
<!--
N.B. this profile is only here to support users and IDEs that do not use Maven 3.3.
It isn't needed on the command line if you use the wrapper script (mvnw) or if you use
a native Maven with the right version. Eclipse users should point their Maven tooling to
this settings file, or copy the profile into their ~/.m2/settings.xml.
-->
<id>spring</id>
<activation><activeByDefault>true</activeByDefault></activation>
<repositories>
<repository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
<repository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<repository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/release</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
</pluginRepository>
<pluginRepository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</pluginRepository>
</pluginRepositories>
</profile>
</profiles>
</settings>

201
LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md

@@ -1 +1,3 @@
# spring-cloud-stream-binder-kafka
Spring Cloud Stream Binder implementation for Kafka

234
mvnw vendored Executable file

@@ -0,0 +1,234 @@
#!/bin/sh
# ----------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Maven2 Start Up Batch script
#
# Required ENV vars:
# ------------------
# JAVA_HOME - location of a JDK home dir
#
# Optional ENV vars
# -----------------
# M2_HOME - location of maven2's installed home dir
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
# e.g. to debug Maven itself, use
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
# ----------------------------------------------------------------------------
if [ -z "$MAVEN_SKIP_RC" ] ; then
if [ -f /etc/mavenrc ] ; then
. /etc/mavenrc
fi
if [ -f "$HOME/.mavenrc" ] ; then
. "$HOME/.mavenrc"
fi
fi
# OS specific support. $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
mingw=false
case "`uname`" in
CYGWIN*) cygwin=true ;;
MINGW*) mingw=true;;
Darwin*) darwin=true
#
# Look for the Apple JDKs first to preserve the existing behaviour, and then look
# for the new JDKs provided by Oracle.
#
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
#
# Oracle JDKs
#
export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
#
# Apple JDKs
#
export JAVA_HOME=`/usr/libexec/java_home`
fi
;;
esac
if [ -z "$JAVA_HOME" ] ; then
if [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
fi
fi
if [ -z "$M2_HOME" ] ; then
## resolve links - $0 may be a link to maven's home
PRG="$0"
# need this for relative symlinks
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG="`dirname "$PRG"`/$link"
fi
done
saveddir=`pwd`
M2_HOME=`dirname "$PRG"`/..
# make it fully qualified
M2_HOME=`cd "$M2_HOME" && pwd`
cd "$saveddir"
# echo Using m2 at $M2_HOME
fi
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --unix "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
# For MinGW, ensure paths are in UNIX format before anything is touched
if $mingw ; then
[ -n "$M2_HOME" ] &&
M2_HOME="`(cd "$M2_HOME"; pwd)`"
[ -n "$JAVA_HOME" ] &&
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
# TODO classpath?
fi
if [ -z "$JAVA_HOME" ]; then
javaExecutable="`which javac`"
if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
# readlink(1) is not available as standard on Solaris 10.
readLink=`which readlink`
if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
if $darwin ; then
javaHome="`dirname \"$javaExecutable\"`"
javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
else
javaExecutable="`readlink -f \"$javaExecutable\"`"
fi
javaHome="`dirname \"$javaExecutable\"`"
javaHome=`expr "$javaHome" : '\(.*\)/bin'`
JAVA_HOME="$javaHome"
export JAVA_HOME
fi
fi
fi
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD="`which java`"
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly." >&2
echo " We cannot execute $JAVACMD" >&2
exit 1
fi
if [ -z "$JAVA_HOME" ] ; then
echo "Warning: JAVA_HOME environment variable is not set."
fi
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --path --windows "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
fi
# traverses directory structure from process work directory to filesystem root
# first directory with .mvn subdirectory is considered project base directory
find_maven_basedir() {
local basedir=$(pwd)
local wdir=$(pwd)
while [ "$wdir" != '/' ] ; do
if [ -d "$wdir"/.mvn ] ; then
basedir=$wdir
break
fi
wdir=$(cd "$wdir/.."; pwd)
done
echo "${basedir}"
}
# concatenates all lines of a file
concat_lines() {
if [ -f "$1" ]; then
echo "$(tr -s '\n' ' ' < "$1")"
fi
}
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
export MAVEN_CMD_LINE_ARGS
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
exec "$JAVACMD" \
$MAVEN_OPTS \
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
${WRAPPER_LAUNCHER} "$@"

234
mvnw.cmd vendored Normal file

@@ -0,0 +1,234 @@
#!/bin/sh
# ----------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Maven2 Start Up Batch script
#
# Required ENV vars:
# ------------------
# JAVA_HOME - location of a JDK home dir
#
# Optional ENV vars
# -----------------
# M2_HOME - location of maven2's installed home dir
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
# e.g. to debug Maven itself, use
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
# ----------------------------------------------------------------------------
if [ -z "$MAVEN_SKIP_RC" ] ; then
if [ -f /etc/mavenrc ] ; then
. /etc/mavenrc
fi
if [ -f "$HOME/.mavenrc" ] ; then
. "$HOME/.mavenrc"
fi
fi
# OS specific support. $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
mingw=false
case "`uname`" in
CYGWIN*) cygwin=true ;;
MINGW*) mingw=true;;
Darwin*) darwin=true
#
# Look for the Apple JDKs first to preserve the existing behaviour, and then look
# for the new JDKs provided by Oracle.
#
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
#
# Oracle JDKs
#
export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
#
# Apple JDKs
#
export JAVA_HOME=`/usr/libexec/java_home`
fi
;;
esac
if [ -z "$JAVA_HOME" ] ; then
if [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
fi
fi
if [ -z "$M2_HOME" ] ; then
## resolve links - $0 may be a link to maven's home
PRG="$0"
# need this for relative symlinks
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG="`dirname "$PRG"`/$link"
fi
done
saveddir=`pwd`
M2_HOME=`dirname "$PRG"`/..
# make it fully qualified
M2_HOME=`cd "$M2_HOME" && pwd`
cd "$saveddir"
# echo Using m2 at $M2_HOME
fi
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --unix "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
# For MinGW, ensure paths are in UNIX format before anything is touched
if $mingw ; then
[ -n "$M2_HOME" ] &&
M2_HOME="`(cd "$M2_HOME"; pwd)`"
[ -n "$JAVA_HOME" ] &&
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
# TODO classpath?
fi
if [ -z "$JAVA_HOME" ]; then
javaExecutable="`which javac`"
if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
# readlink(1) is not available as standard on Solaris 10.
readLink=`which readlink`
if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
if $darwin ; then
javaHome="`dirname \"$javaExecutable\"`"
javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
else
javaExecutable="`readlink -f \"$javaExecutable\"`"
fi
javaHome="`dirname \"$javaExecutable\"`"
javaHome=`expr "$javaHome" : '\(.*\)/bin'`
JAVA_HOME="$javaHome"
export JAVA_HOME
fi
fi
fi
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD="`which java`"
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly." >&2
echo " We cannot execute $JAVACMD" >&2
exit 1
fi
if [ -z "$JAVA_HOME" ] ; then
echo "Warning: JAVA_HOME environment variable is not set."
fi
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --path --windows "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
fi
# traverses directory structure from process work directory to filesystem root
# first directory with .mvn subdirectory is considered project base directory
find_maven_basedir() {
local basedir=$(pwd)
local wdir=$(pwd)
while [ "$wdir" != '/' ] ; do
if [ -d "$wdir"/.mvn ] ; then
basedir=$wdir
break
fi
wdir=$(cd "$wdir/.."; pwd)
done
echo "${basedir}"
}
# concatenates all lines of a file
concat_lines() {
if [ -f "$1" ]; then
echo "$(tr -s '\n' ' ' < "$1")"
fi
}
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
export MAVEN_CMD_LINE_ARGS
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
exec "$JAVACMD" \
$MAVEN_OPTS \
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
${WRAPPER_LAUNCHER} "$@"

94
pom.xml Normal file

@@ -0,0 +1,94 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
<packaging>pom</packaging>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-build</artifactId>
<version>1.1.1.RELEASE</version>
<relativePath />
</parent>
<properties>
<spring-boot.version>1.4.0.BUILD-SNAPSHOT</spring-boot.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-dependencies</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<modules>
<module>spring-cloud-stream-binder-kafka</module>
<module>spring-cloud-starter-stream-kafka</module>
</modules>
<profiles>
<profile>
<id>spring</id>
<repositories>
<repository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
<releases>
<enabled>false</enabled>
</releases>
</repository>
<repository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<repository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/release</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
<releases>
<enabled>false</enabled>
</releases>
</pluginRepository>
<pluginRepository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</pluginRepository>
<pluginRepository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/libs-release-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</pluginRepository>
</pluginRepositories>
</profile>
</profiles>
</project>

25
spring-cloud-starter-stream-kafka/pom.xml Normal file

@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
<description>Spring Cloud Starter Stream Kafka</description>
<url>http://projects.spring.io/spring-cloud</url>
<organization>
<name>Pivotal Software, Inc.</name>
<url>http://www.spring.io</url>
</organization>
<properties>
<main.basedir>${basedir}/../..</main.basedir>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
</dependencies>
</project>

1
spring-cloud-starter-stream-kafka/src/main/resources/META-INF/spring.provides Normal file

@@ -0,0 +1 @@
provides: spring-cloud-stream-binder-kafka

143
spring-cloud-stream-binder-kafka/pom.xml Normal file

@@ -0,0 +1,143 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
<packaging>jar</packaging>
<name>spring-cloud-stream-binder-kafka</name>
<description>Kafka binder implementation</description>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<properties>
<kafka.version>0.8.2.2</kafka.version>
<curator.version>2.6.0</curator.version>
<spring-integration-kafka.version>1.3.1.BUILD-SNAPSHOT</spring-integration-kafka.version>
<rxjava-math.version>1.0.0</rxjava-math.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-codec</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-test-support-internal</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
<version>${spring-integration-kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro-compiler</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava-math</artifactId>
<version>${rxjava-math.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>${kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>

95
spring-cloud-stream-binder-kafka/src/main/java/org/springframework/cloud/stream/binder/kafka/KafkaBinderHealthIndicator.java Normal file

@@ -0,0 +1,95 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import kafka.cluster.Broker;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils$;
import org.I0Itec.zkclient.ZkClient;
import scala.collection.JavaConversions;
import scala.collection.Seq;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.integration.kafka.core.BrokerAddress;
import org.springframework.integration.kafka.core.Partition;
/**
* Health indicator for Kafka.
*
* @author Ilayaperumal Gopinathan
*/
public class KafkaBinderHealthIndicator implements HealthIndicator {
private final KafkaMessageChannelBinder binder;
private final KafkaBinderConfigurationProperties configurationProperties;
public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
KafkaBinderConfigurationProperties configurationProperties) {
this.binder = binder;
this.configurationProperties = configurationProperties;
}
@Override
public Health health() {
ZkClient zkClient = null;
try {
zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(),
configurationProperties.getZkConnectionTimeout(), ZKStringSerializer$.MODULE$);
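// Collect the connection strings of every broker currently registered (alive) in ZooKeeper.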
Set<String> brokersInClusterSet = new HashSet<>();
Seq<Broker> allBrokersInCluster = ZkUtils$.MODULE$.getAllBrokersInCluster(zkClient);
Collection<Broker> brokersInCluster = JavaConversions.asJavaCollection(allBrokersInCluster);
for (Broker broker : brokersInCluster) {
brokersInClusterSet.add(broker.connectionString());
}
Set<String> downMessages = new HashSet<>();
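// For each topic partition the binder uses, verify that its leader is among the live brokers.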
for (Map.Entry<String, Collection<Partition>> entry : binder.getTopicsInUse().entrySet()) {
for (Partition partition : entry.getValue()) {
BrokerAddress address = binder.getConnectionFactory().getLeader(partition);
if (!brokersInClusterSet.contains(address.toString())) {
downMessages.add(address.toString());
}
}
}
if (downMessages.isEmpty()) {
return Health.up().build();
}
return Health.down().withDetail("Following brokers are down: ", downMessages.toString()).build();
}
catch (Exception e) {
return Health.down(e).build();
}
finally {
if (zkClient != null) {
try {
zkClient.close();
}
catch (Exception e) {
// ignore
}
}
}
}
}
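
The health check is leader-oriented: every partition the binder uses must have its leader present in the live broker set read from ZooKeeper, otherwise the indicator reports DOWN. A minimal sketch of inspecting such an indicator through the Spring Boot actuator API; the stub lambda, class name, and detail key below are illustrative stand-ins, not part of this commit:

import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.boot.actuate.health.Status;

public class HealthCheckSketch {
    public static void main(String[] args) {
        // Hypothetical stub in place of a fully wired KafkaBinderHealthIndicator.
        HealthIndicator indicator = () -> Health.down()
                .withDetail("brokersDown", "[broker1:9092]").build();
        Health health = indicator.health();
        // UP only when every partition leader in use is a live broker.
        if (Status.DOWN.equals(health.getStatus())) {
            System.out.println("Kafka binder unhealthy: " + health.getDetails());
        }
    }
}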

43
spring-cloud-stream-binder-kafka/src/main/java/org/springframework/cloud/stream/binder/kafka/KafkaBindingProperties.java Normal file

@@ -0,0 +1,43 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
/**
* @author Marius Bogoevici
*/
public class KafkaBindingProperties {
private KafkaConsumerProperties consumer = new KafkaConsumerProperties();
private KafkaProducerProperties producer = new KafkaProducerProperties();
public KafkaConsumerProperties getConsumer() {
return consumer;
}
public void setConsumer(KafkaConsumerProperties consumer) {
this.consumer = consumer;
}
public KafkaProducerProperties getProducer() {
return producer;
}
public void setProducer(KafkaProducerProperties producer) {
this.producer = producer;
}
}

83
spring-cloud-stream-binder-kafka/src/main/java/org/springframework/cloud/stream/binder/kafka/KafkaConsumerProperties.java Normal file

@@ -0,0 +1,83 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
/**
* @author Marius Bogoevici
*/
public class KafkaConsumerProperties {
private boolean autoCommitOffset = true;
private Boolean autoCommitOnError;
private boolean resetOffsets;
private KafkaMessageChannelBinder.StartOffset startOffset;
private boolean enableDlq;
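// Interval between connection recovery attempts, in milliseconds.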
private int recoveryInterval = 5000;
public boolean isAutoCommitOffset() {
return autoCommitOffset;
}
public void setAutoCommitOffset(boolean autoCommitOffset) {
this.autoCommitOffset = autoCommitOffset;
}
public boolean isResetOffsets() {
return resetOffsets;
}
public void setResetOffsets(boolean resetOffsets) {
this.resetOffsets = resetOffsets;
}
public KafkaMessageChannelBinder.StartOffset getStartOffset() {
return startOffset;
}
public void setStartOffset(KafkaMessageChannelBinder.StartOffset startOffset) {
this.startOffset = startOffset;
}
public boolean isEnableDlq() {
return enableDlq;
}
public void setEnableDlq(boolean enableDlq) {
this.enableDlq = enableDlq;
}
public Boolean getAutoCommitOnError() {
return autoCommitOnError;
}
public void setAutoCommitOnError(Boolean autoCommitOnError) {
this.autoCommitOnError = autoCommitOnError;
}
public int getRecoveryInterval() {
return recoveryInterval;
}
public void setRecoveryInterval(int recoveryInterval) {
this.recoveryInterval = recoveryInterval;
}
}

60
spring-cloud-stream-binder-kafka/src/main/java/org/springframework/cloud/stream/binder/kafka/KafkaExtendedBindingProperties.java Normal file

@@ -0,0 +1,60 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.HashMap;
import java.util.Map;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;
/**
* @author Marius Bogoevici
*/
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {
private Map<String, KafkaBindingProperties> bindings = new HashMap<>();
public Map<String, KafkaBindingProperties> getBindings() {
return bindings;
}
public void setBindings(Map<String, KafkaBindingProperties> bindings) {
this.bindings = bindings;
}
@Override
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
if (bindings.containsKey(channelName) && bindings.get(channelName).getConsumer() != null) {
return bindings.get(channelName).getConsumer();
}
else {
return new KafkaConsumerProperties();
}
}
@Override
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
if (bindings.containsKey(channelName) && bindings.get(channelName).getProducer() != null) {
return bindings.get(channelName).getProducer();
}
else {
return new KafkaProducerProperties();
}
}
}
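
Lookup is per channel, falling back to fresh defaults when a channel has no entry, which is what lets keys under spring.cloud.stream.kafka.bindings.<channelName>.consumer.* (and .producer.*) stay optional. A small sketch of that fallback using the classes from this commit; the wrapper class and channel names are illustrative:

import java.util.Collections;

public class ExtendedPropertiesSketch {
    public static void main(String[] args) {
        KafkaExtendedBindingProperties properties = new KafkaExtendedBindingProperties();
        KafkaBindingProperties input = new KafkaBindingProperties();
        input.getConsumer().setEnableDlq(true);
        properties.setBindings(Collections.singletonMap("input", input));
        // A configured channel returns its own settings.
        System.out.println(properties.getExtendedConsumerProperties("input").isEnableDlq());  // true
        // Any other channel falls back to default KafkaConsumerProperties.
        System.out.println(properties.getExtendedConsumerProperties("other").isEnableDlq());  // false
    }
}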

800
spring-cloud-stream-binder-kafka/src/main/java/org/springframework/cloud/stream/binder/kafka/KafkaMessageChannelBinder.java Normal file

@@ -0,0 +1,800 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import kafka.admin.AdminUtils;
import kafka.api.OffsetRequest;
import kafka.api.TopicMetadata;
import kafka.common.ErrorMapping;
import kafka.serializer.Decoder;
import kafka.serializer.DefaultDecoder;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.utils.Utils;
import scala.collection.Seq;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.cloud.stream.binder.AbstractBinder;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.DefaultBinding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.MessageValues;
import org.springframework.cloud.stream.binder.PartitionHandler;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.http.MediaType;
import org.springframework.integration.channel.FixedSubscriberChannel;
import org.springframework.integration.endpoint.EventDrivenConsumer;
import org.springframework.integration.handler.AbstractMessageHandler;
import org.springframework.integration.handler.AbstractReplyProducingMessageHandler;
import org.springframework.integration.kafka.core.ConnectionFactory;
import org.springframework.integration.kafka.core.DefaultConnectionFactory;
import org.springframework.integration.kafka.core.KafkaMessage;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.core.ZookeeperConfiguration;
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
import org.springframework.integration.kafka.listener.AcknowledgingMessageListener;
import org.springframework.integration.kafka.listener.Acknowledgment;
import org.springframework.integration.kafka.listener.ErrorHandler;
import org.springframework.integration.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.integration.kafka.listener.KafkaNativeOffsetManager;
import org.springframework.integration.kafka.listener.MessageListener;
import org.springframework.integration.kafka.listener.OffsetManager;
import org.springframework.integration.kafka.support.KafkaHeaders;
import org.springframework.integration.kafka.support.ProducerConfiguration;
import org.springframework.integration.kafka.support.ProducerFactoryBean;
import org.springframework.integration.kafka.support.ProducerListener;
import org.springframework.integration.kafka.support.ProducerMetadata;
import org.springframework.integration.kafka.support.ZookeeperConnect;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
/**
* A {@link Binder} that uses Kafka as the underlying middleware.
*
* @author Eric Bottard
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
* @author David Turanski
* @author Gary Russell
* @author Mark Fisher
* @author Soby Chacko
*/
public class KafkaMessageChannelBinder extends
AbstractBinder<MessageChannel, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>>
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties>,
DisposableBean {
public static final ByteArraySerializer BYTE_ARRAY_SERIALIZER = new ByteArraySerializer();
public static final ThreadFactory DAEMON_THREAD_FACTORY;
static {
CustomizableThreadFactory threadFactory = new CustomizableThreadFactory("kafka-binder-");
threadFactory.setDaemon(true);
DAEMON_THREAD_FACTORY = threadFactory;
}
private final KafkaBinderConfigurationProperties configurationProperties;
private final String[] headersToMap;
private RetryOperations metadataRetryOperations;
private final Map<String, Collection<Partition>> topicsInUse = new HashMap<>();
// -------- Default values for properties -------
private ConnectionFactory connectionFactory;
private ProducerListener producerListener;
private volatile Producer<byte[], byte[]> dlqProducer;
private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties) {
this.configurationProperties = configurationProperties;
String[] configuredHeaders = configurationProperties.getHeaders();
if (ObjectUtils.isEmpty(configuredHeaders)) {
this.headersToMap = BinderHeaders.STANDARD_HEADERS;
}
else {
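// copyOfRange pads the extra slots with null; the arraycopy below then fills
// them with the configured headers, appending them after the standard ones.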
String[] combinedHeadersToMap = Arrays.copyOfRange(BinderHeaders.STANDARD_HEADERS, 0,
BinderHeaders.STANDARD_HEADERS.length + configuredHeaders.length);
System.arraycopy(configuredHeaders, 0, combinedHeadersToMap, BinderHeaders.STANDARD_HEADERS.length,
configuredHeaders.length);
this.headersToMap = combinedHeadersToMap;
}
}
String getZkAddress() {
return this.configurationProperties.getZkConnectionString();
}
public ConnectionFactory getConnectionFactory() {
return connectionFactory;
}
public void setProducerListener(ProducerListener producerListener) {
this.producerListener = producerListener;
}
/**
* Retry configuration for operations such as validating topic creation
* @param metadataRetryOperations the retry configuration
*/
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
this.metadataRetryOperations = metadataRetryOperations;
}
public void setExtendedBindingProperties(KafkaExtendedBindingProperties extendedBindingProperties) {
this.extendedBindingProperties = extendedBindingProperties;
}
@Override
public void onInit() throws Exception {
ZookeeperConfiguration configuration = new ZookeeperConfiguration(
new ZookeeperConnect(configurationProperties.getZkConnectionString()));
configuration.setBufferSize(configurationProperties.getSocketBufferSize());
configuration.setMaxWait(configurationProperties.getMaxWait());
DefaultConnectionFactory defaultConnectionFactory = new DefaultConnectionFactory(configuration);
defaultConnectionFactory.afterPropertiesSet();
this.connectionFactory = defaultConnectionFactory;
if (metadataRetryOperations == null) {
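// Default retry policy: up to 10 attempts with exponential backoff,
// starting at 100 ms and doubling up to a 1 s cap.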
RetryTemplate retryTemplate = new RetryTemplate();
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
simpleRetryPolicy.setMaxAttempts(10);
retryTemplate.setRetryPolicy(simpleRetryPolicy);
ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
backOffPolicy.setInitialInterval(100);
backOffPolicy.setMultiplier(2);
backOffPolicy.setMaxInterval(1000);
retryTemplate.setBackOffPolicy(backOffPolicy);
metadataRetryOperations = retryTemplate;
}
}
@Override
public void destroy() throws Exception {
if (dlqProducer != null) {
dlqProducer.close();
dlqProducer = null;
}
}
/**
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
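* For example, {@code orders.v1-events_0} is accepted, while {@code orders topic!}
* is rejected with an {@link IllegalArgumentException}.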
*/
public static void validateTopicName(String topicName) {
try {
byte[] utf8 = topicName.getBytes("UTF-8");
for (byte b : utf8) {
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|| (b == '-') || (b == '_'))) {
throw new IllegalArgumentException(
"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
}
}
}
catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Can't happen
}
}
@Override
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
return extendedBindingProperties.getExtendedConsumerProperties(channelName);
}
@Override
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
return extendedBindingProperties.getExtendedProducerProperties(channelName);
}
Map<String, Collection<Partition>> getTopicsInUse() {
return this.topicsInUse;
}
@Override
protected Binding<MessageChannel> doBindConsumer(String name, String group, MessageChannel inputChannel,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
// If the caller provides a consumer group, use it; otherwise an anonymous
// consumer group is generated each time, such that each anonymous binding
// will receive all messages. Consumers reset offsets at the latest time by
// default, which allows them to receive only messages sent after they've
// been bound. That behavior can be changed with the "resetOffsets" and
// "startOffset" properties.
boolean anonymous = !StringUtils.hasText(group);
Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
"DLQ support is not available for anonymous subscriptions");
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
// The reference point, if not set explicitly, is the latest time for
// anonymous subscriptions and the earliest time for group subscriptions.
// This allows the latter to receive messages published before the group
// has been created.
long referencePoint = properties.getExtension().getStartOffset() != null
? properties.getExtension().getStartOffset().getReferencePoint()
: (anonymous ? OffsetRequest.LatestTime() : OffsetRequest.EarliestTime());
return createKafkaConsumer(name, inputChannel, properties, consumerGroup, referencePoint);
}
@Override
public Binding<MessageChannel> doBindProducer(String name, MessageChannel moduleOutputChannel,
ExtendedProducerProperties<KafkaProducerProperties> properties) {
Assert.isInstanceOf(SubscribableChannel.class, moduleOutputChannel);
if (logger.isInfoEnabled()) {
logger.info("Using kafka topic for outbound: " + name);
}
validateTopicName(name);
Collection<Partition> partitions = ensureTopicCreated(name, properties.getPartitionCount());
if (properties.getPartitionCount() < partitions.size()) {
if (logger.isInfoEnabled()) {
logger.info("The `partitionCount` of the producer for topic " + name + " is "
+ properties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
topicsInUse.put(name, partitions);
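// Keys and payloads are written to Kafka as raw byte arrays.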
ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>(name, byte[].class, byte[].class,
BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
producerMetadata.setSync(properties.getExtension().isSync());
producerMetadata.setCompressionType(properties.getExtension().getCompressionType());
producerMetadata.setBatchBytes(properties.getExtension().getBufferSize());
Properties additionalProps = new Properties();
additionalProps.put(ProducerConfig.ACKS_CONFIG, String.valueOf(configurationProperties.getRequiredAcks()));
additionalProps.put(ProducerConfig.LINGER_MS_CONFIG,
String.valueOf(properties.getExtension().getBatchTimeout()));
ProducerFactoryBean<byte[], byte[]> producerFB = new ProducerFactoryBean<>(producerMetadata,
configurationProperties.getKafkaConnectionString(), additionalProps);
try {
final ProducerConfiguration<byte[], byte[]> producerConfiguration = new ProducerConfiguration<>(
producerMetadata, producerFB.getObject());
producerConfiguration.setProducerListener(producerListener);
MessageHandler handler = new SendingHandler(name, properties, partitions.size(), producerConfiguration);
EventDrivenConsumer consumer = new EventDrivenConsumer((SubscribableChannel) moduleOutputChannel, handler) {
@Override
protected void doStop() {
super.doStop();
producerConfiguration.stop();
}
};
consumer.setBeanFactory(this.getBeanFactory());
consumer.setBeanName("outbound." + name);
consumer.afterPropertiesSet();
DefaultBinding<MessageChannel> producerBinding = new DefaultBinding<>(name, null, moduleOutputChannel,
consumer);
consumer.start();
return producerBinding;
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
/**
* Creates a Kafka topic if needed, or tries to increase its partition count to the
* desired number.
*/
private Collection<Partition> ensureTopicCreated(final String topicName, final int partitionCount) {
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
try {
final Properties topicConfig = new Properties();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkClient);
if (topicMetadata.errorCode() == ErrorMapping.NoError()) {
// only consider minPartitionCount for resizing if autoAddPartitions is true
int effectivePartitionCount = configurationProperties.isAutoAddPartitions()
? Math.max(configurationProperties.getMinPartitionCount(), partitionCount) : partitionCount;
if (topicMetadata.partitionsMetadata().size() < effectivePartitionCount) {
if (configurationProperties.isAutoAddPartitions()) {
AdminUtils.addPartitions(zkClient, topicName, effectivePartitionCount, null, false,
new Properties());
}
else {
int topicSize = topicMetadata.partitionsMetadata().size();
throw new BinderException("The number of expected partitions was: " + partitionCount + ", but "
+ topicSize + (topicSize > 1 ? " have " : " has ") + "been found instead."
+ "Consider either increasing the partition count of the topic or enabling `autoAddPartitions`");
}
}
}
else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
if (configurationProperties.isAutoCreateTopics()) {
Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
// always consider minPartitionCount for topic creation
int effectivePartitionCount = Math.max(configurationProperties.getMinPartitionCount(),
partitionCount);
final scala.collection.Map<Object, Seq<Object>> replicaAssignment = AdminUtils
.assignReplicasToBrokers(brokerList, effectivePartitionCount,
configurationProperties.getReplicationFactor(), -1, -1);
metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) throws RuntimeException {
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topicName,
replicaAssignment, topicConfig, true);
return null;
}
});
}
else {
throw new BinderException("Topic " + topicName + " does not exist");
}
}
else {
throw new BinderException("Error fetching Kafka topic metadata: ",
ErrorMapping.exceptionFor(topicMetadata.errorCode()));
}
try {
Collection<Partition> partitions = metadataRetryOperations
.execute(new RetryCallback<Collection<Partition>, Exception>() {
@Override
public Collection<Partition> doWithRetry(RetryContext context) throws Exception {
connectionFactory.refreshMetadata(Collections.singleton(topicName));
Collection<Partition> partitions = connectionFactory.getPartitions(topicName);
// do a sanity check on the partition set
if (partitions.size() < partitionCount) {
throw new IllegalStateException("The number of expected partitions was: "
+ partitionCount + ", but " + partitions.size()
+ (partitions.size() > 1 ? " have " : " has ") + "been found instead");
}
connectionFactory.getLeaders(partitions);
return partitions;
}
});
return partitions;
}
catch (Exception e) {
logger.error("Cannot initialize Binder", e);
throw new BinderException("Cannot initialize binder:", e);
}
}
finally {
zkClient.close();
}
}
private Binding<MessageChannel> createKafkaConsumer(String name, final MessageChannel moduleInputChannel,
ExtendedConsumerProperties<KafkaConsumerProperties> properties, String group, long referencePoint) {
validateTopicName(name);
if (properties.getInstanceCount() == 0) {
throw new IllegalArgumentException("Instance count cannot be zero");
}
Collection<Partition> allPartitions = ensureTopicCreated(name,
properties.getInstanceCount() * properties.getConcurrency());
Decoder<byte[]> valueDecoder = new DefaultDecoder(null);
Decoder<byte[]> keyDecoder = new DefaultDecoder(null);
Collection<Partition> listenedPartitions;
if (properties.getInstanceCount() == 1) {
listenedPartitions = allPartitions;
}
else {
listenedPartitions = new ArrayList<>();
for (Partition partition : allPartitions) {
// divide partitions across modules
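// e.g. with instanceCount = 2, instance 0 listens to partitions 0, 2, 4, ...
// and instance 1 listens to partitions 1, 3, 5, ...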
if ((partition.getId() % properties.getInstanceCount()) == properties.getInstanceIndex()) {
listenedPartitions.add(partition);
}
}
}
topicsInUse.put(name, listenedPartitions);
ReceivingHandler rh = new ReceivingHandler(properties);
rh.setOutputChannel(moduleInputChannel);
final FixedSubscriberChannel bridge = new FixedSubscriberChannel(rh);
bridge.setBeanName("bridge." + name);
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
final KafkaMessageListenerContainer messageListenerContainer = new KafkaMessageListenerContainer(
connectionFactory, listenedPartitions.toArray(new Partition[listenedPartitions.size()]));
if (logger.isDebugEnabled()) {
logger.debug("Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
}
OffsetManager offsetManager = createOffsetManager(group, referencePoint);
if (properties.getExtension().isResetOffsets()) {
offsetManager.resetOffsets(listenedPartitions);
}
messageListenerContainer.setOffsetManager(offsetManager);
messageListenerContainer.setQueueSize(configurationProperties.getQueueSize());
messageListenerContainer.setMaxFetch(configurationProperties.getFetchSize());
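// when autoCommitOnError is not set explicitly, default to committing offsets on error
// only if offsets are auto-committed and a DLQ is enabled, so that a message routed to
// the DLQ is not redelivered to the group on a subsequent start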
boolean autoCommitOnError = properties.getExtension().getAutoCommitOnError() != null
? properties.getExtension().getAutoCommitOnError()
: properties.getExtension().isAutoCommitOffset() && properties.getExtension().isEnableDlq();
messageListenerContainer.setAutoCommitOnError(autoCommitOnError);
messageListenerContainer.setRecoveryInterval(properties.getExtension().getRecoveryInterval());
int concurrency = Math.min(properties.getConcurrency(), listenedPartitions.size());
messageListenerContainer.setConcurrency(concurrency);
final ExecutorService dispatcherTaskExecutor = Executors.newFixedThreadPool(concurrency, DAEMON_THREAD_FACTORY);
messageListenerContainer.setDispatcherTaskExecutor(dispatcherTaskExecutor);
final KafkaMessageDrivenChannelAdapter kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter(
messageListenerContainer);
kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
kafkaMessageDrivenChannelAdapter.setKeyDecoder(keyDecoder);
kafkaMessageDrivenChannelAdapter.setPayloadDecoder(valueDecoder);
kafkaMessageDrivenChannelAdapter.setOutputChannel(bridge);
kafkaMessageDrivenChannelAdapter.setAutoCommitOffset(properties.getExtension().isAutoCommitOffset());
kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
// we need to wrap the adapter listener into a retrying listener so that the retry
// logic is applied before the ErrorHandler is executed
final RetryTemplate retryTemplate = buildRetryTemplateIfRetryEnabled(properties);
if (retryTemplate != null) {
if (properties.getExtension().isAutoCommitOffset()) {
final MessageListener originalMessageListener = (MessageListener) messageListenerContainer
.getMessageListener();
messageListenerContainer.setMessageListener(new MessageListener() {
@Override
public void onMessage(final KafkaMessage message) {
try {
retryTemplate.execute(new RetryCallback<Object, Throwable>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message);
return null;
}
});
}
catch (Throwable throwable) {
if (throwable instanceof RuntimeException) {
throw (RuntimeException) throwable;
}
else {
throw new RuntimeException(throwable);
}
}
}
});
}
else {
messageListenerContainer.setMessageListener(new AcknowledgingMessageListener() {
final AcknowledgingMessageListener originalMessageListener = (AcknowledgingMessageListener) messageListenerContainer
.getMessageListener();
@Override
public void onMessage(final KafkaMessage message, final Acknowledgment acknowledgment) {
retryTemplate.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message, acknowledgment);
return null;
}
});
}
});
}
}
if (properties.getExtension().isEnableDlq()) {
final String dlqTopic = "error." + name + "." + group;
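// e.g. destination "orders" consumed by group "audit" uses the DLQ topic "error.orders.audit"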
initDlqProducer();
messageListenerContainer.setErrorHandler(new ErrorHandler() {
@Override
public void handle(Exception thrownException, final KafkaMessage message) {
final byte[] key = message.getMessage().key() != null ? Utils.toArray(message.getMessage().key())
: null;
final byte[] payload = message.getMessage().payload() != null
? Utils.toArray(message.getMessage().payload()) : null;
dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload), new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
StringBuilder messageLog = new StringBuilder();
messageLog.append(" a message with key='"
+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
messageLog.append(" and payload='"
+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
messageLog.append(" received from " + message.getMetadata().getPartition());
if (exception != null) {
logger.error("Error sending to DLQ" + messageLog.toString(), exception);
}
else {
if (logger.isDebugEnabled()) {
logger.debug("Sent to DLQ " + messageLog.toString());
}
}
}
});
}
});
}
kafkaMessageDrivenChannelAdapter.start();
EventDrivenConsumer edc = new EventDrivenConsumer(bridge, rh) {
@Override
protected void doStop() {
// stop the offset manager and the channel adapter before unbinding
// this means that the upstream channel adapter has a chance to stop
kafkaMessageDrivenChannelAdapter.stop();
if (messageListenerContainer.getOffsetManager() instanceof DisposableBean) {
try {
((DisposableBean) messageListenerContainer.getOffsetManager()).destroy();
}
catch (Exception e) {
logger.error("Error while closing the offset manager", e);
}
}
super.doStop();
}
};
String groupedName = groupedName(name, group);
edc.setBeanName("inbound." + groupedName);
DefaultBinding<MessageChannel> consumerBinding = new DefaultBinding<MessageChannel>(name, group,
moduleInputChannel, edc) {
@Override
protected void afterUnbind() {
dispatcherTaskExecutor.shutdown();
}
};
edc.start();
return consumerBinding;
}
private synchronized void initDlqProducer() {
try {
if (dlqProducer == null) {
// the method is synchronized, so a single null check suffices; we can use the
// producer defaults as we do not need to tune performance
ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>("dlqKafkaProducer",
byte[].class, byte[].class, BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
producerMetadata.setSync(false);
producerMetadata.setCompressionType(ProducerMetadata.CompressionType.none);
producerMetadata.setBatchBytes(16384);
Properties additionalProps = new Properties();
additionalProps.put(ProducerConfig.ACKS_CONFIG,
String.valueOf(configurationProperties.getRequiredAcks()));
additionalProps.put(ProducerConfig.LINGER_MS_CONFIG, String.valueOf(0));
ProducerFactoryBean<byte[], byte[]> producerFactoryBean = new ProducerFactoryBean<>(
producerMetadata, configurationProperties.getKafkaConnectionString(), additionalProps);
dlqProducer = producerFactoryBean.getObject();
}
}
catch (Exception e) {
throw new RuntimeException("Cannot initialize DLQ producer:", e);
}
}
private OffsetManager createOffsetManager(String group, long referencePoint) {
try {
KafkaNativeOffsetManager kafkaOffsetManager = new KafkaNativeOffsetManager(connectionFactory,
new ZookeeperConnect(configurationProperties.getZkConnectionString()),
Collections.<Partition, Long>emptyMap());
kafkaOffsetManager.setConsumerId(group);
kafkaOffsetManager.setReferenceTimestamp(referencePoint);
kafkaOffsetManager.afterPropertiesSet();
WindowingOffsetManager windowingOffsetManager = new WindowingOffsetManager(kafkaOffsetManager);
windowingOffsetManager.setTimespan(configurationProperties.getOffsetUpdateTimeWindow());
windowingOffsetManager.setCount(configurationProperties.getOffsetUpdateCount());
windowingOffsetManager.setShutdownTimeout(configurationProperties.getOffsetUpdateShutdownTimeout());
windowingOffsetManager.afterPropertiesSet();
return windowingOffsetManager;
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
private String toDisplayString(String original, int maxCharacters) {
if (original.length() <= maxCharacters) {
return original;
}
return original.substring(0, maxCharacters) + "...";
}
@Override
public void doManualAck(LinkedList<MessageHeaders> messageHeadersList) {
for (MessageHeaders headers : messageHeadersList) {
Acknowledgment acknowledgment = (Acknowledgment) headers.get(KafkaHeaders.ACKNOWLEDGMENT);
Assert.notNull(acknowledgment,
"Acknowledgment must not be null when manually acknowledging a Kafka message.");
acknowledgment.acknowledge();
}
}
private final class ReceivingHandler extends AbstractReplyProducingMessageHandler {
private final ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties;
private ReceivingHandler(ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
this.consumerProperties = consumerProperties;
}
@Override
protected Object handleRequestMessage(Message<?> requestMessage) {
if (HeaderMode.embeddedHeaders.equals(consumerProperties.getHeaderMode())) {
MessageValues messageValues = extractMessageValues(requestMessage);
return MessageBuilder.createMessage(messageValues.getPayload(), new KafkaBinderHeaders(messageValues));
}
else {
return requestMessage;
}
}
@Override
protected boolean shouldCopyRequestHeaders() {
// prevent the message from being copied again in superclass
return false;
}
@SuppressWarnings("serial")
private final class KafkaBinderHeaders extends MessageHeaders {
KafkaBinderHeaders(Map<String, Object> headers) {
super(headers, MessageHeaders.ID_VALUE_NONE, -1L);
}
}
}
private final class SendingHandler extends AbstractMessageHandler {
private final AtomicInteger roundRobinCount = new AtomicInteger();
private final String topicName;
private final ExtendedProducerProperties<KafkaProducerProperties> producerProperties;
private final int numberOfKafkaPartitions;
private final ProducerConfiguration<byte[], byte[]> producerConfiguration;
private final PartitionHandler partitionHandler;
private SendingHandler(String topicName, ExtendedProducerProperties<KafkaProducerProperties> properties,
int numberOfPartitions, ProducerConfiguration<byte[], byte[]> producerConfiguration) {
this.topicName = topicName;
producerProperties = properties;
this.numberOfKafkaPartitions = numberOfPartitions;
ConfigurableListableBeanFactory beanFactory = KafkaMessageChannelBinder.this.getBeanFactory();
this.setBeanFactory(beanFactory);
this.producerConfiguration = producerConfiguration;
this.partitionHandler = new PartitionHandler(beanFactory, evaluationContext, partitionSelector, properties);
}
@Override
protected void handleMessageInternal(Message<?> message) throws Exception {
int targetPartition;
if (producerProperties.isPartitioned()) {
targetPartition = this.partitionHandler.determinePartition(message);
}
else {
targetPartition = roundRobin() % numberOfKafkaPartitions;
}
if (HeaderMode.embeddedHeaders.equals(producerProperties.getHeaderMode())) {
MessageValues transformed = serializePayloadIfNecessary(message);
byte[] messageToSend = embeddedHeadersMessageConverter.embedHeaders(transformed,
KafkaMessageChannelBinder.this.headersToMap);
producerConfiguration.send(topicName, targetPartition, null, messageToSend);
}
else if (HeaderMode.raw.equals(producerProperties.getHeaderMode())) {
Object contentType = message.getHeaders().get(MessageHeaders.CONTENT_TYPE);
if (contentType != null && !contentType.equals(MediaType.APPLICATION_OCTET_STREAM_VALUE)) {
logger.error("Raw mode supports only " + MediaType.APPLICATION_OCTET_STREAM_VALUE
+ " as content type, but the message had content type " + contentType);
}
if (message.getPayload() instanceof byte[]) {
producerConfiguration.send(topicName, targetPartition, null, (byte[]) message.getPayload());
}
else {
logger.error("Raw mode supports only byte[] payloads but value sent was of type "
+ message.getPayload().getClass());
}
}
}
private int roundRobin() {
// mask off the sign bit so the result stays non-negative even if the counter
// wraps past Integer.MAX_VALUE under concurrent increments
return roundRobinCount.incrementAndGet() & Integer.MAX_VALUE;
}
}
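/**
* Starting offsets that a consumer binding can request via the {@code startOffset}
* extension property; each value wraps the corresponding reference timestamp of the
* Kafka offset API. A usage sketch (mirroring the binder tests):
* <pre>
* consumerProperties.getExtension().setStartOffset(KafkaMessageChannelBinder.StartOffset.earliest);
* </pre>
*/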
public enum StartOffset {
earliest(OffsetRequest.EarliestTime()), latest(OffsetRequest.LatestTime());
private final long referencePoint;
StartOffset(long referencePoint) {
this.referencePoint = referencePoint;
}
public long getReferencePoint() {
return referencePoint;
}
}
}

View File

@@ -0,0 +1,68 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import javax.validation.constraints.NotNull;
import org.springframework.integration.kafka.support.ProducerMetadata;
/**
* @author Marius Bogoevici
*/
public class KafkaProducerProperties {
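// Note: when building the producer, the binder applies bufferSize as the batch size
// in bytes (ProducerMetadata#setBatchBytes) and batchTimeout as the linger time
// (ProducerConfig.LINGER_MS_CONFIG); see KafkaMessageChannelBinder#doBindProducer.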
private int bufferSize = 16384;
private ProducerMetadata.CompressionType compressionType = ProducerMetadata.CompressionType.none;
private boolean sync;
private int batchTimeout;
public int getBufferSize() {
return bufferSize;
}
public void setBufferSize(int bufferSize) {
this.bufferSize = bufferSize;
}
@NotNull
public ProducerMetadata.CompressionType getCompressionType() {
return compressionType;
}
public void setCompressionType(ProducerMetadata.CompressionType compressionType) {
this.compressionType = compressionType;
}
public boolean isSync() {
return sync;
}
public void setSync(boolean sync) {
this.sync = sync;
}
public int getBatchTimeout() {
return batchTimeout;
}
public void setBatchTimeout(int batchTimeout) {
this.batchTimeout = batchTimeout;
}
}

View File

@@ -0,0 +1,262 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.observables.GroupedObservable;
import rx.observables.MathObservable;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.listener.OffsetManager;
import org.springframework.util.Assert;
/**
* An {@link OffsetManager} that aggregates writes over a time or count window, using an
* underlying delegate to perform the actual operations. Its purpose is to reduce the
* frequency, and thereby the performance impact, of offset write operations.
*
* Either a time window or a number of writes can be specified, but not both.
*
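* <p>A minimal configuration sketch (mirroring how the binder itself wires this class
* up; the delegate can be any {@link OffsetManager} implementation):
* <pre>
* WindowingOffsetManager windowing = new WindowingOffsetManager(delegate);
* windowing.setTimespan(10000); // flush at most once every 10 seconds
* windowing.setCount(0); // count window disabled; set one or the other, not both
* windowing.afterPropertiesSet();
* </pre>
*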
* @author Marius Bogoevici
*/
public class WindowingOffsetManager implements OffsetManager, InitializingBean, DisposableBean {
private final CreatePartitionAndOffsetFunction createPartitionAndOffsetFunction = new CreatePartitionAndOffsetFunction();
private final GetOffsetFunction getOffsetFunction = new GetOffsetFunction();
private final ComputeMaximumOffsetByPartitionFunction findHighestOffsetInPartitionGroup = new ComputeMaximumOffsetByPartitionFunction();
private final GetPartitionFunction getPartition = new GetPartitionFunction();
private final FindHighestOffsetsByPartitionFunction findHighestOffsetsByPartition = new FindHighestOffsetsByPartitionFunction();
private final DelegateUpdateOffsetAction delegateUpdateOffsetAction = new DelegateUpdateOffsetAction();
private final NotifyObservableClosedAction notifyObservableClosed = new NotifyObservableClosedAction();
private final OffsetManager delegate;
private long timespan = 10 * 1000;
private int count;
private Subject<PartitionAndOffset, PartitionAndOffset> offsets;
private Subscription subscription;
private int shutdownTimeout = 2000;
private CountDownLatch shutdownLatch;
public WindowingOffsetManager(OffsetManager offsetManager) {
this.delegate = offsetManager;
}
/**
* The timespan for aggregating write operations before invoking the underlying {@link OffsetManager}.
*
* @param timespan duration in milliseconds
*/
public void setTimespan(long timespan) {
Assert.isTrue(timespan >= 0, "Timespan must not be negative");
this.timespan = timespan;
}
/**
* How many writes should be aggregated before invoking the underlying
* {@link OffsetManager}. Setting this value to 1 effectively disables windowing.
*
* @param count number of writes
*/
public void setCount(int count) {
Assert.isTrue(count >= 0, "Count must not be negative");
this.count = count;
}
/**
* The timeout that the {@link #close()} and {@link #destroy()} operations will wait for
* receiving confirmation that the underlying writes have been processed.
*
* @param shutdownTimeout duration in milliseconds
*/
public void setShutdownTimeout(int shutdownTimeout) {
this.shutdownTimeout = shutdownTimeout;
}
@Override
public void afterPropertiesSet() throws Exception {
Assert.isTrue(timespan > 0 ^ count > 0, "Exactly one of timespan or count must be set");
// create the windowing stream if a time window is set, or if the count window is
// larger than 1 (a count of 1 effectively disables windowing)
if (timespan > 0 || count > 1) {
offsets = new SerializedSubject<>(PublishSubject.<PartitionAndOffset>create());
// window by either count or time
Observable<Observable<PartitionAndOffset>> window =
timespan > 0 ? offsets.window(timespan, TimeUnit.MILLISECONDS) : offsets.window(count);
Observable<PartitionAndOffset> maximumOffsetsByWindow = window
.flatMap(findHighestOffsetsByPartition)
.doOnCompleted(notifyObservableClosed);
subscription = maximumOffsetsByWindow.subscribe(delegateUpdateOffsetAction);
}
else {
offsets = null;
}
}
@Override
public void destroy() throws Exception {
this.flush();
this.close();
if (delegate instanceof DisposableBean) {
((DisposableBean) delegate).destroy();
}
}
@Override
public void updateOffset(Partition partition, long offset) {
if (offsets != null) {
offsets.onNext(new PartitionAndOffset(partition, offset));
}
else {
delegate.updateOffset(partition, offset);
}
}
@Override
public long getOffset(Partition partition) {
return delegate.getOffset(partition);
}
@Override
public void deleteOffset(Partition partition) {
delegate.deleteOffset(partition);
}
@Override
public void resetOffsets(Collection<Partition> partition) {
delegate.resetOffsets(partition);
}
@Override
public void close() throws IOException {
if (offsets != null) {
shutdownLatch = new CountDownLatch(1);
offsets.onCompleted();
try {
shutdownLatch.await(shutdownTimeout, TimeUnit.MILLISECONDS);
}
catch (InterruptedException e) {
// ignore
}
subscription.unsubscribe();
}
delegate.close();
}
@Override
public void flush() throws IOException {
delegate.flush();
}
private final class PartitionAndOffset {
private final Partition partition;
private final Long offset;
private PartitionAndOffset(Partition partition, Long offset) {
this.partition = partition;
this.offset = offset;
}
public Partition getPartition() {
return partition;
}
public Long getOffset() {
return offset;
}
}
private class DelegateUpdateOffsetAction implements Action1<PartitionAndOffset> {
@Override
public void call(PartitionAndOffset partitionAndOffset) {
delegate.updateOffset(partitionAndOffset.getPartition(), partitionAndOffset.getOffset());
}
}
private class NotifyObservableClosedAction implements Action0 {
@Override
public void call() {
if (shutdownLatch != null) {
shutdownLatch.countDown();
}
}
}
private class CreatePartitionAndOffsetFunction implements Func2<Partition, Long, PartitionAndOffset> {
@Override
public PartitionAndOffset call(Partition partition, Long offset) {
return new PartitionAndOffset(partition, offset);
}
}
private class GetOffsetFunction implements Func1<PartitionAndOffset, Long> {
@Override
public Long call(PartitionAndOffset partitionAndOffset) {
return partitionAndOffset.getOffset();
}
}
private class ComputeMaximumOffsetByPartitionFunction implements Func1<GroupedObservable<Partition, PartitionAndOffset>, Observable<PartitionAndOffset>> {
@Override
public Observable<PartitionAndOffset> call(GroupedObservable<Partition, PartitionAndOffset> group) {
return Observable.zip(Observable.just(group.getKey()),
MathObservable.max(group.map(getOffsetFunction)),
createPartitionAndOffsetFunction);
}
}
private class GetPartitionFunction implements Func1<PartitionAndOffset, Partition> {
@Override
public Partition call(PartitionAndOffset partitionAndOffset) {
return partitionAndOffset.getPartition();
}
}
private class FindHighestOffsetsByPartitionFunction implements Func1<Observable<PartitionAndOffset>, Observable<PartitionAndOffset>> {
@Override
public Observable<PartitionAndOffset> call(Observable<PartitionAndOffset> windowBuffer) {
return windowBuffer.groupBy(getPartition).flatMap(findHighestOffsetInPartitionGroup);
}
}
}

View File

@@ -0,0 +1,79 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.kafka.support.LoggingProducerListener;
import org.springframework.integration.kafka.support.ProducerListener;
/**
* @author David Turanski
* @author Marius Bogoevici
* @author Soby Chacko
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class })
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {
@Autowired
private Codec codec;
@Autowired
private KafkaBinderConfigurationProperties configurationProperties;
@Autowired
private KafkaExtendedBindingProperties kafkaExtendedBindingProperties;
@Autowired
private ProducerListener producerListener;
@Bean
KafkaMessageChannelBinder kafkaMessageChannelBinder() {
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(configurationProperties);
kafkaMessageChannelBinder.setCodec(codec);
kafkaMessageChannelBinder.setProducerListener(producerListener);
kafkaMessageChannelBinder.setExtendedBindingProperties(kafkaExtendedBindingProperties);
return kafkaMessageChannelBinder;
}
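// registered only when the application context does not already define a
// ProducerListener bean; declaring one replaces this logging default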
@Bean
@ConditionalOnMissingBean(ProducerListener.class)
ProducerListener producerListener() {
return new LoggingProducerListener();
}
@Bean
KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, configurationProperties);
}
}

View File

@@ -0,0 +1,243 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.config;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.util.StringUtils;
/**
* @author David Turanski
* @author Ilayaperumal Gopinathan
* @author Marius Bogoevici
*/
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
public class KafkaBinderConfigurationProperties {
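// A configuration sketch (property names here are assumed from the fields below via
// Spring Boot's relaxed binding):
//   spring.cloud.stream.kafka.binder.brokers=kafka1,kafka2:9093
//   spring.cloud.stream.kafka.binder.zkNodes=zk1
//   spring.cloud.stream.kafka.binder.minPartitionCount=4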
private String[] zkNodes = new String[] { "localhost" };
private String defaultZkPort = "2181";
private String[] brokers = new String[] { "localhost" };
private String defaultBrokerPort = "9092";
private String[] headers = new String[] {};
private int offsetUpdateTimeWindow = 10000;
private int offsetUpdateCount;
private int offsetUpdateShutdownTimeout = 2000;
private int maxWait = 100;
private boolean autoCreateTopics = true;
private boolean autoAddPartitions;
private int socketBufferSize = 2097152;
/**
* ZK session timeout in milliseconds.
*/
private int zkSessionTimeout = 10000;
/**
* ZK Connection timeout in milliseconds.
*/
private int zkConnectionTimeout = 10000;
private int requiredAcks = 1;
private int replicationFactor = 1;
private int fetchSize = 1024 * 1024;
private int minPartitionCount = 1;
private int queueSize = 8192;
public String getZkConnectionString() {
return toConnectionString(this.zkNodes, this.defaultZkPort);
}
public String getKafkaConnectionString() {
return toConnectionString(this.brokers, this.defaultBrokerPort);
}
public String[] getHeaders() {
return headers;
}
public int getOffsetUpdateTimeWindow() {
return this.offsetUpdateTimeWindow;
}
public int getOffsetUpdateCount() {
return this.offsetUpdateCount;
}
public int getOffsetUpdateShutdownTimeout() {
return this.offsetUpdateShutdownTimeout;
}
public String[] getZkNodes() {
return zkNodes;
}
public void setZkNodes(String... zkNodes) {
this.zkNodes = zkNodes;
}
public void setDefaultZkPort(String defaultZkPort) {
this.defaultZkPort = defaultZkPort;
}
public String[] getBrokers() {
return brokers;
}
public void setBrokers(String... brokers) {
this.brokers = brokers;
}
public void setDefaultBrokerPort(String defaultBrokerPort) {
this.defaultBrokerPort = defaultBrokerPort;
}
public void setHeaders(String... headers) {
this.headers = headers;
}
public void setOffsetUpdateTimeWindow(int offsetUpdateTimeWindow) {
this.offsetUpdateTimeWindow = offsetUpdateTimeWindow;
}
public void setOffsetUpdateCount(int offsetUpdateCount) {
this.offsetUpdateCount = offsetUpdateCount;
}
public void setOffsetUpdateShutdownTimeout(int offsetUpdateShutdownTimeout) {
this.offsetUpdateShutdownTimeout = offsetUpdateShutdownTimeout;
}
public int getZkSessionTimeout() {
return this.zkSessionTimeout;
}
public void setZkSessionTimeout(int zkSessionTimeout) {
this.zkSessionTimeout = zkSessionTimeout;
}
public int getZkConnectionTimeout() {
return this.zkConnectionTimeout;
}
public void setZkConnectionTimeout(int zkConnectionTimeout) {
this.zkConnectionTimeout = zkConnectionTimeout;
}
/**
* Converts an array of host values to a comma-separated String.
*
* It will append the default port value, if not already specified.
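*
* For example, hosts {@code ["host1", "host2:9093"]} with default port {@code 9092}
* yield {@code "host1:9092,host2:9093"}.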
*/
private String toConnectionString(String[] hosts, String defaultPort) {
String[] fullyFormattedHosts = new String[hosts.length];
for (int i = 0; i < hosts.length; i++) {
if (hosts[i].contains(":") || StringUtils.isEmpty(defaultPort)) {
fullyFormattedHosts[i] = hosts[i];
}
else {
fullyFormattedHosts[i] = hosts[i] + ":" + defaultPort;
}
}
return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
}
public int getMaxWait() {
return maxWait;
}
public void setMaxWait(int maxWait) {
this.maxWait = maxWait;
}
public int getRequiredAcks() {
return requiredAcks;
}
public void setRequiredAcks(int requiredAcks) {
this.requiredAcks = requiredAcks;
}
public int getReplicationFactor() {
return replicationFactor;
}
public void setReplicationFactor(int replicationFactor) {
this.replicationFactor = replicationFactor;
}
public int getFetchSize() {
return fetchSize;
}
public void setFetchSize(int fetchSize) {
this.fetchSize = fetchSize;
}
public int getMinPartitionCount() {
return minPartitionCount;
}
public void setMinPartitionCount(int minPartitionCount) {
this.minPartitionCount = minPartitionCount;
}
public int getQueueSize() {
return queueSize;
}
public void setQueueSize(int queueSize) {
this.queueSize = queueSize;
}
public boolean isAutoCreateTopics() {
return autoCreateTopics;
}
public void setAutoCreateTopics(boolean autoCreateTopics) {
this.autoCreateTopics = autoCreateTopics;
}
public boolean isAutoAddPartitions() {
return autoAddPartitions;
}
public void setAutoAddPartitions(boolean autoAddPartitions) {
this.autoAddPartitions = autoAddPartitions;
}
public int getSocketBufferSize() {
return socketBufferSize;
}
public void setSocketBufferSize(int socketBufferSize) {
this.socketBufferSize = socketBufferSize;
}
}

View File

@@ -0,0 +1,20 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
*/
package org.springframework.cloud.stream.binder.kafka;

View File

@@ -0,0 +1,2 @@
kafka:\
org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration

View File

@@ -0,0 +1,808 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.springframework.beans.DirectFieldAccessor;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.PartitionCapableBinderTests;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.TestUtils;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.test.junit.kafka.KafkaTestSupport;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.core.TopicNotFoundException;
import org.springframework.integration.kafka.support.KafkaHeaders;
import org.springframework.integration.kafka.support.ProducerConfiguration;
import org.springframework.integration.kafka.support.ProducerMetadata;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessagingException;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
/**
* Integration tests for the {@link KafkaMessageChannelBinder}.
* @author Eric Bottard
* @author Marius Bogoevici
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
public class KafkaBinderTests extends
PartitionCapableBinderTests<KafkaTestBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
@ClassRule
public static KafkaTestSupport kafkaTestSupport = new KafkaTestSupport();
private KafkaTestBinder binder;
@Override
protected void binderBindUnbindLatency() throws InterruptedException {
Thread.sleep(500);
}
@Override
protected KafkaTestBinder getBinder() {
if (binder == null) {
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binder = new KafkaTestBinder(binderConfiguration);
}
return binder;
}
private KafkaBinderConfigurationProperties createConfigurationProperties() {
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
binderConfiguration.setBrokers(kafkaTestSupport.getBrokerAddress());
binderConfiguration.setZkNodes(kafkaTestSupport.getZkConnectString());
return binderConfiguration;
}
@Override
protected ExtendedConsumerProperties<KafkaConsumerProperties> createConsumerProperties() {
return new ExtendedConsumerProperties<>(new KafkaConsumerProperties());
}
@Override
protected ExtendedProducerProperties<KafkaProducerProperties> createProducerProperties() {
return new ExtendedProducerProperties<>(new KafkaProducerProperties());
}
@Before
public void init() {
String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
if (multiplier != null) {
timeoutMultiplier = Double.parseDouble(multiplier);
}
}
@Override
protected boolean usesExplicitRouting() {
return false;
}
@Override
protected String getClassUnderTestName() {
return CLASS_UNDER_TEST_NAME;
}
@Override
public Spy spyOn(final String name) {
throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
}
@Test
public void testDlqAndRetry() {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
QueueChannel dlqChannel = new QueueChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(3);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
dlqConsumerProperties.setMaxAttempts(1);
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
Message<?> receivedMessage = receive(dlqChannel, 3);
assertThat(receivedMessage).isNotNull();
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
dlqConsumerBinding.unbind();
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testDefaultAutoCommitOnErrorWithoutDlq() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(1);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
assertThat(handler.getLatch().await((int) (timeoutMultiplier * 1000), TimeUnit.MILLISECONDS)).isTrue();
// first attempt fails
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
Message<?> receivedMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
assertThat(receivedMessage).isNotNull();
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
consumerBinding.unbind();
// on the second attempt the message is redelivered
QueueChannel successfulInputChannel = new QueueChannel();
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
successfulInputChannel, consumerProperties);
String testMessage2Payload = "test." + UUID.randomUUID().toString();
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
moduleOutputChannel.send(testMessage2);
Message<?> firstReceived = receive(successfulInputChannel);
assertThat(firstReceived.getPayload()).isEqualTo(testMessagePayload);
Message<?> secondReceived = receive(successfulInputChannel);
assertThat(secondReceived.getPayload()).isEqualTo(testMessage2Payload);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testDefaultAutoCommitOnErrorWithDlq() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(3);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
dlqConsumerProperties.setMaxAttempts(1);
QueueChannel dlqChannel = new QueueChannel();
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
Message<?> dlqMessage = receive(dlqChannel, 3);
assertThat(dlqMessage).isNotNull();
assertThat(dlqMessage.getPayload()).isEqualTo(testMessagePayload);
// first attempt fails
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
Message<?> handledMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
assertThat(handledMessage).isNotNull();
assertThat(handledMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
dlqConsumerBinding.unbind();
consumerBinding.unbind();
// on the second attempt the message is not redelivered because the DLQ is set
QueueChannel successfulInputChannel = new QueueChannel();
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
successfulInputChannel, consumerProperties);
String testMessage2Payload = "test." + UUID.randomUUID().toString();
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
moduleOutputChannel.send(testMessage2);
Message<?> receivedMessage = receive(successfulInputChannel);
assertThat(receivedMessage.getPayload()).isEqualTo(testMessage2Payload);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test(expected = IllegalArgumentException.class)
public void testValidateKafkaTopicName() {
KafkaMessageChannelBinder.validateTopicName("foo:bar");
}
@Test
public void testCompression() throws Exception {
final ProducerMetadata.CompressionType[] codecs = new ProducerMetadata.CompressionType[] {
ProducerMetadata.CompressionType.none, ProducerMetadata.CompressionType.gzip,
ProducerMetadata.CompressionType.snappy };
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaTestBinder binder = getBinder();
for (ProducerMetadata.CompressionType codec : codecs) {
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.getExtension().setCompressionType(codec);
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
createConsumerProperties());
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
producerBinding.unbind();
consumerBinding.unbind();
}
}
@Test
public void testCustomPartitionCountOverridesDefaultIfLarger() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(10);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(10);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
public void testCustomPartitionCountDoesNotOverridePartitioningIfSmaller() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(6);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(6);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
public void testCustomPartitionCountOverridesPartitioningIfLarger() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(4);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(5);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
@SuppressWarnings("unchecked")
public void testDefaultConsumerStartsAtEarliest() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload1);
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
}
@Test
@SuppressWarnings("unchecked")
public void testEarliest() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setStartOffset(KafkaMessageChannelBinder.StartOffset.earliest);
binder.bindConsumer(testTopicName, "startOffsets", input1, properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
}
@Test
@SuppressWarnings("unchecked")
public void testReset() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setResetOffsets(true);
properties.getExtension().setStartOffset(KafkaMessageChannelBinder.StartOffset.earliest);
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
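// Rebinding with resetOffsets=true and earliest should replay the topic from
// the beginning, so all three payloads are received again in order.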
ExtendedConsumerProperties<KafkaConsumerProperties> properties2 = createConsumerProperties();
properties2.getExtension().setResetOffsets(true);
properties2.getExtension().setStartOffset(KafkaMessageChannelBinder.StartOffset.earliest);
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, properties2);
Message<byte[]> receivedMessage4 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage4).isNotNull();
assertThat(new String(receivedMessage4.getPayload())).isEqualTo(testPayload1);
Message<byte[]> receivedMessage5 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage5).isNotNull();
assertThat(new String(receivedMessage5.getPayload())).isEqualTo(testPayload2);
Message<byte[]> receivedMessage6 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage6).isNotNull();
assertThat(new String(receivedMessage6.getPayload())).isEqualTo(testPayload3);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
@SuppressWarnings("unchecked")
public void testResume() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> firstConsumerProperties = createConsumerProperties();
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
firstConsumerProperties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
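// Rebinding with default properties resumes from the committed offset, so only
// the third payload should arrive.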
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
Message<byte[]> receivedMessage3 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage3).isNotNull();
assertThat(new String(receivedMessage3.getPayload())).isEqualTo(testPayload3);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testSyncProducerMetadata() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
String testTopicName = UUID.randomUUID().toString();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.getExtension().setSync(true);
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output, properties);
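// Reach into the bound endpoint via reflection; the field names below match the
// internals of the spring-integration-kafka handler used by this binder.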
DirectFieldAccessor accessor = new DirectFieldAccessor(extractEndpoint(producerBinding));
MessageHandler handler = (MessageHandler) accessor.getPropertyValue("handler");
DirectFieldAccessor accessor1 = new DirectFieldAccessor(handler);
ProducerConfiguration producerConfiguration = (ProducerConfiguration) accessor1
.getPropertyValue("producerConfiguration");
assertThat(producerConfiguration.getProducerMetadata().isSync())
.withFailMessage("Kafka Sync Producer should have been enabled.")
.isTrue();
producerBinding.unbind();
}
@Test
public void testAutoCreateTopicsDisabledFailsIfTopicMissing() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
String testTopicName = "nonexisting" + System.currentTimeMillis();
try {
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(BinderException.class);
assertThat(e).hasMessageContaining("Topic " + testTopicName + " does not exist");
}
try {
binder.getConnectionFactory().getPartitions(testTopicName);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(TopicNotFoundException.class);
}
}
@Test
public void testAutoConfigureTopicsDisabledSucceedsIfTopicExisting() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 5, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<MessageChannel> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
}
@Test
public void testAutoAddPartitionsDisabledFailsIfTopicUnderpartitioned() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
try {
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(BinderException.class);
assertThat(e)
.hasMessageContaining("The number of expected partitions was: 3, but 1 has been found instead");
}
}
@Test
public void testAutoAddPartitionsDisabledSucceedsIfTopicPartitionedCorrectly() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
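// With 6 partitions, instance 2 of 3 listens to instanceIndex + k * instanceCount,
// i.e. partitions 2 and 5.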
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
Partition[] listenedPartitions = TestUtils.getPropertyValue(binding,
"endpoint.val$messageListenerContainer.partitions", Partition[].class);
assertThat(listenedPartitions).hasSize(2);
assertThat(listenedPartitions).contains(new Partition(testTopicName, 2), new Partition(testTopicName, 5));
Collection<Partition> partitions = binder.getConnectionFactory().getPartitions(testTopicName);
assertThat(partitions).hasSize(6);
binding.unbind();
}
@Test
public void testAutoCreateTopicsEnabledSucceeds() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
String testTopicName = "nonexisting" + System.currentTimeMillis();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
}
@Test
public void testPartitionCountNotReduced() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
kafkaTestSupport.getZkClient());
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
}
@Test
public void testPartitionCountIncreasedIfAutoAddPartitionsSet() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setMinPartitionCount(6);
configurationProperties.setAutoAddPartitions(true);
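// The topic was created with a single partition; minPartitionCount=6 together
// with autoAddPartitions should grow it to 6 partitions on bind.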
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
kafkaTestSupport.getZkClient());
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
}
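/**
 * Handler that records each message and then throws, so the binder's retry
 * machinery keeps redelivering; keying the map by offset ensures a distinct
 * message is counted only once across redeliveries.
 */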
private static final class FailingInvocationCountingMessageHandler implements MessageHandler {
private int invocationCount;
private final LinkedHashMap<Long, Message<?>> receivedMessages = new LinkedHashMap<>();
private final CountDownLatch latch;
private FailingInvocationCountingMessageHandler(int latchSize) {
latch = new CountDownLatch(latchSize);
}
private FailingInvocationCountingMessageHandler() {
this(1);
}
@Override
public void handleMessage(Message<?> message) throws MessagingException {
invocationCount++;
Long offset = message.getHeaders().get(KafkaHeaders.OFFSET, Long.class);
// using the offset as key allows to ensure that we don't store duplicate
// messages on retry
if (!receivedMessages.containsKey(offset)) {
receivedMessages.put(offset, message);
latch.countDown();
}
throw new RuntimeException();
}
public LinkedHashMap<Long, Message<?>> getReceivedMessages() {
return receivedMessages;
}
public int getInvocationCount() {
return invocationCount;
}
public CountDownLatch getLatch() {
return latch;
}
}
}

View File

@@ -0,0 +1,89 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.List;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;
import org.springframework.cloud.stream.binder.AbstractTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.test.junit.kafka.TestKafkaCluster;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.codec.kryo.KryoRegistrar;
import org.springframework.integration.codec.kryo.PojoCodec;
import org.springframework.integration.kafka.support.LoggingProducerListener;
import org.springframework.integration.kafka.support.ProducerListener;
import org.springframework.integration.tuple.TupleKryoRegistrar;
/**
* Test support class for {@link KafkaMessageChannelBinder}. Creates a binder that uses a
* test {@link TestKafkaCluster kafka cluster}.
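 * <p>Typical usage in these tests (a minimal sketch; the configuration and
 * channel objects are assumed to come from the surrounding test class):
 * <pre>
 * KafkaTestBinder binder = new KafkaTestBinder(configurationProperties);
 * Binding&lt;MessageChannel&gt; binding = binder.bindProducer("topic", outputChannel, producerProperties);
 * binding.unbind();
 * </pre>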
* @author Eric Bottard
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Soby Chacko
*/
public class KafkaTestBinder extends
AbstractTestBinder<KafkaMessageChannelBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
public KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
try {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
binder.setCodec(getCodec());
ProducerListener producerListener = new LoggingProducerListener();
binder.setProducerListener(producerListener);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
this.setBinder(binder);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void cleanup() {
// do nothing - the rule will take care of that
}
private static Codec getCodec() {
return new PojoCodec(new TupleRegistrar());
}
private static class TupleRegistrar implements KryoRegistrar {
private final TupleKryoRegistrar delegate = new TupleKryoRegistrar();
@Override
public void registerTypes(Kryo kryo) {
this.delegate.registerTypes(kryo);
}
@Override
public List<Registration> getRegistrations() {
return this.delegate.getRegistrations();
}
}
}

View File

@@ -0,0 +1,38 @@
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy;
import org.springframework.cloud.stream.binder.PartitionSelectorStrategy;
import org.springframework.messaging.Message;
/**
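 * Partition support for raw-mode tests: the raw payload itself is the partition
 * key, and the target partition is the key's first byte modulo the partition
 * count (e.g. payload {@code [2]} with 6 partitions maps to partition 2).
 *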
* @author Marius Bogoevici
*/
public class RawKafkaPartitionTestSupport implements PartitionKeyExtractorStrategy, PartitionSelectorStrategy {
@Override
public int selectPartition(Object key, int divisor) {
return ((byte[]) key)[0] % divisor;
}
@Override
public Object extractKey(Message<?> message) {
return message.getPayload();
}
}

View File

@@ -0,0 +1,262 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.Arrays;
import org.junit.Ignore;
import org.junit.Test;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.support.GenericMessage;
import static org.assertj.core.api.Assertions.assertThat;
/**
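 * Tests for the binder operating in {@link HeaderMode#raw}: payloads travel as
 * plain byte arrays with no embedded headers, so partitioning and taps are
 * exercised on raw bytes.
 *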
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Mark Fisher
*/
public class RawModeKafkaBinderTests extends KafkaBinderTests {
@Test
@Override
public void testPartitionedModuleJava() throws Exception {
KafkaTestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setHeaderMode(HeaderMode.raw);
properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionCount(6);
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, properties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(0);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0J");
Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1J");
Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2J");
Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);
output.send(new GenericMessage<>(new byte[] { (byte) 0 }));
output.send(new GenericMessage<>(new byte[] { (byte) 1 }));
output.send(new GenericMessage<>(new byte[] { (byte) 2 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
@Test
@Override
public void testPartitionedModuleSpEL() throws Exception {
KafkaTestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
properties.setPartitionCount(6);
properties.setHeaderMode(HeaderMode.raw);
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, properties);
try {
AbstractEndpoint endpoint = extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint)).contains("part.0-' + headers['partition']");
}
catch (UnsupportedOperationException ignored) {
}
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceIndex(0);
consumerProperties.setInstanceCount(3);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0S");
Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1S");
Binding<MessageChannel> input1Binding = binder.bindConsumer("part.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2S");
Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);
Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] { 2 })
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
output.send(message2);
output.send(new GenericMessage<>(new byte[] { 1 }));
output.send(new GenericMessage<>(new byte[] { 0 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
@Test
@Override
public void testSendAndReceive() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
consumerProperties);
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
producerBinding.unbind();
consumerBinding.unbind();
}
// Ignored, since raw mode does not support headers
@Test
@Override
@Ignore
public void testSendAndReceiveNoOriginalContentType() throws Exception {
}
@Test
public void testSendAndReceiveWithExplicitConsumerGroup() {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
// Test pub/sub by emulating how StreamPlugin handles taps
QueueChannel module1InputChannel = new QueueChannel();
QueueChannel module2InputChannel = new QueueChannel();
QueueChannel module3InputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.0", "test", module1InputChannel,
consumerProperties);
// A new module is using the tap as an input channel
String fooTapName = "baz.0";
Binding<MessageChannel> input2Binding = binder.bindConsumer(fooTapName, "tap1", module2InputChannel,
consumerProperties);
// Another new module is using tap as an input channel
String barTapName = "baz.0";
Binding<MessageChannel> input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel,
consumerProperties);
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
boolean success = false;
boolean retried = false;
while (!success) {
moduleOutputChannel.send(message);
Message<?> inbound = receive(module1InputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
Message<?> tapped1 = receive(module2InputChannel);
Message<?> tapped2 = receive(module3InputChannel);
if (tapped1 == null || tapped2 == null) {
// listener may not have started
assertThat(retried).withFailMessage("Failed to receive tap after retry").isFalse();
retried = true;
continue;
}
success = true;
assertThat(new String((byte[]) tapped1.getPayload())).isEqualTo("foo");
assertThat(new String((byte[]) tapped2.getPayload())).isEqualTo("foo");
}
// unbind one of the taps
input3Binding.unbind();
Message<?> message2 = MessageBuilder.withPayload("bar".getBytes()).build();
moduleOutputChannel.send(message2);
// other tap still receives messages
Message<?> tapped = receive(module2InputChannel);
assertThat(tapped).isNotNull();
// removed tap does not
assertThat(receive(module3InputChannel)).isNull();
// re-subscribed tap does receive the message
input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel, createConsumerProperties());
assertThat(receive(module3InputChannel)).isNotNull();
// clean up
input1Binding.unbind();
input2Binding.unbind();
input3Binding.unbind();
producerBinding.unbind();
assertThat(extractEndpoint(input1Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input2Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input3Binding).isRunning()).isFalse();
assertThat(extractEndpoint(producerBinding).isRunning()).isFalse();
}
}