Compare commits
180 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 13cc4e0155 |  |
|  | 5e0688df72 |  |
|  | abd43cfffa |  |
|  | 8057b1f764 |  |
|  | 78ae3d1867 |  |
|  | a7378c9132 |  |
|  | 9500ccfe46 |  |
|  | 3a5aed61c9 |  |
|  | 86146bfe81 |  |
|  | f2abb59b19 |  |
|  | 36c39974ad |  |
|  | 16a195a887 |  |
|  | bc562e3a77 |  |
|  | 39dd68a8ad |  |
|  | 8daaed43a9 |  |
|  | d13d92131c |  |
|  | 0b4ccdefce |  |
|  | ae269e729b |  |
|  | 9182adcf56 |  |
|  | 0b9e211e27 |  |
|  | a7fe39fef5 |  |
|  | 50b8955dfc |  |
|  | 9a02fa69ac |  |
|  | 77cbfe2858 |  |
|  | 5db3ede9c4 |  |
|  | e59cb569a6 |  |
|  | b0c4f0cfcd |  |
|  | 2f6d5df7bc |  |
|  | 66f194dd93 |  |
|  | 4cb49f9ee4 |  |
|  | 035cc1a005 |  |
|  | b3f42fe67d |  |
|  | a9f40ac084 |  |
|  | db02abe531 |  |
|  | f1dc14b5c3 |  |
|  | 50ce8ca2ba |  |
|  | 6a312592a4 |  |
|  | 43d786f701 |  |
|  | 68811cad28 |  |
|  | 4382dab8f8 |  |
|  | 20a8158a56 |  |
|  | 3ad0d7c465 |  |
|  | 5b3974c932 |  |
|  | 3c7615f7a3 |  |
|  | 8ae0157135 |  |
|  | 08658ffa6c |  |
|  | 8d797deaf9 |  |
|  | 561b4b7e73 |  |
|  | 93fdd2ef0f |  |
|  | fd48a1d0eb |  |
|  | a07a0017bb |  |
|  | 62b40b852f |  |
|  | c396c5c756 |  |
|  | b20f4a0e08 |  |
|  | 77f4bc3fb8 |  |
|  | 2aa8e9eefa |  |
|  | e3460d6fce |  |
|  | 29bb8513c0 |  |
|  | 69227166c7 |  |
|  | 4ff4507741 |  |
|  | f2e1b63460 |  |
|  | 73f1ed9523 |  |
|  | dc7662e17d |  |
|  | b76fff31b8 |  |
|  | 1f4f0c3858 |  |
|  | 1aecd02404 |  |
|  | 6485bd2abd |  |
|  | 02913cd177 |  |
|  | 0865602141 |  |
|  | 790b141799 |  |
|  | 60e620e36e |  |
|  | 09d35cd742 |  |
|  | 91aec59342 |  |
|  | 7884d59bdc |  |
|  | 9134e101df |  |
|  | 512fd9830e |  |
|  | 94fcead9d5 |  |
|  | 756fde75ae |  |
|  | 6cc0b08dcd |  |
|  | 5dccbb9690 |  |
|  | f473b3b42f |  |
|  | a146255433 |  |
|  | 07e15c141e |  |
|  | 001cc5901e |  |
|  | b7b5961f7d |  |
|  | d19c1b7611 |  |
|  | c627cefee0 |  |
|  | 3081a3aa44 |  |
|  | 143b96f79d |  |
|  | a0f386f06f |  |
|  | 9ad04882c8 |  |
|  | 705213efe8 |  |
|  | 7355ada461 |  |
|  | d4aaf78089 |  |
|  | 53e38902c9 |  |
|  | a7a0a132ea |  |
|  | ee4f3935ec |  |
|  | 4e0107b4d2 |  |
|  | 8f8a1e8709 |  |
|  | c68ea8c570 |  |
|  | 61e7936978 |  |
|  | 5c70e2df43 |  |
|  | f280edc9ce |  |
|  | 2c9afde8c6 |  |
|  | 6a8c0cd0c6 |  |
|  | ec73f2785d |  |
|  | 8982f896fd |  |
|  | 005ec51d8b |  |
|  | 47aaf29e3e |  |
|  | 88d4b8eef5 |  |
|  | 66eb15a8e2 |  |
|  | 466400cdb7 |  |
|  | bff0a072dc |  |
|  | 7fad2951f7 |  |
|  | e7c5f750da |  |
|  | 1f28adaf4c |  |
|  | 91bdee65ec |  |
|  | 6a31e9c94f |  |
|  | b541fca68f |  |
|  | 9d23e6a8fe |  |
|  | 89249b233f |  |
|  | 1998d5e7e8 |  |
|  | e1906711a8 |  |
|  | 78213c98e8 |  |
|  | c10206d41b |  |
|  | 73eda0ddd0 |  |
|  | 5b51d7cce3 |  |
|  | 2530229fb5 |  |
|  | 70cba0ae03 |  |
|  | 3eb9493cba |  |
|  | 89b75b4734 |  |
|  | dc0bc18f37 |  |
|  | 8fafc2605e |  |
|  | 83eca9b734 |  |
|  | b2d579b5b6 |  |
|  | c389fa3fa4 |  |
|  | 50e0a8b30e |  |
|  | 7daa0f1b72 |  |
|  | f3e38961c5 |  |
|  | 7b06ff9b17 |  |
|  | ddb520ac02 |  |
|  | cb01dda042 |  |
|  | 525aea8a98 |  |
|  | 01a715a9a9 |  |
|  | 21ab8090b8 |  |
|  | e0bba57a51 |  |
|  | 1668f0c694 |  |
|  | 36c90aa8c0 |  |
|  | 943a7ae148 |  |
|  | a1633ab241 |  |
|  | 68fd3fb6d7 |  |
|  | df6d472e99 |  |
|  | 23ae2b3088 |  |
|  | 2e595f34f2 |  |
|  | acb1c60216 |  |
|  | 275f5cbb8e |  |
|  | 79643586dd |  |
|  | b292f81d46 |  |
|  | 8ad98c09b9 |  |
|  | 624b039da5 |  |
|  | b08888862a |  |
|  | e13e1b1b1b |  |
|  | 513c27beb8 |  |
|  | 5f1e6caeab |  |
|  | ed4416e7c7 |  |
|  | 546fe69346 |  |
|  | 30c868e51b |  |
|  | 6506018a2c |  |
|  | 09ec23b4bc |  |
|  | 8f4fbd0da6 |  |
|  | 323206130a |  |
|  | b944b594b5 |  |
|  | 4fa567fec7 |  |
|  | 16d6584774 |  |
|  | ea514bd72d |  |
|  | 79feac15d0 |  |
|  | 3a27a5ec75 |  |
|  | 7308bd4991 |  |
|  | 866eaf4a25 |  |
|  | b37e7b37d2 |  |
**.gitignore** (vendored, 1 change)

```diff
@@ -18,6 +18,7 @@ _site/
 *.ipr
 *.iws
 .idea/*
 */.idea
 .factorypath
+dump.rdb
 .apt_generated
```
**README.md**

```diff
@@ -1,3 +1 @@
-# spring-cloud-stream-binder-kafka
-
-Spring Cloud Stream Binder implementation for Kafka
+Spring Cloud Stream Binder for Apache Kafka
```
**mvnw** (vendored, 25 changes)

```diff
@@ -57,27 +57,27 @@ case "`uname`" in
     #
     # Look for the Apple JDKs first to preserve the existing behaviour, and then look
     # for the new JDKs provided by Oracle.
     #
     if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
       #
       # Apple JDKs
       #
       export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
     fi

     if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
       #
       # Apple JDKs
       #
       export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
     fi

     if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
       #
       # Oracle JDKs
       #
       export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
     fi

     if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
       #
@@ -219,16 +219,27 @@ concat_lines() {
 export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
 MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"

 # Provide a "standardized" way to retrieve the CLI args that will
 # work with both Windows and non-Windows executions.
 MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
 export MAVEN_CMD_LINE_ARGS

 WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain

+echo "Running version check"
+VERSION=$( sed '\!<parent!,\!</parent!d' `dirname $0`/pom.xml | grep '<version' | head -1 | sed -e 's/.*<version>//' -e 's!</version>.*$!!' )
+echo "The found version is [${VERSION}]"
+
+if echo $VERSION | egrep -q 'M|RC'; then
+  echo Activating \"milestone\" profile for version=\"$VERSION\"
+  echo $MAVEN_ARGS | grep -q milestone || MAVEN_ARGS="$MAVEN_ARGS -Pmilestone"
+else
+  echo Deactivating \"milestone\" profile for version=\"$VERSION\"
+  echo $MAVEN_ARGS | grep -q milestone && MAVEN_ARGS=$(echo $MAVEN_ARGS | sed -e 's/-Pmilestone//')
+fi
+
 exec "$JAVACMD" \
   $MAVEN_OPTS \
   -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
   "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
-  ${WRAPPER_LAUNCHER} "$@"
+  ${WRAPPER_LAUNCHER} ${MAVEN_ARGS} "$@"
```
**mvnw.cmd** (vendored, 321 changes; the POSIX shell script that previously occupied this file is replaced by the proper Windows batch script)

```diff
@@ -1,234 +1,145 @@
-#!/bin/sh
-# ----------------------------------------------------------------------------
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# ----------------------------------------------------------------------------
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements.  See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership.  The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License.  You may obtain a copy of the License at
+@REM
+@REM    http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied.  See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------

-# ----------------------------------------------------------------------------
-# Maven2 Start Up Batch script
-#
-# Required ENV vars:
-# ------------------
-#   JAVA_HOME - location of a JDK home dir
-#
-# Optional ENV vars
-# -----------------
-#   M2_HOME - location of maven2's installed home dir
-#   MAVEN_OPTS - parameters passed to the Java VM when running Maven
-#     e.g. to debug Maven itself, use
-#       set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
-#   MAVEN_SKIP_RC - flag to disable loading of mavenrc files
-# ----------------------------------------------------------------------------
+@REM ----------------------------------------------------------------------------
+@REM Maven2 Start Up Batch script
+@REM
+@REM Required ENV vars:
+@REM JAVA_HOME - location of a JDK home dir
+@REM
+@REM Optional ENV vars
+@REM M2_HOME - location of maven2's installed home dir
+@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
+@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
+@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
+@REM     e.g. to debug Maven itself, use
+@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
+@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
+@REM ----------------------------------------------------------------------------

-if [ -z "$MAVEN_SKIP_RC" ] ; then
+@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
+@echo off
+@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on'
+@if "%MAVEN_BATCH_ECHO%" == "on"  echo %MAVEN_BATCH_ECHO%

-  if [ -f /etc/mavenrc ] ; then
-    . /etc/mavenrc
-  fi
+@REM set %HOME% to equivalent of $HOME
+if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")

-  if [ -f "$HOME/.mavenrc" ] ; then
-    . "$HOME/.mavenrc"
-  fi
+@REM Execute a user defined script before this one
+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
+@REM check for pre script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
+if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
+:skipRcPre

-fi
+@setlocal

-# OS specific support.  $var _must_ be set to either true or false.
-cygwin=false;
-darwin=false;
-mingw=false
-case "`uname`" in
-  CYGWIN*) cygwin=true ;;
-  MINGW*) mingw=true;;
-  Darwin*) darwin=true
-    #
-    # Look for the Apple JDKs first to preserve the existing behaviour, and then look
-    # for the new JDKs provided by Oracle.
-    #
-    if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
-      #
-      # Apple JDKs
-      #
-      export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
-    fi
-
-    if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
-      #
-      # Apple JDKs
-      #
-      export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
-    fi
-
-    if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
-      #
-      # Oracle JDKs
-      #
-      export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
-    fi
+set ERROR_CODE=0

-    if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
-      #
-      # Apple JDKs
-      #
-      export JAVA_HOME=`/usr/libexec/java_home`
-    fi
-    ;;
-esac
+@REM To isolate internal variables from possible post scripts, we use another setlocal
+@setlocal

-if [ -z "$JAVA_HOME" ] ; then
-  if [ -r /etc/gentoo-release ] ; then
-    JAVA_HOME=`java-config --jre-home`
-  fi
-fi
+@REM ==== START VALIDATION ====
+if not "%JAVA_HOME%" == "" goto OkJHome

-if [ -z "$M2_HOME" ] ; then
-  ## resolve links - $0 may be a link to maven's home
-  PRG="$0"
+echo.
+echo Error: JAVA_HOME not found in your environment. >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error

-  # need this for relative symlinks
-  while [ -h "$PRG" ] ; do
-    ls=`ls -ld "$PRG"`
-    link=`expr "$ls" : '.*-> \(.*\)$'`
-    if expr "$link" : '/.*' > /dev/null; then
-      PRG="$link"
-    else
-      PRG="`dirname "$PRG"`/$link"
-    fi
-  done
+:OkJHome
+if exist "%JAVA_HOME%\bin\java.exe" goto init

-  saveddir=`pwd`
+echo.
+echo Error: JAVA_HOME is set to an invalid directory. >&2
+echo JAVA_HOME = "%JAVA_HOME%" >&2
+echo Please set the JAVA_HOME variable in your environment to match the >&2
+echo location of your Java installation. >&2
+echo.
+goto error

-  M2_HOME=`dirname "$PRG"`/..
+@REM ==== END VALIDATION ====

-  # make it fully qualified
-  M2_HOME=`cd "$M2_HOME" && pwd`
+:init

-  cd "$saveddir"
-  # echo Using m2 at $M2_HOME
-fi
+set MAVEN_CMD_LINE_ARGS=%*

-# For Cygwin, ensure paths are in UNIX format before anything is touched
-if $cygwin ; then
-  [ -n "$M2_HOME" ] &&
-    M2_HOME=`cygpath --unix "$M2_HOME"`
-  [ -n "$JAVA_HOME" ] &&
-    JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
-  [ -n "$CLASSPATH" ] &&
-    CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
-fi
+@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
+@REM Fallback to current working directory if not found.

-# For Migwn, ensure paths are in UNIX format before anything is touched
-if $mingw ; then
-  [ -n "$M2_HOME" ] &&
-    M2_HOME="`(cd "$M2_HOME"; pwd)`"
-  [ -n "$JAVA_HOME" ] &&
-    JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
-  # TODO classpath?
-fi
+set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
+IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir

-if [ -z "$JAVA_HOME" ]; then
-  javaExecutable="`which javac`"
-  if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
-    # readlink(1) is not available as standard on Solaris 10.
-    readLink=`which readlink`
-    if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
-      if $darwin ; then
-        javaHome="`dirname \"$javaExecutable\"`"
-        javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
-      else
-        javaExecutable="`readlink -f \"$javaExecutable\"`"
-      fi
-      javaHome="`dirname \"$javaExecutable\"`"
-      javaHome=`expr "$javaHome" : '\(.*\)/bin'`
-      JAVA_HOME="$javaHome"
-      export JAVA_HOME
-    fi
-  fi
-fi
+set EXEC_DIR=%CD%
+set WDIR=%EXEC_DIR%
+:findBaseDir
+IF EXIST "%WDIR%"\.mvn goto baseDirFound
+cd ..
+IF "%WDIR%"=="%CD%" goto baseDirNotFound
+set WDIR=%CD%
+goto findBaseDir

-if [ -z "$JAVACMD" ] ; then
-  if [ -n "$JAVA_HOME" ] ; then
-    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
-      # IBM's JDK on AIX uses strange locations for the executables
-      JAVACMD="$JAVA_HOME/jre/sh/java"
-    else
-      JAVACMD="$JAVA_HOME/bin/java"
-    fi
-  else
-    JAVACMD="`which java`"
-  fi
-fi
+:baseDirFound
+set MAVEN_PROJECTBASEDIR=%WDIR%
+cd "%EXEC_DIR%"
+goto endDetectBaseDir

-if [ ! -x "$JAVACMD" ] ; then
-  echo "Error: JAVA_HOME is not defined correctly." >&2
-  echo "  We cannot execute $JAVACMD" >&2
-  exit 1
-fi
+:baseDirNotFound
+set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
+cd "%EXEC_DIR%"

-if [ -z "$JAVA_HOME" ] ; then
-  echo "Warning: JAVA_HOME environment variable is not set."
-fi
+:endDetectBaseDir

-CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
+IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig

-# For Cygwin, switch paths to Windows format before running java
-if $cygwin; then
-  [ -n "$M2_HOME" ] &&
-    M2_HOME=`cygpath --path --windows "$M2_HOME"`
-  [ -n "$JAVA_HOME" ] &&
-    JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
-  [ -n "$CLASSPATH" ] &&
-    CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
-fi
+@setlocal EnableExtensions EnableDelayedExpansion
+for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
+@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%

-# traverses directory structure from process work directory to filesystem root
-# first directory with .mvn subdirectory is considered project base directory
-find_maven_basedir() {
-  local basedir=$(pwd)
-  local wdir=$(pwd)
-  while [ "$wdir" != '/' ] ; do
-    if [ -d "$wdir"/.mvn ] ; then
-      basedir=$wdir
-      break
-    fi
-    wdir=$(cd "$wdir/.."; pwd)
-  done
-  echo "${basedir}"
-}
+:endReadAdditionalConfig

-# concatenates all lines of a file
-concat_lines() {
-  if [ -f "$1" ]; then
-    echo "$(tr -s '\n' ' ' < "$1")"
-  fi
-}
+SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"

-export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
-MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
+set WRAPPER_JAR="".\.mvn\wrapper\maven-wrapper.jar""
+set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain

-# Provide a "standardized" way to retrieve the CLI args that will
-# work with both Windows and non-Windows executions.
-MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
-export MAVEN_CMD_LINE_ARGS
+%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CMD_LINE_ARGS%
+if ERRORLEVEL 1 goto error
+goto end

-WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
+:error
+set ERROR_CODE=1

-exec "$JAVACMD" \
-  $MAVEN_OPTS \
-  -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
-  "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
-  ${WRAPPER_LAUNCHER} "$@"
+:end
+@endlocal & set ERROR_CODE=%ERROR_CODE%

+if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
+@REM check for post script, once with legacy .bat ending and once with .cmd ending
+if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
+if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
+:skipRcPost

+@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
+if "%MAVEN_BATCH_PAUSE%" == "on" pause

+if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%

+exit /B %ERROR_CODE%
```
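Both wrapper scripts apply the same base-directory rule: walk upward from the current directory until a `.mvn` folder appears (`find_maven_basedir` in `mvnw`, the `:findBaseDir` loop in `mvnw.cmd`), falling back to the starting directory. An equivalent sketch in Java, for illustration only and not part of the wrapper:

```java
import java.io.File;

// Illustrative Java analogue of the wrapper's find_maven_basedir/:findBaseDir
// logic: walk up from the working directory until a ".mvn" folder is found.
public class BaseDirSketch {

    static File findMavenBasedir(File start) {
        File dir = start.getAbsoluteFile();
        while (dir != null) {
            if (new File(dir, ".mvn").isDirectory()) {
                return dir;
            }
            dir = dir.getParentFile();
        }
        return start; // fall back to where we started, as the scripts do
    }

    public static void main(String[] args) {
        System.out.println(findMavenBasedir(new File(".")));
    }
}
```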
**pom.xml** (145 changes)

```diff
@@ -2,53 +2,116 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 	<modelVersion>4.0.0</modelVersion>
 	<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-	<version>1.1.0.M1</version>
+	<version>2.0.0.M4</version>
 	<packaging>pom</packaging>
 	<parent>
 		<groupId>org.springframework.cloud</groupId>
-		<artifactId>spring-cloud-stream-parent</artifactId>
-		<version>1.1.0.M1</version>
+		<artifactId>spring-cloud-build</artifactId>
+		<version>2.0.0.M7</version>
+		<relativePath />
 	</parent>
 	<properties>
-		<java.version>1.7</java.version>
-		<kafka.version>0.9.0.1</kafka.version>
-		<spring-kafka.version>1.0.3.RELEASE</spring-kafka.version>
-		<spring-integration-kafka.version>2.0.1.RELEASE</spring-integration-kafka.version>
+		<java.version>1.8</java.version>
+		<spring-kafka.version>2.1.2.RELEASE</spring-kafka.version>
+		<spring-integration-kafka.version>3.0.1.RELEASE</spring-integration-kafka.version>
+		<kafka.version>1.0.0</kafka.version>
+		<spring-cloud-stream.version>2.0.0.M4</spring-cloud-stream.version>
 	</properties>
 	<modules>
 		<module>spring-cloud-stream-binder-kafka</module>
 		<module>spring-cloud-starter-stream-kafka</module>
-		<module>spring-cloud-stream-binder-kafka-test-support</module>
 		<module>spring-cloud-stream-binder-kafka-docs</module>
+		<module>spring-cloud-stream-binder-kafka-core</module>
+		<module>spring-cloud-stream-binder-kstream</module>
 	</modules>
+
+	<dependencyManagement>
+		<dependencies>
+			<dependency>
+				<groupId>org.springframework.cloud</groupId>
+				<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
+				<version>${project.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.springframework.cloud</groupId>
+				<artifactId>spring-cloud-stream-binder-kafka</artifactId>
+				<version>${project.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.springframework.cloud</groupId>
+				<artifactId>spring-cloud-stream</artifactId>
+				<version>${spring-cloud-stream.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.apache.kafka</groupId>
+				<artifactId>kafka-clients</artifactId>
+				<version>${kafka.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.springframework.kafka</groupId>
+				<artifactId>spring-kafka</artifactId>
+				<version>${spring-kafka.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.springframework.integration</groupId>
+				<artifactId>spring-integration-kafka</artifactId>
+				<version>${spring-integration-kafka.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.springframework.cloud</groupId>
+				<artifactId>spring-cloud-stream-binder-test</artifactId>
+				<version>${spring-cloud-stream.version}</version>
+				<scope>test</scope>
+			</dependency>
+			<dependency>
+				<groupId>org.springframework.kafka</groupId>
+				<artifactId>spring-kafka-test</artifactId>
+				<scope>test</scope>
+				<version>${spring-kafka.version}</version>
+			</dependency>
+			<dependency>
+				<groupId>org.apache.kafka</groupId>
+				<artifactId>kafka-streams</artifactId>
+				<version>${kafka.version}</version>
+				<exclusions>
+					<exclusion>
+						<groupId>org.slf4j</groupId>
+						<artifactId>slf4j-log4j12</artifactId>
+					</exclusion>
+				</exclusions>
+			</dependency>
+			<dependency>
+				<groupId>org.apache.kafka</groupId>
+				<artifactId>kafka_2.11</artifactId>
+				<classifier>test</classifier>
+				<scope>test</scope>
+				<version>${kafka.version}</version>
+				<exclusions>
+					<exclusion>
+						<groupId>jline</groupId>
+						<artifactId>jline</artifactId>
+					</exclusion>
+					<exclusion>
+						<groupId>org.slf4j</groupId>
+						<artifactId>slf4j-log4j12</artifactId>
+					</exclusion>
+					<exclusion>
+						<groupId>log4j</groupId>
+						<artifactId>log4j</artifactId>
+					</exclusion>
+				</exclusions>
+			</dependency>
+		</dependencies>
+	</dependencyManagement>

 	<build>
 		<pluginManagement>
 			<plugins>
-				<plugin>
-					<groupId>org.apache.maven.plugins</groupId>
-					<artifactId>maven-surefire-plugin</artifactId>
-					<configuration>
-						<redirectTestOutputToFile>true</redirectTestOutputToFile>
-					</configuration>
-				</plugin>
 				<plugin>
 					<groupId>org.apache.maven.plugins</groupId>
 					<artifactId>maven-antrun-plugin</artifactId>
 					<version>1.7</version>
 				</plugin>
-				<plugin>
-					<groupId>org.apache.maven.plugins</groupId>
-					<artifactId>maven-checkstyle-plugin</artifactId>
-					<version>2.17</version>
-					<dependencies>
-						<dependency>
-							<groupId>com.puppycrawl.tools</groupId>
-							<artifactId>checkstyle</artifactId>
-							<version>6.17</version>
-						</dependency>
-					</dependencies>
-				</plugin>
 				<plugin>
 					<groupId>org.apache.maven.plugins</groupId>
 					<artifactId>maven-javadoc-plugin</artifactId>
@@ -56,9 +119,35 @@
 					<quiet>true</quiet>
 				</configuration>
 			</plugin>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-surefire-plugin</artifactId>
+				<configuration>
+					<redirectTestOutputToFile>true</redirectTestOutputToFile>
+				</configuration>
+			</plugin>
 			</plugins>
 		</pluginManagement>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-checkstyle-plugin</artifactId>
+				<dependencies>
+					<dependency>
+						<groupId>org.springframework.cloud</groupId>
+						<artifactId>spring-cloud-stream-tools</artifactId>
+						<version>${spring-cloud-stream.version}</version>
+					</dependency>
+				</dependencies>
+				<configuration>
+					<configLocation>checkstyle.xml</configLocation>
+					<headerLocation>checkstyle-header.txt</headerLocation>
+					<includeTestSourceDirectory>true</includeTestSourceDirectory>
+				</configuration>
+			</plugin>
+		</plugins>
 	</build>

 	<profiles>
 		<profile>
 			<id>spring</id>
```
**spring-cloud-starter-stream-kafka/.jdk8** (new empty file)

**spring-cloud-starter-stream-kafka/pom.xml**

```diff
@@ -4,7 +4,7 @@
 	<parent>
 		<groupId>org.springframework.cloud</groupId>
 		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-		<version>1.1.0.M1</version>
+		<version>2.0.0.M4</version>
 	</parent>
 	<artifactId>spring-cloud-starter-stream-kafka</artifactId>
 	<description>Spring Cloud Starter Stream Kafka</description>
@@ -20,7 +20,7 @@
 		<dependency>
 			<groupId>org.springframework.cloud</groupId>
 			<artifactId>spring-cloud-stream-binder-kafka</artifactId>
-			<version>1.1.0.M1</version>
+			<version>${project.version}</version>
 		</dependency>
 	</dependencies>
 </project>
```
**spring-cloud-stream-binder-kafka-core/.jdk8** (new empty file)

New file, 5 additions (the file header for this hunk was not captured):

```properties
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99
```
**spring-cloud-stream-binder-kafka-core/pom.xml** (new file, 59 additions)

```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<build>
		<plugins>
			<plugin>
				<groupId>org.apache.maven.plugins</groupId>
				<artifactId>maven-compiler-plugin</artifactId>
				<configuration>
					<source>1.8</source>
					<target>1.8</target>
				</configuration>
			</plugin>
		</plugins>
	</build>
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>2.0.0.M4</version>
	</parent>
	<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
	<description>Spring Cloud Stream Kafka Binder Core</description>
	<url>http://projects.spring.io/spring-cloud</url>
	<organization>
		<name>Pivotal Software, Inc.</name>
		<url>http://www.spring.io</url>
	</organization>
	<properties>
	</properties>

	<dependencies>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream</artifactId>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-clients</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.integration</groupId>
			<artifactId>spring-integration-kafka</artifactId>
			<version>${spring-integration-kafka.version}</version>
			<exclusions>
				<exclusion>
					<groupId>org.apache.avro</groupId>
					<artifactId>avro-compiler</artifactId>
				</exclusion>
			</exclusions>
		</dependency>
		<dependency>
			<groupId>log4j</groupId>
			<artifactId>log4j</artifactId>
			<version>1.2.17</version>
		</dependency>
	</dependencies>

</project>
```
**JaasLoginModuleConfiguration.java** (new file, 83 additions)

```java
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.HashMap;
import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;

import org.springframework.util.Assert;

/**
 * Contains properties for setting up an {@link AppConfigurationEntry} that can be used
 * for the Kafka or Zookeeper client.
 *
 * @author Marius Bogoevici
 */
public class JaasLoginModuleConfiguration {

	private String loginModule = "com.sun.security.auth.module.Krb5LoginModule";

	private AppConfigurationEntry.LoginModuleControlFlag controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;

	private Map<String, String> options = new HashMap<>();

	public String getLoginModule() {
		return loginModule;
	}

	public void setLoginModule(String loginModule) {
		Assert.notNull(loginModule, "cannot be null");
		this.loginModule = loginModule;
	}

	public String getControlFlag() {
		return controlFlag.toString();
	}

	public AppConfigurationEntry.LoginModuleControlFlag getControlFlagValue() {
		return controlFlag;
	}

	public void setControlFlag(String controlFlag) {
		Assert.notNull(controlFlag, "cannot be null");
		if (AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL.equals(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL;
		}
		else if (AppConfigurationEntry.LoginModuleControlFlag.REQUIRED.equals(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
		}
		else if (AppConfigurationEntry.LoginModuleControlFlag.REQUISITE.equals(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUISITE;
		}
		else if (AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT.equals(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT;
		}
		else {
			throw new IllegalArgumentException(controlFlag + " is not a supported control flag");
		}
	}

	public Map<String, String> getOptions() {
		return options;
	}

	public void setOptions(Map<String, String> options) {
		this.options = options;
	}
}
```
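For context, properties like these are typically materialized into a JAAS `AppConfigurationEntry` at startup; a minimal sketch of that translation (the `JaasEntrySketch` helper below is illustrative and not part of this changeset):

```java
import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;

// Illustrative helper (not in the diff): builds the JAAS entry that a Kafka or
// Zookeeper client would consume from the configured module, flag, and options.
public final class JaasEntrySketch {

	public static AppConfigurationEntry toEntry(JaasLoginModuleConfiguration jaas) {
		Map<String, String> options = jaas.getOptions(); // e.g. useKeyTab, principal
		return new AppConfigurationEntry(
				jaas.getLoginModule(),      // defaults to the Krb5 login module
				jaas.getControlFlagValue(), // REQUIRED unless overridden
				options);
	}

	private JaasEntrySketch() {
	}
}
```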
**KafkaBinderConfigurationProperties.java** (moved to package `org.springframework.cloud.stream.binder.kafka.properties`)

```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015-2016 the original author or authors.
+ * Copyright 2015-2017 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,12 +14,20 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka.config;
+package org.springframework.cloud.stream.binder.kafka.properties;

+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;

+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.ProducerConfig;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
 import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.util.ObjectUtils;
 import org.springframework.util.StringUtils;

 /**
@@ -27,17 +35,25 @@ import org.springframework.util.StringUtils;
  * @author Ilayaperumal Gopinathan
  * @author Marius Bogoevici
  * @author Soby Chacko
+ * @author Gary Russell
  */
 @ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
 public class KafkaBinderConfigurationProperties {

-	private String[] zkNodes = new String[] {"localhost"};
+	private static final String DEFAULT_KAFKA_CONNECTION_STRING = "localhost:9092";
+
+	private final Transaction transaction = new Transaction();
+
+	@Autowired(required = false)
+	private KafkaProperties kafkaProperties;
+
+	private String[] zkNodes = new String[] { "localhost" };

 	private Map<String, String> configuration = new HashMap<>();

 	private String defaultZkPort = "2181";

-	private String[] brokers = new String[] {"localhost"};
+	private String[] brokers = new String[] { "localhost" };

 	private String defaultBrokerPort = "9092";

@@ -77,6 +93,22 @@ public class KafkaBinderConfigurationProperties {

 	private int queueSize = 8192;

+	/**
+	 * Time to wait to get partition information in seconds; default 60.
+	 */
+	private int healthTimeout = 60;
+
+	private JaasLoginModuleConfiguration jaas;
+
+	/**
+	 * The bean name of a custom header mapper to use instead of a {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
+	 */
+	private String headerMapperBeanName;
+
+	public Transaction getTransaction() {
+		return this.transaction;
+	}
+
 	public String getZkConnectionString() {
 		return toConnectionString(this.zkNodes, this.defaultZkPort);
 	}
@@ -85,6 +117,10 @@ public class KafkaBinderConfigurationProperties {
 		return toConnectionString(this.brokers, this.defaultBrokerPort);
 	}

+	public String getDefaultKafkaConnectionString() {
+		return DEFAULT_KAFKA_CONNECTION_STRING;
+	}
+
 	public String[] getHeaders() {
 		return this.headers;
 	}
@@ -215,6 +251,14 @@ public class KafkaBinderConfigurationProperties {
 		this.minPartitionCount = minPartitionCount;
 	}

+	public int getHealthTimeout() {
+		return this.healthTimeout;
+	}
+
+	public void setHealthTimeout(int healthTimeout) {
+		this.healthTimeout = healthTimeout;
+	}
+
 	public int getQueueSize() {
 		return this.queueSize;
 	}
@@ -254,4 +298,103 @@ public class KafkaBinderConfigurationProperties {
 	public void setConfiguration(Map<String, String> configuration) {
 		this.configuration = configuration;
 	}
+
+	public Map<String, Object> getConsumerConfiguration() {
+		Map<String, Object> consumerConfiguration = new HashMap<>();
+		// If Spring Boot Kafka properties are present, add them with lowest precedence
+		if (this.kafkaProperties != null) {
+			consumerConfiguration.putAll(this.kafkaProperties.buildConsumerProperties());
+		}
+		// Copy configured binder properties
+		for (Map.Entry<String, String> configurationEntry : this.configuration.entrySet()) {
+			if (ConsumerConfig.configNames().contains(configurationEntry.getKey())) {
+				consumerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
+			}
+		}
+		// Override Spring Boot bootstrap server setting if left to default with the value
+		// configured in the binder
+		if (ObjectUtils.isEmpty(consumerConfiguration.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
+			consumerConfiguration.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
+		}
+		else {
+			Object boostrapServersConfig = consumerConfiguration.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
+			if (boostrapServersConfig instanceof List) {
+				@SuppressWarnings("unchecked")
+				List<String> bootStrapServers = (List<String>) consumerConfiguration
+						.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
+				if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
+					consumerConfiguration.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
+				}
+			}
+		}
+		return Collections.unmodifiableMap(consumerConfiguration);
+	}
+
+	public Map<String, Object> getProducerConfiguration() {
+		Map<String, Object> producerConfiguration = new HashMap<>();
+		// If Spring Boot Kafka properties are present, add them with lowest precedence
+		if (this.kafkaProperties != null) {
+			producerConfiguration.putAll(this.kafkaProperties.buildProducerProperties());
+		}
+		// Copy configured binder properties
+		for (Map.Entry<String, String> configurationEntry : configuration.entrySet()) {
+			if (ProducerConfig.configNames().contains(configurationEntry.getKey())) {
+				producerConfiguration.put(configurationEntry.getKey(), configurationEntry.getValue());
+			}
+		}
+		// Override Spring Boot bootstrap server setting if left to default with the value
+		// configured in the binder
+		if (ObjectUtils.isEmpty(producerConfiguration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
+			producerConfiguration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
+		}
+		else {
+			Object boostrapServersConfig = producerConfiguration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
+			if (boostrapServersConfig instanceof List) {
+				@SuppressWarnings("unchecked")
+				List<String> bootStrapServers = (List<String>) producerConfiguration
+						.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
+				if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
+					producerConfiguration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getKafkaConnectionString());
+				}
+			}
+		}
+		return Collections.unmodifiableMap(producerConfiguration);
+	}
+
+	public JaasLoginModuleConfiguration getJaas() {
+		return this.jaas;
+	}
+
+	public void setJaas(JaasLoginModuleConfiguration jaas) {
+		this.jaas = jaas;
+	}
+
+	public String getHeaderMapperBeanName() {
+		return this.headerMapperBeanName;
+	}
+
+	public void setHeaderMapperBeanName(String headerMapperBeanName) {
+		this.headerMapperBeanName = headerMapperBeanName;
+	}
+
+	public static class Transaction {
+
+		private final KafkaProducerProperties producer = new KafkaProducerProperties();
+
+		private String transactionIdPrefix;
+
+		public String getTransactionIdPrefix() {
+			return this.transactionIdPrefix;
+		}
+
+		public void setTransactionIdPrefix(String transactionIdPrefix) {
+			this.transactionIdPrefix = transactionIdPrefix;
+		}
+
+		public KafkaProducerProperties getProducer() {
+			return this.producer;
+		}
+
+	}
+
 }
```
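The two `get*Configuration()` methods added above layer three sources: Spring Boot's `spring.kafka.*` properties form the base, binder-level `configuration` entries override matching keys, and `bootstrap.servers` is replaced by the binder's broker list when it is still the `localhost:9092` default. A standalone sketch of that merge order (the `merge` method is illustrative, not from the diff):

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative only: the layered-override order used by
// getConsumerConfiguration()/getProducerConfiguration(); the real methods
// also filter binder keys against ConsumerConfig/ProducerConfig.configNames().
public final class PrecedenceSketch {

	static Map<String, Object> merge(Map<String, Object> bootProperties,
			Map<String, String> binderConfiguration, String binderBrokers) {
		Map<String, Object> merged = new HashMap<>(bootProperties); // lowest precedence
		merged.putAll(binderConfiguration);                         // binder configuration wins
		Object servers = merged.get("bootstrap.servers");
		// Only replace bootstrap.servers when it was never customized
		if (servers == null || "localhost:9092".equals(servers.toString())) {
			merged.put("bootstrap.servers", binderBrokers);
		}
		return merged;
	}

	private PrecedenceSketch() {
	}
}
```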
A further hunk in the same package move (its file header did not survive the capture):

```diff
@@ -14,7 +14,7 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.properties;

 /**
  * @author Marius Bogoevici
```
**KafkaConsumerProperties.java** (moved to package `org.springframework.cloud.stream.binder.kafka.properties`)

```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 the original author or authors.
+ * Copyright 2016-2017 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,32 +14,69 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.properties;

 import java.util.HashMap;
 import java.util.Map;

 /**
  * @author Marius Bogoevici
  * @author Ilayaperumal Gopinathan
  * @author Soby Chacko
+ * @author Gary Russell
  *
- * <p>Thanks to Laszlo Szabo for providing the initial patch for generic property support.</p>
+ * <p>
+ * Thanks to Laszlo Szabo for providing the initial patch for generic property support.
+ * </p>
  */
 public class KafkaConsumerProperties {

+	public enum StartOffset {
+		earliest(-2L),
+		latest(-1L);
+
+		private final long referencePoint;
+
+		StartOffset(long referencePoint) {
+			this.referencePoint = referencePoint;
+		}
+
+		public long getReferencePoint() {
+			return this.referencePoint;
+		}
+	}
+
+	public enum StandardHeaders {
+		none,
+		id,
+		timestamp,
+		both
+	}
+
 	private boolean autoRebalanceEnabled = true;

 	private boolean autoCommitOffset = true;

 	private Boolean autoCommitOnError;

 	private boolean resetOffsets;

 	private StartOffset startOffset;

 	private boolean enableDlq;

+	private String dlqName;
+
+	private KafkaProducerProperties dlqProducerProperties = new KafkaProducerProperties();
+
 	private int recoveryInterval = 5000;

+	private String[] trustedPackages;
+
+	private StandardHeaders standardHeaders = StandardHeaders.none;
+
+	private String converterBeanName;
+
+	private long idleEventInterval = 30_000;
+
 	private Map<String, String> configuration = new HashMap<>();

 	public boolean isAutoCommitOffset() {
@@ -50,14 +87,6 @@ public class KafkaConsumerProperties {
 		this.autoCommitOffset = autoCommitOffset;
 	}

-	public boolean isResetOffsets() {
-		return this.resetOffsets;
-	}
-
-	public void setResetOffsets(boolean resetOffsets) {
-		this.resetOffsets = resetOffsets;
-	}
-
 	public StartOffset getStartOffset() {
 		return this.startOffset;
 	}
@@ -98,20 +127,6 @@ public class KafkaConsumerProperties {
 		this.autoRebalanceEnabled = autoRebalanceEnabled;
 	}

-	public enum StartOffset {
-		earliest(-2L), latest(-1L);
-
-		private final long referencePoint;
-
-		StartOffset(long referencePoint) {
-			this.referencePoint = referencePoint;
-		}
-
-		public long getReferencePoint() {
-			return this.referencePoint;
-		}
-	}
-
 	public Map<String, String> getConfiguration() {
 		return this.configuration;
 	}
@@ -119,4 +134,52 @@ public class KafkaConsumerProperties {
 	public void setConfiguration(Map<String, String> configuration) {
 		this.configuration = configuration;
 	}
+
+	public String getDlqName() {
+		return dlqName;
+	}
+
+	public void setDlqName(String dlqName) {
+		this.dlqName = dlqName;
+	}
+
+	public String[] getTrustedPackages() {
+		return trustedPackages;
+	}
+
+	public void setTrustedPackages(String[] trustedPackages) {
+		this.trustedPackages = trustedPackages;
+	}
+
+	public KafkaProducerProperties getDlqProducerProperties() {
+		return dlqProducerProperties;
+	}
+
+	public void setDlqProducerProperties(KafkaProducerProperties dlqProducerProperties) {
+		this.dlqProducerProperties = dlqProducerProperties;
+	}
+
+	public StandardHeaders getStandardHeaders() {
+		return this.standardHeaders;
+	}
+
+	public void setStandardHeaders(StandardHeaders standardHeaders) {
+		this.standardHeaders = standardHeaders;
+	}
+
+	public String getConverterBeanName() {
+		return this.converterBeanName;
+	}
+
+	public void setConverterBeanName(String converterBeanName) {
+		this.converterBeanName = converterBeanName;
+	}
+
+	public long getIdleEventInterval() {
+		return this.idleEventInterval;
+	}
+
+	public void setIdleEventInterval(long idleEventInterval) {
+		this.idleEventInterval = idleEventInterval;
+	}
+
+}
```
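`StartOffset` carries the reference points `-2L` and `-1L`, which mirror Kafka's special sentinel offsets for earliest and latest. A short usage sketch (assuming the conventional setter that pairs with the `getStartOffset()` shown above):

```java
// Illustrative usage of the StartOffset enum moved to the top of the class.
public class StartOffsetSketch {

	public static void main(String[] args) {
		KafkaConsumerProperties properties = new KafkaConsumerProperties();
		properties.setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
		// -2L is Kafka's "earliest" sentinel; latest would yield -1L
		System.out.println(properties.getStartOffset().getReferencePoint());
	}
}
```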
**KafkaExtendedBindingProperties.java** (new file, 85 additions)

```java
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.HashMap;
import java.util.Map;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;

/**
 * @author Marius Bogoevici
 * @author Gary Russell
 */
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties
		implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {

	private Map<String, KafkaBindingProperties> bindings = new HashMap<>();

	public Map<String, KafkaBindingProperties> getBindings() {
		return this.bindings;
	}

	public void setBindings(Map<String, KafkaBindingProperties> bindings) {
		this.bindings = bindings;
	}

	@Override
	public synchronized KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
		if (bindings.containsKey(channelName)) {
			if (bindings.get(channelName).getConsumer() != null) {
				return bindings.get(channelName).getConsumer();
			}
			else {
				KafkaConsumerProperties properties = new KafkaConsumerProperties();
				this.bindings.get(channelName).setConsumer(properties);
				return properties;
			}
		}
		else {
			KafkaConsumerProperties properties = new KafkaConsumerProperties();
			KafkaBindingProperties rbp = new KafkaBindingProperties();
			rbp.setConsumer(properties);
			bindings.put(channelName, rbp);
			return properties;
		}
	}

	@Override
	public synchronized KafkaProducerProperties getExtendedProducerProperties(String channelName) {
		if (bindings.containsKey(channelName)) {
			if (bindings.get(channelName).getProducer() != null) {
				return bindings.get(channelName).getProducer();
			}
			else {
				KafkaProducerProperties properties = new KafkaProducerProperties();
				this.bindings.get(channelName).setProducer(properties);
				return properties;
			}
		}
		else {
			KafkaProducerProperties properties = new KafkaProducerProperties();
			KafkaBindingProperties rbp = new KafkaBindingProperties();
			rbp.setProducer(properties);
			bindings.put(channelName, rbp);
			return properties;
		}
	}

}
```
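Because the class is bound to the `spring.cloud.stream.kafka` prefix, per-binding settings such as `spring.cloud.stream.kafka.bindings.input.consumer.startOffset=earliest` land in the `bindings` map keyed by channel name, and the two lookup methods create an entry on demand rather than returning `null`. A small sketch (the channel name "input" is illustrative):

```java
// Illustrative lookup: a missing binding entry is created on the fly,
// so callers always get a mutable properties object back.
public class ExtendedPropertiesSketch {

	public static void main(String[] args) {
		KafkaExtendedBindingProperties extended = new KafkaExtendedBindingProperties();
		KafkaConsumerProperties input = extended.getExtendedConsumerProperties("input");
		input.setStandardHeaders(KafkaConsumerProperties.StandardHeaders.both);
		System.out.println(extended.getBindings().containsKey("input")); // true
	}
}
```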
**KafkaProducerProperties.java** (moved to package `org.springframework.cloud.stream.binder.kafka.properties`)

```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 the original author or authors.
+ * Copyright 2016-2017 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,15 +14,19 @@
  * limitations under the License.
  */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.properties;

 import java.util.HashMap;
 import java.util.Map;

 import javax.validation.constraints.NotNull;

+import org.springframework.expression.Expression;
+
 /**
  * @author Marius Bogoevici
  * @author Henryk Konsek
+ * @author Gary Russell
  */
 public class KafkaProducerProperties {

@@ -34,6 +38,10 @@ public class KafkaProducerProperties {

 	private int batchTimeout;

+	private Expression messageKeyExpression;
+
+	private String[] headerPatterns;
+
 	private Map<String, String> configuration = new HashMap<>();

 	public int getBufferSize() {
@@ -69,6 +77,22 @@ public class KafkaProducerProperties {
 		this.batchTimeout = batchTimeout;
 	}

+	public Expression getMessageKeyExpression() {
+		return messageKeyExpression;
+	}
+
+	public void setMessageKeyExpression(Expression messageKeyExpression) {
+		this.messageKeyExpression = messageKeyExpression;
+	}
+
+	public String[] getHeaderPatterns() {
+		return this.headerPatterns;
+	}
+
+	public void setHeaderPatterns(String[] headerPatterns) {
+		this.headerPatterns = headerPatterns;
+	}
+
 	public Map<String, String> getConfiguration() {
 		return this.configuration;
 	}
```
**KafkaTopicProvisioner.java** (new file, 399 additions; the capture ends partway through the file)

```java
/*
 * Copyright 2014-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.provisioning;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.ListTopicsResult;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.PartitionInfo;

import org.springframework.beans.factory.InitializingBean;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
import org.springframework.cloud.stream.provisioning.ProducerDestination;
import org.springframework.cloud.stream.provisioning.ProvisioningException;
import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

/**
 * Kafka implementation for {@link ProvisioningProvider}
 *
 * @author Soby Chacko
 * @author Gary Russell
 * @author Ilayaperumal Gopinathan
 * @author Simon Flandergan
 */
public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
		ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {

	private static final int DEFAULT_OPERATION_TIMEOUT = 30;

	private final Log logger = LogFactory.getLog(getClass());

	private KafkaBinderConfigurationProperties configurationProperties;

	private final AdminClient adminClient;

	private RetryOperations metadataRetryOperations;

	private int operationTimeout = DEFAULT_OPERATION_TIMEOUT;

	public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
			KafkaProperties kafkaProperties) {
		Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
		Map<String, Object> adminClientProperties = kafkaProperties.buildAdminProperties();
		String kafkaConnectionString = kafkaBinderConfigurationProperties.getKafkaConnectionString();

		if (ObjectUtils.isEmpty(adminClientProperties.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
				|| !kafkaConnectionString.equals(kafkaBinderConfigurationProperties.getDefaultKafkaConnectionString())) {
			adminClientProperties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConnectionString);
		}
		this.configurationProperties = kafkaBinderConfigurationProperties;
		this.adminClient = AdminClient.create(adminClientProperties);
	}

	/**
	 * @param metadataRetryOperations the retry configuration
	 */
	public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
		this.metadataRetryOperations = metadataRetryOperations;
	}

	@Override
	public void afterPropertiesSet() throws Exception {
		if (this.metadataRetryOperations == null) {
			RetryTemplate retryTemplate = new RetryTemplate();

			SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
			simpleRetryPolicy.setMaxAttempts(10);
			retryTemplate.setRetryPolicy(simpleRetryPolicy);

			ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
			backOffPolicy.setInitialInterval(100);
			backOffPolicy.setMultiplier(2);
			backOffPolicy.setMaxInterval(1000);
			retryTemplate.setBackOffPolicy(backOffPolicy);
			this.metadataRetryOperations = retryTemplate;
		}
	}

	@Override
	public ProducerDestination provisionProducerDestination(final String name, ExtendedProducerProperties<KafkaProducerProperties> properties) {
		if (this.logger.isInfoEnabled()) {
			this.logger.info("Using kafka topic for outbound: " + name);
		}
		KafkaTopicUtils.validateTopicName(name);
		createTopic(name, properties.getPartitionCount(), false);
		if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
			DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
			KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();

			try {
				Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
				TopicDescription topicDescription = topicDescriptions.get(name);
				int partitions = topicDescription.partitions().size();
				return new KafkaProducerDestination(name, partitions);
			}
			catch (Exception e) {
				throw new ProvisioningException("Problems encountered with partitions finding", e);
			}
		}
		else {
			return new KafkaProducerDestination(name);
		}
	}

	@Override
	public ConsumerDestination provisionConsumerDestination(final String name, final String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
		KafkaTopicUtils.validateTopicName(name);
		boolean anonymous = !StringUtils.hasText(group);
		Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
				"DLQ support is not available for anonymous subscriptions");
		if (properties.getInstanceCount() == 0) {
			throw new IllegalArgumentException("Instance count cannot be zero");
		}
		int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
		createTopic(name, partitionCount, properties.getExtension().isAutoRebalanceEnabled());
		if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
```
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(name));
|
||||
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
|
||||
try {
|
||||
Map<String, TopicDescription> topicDescriptions = all.get(operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(name);
|
||||
int partitions = topicDescription.partitions().size();
|
||||
ConsumerDestination dlqTopic = createDlqIfNeedBe(name, group, properties, anonymous, partitions);
|
||||
if (dlqTopic != null) return dlqTopic;
|
||||
return new KafkaConsumerDestination(name, partitions);
|
||||
}
|
||||
catch (Exception e) {
|
||||
throw new ProvisioningException("provisioning exception", e);
|
||||
}
|
||||
}
|
||||
return new KafkaConsumerDestination(name);
|
||||
}
|
||||
|
||||
private ConsumerDestination createDlqIfNeedBe(String name, String group,
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> properties,
|
||||
boolean anonymous, int partitions) {
|
||||
if (properties.getExtension().isEnableDlq() && !anonymous) {
|
||||
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
|
||||
properties.getExtension().getDlqName() : "error." + name + "." + group;
|
||||
try {
|
||||
createTopicAndPartitions(dlqTopic, partitions, properties.getExtension().isAutoRebalanceEnabled());
|
||||
}
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("provisioning exception", throwable);
|
||||
}
|
||||
}
|
||||
return new KafkaConsumerDestination(name, partitions, dlqTopic);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private void createTopic(String name, int partitionCount, boolean tolerateLowerPartitionsOnBroker) {
|
||||
try {
|
||||
createTopicIfNecessary(name, partitionCount, tolerateLowerPartitionsOnBroker);
|
||||
}
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("provisioning exception", throwable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void createTopicIfNecessary(final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker) throws Throwable {
|
||||
if (this.configurationProperties.isAutoCreateTopics() && adminClient != null) {
|
||||
createTopicAndPartitions(topicName, partitionCount, tolerateLowerPartitionsOnBroker);
|
||||
}
|
||||
else if (this.configurationProperties.isAutoCreateTopics() && adminClient == null) {
|
||||
this.logger.warn("Auto creation of topics is enabled, but Kafka AdminUtils class is not present on the classpath. " +
|
||||
"No topic will be created by the binder");
|
||||
}
|
||||
else if (!this.configurationProperties.isAutoCreateTopics()) {
|
||||
this.logger.info("Auto creation of topics is disabled.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Kafka topic if needed, or try to increase its partition count to the
|
||||
* desired number.
|
||||
*/
|
||||
private void createTopicAndPartitions(final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker) throws Throwable {
|
||||
ListTopicsResult listTopicsResult = adminClient.listTopics();
|
||||
KafkaFuture<Set<String>> namesFutures = listTopicsResult.names();
|
||||
|
||||
Set<String> names = namesFutures.get(operationTimeout, TimeUnit.SECONDS);
|
||||
if (names.contains(topicName)) {
|
||||
// only consider minPartitionCount for resizing if autoAddPartitions is true
|
||||
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
|
||||
? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
|
||||
: partitionCount;
|
||||
DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Collections.singletonList(topicName));
|
||||
KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult.all();
|
||||
Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture.get(operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(topicName);
|
||||
int partitionSize = topicDescription.partitions().size();
|
||||
if (partitionSize < effectivePartitionCount) {
|
||||
if (this.configurationProperties.isAutoAddPartitions()) {
|
||||
CreatePartitionsResult partitions = adminClient.createPartitions(
|
||||
Collections.singletonMap(topicName, NewPartitions.increaseTo(effectivePartitionCount)));
|
||||
partitions.all().get(operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
else if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling " +
|
||||
"`autoAddPartitions`");
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (!names.contains(topicName)) {
|
||||
// always consider minPartitionCount for topic creation
|
||||
final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount);
|
||||
this.metadataRetryOperations.execute(context -> {
|
||||
|
||||
NewTopic newTopic = new NewTopic(topicName, effectivePartitionCount,
|
||||
(short) configurationProperties.getReplicationFactor());
|
||||
CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singletonList(newTopic));
|
||||
try {
|
||||
createTopicsResult.all().get(operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (Exception e) {
|
||||
if (e instanceof ExecutionException) {
|
||||
String exceptionMessage = e.getMessage();
|
||||
if (exceptionMessage.contains("org.apache.kafka.common.errors.TopicExistsException")) {
|
||||
if (logger.isWarnEnabled()) {
|
||||
logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
logger.error("Failed to create topics", e.getCause());
|
||||
throw e.getCause();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
|
||||
final boolean tolerateLowerPartitionsOnBroker,
|
||||
final Callable<Collection<PartitionInfo>> callable) {
|
||||
try {
|
||||
return this.metadataRetryOperations
|
||||
.execute(context -> {
|
||||
Collection<PartitionInfo> partitions = callable.call();
|
||||
// do a sanity check on the partition set
|
||||
int partitionSize = partitions.size();
|
||||
if (partitionSize < partitionCount) {
|
||||
if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (partitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ") + "been found instead");
|
||||
}
|
||||
}
|
||||
return partitions;
|
||||
});
|
||||
}
|
||||
catch (Exception e) {
|
||||
this.logger.error("Cannot initialize Binder", e);
|
||||
throw new BinderException("Cannot initialize binder:", e);
|
||||
}
|
||||
}
|
||||
|
||||
private static final class KafkaProducerDestination implements ProducerDestination {
|
||||
|
||||
private final String producerDestinationName;
|
||||
|
||||
private final int partitions;
|
||||
|
||||
KafkaProducerDestination(String destinationName) {
|
||||
this(destinationName, 0);
|
||||
}
|
||||
|
||||
KafkaProducerDestination(String destinationName, Integer partitions) {
|
||||
this.producerDestinationName = destinationName;
|
||||
this.partitions = partitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return producerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getNameForPartition(int partition) {
|
||||
return producerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "KafkaProducerDestination{" +
|
||||
"producerDestinationName='" + producerDestinationName + '\'' +
|
||||
", partitions=" + partitions +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
||||
private static final class KafkaConsumerDestination implements ConsumerDestination {
|
||||
|
||||
private final String consumerDestinationName;
|
||||
|
||||
private final int partitions;
|
||||
|
||||
private final String dlqName;
|
||||
|
||||
KafkaConsumerDestination(String consumerDestinationName) {
|
||||
this(consumerDestinationName, 0, null);
|
||||
}
|
||||
|
||||
KafkaConsumerDestination(String consumerDestinationName, int partitions) {
|
||||
this(consumerDestinationName, partitions, null);
|
||||
}
|
||||
|
||||
KafkaConsumerDestination(String consumerDestinationName, Integer partitions, String dlqName) {
|
||||
this.consumerDestinationName = consumerDestinationName;
|
||||
this.partitions = partitions;
|
||||
this.dlqName = dlqName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return this.consumerDestinationName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "KafkaConsumerDestination{" +
|
||||
"consumerDestinationName='" + consumerDestinationName + '\'' +
|
||||
", partitions=" + partitions +
|
||||
", dlqName='" + dlqName + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
}
|
@@ -14,7 +14,7 @@
 * limitations under the License.
 */

-package org.springframework.cloud.stream.binder.kafka;
+package org.springframework.cloud.stream.binder.kafka.utils;

import java.io.UnsupportedEncodingException;

@@ -37,7 +37,8 @@ public final class KafkaTopicUtils {
			if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
					|| (b == '-') || (b == '_'))) {
				throw new IllegalArgumentException(
-						"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
+						"Topic name can only have ASCII alphanumerics, '.', '_' and '-', but was: '" + topicName
+								+ "'");
			}
		}
	}

spring-cloud-stream-binder-kafka-docs/.jdk8 (new, empty file)
@@ -5,7 +5,7 @@
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
-		<version>1.1.0.M1</version>
+		<version>2.0.0.M4</version>
	</parent>

	<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
@@ -18,7 +18,7 @@
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-kafka</artifactId>
-			<version>1.1.0.M1</version>
+			<version>${project.version}</version>
		</dependency>
	</dependencies>
	<profiles>

spring-cloud-stream-binder-kafka-docs/src/main/asciidoc/dlq.adoc (new file, 109 lines)
@@ -0,0 +1,109 @@
[[kafka-dlq-processing]]
== Dead-Letter Topic Processing

Because it cannot be anticipated how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
However, if the problem is a permanent issue, that could cause an infinite loop.
The following `spring-boot` application is an example of how to route those messages back to the original topic, moving them to a third "parking lot" topic after three attempts.
The application is simply another spring-cloud-stream application that reads from the dead-letter topic.
It terminates when no messages are received for 5 seconds.

The examples assume the original destination is `so8400out` and the consumer group is `so8400`.

There are several considerations.

- Consider only running the rerouting when the main application is not running.
Otherwise, the retries for transient errors will be used up very quickly.
- Alternatively, use a two-stage approach: use this application to route to a third topic, and another to route from there back to the main topic.
- Since this technique uses a message header to keep track of retries, it won't work with `headerMode=raw`.
In that case, consider adding some data to the payload (that can be ignored by the main application).
- `x-retries` has to be added to the `headers` property (`spring.cloud.stream.kafka.binder.headers=x-retries`) on both this application and the main application, so that the header is transported between the applications.
- Since Kafka is publish/subscribe, replayed messages will be sent to each consumer group, even those that successfully processed a message the first time around.

.application.properties
[source]
----
spring.cloud.stream.bindings.input.group=so8400replay
spring.cloud.stream.bindings.input.destination=error.so8400out.so8400

spring.cloud.stream.bindings.output.destination=so8400out
spring.cloud.stream.bindings.output.producer.partitioned=true

spring.cloud.stream.bindings.parkingLot.destination=so8400in.parkingLot
spring.cloud.stream.bindings.parkingLot.producer.partitioned=true

spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=earliest

spring.cloud.stream.kafka.binder.headers=x-retries
----

.Application
[source, java]
----
@SpringBootApplication
@EnableBinding(TwoOutputProcessor.class)
public class ReRouteDlqKApplication implements CommandLineRunner {

    private static final String X_RETRIES_HEADER = "x-retries";

    public static void main(String[] args) {
        SpringApplication.run(ReRouteDlqKApplication.class, args).close();
    }

    private final AtomicInteger processed = new AtomicInteger();

    @Autowired
    private MessageChannel parkingLot;

    @StreamListener(Processor.INPUT)
    @SendTo(Processor.OUTPUT)
    public Message<?> reRoute(Message<?> failed) {
        processed.incrementAndGet();
        Integer retries = failed.getHeaders().get(X_RETRIES_HEADER, Integer.class);
        if (retries == null) {
            System.out.println("First retry for " + failed);
            return MessageBuilder.fromMessage(failed)
                    .setHeader(X_RETRIES_HEADER, 1)
                    .setHeader(BinderHeaders.PARTITION_OVERRIDE,
                            failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
                    .build();
        }
        else if (retries < 3) {
            System.out.println("Another retry for " + failed);
            return MessageBuilder.fromMessage(failed)
                    .setHeader(X_RETRIES_HEADER, retries + 1)
                    .setHeader(BinderHeaders.PARTITION_OVERRIDE,
                            failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
                    .build();
        }
        else {
            System.out.println("Retries exhausted for " + failed);
            // move the message to the parking-lot topic
            parkingLot.send(MessageBuilder.fromMessage(failed)
                    .setHeader(BinderHeaders.PARTITION_OVERRIDE,
                            failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
                    .build());
        }
        return null;
    }

    @Override
    public void run(String... args) throws Exception {
        while (true) {
            int count = this.processed.get();
            Thread.sleep(5000);
            if (count == this.processed.get()) {
                System.out.println("Idle, terminating");
                return;
            }
        }
    }

    public interface TwoOutputProcessor extends Processor {

        @Output("parkingLot")
        MessageChannel parkingLot();

    }

}
----
@@ -1,6 +1,6 @@
[[spring-cloud-stream-binder-kafka-reference]]
= Spring Cloud Stream Kafka Binder Reference Guide
-Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein
+Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell
:doctype: book
:toc:
:toclevels: 4
@@ -24,10 +24,14 @@ Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinat
= Reference Guide
include::overview.adoc[]

include::dlq.adoc[]

include::partitions.adoc[]

= Appendices
[appendix]
include::building.adoc[]

include::contributing.adoc[]

// ======================================================================================
@@ -2,6 +2,7 @@
--
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
It contains information about its design, usage and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka-specific constructs.
In addition, this guide also explains the Kafka Streams binding capabilities of Spring Cloud Stream.
--

== Usage
@@ -41,7 +42,7 @@ Partitioning also maps directly to Apache Kafka partitions as well.

This section contains the configuration options used by the Apache Kafka binder.

-For common configuration options and properties pertaining to binder, refer to the https://github.com/spring-cloud/spring-cloud-stream/blob/master/spring-cloud-stream-docs/src/main/asciidoc/spring-cloud-stream-overview.adoc#configuration-options[core docs].
+For common configuration options and properties pertaining to the binder, refer to the <<binding-properties,core documentation>>.

=== Kafka Binder Properties

@@ -72,6 +73,11 @@ spring.cloud.stream.kafka.binder.headers::
The list of custom headers that will be transported by the binder.
+
Default: empty.
spring.cloud.stream.kafka.binder.healthTimeout::
The time to wait to get partition information, in seconds.
Health will report as down if this timer expires.
+
Default: 10.
spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
The frequency, in milliseconds, with which offsets are saved.
Ignored if `0`.
@@ -114,15 +120,36 @@ spring.cloud.stream.kafka.binder.socketBufferSize::
Size (in bytes) of the socket buffer to be used by the Kafka consumers.
+
Default: `2097152`.
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
Enable transactions in the binder; see `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
+
Default: `null` (no transactions).
spring.cloud.stream.kafka.binder.transaction.producer.*::
Global producer properties for producers in a transactional binder.
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
+
Default: See individual producer properties.

[[kafka-consumer-properties]]
=== Kafka Consumer Properties

The following properties are available for Kafka consumers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.

autoRebalanceEnabled::
When `true`, topic partitions will be automatically rebalanced between the members of a consumer group.
When `false`, each consumer will be assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
The property `spring.cloud.stream.instanceCount` must typically be greater than 1 in this case.
+
Default: `true`.
autoCommitOffset::
Whether to autocommit offsets when a message has been processed.
-If set to `false`, an `Acknowledgment` header will be available in the message headers for late acknowledgment.
+If set to `false`, a header with the key `kafka_acknowledgment` of type `org.springframework.kafka.support.Acknowledgment` will be present in the inbound message.
Applications may use this header for acknowledging messages.
See the examples section for details.
When this property is set to `false`, the Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL`.
+
Default: `true`.
autoCommitOnError::
@@ -136,26 +163,52 @@ recoveryInterval::
The interval between connection recovery attempts, in milliseconds.
+
Default: `5000`.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by `startOffset`.
+
Default: `false`.
startOffset::
-The starting offset for new groups, or when `resetOffsets` is `true`.
+The starting offset for new groups.
Allowed values: `earliest`, `latest`.
If the consumer group is set explicitly for the consumer 'binding' (via `spring.cloud.stream.bindings.<channelName>.group`), then 'startOffset' is set to `earliest`; otherwise it is set to `latest` for the `anonymous` consumer group.
+
Default: null (equivalent to `earliest`).
enableDlq::
When set to `true`, it enables DLQ behavior for the consumer.
-Messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
+By default, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
The DLQ topic name can be configured via the property `dlqName`.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
See <<kafka-dlq-processing>> for more information.
Starting with _version 2.0_, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message` and `x-exception-stacktrace` as `byte[]`.
+
Default: `false`.
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
+
Default: Empty map.
dlqName::
The name of the DLQ topic to receive the error messages.
+
Default: null (if not specified, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`).
dlqProducerProperties::
Using this, DLQ-specific producer properties can be set.
All the properties available through Kafka producer properties can be set through this property.
+
Default: Default Kafka producer properties.
standardHeaders::
Indicates which standard headers are populated by the inbound channel adapter.
`none`, `id`, `timestamp` or `both`.
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
+
Default: `none`.
converterBeanName::
The name of a bean that implements `RecordMessageConverter`; used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
+
Default: `null`.
idleEventInterval::
The interval, in milliseconds, between events indicating that no messages have recently been received.
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
See <<pause-resume>> for a usage example.
+
Default: `30000`.

[[kafka-producer-properties]]
=== Kafka Producer Properties

The following properties are available for Kafka producers only and
@@ -174,6 +227,19 @@ batchTimeout::
(Normally the producer does not wait at all, and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
messageKeyExpression::
A SpEL expression evaluated against the outgoing message, used to populate the key of the produced Kafka message.
For example `headers.key` or `payload.myKey`.
+
Default: `none`.
headerPatterns::
A comma-delimited list of simple patterns to match spring-messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
Patterns can begin or end with the wildcard character (asterisk).
Patterns can be negated by prefixing with `!`; matching stops after the first match (positive or negative).
For example `!foo,fo*` will pass `fox` but not `foo`.
`id` and `timestamp` are never mapped.
+
Default: `*` (all headers, except the `id` and `timestamp`)
configuration::
Map with a key/value pair containing generic Kafka producer properties.
+
@@ -192,6 +258,34 @@ If a topic already exists with a larger number of partitions than the maximum of

In this section, we illustrate the use of the above properties for specific scenarios.

==== Example: Setting `autoCommitOffset` to `false` and relying on manual acking

This example illustrates how one may manually acknowledge offsets in a consumer application.

This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` is set to `false`.
Use the corresponding input channel name for your example.

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class ManuallyAcknowledgingConsumer {

    public static void main(String[] args) {
        SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void process(Message<?> message) {
        Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        if (acknowledgment != null) {
            System.out.println("Acknowledgment provided");
            acknowledgment.acknowledge();
        }
    }
}
----

==== Example: security configuration

Apache Kafka 0.9 supports secure connections between client and brokers.
@@ -208,22 +302,454 @@ spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
All the other security properties can be set in a similar manner.

When using Kerberos, follow the instructions in the http://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
-At the time of this release, the JAAS, and (optionally) krb5 file locations must be set for Spring Cloud Stream applications by using system properties.
-Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos.
+Spring Cloud Stream supports passing JAAS configuration information to the application using a JAAS configuration file and using Spring Boot properties.

===== Using JAAS configuration files

The JAAS, and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos using a JAAS configuration file:

[source]
----
-java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \\
-   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \\
-   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \\
-   --spring.cloud.stream.bindings.input.destination=stream.ticktock \\
-   --spring.cloud.stream.kafka.binder.clientConfiguration.security.protocol=SASL_PLAINTEXT
+java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
+   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
+   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
+   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
+   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----

===== Using Spring Boot properties

As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications using Spring Boot properties.

The following properties can be used for configuring the login context of the Kafka client.

spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. Not necessary to be set in normal cases.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
Map with a key/value pair containing the login module options.
+
Default: Empty map.

Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos using Spring Boot configuration properties:

[source]
----
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.autoCreateTopics=false \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
   --spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
   --spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
   --spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
   --spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----

This represents the equivalent of the following JAAS file:

[source]
----
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/etc/security/keytabs/kafka_client.keytab"
    principal="kafka-client-1@EXAMPLE.COM";
};
----

If the topics required already exist on the broker, or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent. As an alternative to setting `spring.cloud.stream.kafka.binder.autoCreateTopics` you can simply remove the broker dependency from the application. See <<exclude-admin-utils>> for details.

[NOTE]
====
Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream will ignore the Spring Boot properties.
====

[NOTE]
====
Exercise caution when using `autoCreateTopics` and `autoAddPartitions` with Kerberos.
Usually applications may use principals that do not have administrative rights in Kafka and Zookeeper, and relying on Spring Cloud Stream to create/modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively using Kafka tooling.
====

[[pause-resume]]
==== Example: Pausing and Resuming the Consumer

If you wish to suspend consumption, but not cause a partition rebalance, you can pause and resume the consumer.
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` s; the frequency at which events are published is controlled by the `idleEventInterval` property.
Since the consumer is not thread-safe, you must call these methods on the calling thread.

The following simple application shows how to pause and resume.

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
        System.out.println(in);
        consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
    }

    @Bean
    public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
        return event -> {
            System.out.println(event);
            if (!event.getConsumer().paused().isEmpty()) {
                event.getConsumer().resume(event.getConsumer().paused());
            }
        };
    }

}
----

==== Using the binder with Apache Kafka 0.10

The default Kafka support in Spring Cloud Stream Kafka binder is for Kafka version 0.10.1.1. The binder also supports connecting to other 0.10 based versions and 0.9 clients.
In order to do this, when you create the project that contains your application, include `spring-cloud-starter-stream-kafka` as you normally would do for the default binder.
Then add these dependencies at the top of the `<dependencies>` section in the pom.xml file to override the dependencies.

Here is an example for downgrading your application to 0.10.0.1. Since it is still on the 0.10 line, the default `spring-kafka` and `spring-integration-kafka` versions can be retained.

[source,xml]
----
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.10.0.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.0.1</version>
</dependency>
----

Here is another example, using the 0.9.0.1 version.

[source,xml]
----
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.0.5.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.springframework.integration</groupId>
    <artifactId>spring-integration-kafka</artifactId>
    <version>2.0.1.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.9.0.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.9.0.1</version>
</dependency>
----

[NOTE]
====
The versions above are provided only for the sake of the example.
For best results, we recommend using the most recent 0.10-compatible versions of the projects.
====

[[exclude-admin-utils]]
==== Excluding Kafka broker jar from the classpath of the binder-based application

The Apache Kafka Binder uses the administrative utilities which are part of the Apache Kafka server library to create and reconfigure topics.
If the inclusion of the Apache Kafka server library and its dependencies is not necessary at runtime because the application will rely on the topics being configured administratively, the Kafka binder allows for the Apache Kafka server dependency to be excluded from the application.

If you use non-default versions for the Kafka dependencies as advised above, all you have to do is omit the Kafka broker dependency.
If you use the default Kafka version, then ensure that you exclude the Kafka broker jar from the `spring-cloud-starter-stream-kafka` dependency as follows.

[source,xml]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-stream-kafka</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
        </exclusion>
    </exclusions>
</dependency>
----

If you exclude the Apache Kafka server dependency and the topic is not present on the server, then the Apache Kafka broker will create the topic if auto topic creation is enabled on the server.
Please keep in mind that if you are relying on this, then the Kafka server will use the default number of partitions and the default replication factor.
On the other hand, if auto topic creation is disabled on the server, then care must be taken before running the application to create the topic with the desired number of partitions.

If you want to have full control over how partitions are allocated, then leave the default settings as they are, i.e. do not exclude the Kafka broker jar and ensure that `spring.cloud.stream.kafka.binder.autoCreateTopics` is set to `true`, which is the default.

== Kafka Streams Binding Capabilities of Spring Cloud Stream

Spring Cloud Stream Kafka support also includes a binder specifically designed for Kafka Streams binding.
Using this binder, applications can be written that leverage the Kafka Streams API.
For more information on Kafka Streams, see the https://kafka.apache.org/documentation/streams/developer-guide[Kafka Streams API Developer Manual].

Kafka Streams support in Spring Cloud Stream is based on the foundations provided by the Spring Kafka project.
For details on that support, see http://docs.spring.io/spring-kafka/reference/html/_reference.html#kafka-streams[Kafka Streams Support in Spring Kafka].

Here are the Maven coordinates for the Spring Cloud Stream KStream binder artifact.

[source,xml]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kstream</artifactId>
</dependency>
----

The high-level streams DSL provided by the Kafka Streams API can be used through the Spring Cloud Stream support.
Kafka Streams applications using the Spring Cloud Stream support can only be written using the processor model, i.e. messages read from an inbound topic and messages written to an outbound topic.

=== Usage example of high level streams DSL

This application will listen to a Kafka topic and write the word count for each unique word that it sees in a 5-second time window.

[source, java]
----
@SpringBootApplication
@EnableBinding(KStreamProcessor.class)
public class WordCountProcessorApplication {

    @StreamListener("input")
    @SendTo("output")
    public KStream<?, String> process(KStream<?, String> input) {
        return input
                .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                .groupBy((key, value) -> value)
                .windowedBy(TimeWindows.of(5000))
                .count(Materialized.as("WordCounts-multi"))
                .toStream()
                .map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
    }

    public static void main(String[] args) {
        SpringApplication.run(WordCountProcessorApplication.class, args);
    }
}
----

If you build it as a Spring Boot uber jar, you can run the above example in the following way:

[source]
----
java -jar uber.jar --spring.cloud.stream.bindings.input.destination=words --spring.cloud.stream.bindings.output.destination=counts
----

This means that the application will listen to the incoming Kafka topic `words` and write to the output topic `counts`.

Spring Cloud Stream will ensure that the messages from both the incoming and outgoing topics are bound as KStream objects.
Applications can exclusively focus on the business aspects of the code, i.e. writing the logic required in the processor rather than setting up the streams-specific configuration required by the Kafka Streams infrastructure.
All such interactions are handled by the framework.

=== Message conversion in Spring Cloud Stream Kafka Streams applications

If the following property is set (default is false), the framework skips all message conversions on the outbound (producer) side and serialization is then done by Kafka itself.

`spring.cloud.stream.bindings.output.producer.useNativeEncoding`.

Similarly, if the following property is set (default is false), any message conversion is skipped on the inbound (consumer) side and deserialization is done natively by Kafka.

`spring.cloud.stream.bindings.input.consumer.useNativeDecoding`.

When native encoding is disabled, the messages on the outbound are converted by Spring Cloud Stream using the provided `contentType` header.
If no contentType is set by the application, it defaults to `application/json`.

By default, all the out-of-the-box message converters serialize the data as `byte[]`, encoding the proper contentType.
In most situations, this is what you want to do, but if formats other than `byte[]` are desired, then an appropriate message converter needs to be registered in the context and the corresponding contentType specified as a property.
When doing so, the Serde should be overridden on the producer using the following property.

`spring.cloud.stream.kstream.bindings.output.producer.valueSerde`.

Keys are not converted, but if the key Serde differs from the common Serde, you can override it using the following property.

`spring.cloud.stream.kstream.bindings.output.producer.keySerde`.

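Putting these pieces together, here is a minimal sketch of the producer-side settings discussed above (the `String` Serdes are illustrative choices; use the Serdes that match your actual key and value types):

[source]
----
# skip framework conversion on the outbound; Kafka performs the serialization
spring.cloud.stream.bindings.output.producer.useNativeEncoding=true

# override the Serdes used for the outbound keys and values
spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$StringSerde
----
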
=== Support for branching in Kafka Streams API

Kafka Streams allows outbound data to be split into multiple topics based on some predicates.
The Spring Cloud Stream Kafka Streams binder provides support for this feature without losing the overall programming model exposed through `StreamListener` in the end-user application.
You write the application in the usual way, as demonstrated above in the word count example.
When using the branching feature, you are required to do a few things.
First, you need to make sure that your return type is `KStream[]` instead of a regular `KStream`.
Then you need to use the `@SendTo` annotation containing the output bindings, in order (see the example below).
For each of these output bindings, you need to configure the destination, content-type, and so on, as required by any other standard Spring Cloud Stream application.

Here is an example:

[source, java]
----
@EnableBinding(KStreamProcessorWithBranches.class)
@EnableAutoConfiguration
public static class WordCountProcessorApplication {

    @Autowired
    private TimeWindows timeWindows;

    @StreamListener("input")
    @SendTo({"output1", "output2", "output3"})
    public KStream<?, WordCount>[] process(KStream<Object, String> input) {

        Predicate<Object, WordCount> isEnglish = (k, v) -> v.word.equals("english");
        Predicate<Object, WordCount> isFrench = (k, v) -> v.word.equals("french");
        Predicate<Object, WordCount> isSpanish = (k, v) -> v.word.equals("spanish");

        return input
                .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                .groupBy((key, value) -> value)
                .windowedBy(timeWindows)
                .count(Materialized.as("WordCounts-1"))
                .toStream()
                .map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))))
                .branch(isEnglish, isFrench, isSpanish);
    }

    interface KStreamProcessorWithBranches {

        @Input("input")
        KStream<?, ?> input();

        @Output("output1")
        KStream<?, ?> output1();

        @Output("output2")
        KStream<?, ?> output2();

        @Output("output3")
        KStream<?, ?> output3();
    }
}
----

Then in the properties:

[source]
----
spring.cloud.stream.bindings.output1.contentType: application/json
spring.cloud.stream.bindings.output2.contentType: application/json
spring.cloud.stream.bindings.output3.contentType: application/json
spring.cloud.stream.kstream.binder.configuration.commit.interval.ms: 1000
spring.cloud.stream.kstream.binder.configuration:
  key.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
  value.serde: org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.bindings.output1:
  destination: foo
  producer:
    headerMode: raw
spring.cloud.stream.bindings.output2:
  destination: bar
  producer:
    headerMode: raw
spring.cloud.stream.bindings.output3:
  destination: fox
  producer:
    headerMode: raw
spring.cloud.stream.bindings.input:
  destination: words
  consumer:
    headerMode: raw
----

=== Support for interactive queries

If access to the `KafkaStreams` instance is needed for interactive queries, it can be accessed via `KStreamBuilderFactoryBean.getKafkaStreams()`.
You can autowire the `KStreamBuilderFactoryBean` instance provided by the KStream binder.
Then you can get the `KafkaStreams` instance from it, retrieve the underlying store, execute queries on it, and so on.

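For example, a minimal sketch, assuming the application has materialized a key-value state store under the hypothetical name `counts`:

[source, java]
----
@Autowired
private KStreamBuilderFactoryBean kStreamBuilderFactoryBean;

public Long countFor(String word) {
    // the underlying KafkaStreams instance managed by the binder
    KafkaStreams kafkaStreams = kStreamBuilderFactoryBean.getKafkaStreams();
    // retrieve the store by name and query it
    ReadOnlyKeyValueStore<String, Long> store =
            kafkaStreams.store("counts", QueryableStoreTypes.keyValueStore());
    return store.get(word);
}
----
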
=== Kafka Streams properties

configuration::
Map with a key/value pair containing properties pertaining to the Kafka Streams API.
This property must be prefixed with `spring.cloud.stream.kstream.binder.`.

Following are some examples of using this property.

[source]
----
spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000
----

For more information about all the properties that may go into the streams configuration, see the `StreamsConfig` JavaDocs.

There can also be binding-specific properties.

For instance, you can use a different Serde for your input or output destination.

[source]
----
spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde
spring.cloud.stream.kstream.bindings.output.producer.valueSerde=org.apache.kafka.common.serialization.Serdes$LongSerde
----

TimeWindow properties:

spring.cloud.stream.kstream.timeWindow.length::
The window length, in milliseconds.
When this property is given, you can autowire a `TimeWindows` bean into the application.
spring.cloud.stream.kstream.timeWindow.advanceBy::
The interval, in milliseconds, by which the window advances.

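For example, the following settings (the values are illustrative) configure a 5-second window that advances every second; the `TimeWindows` bean autowired in the branching example above will reflect them:

[source]
----
spring.cloud.stream.kstream.timeWindow.length=5000
spring.cloud.stream.kstream.timeWindow.advanceBy=1000
----
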
[[kafka-error-channels]]
== Error Channels

Starting with _version 1.3_, the binder unconditionally sends exceptions to an error channel for each consumer destination, and can also be configured to send async producer send failures to an error channel.
See <<binder-error-channels>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:

* `failedMessage` - the spring-messaging `Message<?>` that failed to be sent.
* `record` - the raw `ProducerRecord` that was created from the `failedMessage`.

There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, dead-letter queue>>); you can consume these exceptions with your own Spring Integration flow, as the sketch below shows.

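For example, a minimal sketch of such a flow, assuming an output binding whose destination is `myTopic` and that the producer error channel follows the `<destination>.errors` naming convention described in <<binder-error-channels>>:

[source, java]
----
@ServiceActivator(inputChannel = "myTopic.errors")
public void handleSendFailure(ErrorMessage errorMessage) {
    KafkaSendFailureException failure = (KafkaSendFailureException) errorMessage.getPayload();
    // the original spring-messaging message and the raw Kafka producer record
    System.out.println("Failed message: " + failure.getFailedMessage());
    System.out.println("Failed record: " + failure.getRecord());
}
----
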
[[kafka-metrics]]
== Kafka Metrics

The Kafka binder module exposes the following metric:

`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag` - this metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, then the consumer group `myGroup` has `1000` messages waiting to be consumed from the topic `myTopic`.
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform of your choice.

||||
@@ -0,0 +1,100 @@
|
||||
== Partitioning with the Kafka Binder
|
||||
|
||||
Apache Kafka supports topic partitioning natively.
|
||||
|
||||
Sometimes it is advantageous to send data to specific partitions, for example when you want to strictly order message processing - all messages for a particular customer should go to the same partition.
|
||||
|
||||
The following illustrates how to configure the producer and consumer side:
|
||||
|
||||
[source, java]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Source.class)
|
||||
public class KafkaPartitionProducerApplication {
|
||||
|
||||
private static final Random RANDOM = new Random(System.currentTimeMillis());
|
||||
|
||||
private static final String[] data = new String[] {
|
||||
"foo1", "bar1", "qux1",
|
||||
"foo2", "bar2", "qux2",
|
||||
"foo3", "bar3", "qux3",
|
||||
"foo4", "bar4", "qux4",
|
||||
};
|
||||
|
||||
public static void main(String[] args) {
|
||||
new SpringApplicationBuilder(KafkaPartitionProducerApplication.class)
|
||||
.web(false)
|
||||
.run(args);
|
||||
}
|
||||
|
||||
@InboundChannelAdapter(channel = Source.OUTPUT, poller = @Poller(fixedRate = "5000"))
|
||||
public Message<?> generate() {
|
||||
String value = data[RANDOM.nextInt(data.length)];
|
||||
System.out.println("Sending: " + value);
|
||||
return MessageBuilder.withPayload(value)
|
||||
.setHeader("partitionKey", value)
|
||||
.build();
|
||||
}
|
||||
|
||||
}
|
||||
----
|
||||
|
||||
.application.yml
|
||||
[source, yaml]
|
||||
----
|
||||
spring:
|
||||
cloud:
|
||||
stream:
|
||||
bindings:
|
||||
output:
|
||||
destination: partitioned.topic
|
||||
producer:
|
||||
partitioned: true
|
||||
partition-key-expression: headers['partitionKey']
|
||||
partition-count: 12
|
||||
----

IMPORTANT: The topic must be provisioned to have enough partitions to achieve the desired concurrency for all consumer groups.
The above configuration supports up to 12 consumer instances (or 6 if their `concurrency` is 2, and so on).
It is generally best to "over-provision" the partitions to allow for future increases in consumers and/or concurrency.

NOTE: The above configuration uses the default partitioning (`key.hashCode() % partitionCount`).
This may or may not provide a suitably balanced algorithm, depending on the key values; you can override this default by using the `partitionSelectorExpression` or `partitionSelectorClass` properties.
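
For instance, a custom `PartitionSelectorStrategy` (wired in through `partitionSelectorClass`) receives the computed partition key and the partition count; the following is a hypothetical sketch, not part of the binder:

[source, java]
----
public class KeyLengthPartitionSelector implements PartitionSelectorStrategy {

    @Override
    public int selectPartition(Object key, int partitionCount) {
        // spread String keys by length instead of hashCode(); fall back otherwise
        int raw = (key instanceof String) ? ((String) key).length() : key.hashCode();
        return Math.abs(raw) % partitionCount;
    }

}
----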

Since partitions are natively handled by Kafka, no special configuration is needed on the consumer side.
Kafka allocates partitions across the instances.

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class KafkaPartitionConsumerApplication {

    public static void main(String[] args) {
        new SpringApplicationBuilder(KafkaPartitionConsumerApplication.class)
            .web(false)
            .run(args);
    }

    @StreamListener(Sink.INPUT)
    public void listen(@Payload String in, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
        System.out.println(in + " received from partition " + partition);
    }

}
----

.application.yml
[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        input:
          destination: partitioned.topic
          group: myGroup
----

You can add instances as needed; Kafka will rebalance the partition allocations.
If the instance count (or `instance count * concurrency`) exceeds the number of partitions, some consumers will be idle.

@@ -0,0 +1,3 @@
include::overview.adoc[leveloffset=+1]
include::dlq.adoc[leveloffset=+1]
include::partitions.adoc[leveloffset=+1]
@@ -1,24 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
        <version>1.1.0.M1</version>
    </parent>
    <artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
    <description>Kafka related test classes</description>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <scope>compile</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka-test</artifactId>
            <version>${spring-kafka.version}</version>
        </dependency>
    </dependencies>
</project>
0
spring-cloud-stream-binder-kafka/.jdk8
Normal file
@@ -0,0 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99
@@ -10,10 +10,14 @@
    <parent>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
        <version>1.1.0.M1</version>
        <version>2.0.0.M4</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-configuration-processor</artifactId>
@@ -23,10 +27,6 @@
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream-codec</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-autoconfigure</artifactId>
@@ -38,77 +38,28 @@
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
            <scope>test</scope>
            <version>1.1.0.M1</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.integration</groupId>
            <artifactId>spring-integration-kafka</artifactId>
            <version>${spring-integration-kafka.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.avro</groupId>
                    <artifactId>avro-compiler</artifactId>
                </exclusion>
            </exclusions>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
            <version>${spring-kafka.version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka-test</artifactId>
            <scope>test</scope>
            <version>${spring-kafka.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <classifier>test</classifier>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-stream-binder-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka_2.11</artifactId>
                <version>${kafka.version}</version>
                <exclusions>
                    <exclusion>
                        <groupId>org.slf4j</groupId>
                        <artifactId>slf4j-log4j12</artifactId>
                    </exclusion>
                    <exclusion>
                        <groupId>log4j</groupId>
                        <artifactId>log4j</artifactId>
                    </exclusion>
                </exclusions>
            </dependency>
            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka_2.11</artifactId>
                <classifier>test</classifier>
                <version>${kafka.version}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka-clients</artifactId>
                <version>${kafka.version}</version>
            </dependency>
        </dependencies>
    </dependencyManagement>
</project>
@@ -0,0 +1,68 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.env.EnvironmentPostProcessor;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.MapPropertySource;

/**
 * An {@link EnvironmentPostProcessor} that sets some common configuration properties (log config, etc.) for the
 * Kafka binder.
 *
 * @author Ilayaperumal Gopinathan
 */
public class KafkaBinderEnvironmentPostProcessor implements EnvironmentPostProcessor {

    public static final String SPRING_KAFKA = "spring.kafka";

    public static final String SPRING_KAFKA_PRODUCER = SPRING_KAFKA + ".producer";

    public static final String SPRING_KAFKA_CONSUMER = SPRING_KAFKA + ".consumer";

    public static final String SPRING_KAFKA_PRODUCER_KEY_SERIALIZER = SPRING_KAFKA_PRODUCER + "." + "keySerializer";

    public static final String SPRING_KAFKA_PRODUCER_VALUE_SERIALIZER = SPRING_KAFKA_PRODUCER + "." + "valueSerializer";

    public static final String SPRING_KAFKA_CONSUMER_KEY_DESERIALIZER = SPRING_KAFKA_CONSUMER + "." + "keyDeserializer";

    public static final String SPRING_KAFKA_CONSUMER_VALUE_DESERIALIZER = SPRING_KAFKA_CONSUMER + "." + "valueDeserializer";

    private static final String KAFKA_BINDER_DEFAULT_PROPERTIES = "kafkaBinderDefaultProperties";

    @Override
    public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
        if (!environment.getPropertySources().contains(KAFKA_BINDER_DEFAULT_PROPERTIES)) {
            Map<String, Object> kafkaBinderDefaultProperties = new HashMap<>();
            kafkaBinderDefaultProperties.put("logging.level.org.I0Itec.zkclient", "ERROR");
            kafkaBinderDefaultProperties.put("logging.level.kafka.server.KafkaConfig", "ERROR");
            kafkaBinderDefaultProperties.put("logging.level.kafka.admin.AdminClient.AdminConfig", "ERROR");
            kafkaBinderDefaultProperties.put(SPRING_KAFKA_PRODUCER_KEY_SERIALIZER, ByteArraySerializer.class.getName());
            kafkaBinderDefaultProperties.put(SPRING_KAFKA_PRODUCER_VALUE_SERIALIZER, ByteArraySerializer.class.getName());
            kafkaBinderDefaultProperties.put(SPRING_KAFKA_CONSUMER_KEY_DESERIALIZER, ByteArrayDeserializer.class.getName());
            kafkaBinderDefaultProperties.put(SPRING_KAFKA_CONSUMER_VALUE_DESERIALIZER, ByteArrayDeserializer.class.getName());
            environment.getPropertySources().addLast(new MapPropertySource(KAFKA_BINDER_DEFAULT_PROPERTIES, kafkaBinderDefaultProperties));
        }
    }
}
@@ -1,5 +1,5 @@
/*
 * Copyright 2016 the original author or authors.
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,69 +16,115 @@

package org.springframework.cloud.stream.binder.kafka;

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.ConsumerFactory;

/**
 * Health indicator for Kafka.
 *
 * @author Ilayaperumal Gopinathan
 * @author Marius Bogoevici
 * @author Henryk Konsek
 * @author Gary Russell
 * @author Laur Aliste
 */
public class KafkaBinderHealthIndicator implements HealthIndicator {

    private static final int DEFAULT_TIMEOUT = 60;

    private final KafkaMessageChannelBinder binder;

    private final KafkaBinderConfigurationProperties configurationProperties;
    private final ConsumerFactory<?, ?> consumerFactory;

    public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
            KafkaBinderConfigurationProperties configurationProperties) {
    private int timeout = DEFAULT_TIMEOUT;

    private Consumer<?, ?> metadataConsumer;

    public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder, ConsumerFactory<?, ?> consumerFactory) {
        this.binder = binder;
        this.configurationProperties = configurationProperties;
        this.consumerFactory = consumerFactory;
    }

    /**
     * Set the timeout in seconds to retrieve health information.
     *
     * @param timeout the timeout - default 60.
     */
    public void setTimeout(int timeout) {
        this.timeout = timeout;
    }

    @Override
    public Health health() {
        Map<String, String> properties = new HashMap<>();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties
                .getKafkaConnectionString());
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        KafkaConsumer metadataConsumer = new KafkaConsumer(properties);
        try {
            Set<String> downMessages = new HashSet<>();
            for (String topic : this.binder.getTopicsInUse().keySet()) {
                List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
                for (PartitionInfo partitionInfo : partitionInfos) {
                    if (this.binder.getTopicsInUse().get(topic).contains(partitionInfo) && partitionInfo.leader()
                            .id() == -1) {
                        downMessages.add(partitionInfo.toString());
        ExecutorService exec = Executors.newSingleThreadExecutor();
        Future<Health> future = exec.submit(new Callable<Health>() {

            @Override
            public Health call() {
                try {
                    if (metadataConsumer == null) {
                        metadataConsumer = consumerFactory.createConsumer();
                    }
                    Set<String> downMessages = new HashSet<>();
                    for (String topic : KafkaBinderHealthIndicator.this.binder.getTopicsInUse().keySet()) {
                        List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
                        for (PartitionInfo partitionInfo : partitionInfos) {
                            if (KafkaBinderHealthIndicator.this.binder.getTopicsInUse().get(topic).getPartitionInfos()
                                    .contains(partitionInfo) && partitionInfo.leader().id() == -1) {
                                downMessages.add(partitionInfo.toString());
                            }
                        }
                    }
                    if (downMessages.isEmpty()) {
                        return Health.up().build();
                    }
                    else {
                        return Health.down()
                                .withDetail("Following partitions in use have no leaders: ", downMessages.toString())
                                .build();
                    }
                }
                catch (Exception e) {
                    return Health.down(e).build();
                }
            }
            if (downMessages.isEmpty()) {
                return Health.up().build();
            }
            return Health.down().withDetail("Following partitions in use have no leaders: ", downMessages.toString())

        });
        try {
            return future.get(this.timeout, TimeUnit.SECONDS);
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return Health.down()
                    .withDetail("Interrupted while waiting for partition information in", this.timeout + " seconds")
                    .build();
        }
        catch (Exception e) {
        catch (ExecutionException e) {
            return Health.down(e).build();
        }
        catch (TimeoutException e) {
            return Health.down()
                    .withDetail("Failed to retrieve partition information in", this.timeout + " seconds")
                    .build();
        }
        finally {
            metadataConsumer.close();
            exec.shutdownNow();
        }
    }

}
@@ -0,0 +1,124 @@
/*
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.MeterBinder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.util.ObjectUtils;

/**
 * Metrics for Kafka binder.
 *
 * @author Henryk Konsek
 */
public class KafkaBinderMetrics implements MeterBinder {

    private final static Log LOG = LogFactory.getLog(KafkaBinderMetrics.class);

    static final String METRIC_PREFIX = "spring.cloud.stream.binder.kafka";

    private final KafkaMessageChannelBinder binder;

    private final KafkaBinderConfigurationProperties binderConfigurationProperties;

    private ConsumerFactory<?, ?> defaultConsumerFactory;

    public KafkaBinderMetrics(KafkaMessageChannelBinder binder,
            KafkaBinderConfigurationProperties binderConfigurationProperties,
            ConsumerFactory<?, ?> defaultConsumerFactory) {
        this.binder = binder;
        this.binderConfigurationProperties = binderConfigurationProperties;
        this.defaultConsumerFactory = defaultConsumerFactory;
    }

    public KafkaBinderMetrics(KafkaMessageChannelBinder binder,
            KafkaBinderConfigurationProperties binderConfigurationProperties) {
        this(binder, binderConfigurationProperties, null);
    }

    @Override
    public void bindTo(MeterRegistry registry) {
        for (Map.Entry<String, KafkaMessageChannelBinder.TopicInformation> topicInfo : this.binder.getTopicsInUse()
                .entrySet()) {
            if (!topicInfo.getValue().isConsumerTopic()) {
                continue;
            }

            String topic = topicInfo.getKey();
            String group = topicInfo.getValue().getConsumerGroup();

            try (Consumer<?, ?> metadataConsumer = createConsumerFactory(group).createConsumer()) {
                List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
                List<TopicPartition> topicPartitions = new LinkedList<>();
                for (PartitionInfo partitionInfo : partitionInfos) {
                    topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
                }
                Map<TopicPartition, Long> endOffsets = metadataConsumer.endOffsets(topicPartitions);
                long lag = 0;
                for (Map.Entry<TopicPartition, Long> endOffset : endOffsets.entrySet()) {
                    OffsetAndMetadata current = metadataConsumer.committed(endOffset.getKey());
                    if (current != null) {
                        lag += endOffset.getValue() - current.offset();
                    }
                    else {
                        lag += endOffset.getValue();
                    }
                }
                registry.gauge(String.format("%s.%s.%s.lag", METRIC_PREFIX, group, topic), lag);
            }
            catch (Exception e) {
                LOG.debug("Cannot generate metric for topic: " + topic, e);
            }
        }
    }

    private ConsumerFactory<?, ?> createConsumerFactory(String group) {
        if (defaultConsumerFactory != null) {
            return defaultConsumerFactory;
        }
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConsumerConfiguration())) {
            props.putAll(binderConfigurationProperties.getConsumerConfiguration());
        }
        if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                    this.binderConfigurationProperties.getKafkaConnectionString());
        }
        props.put("group.id", group);
        return new DefaultKafkaConsumerFactory<>(props);
    }

}
@@ -1,61 +0,0 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.HashMap;
import java.util.Map;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;

/**
 * @author Marius Bogoevici
 */
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties
        implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {

    private Map<String, KafkaBindingProperties> bindings = new HashMap<>();

    public Map<String, KafkaBindingProperties> getBindings() {
        return this.bindings;
    }

    public void setBindings(Map<String, KafkaBindingProperties> bindings) {
        this.bindings = bindings;
    }

    @Override
    public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
        if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getConsumer() != null) {
            return this.bindings.get(channelName).getConsumer();
        }
        else {
            return new KafkaConsumerProperties();
        }
    }

    @Override
    public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
        if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getProducer() != null) {
            return this.bindings.get(channelName).getProducer();
        }
        else {
            return new KafkaProducerProperties();
        }
    }
}
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
/*
 * Copyright 2015-2016 the original author or authors.
 * Copyright 2015-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,21 +16,39 @@

package org.springframework.cloud.stream.binder.kafka.config;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import io.micrometer.core.instrument.binder.MeterBinder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.integration.codec.Codec;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.util.ObjectUtils;

/**
 * @author David Turanski
@@ -38,15 +56,16 @@ import org.springframework.kafka.support.ProducerListener;
 * @author Soby Chacko
 * @author Mark Fisher
 * @author Ilayaperumal Gopinathan
 * @author Henryk Konsek
 * @author Gary Russell
 */
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class})
@EnableConfigurationProperties({KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class})
@Import({ PropertyPlaceholderAutoConfiguration.class})
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {

    @Autowired
    private Codec codec;
    protected static final Log logger = LogFactory.getLog(KafkaBinderConfiguration.class);

    @Autowired
    private KafkaBinderConfigurationProperties configurationProperties;
@@ -57,12 +76,22 @@ public class KafkaBinderConfiguration {
    @Autowired
    private ProducerListener producerListener;

    @Autowired
    private ApplicationContext context;

    @Autowired
    private KafkaProperties kafkaProperties;

    @Bean
    KafkaTopicProvisioner provisioningProvider() {
        return new KafkaTopicProvisioner(this.configurationProperties, this.kafkaProperties);
    }

    @Bean
    KafkaMessageChannelBinder kafkaMessageChannelBinder() {
        KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
                this.configurationProperties);
        kafkaMessageChannelBinder.setCodec(this.codec);
        //kafkaMessageChannelBinder.setProducerListener(producerListener);
                this.configurationProperties, provisioningProvider());
        kafkaMessageChannelBinder.setProducerListener(producerListener);
        kafkaMessageChannelBinder.setExtendedBindingProperties(this.kafkaExtendedBindingProperties);
        return kafkaMessageChannelBinder;
    }
@@ -75,6 +104,36 @@ public class KafkaBinderConfiguration {

    @Bean
    KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
        return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, this.configurationProperties);
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        if (!ObjectUtils.isEmpty(configurationProperties.getConsumerConfiguration())) {
            props.putAll(configurationProperties.getConsumerConfiguration());
        }
        if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
        }
        ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
        KafkaBinderHealthIndicator indicator = new KafkaBinderHealthIndicator(kafkaMessageChannelBinder,
                consumerFactory);
        indicator.setTimeout(this.configurationProperties.getHealthTimeout());
        return indicator;
    }

    @Bean
    public MeterBinder kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
        return new KafkaBinderMetrics(kafkaMessageChannelBinder, configurationProperties);
    }

    @Bean
    public KafkaJaasLoginModuleInitializer jaasInitializer() throws IOException {
        return new KafkaJaasLoginModuleInitializer();
    }

    public static class JaasConfigurationProperties {

        private JaasLoginModuleConfiguration kafka;

        private JaasLoginModuleConfiguration zookeeper;
    }
}
@@ -0,0 +1,2 @@
org.springframework.boot.env.EnvironmentPostProcessor=\
org.springframework.cloud.stream.binder.kafka.KafkaBinderEnvironmentPostProcessor
@@ -0,0 +1,49 @@
/*
 * Copyright 2014-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import org.springframework.cloud.stream.binder.AbstractPollableConsumerTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.context.ApplicationContext;

/**
 * @author Soby Chacko
 * @author Gary Russell
 */
public abstract class AbstractKafkaTestBinder extends
        AbstractPollableConsumerTestBinder<KafkaMessageChannelBinder,
        ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {

    private ApplicationContext applicationContext;

    @Override
    public void cleanup() {
        // do nothing - the rule will take care of that
    }

    protected final void setApplicationContext(ApplicationContext context) {
        this.applicationContext = context;
    }

    public ApplicationContext getApplicationContext() {
        return this.applicationContext;
    }

}
@@ -0,0 +1,136 @@
/*
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * @author Ilayaperumal Gopinathan
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(classes = { KafkaBinderAutoConfigurationPropertiesTest.KafkaBinderConfigProperties.class,
        KafkaBinderConfiguration.class })
@TestPropertySource(locations = "classpath:binder-config-autoconfig.properties")
public class KafkaBinderAutoConfigurationPropertiesTest {

    @Autowired
    private KafkaMessageChannelBinder kafkaMessageChannelBinder;

    @Autowired
    private KafkaBinderHealthIndicator kafkaBinderHealthIndicator;

    @Test
    public void testKafkaBinderConfigurationWithKafkaProperties() throws Exception {
        assertNotNull(this.kafkaMessageChannelBinder);
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(
                new KafkaProducerProperties());
        Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory",
                String.class, ExtendedProducerProperties.class);
        getProducerFactoryMethod.setAccessible(true);
        DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod
                .invoke(this.kafkaMessageChannelBinder, "foo", producerProperties);
        Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs",
                Map.class);
        ReflectionUtils.makeAccessible(producerFactoryConfigField);
        Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField,
                producerFactory);
        assertTrue(producerConfigs.get("batch.size").equals(10));
        assertTrue(producerConfigs.get("key.serializer").equals(LongSerializer.class));
        assertTrue(producerConfigs.get("key.deserializer") == null);
        assertTrue(producerConfigs.get("value.serializer").equals(LongSerializer.class));
        assertTrue(producerConfigs.get("value.deserializer") == null);
        assertTrue(producerConfigs.get("compression.type").equals("snappy"));
        List<String> bootstrapServers = new ArrayList<>();
        bootstrapServers.add("10.98.09.199:9092");
        bootstrapServers.add("10.98.09.196:9092");
        assertTrue((((List<String>) producerConfigs.get("bootstrap.servers")).containsAll(bootstrapServers)));
        Method createKafkaConsumerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod(
                "createKafkaConsumerFactory", boolean.class, String.class, ExtendedConsumerProperties.class);
        createKafkaConsumerFactoryMethod.setAccessible(true);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = new ExtendedConsumerProperties<>(
                new KafkaConsumerProperties());
        DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) createKafkaConsumerFactoryMethod
                .invoke(this.kafkaMessageChannelBinder, true, "test", consumerProperties);
        Field consumerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaConsumerFactory.class, "configs",
                Map.class);
        ReflectionUtils.makeAccessible(consumerFactoryConfigField);
        Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField,
                consumerFactory);
        assertTrue(consumerConfigs.get("key.deserializer").equals(LongDeserializer.class));
        assertTrue(consumerConfigs.get("key.serializer") == null);
        assertTrue(consumerConfigs.get("value.deserializer").equals(LongDeserializer.class));
        assertTrue(consumerConfigs.get("value.serializer") == null);
        assertTrue(consumerConfigs.get("group.id").equals("groupIdFromBootConfig"));
        assertTrue(consumerConfigs.get("auto.offset.reset").equals("earliest"));
        assertTrue((((List<String>) consumerConfigs.get("bootstrap.servers")).containsAll(bootstrapServers)));
    }

    @Test
    public void testKafkaHealthIndicatorProperties() {
        assertNotNull(this.kafkaBinderHealthIndicator);
        Field consumerFactoryField = ReflectionUtils.findField(KafkaBinderHealthIndicator.class, "consumerFactory",
                ConsumerFactory.class);
        ReflectionUtils.makeAccessible(consumerFactoryField);
        DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) ReflectionUtils.getField(
                consumerFactoryField, this.kafkaBinderHealthIndicator);
        Field configField = ReflectionUtils.findField(DefaultKafkaConsumerFactory.class, "configs", Map.class);
        ReflectionUtils.makeAccessible(configField);
        Map<String, Object> configs = (Map<String, Object>) ReflectionUtils.getField(configField, consumerFactory);
        assertTrue(configs.containsKey("bootstrap.servers"));
        List<String> bootstrapServers = new ArrayList<>();
        bootstrapServers.add("10.98.09.199:9092");
        bootstrapServers.add("10.98.09.196:9092");
        assertTrue(((List<String>) configs.get("bootstrap.servers")).containsAll(bootstrapServers));
    }

    public static class KafkaBinderConfigProperties {

        @Bean
        KafkaProperties kafkaProperties() {
            return new KafkaProperties();
        }

    }
}
@@ -0,0 +1,103 @@
/*
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/**
 * @author Ilayaperumal Gopinathan
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(classes = { KafkaBinderConfiguration.class, KafkaAutoConfiguration.class,
        KafkaBinderConfigurationPropertiesTest.class })
@TestPropertySource(locations = "classpath:binder-config.properties")
public class KafkaBinderConfigurationPropertiesTest {

    @Autowired
    private KafkaMessageChannelBinder kafkaMessageChannelBinder;

    @Test
    public void testKafkaBinderConfigurationProperties() throws Exception {
        assertNotNull(this.kafkaMessageChannelBinder);
        KafkaProducerProperties kafkaProducerProperties = new KafkaProducerProperties();
        kafkaProducerProperties.setBufferSize(12345);
        kafkaProducerProperties.setBatchTimeout(100);
        kafkaProducerProperties.setCompressionType(KafkaProducerProperties.CompressionType.gzip);
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = new ExtendedProducerProperties<>(
                kafkaProducerProperties);
        Method getProducerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod("getProducerFactory",
                String.class, ExtendedProducerProperties.class);
        getProducerFactoryMethod.setAccessible(true);
        DefaultKafkaProducerFactory producerFactory = (DefaultKafkaProducerFactory) getProducerFactoryMethod
                .invoke(this.kafkaMessageChannelBinder, "bar", producerProperties);
        Field producerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaProducerFactory.class, "configs",
                Map.class);
        ReflectionUtils.makeAccessible(producerFactoryConfigField);
        Map<String, Object> producerConfigs = (Map<String, Object>) ReflectionUtils.getField(producerFactoryConfigField,
                producerFactory);
        assertTrue(producerConfigs.get("batch.size").equals("12345"));
        assertTrue(producerConfigs.get("linger.ms").equals("100"));
        assertTrue(producerConfigs.get("key.serializer").equals(ByteArraySerializer.class));
        assertTrue(producerConfigs.get("value.serializer").equals(ByteArraySerializer.class));
        assertTrue(producerConfigs.get("compression.type").equals("gzip"));
        List<String> bootstrapServers = new ArrayList<>();
        bootstrapServers.add("10.98.09.199:9082");
        assertTrue((((String) producerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
        Method createKafkaConsumerFactoryMethod = KafkaMessageChannelBinder.class.getDeclaredMethod(
                "createKafkaConsumerFactory", boolean.class, String.class, ExtendedConsumerProperties.class);
        createKafkaConsumerFactoryMethod.setAccessible(true);
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = new ExtendedConsumerProperties<>(
                new KafkaConsumerProperties());
        DefaultKafkaConsumerFactory consumerFactory = (DefaultKafkaConsumerFactory) createKafkaConsumerFactoryMethod
                .invoke(this.kafkaMessageChannelBinder, true, "test", consumerProperties);
        Field consumerFactoryConfigField = ReflectionUtils.findField(DefaultKafkaConsumerFactory.class, "configs",
                Map.class);
        ReflectionUtils.makeAccessible(consumerFactoryConfigField);
        Map<String, Object> consumerConfigs = (Map<String, Object>) ReflectionUtils.getField(consumerFactoryConfigField,
                consumerFactory);
        assertTrue(consumerConfigs.get("key.deserializer").equals(ByteArrayDeserializer.class));
        assertTrue(consumerConfigs.get("value.deserializer").equals(ByteArrayDeserializer.class));
        assertTrue((((String) consumerConfigs.get("bootstrap.servers")).contains("10.98.09.199:9082")));
    }

}
@@ -0,0 +1,58 @@
/*
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Field;

import org.junit.Test;
import org.junit.runner.RunWith;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.util.ReflectionUtils;

import static org.junit.Assert.assertNotNull;

/**
 * @author Ilayaperumal Gopinathan
 */
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(classes = { KafkaBinderConfiguration.class,
        KafkaAutoConfiguration.class,
        KafkaBinderConfigurationTest.class })
public class KafkaBinderConfigurationTest {

    @Autowired
    private KafkaMessageChannelBinder kafkaMessageChannelBinder;

    @Test
    public void testKafkaBinderProducerListener() {
        assertNotNull(this.kafkaMessageChannelBinder);
        Field producerListenerField = ReflectionUtils.findField(
                KafkaMessageChannelBinder.class, "producerListener",
                ProducerListener.class);
        ReflectionUtils.makeAccessible(producerListenerField);
        ProducerListener producerListener = (ProducerListener) ReflectionUtils.getField(
                producerListenerField, this.kafkaMessageChannelBinder);
        assertNotNull(producerListener);
    }

}
@@ -0,0 +1,142 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.Status;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Barry Commins
 * @author Gary Russell
 * @author Laur Aliste
 */
public class KafkaBinderHealthIndicatorTest {

    private static final String TEST_TOPIC = "test";

    private KafkaBinderHealthIndicator indicator;

    @Mock
    private DefaultKafkaConsumerFactory consumerFactory;

    @Mock
    private KafkaConsumer consumer;

    @Mock
    private KafkaMessageChannelBinder binder;

    private final Map<String, KafkaMessageChannelBinder.TopicInformation> topicsInUse = new HashMap<>();

    @Before
    public void setup() {
        MockitoAnnotations.initMocks(this);
        org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willReturn(consumer);
        org.mockito.BDDMockito.given(binder.getTopicsInUse()).willReturn(topicsInUse);
        this.indicator = new KafkaBinderHealthIndicator(binder, consumerFactory);
        this.indicator.setTimeout(10);
    }

    @Test
    public void kafkaBinderIsUp() {
        final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
        topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
        org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
        Health health = indicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.UP);
    }

    @Test
    public void kafkaBinderIsDown() {
        final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
        topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
        org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
        Health health = indicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.DOWN);
    }

    @Test(timeout = 5000)
    public void kafkaBinderDoesNotAnswer() {
        final List<PartitionInfo> partitions = partitions(new Node(-1, null, 0));
        topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
        org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willAnswer(new Answer<Object>() {

            @Override
            public Object answer(InvocationOnMock invocation) throws Throwable {
                final int fiveMinutes = 1000 * 60 * 5;
                Thread.sleep(fiveMinutes);
                return partitions;
            }

        });
        this.indicator.setTimeout(1);
        Health health = indicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.DOWN);
    }

    @Test
    public void createsConsumerOnceWhenInvokedMultipleTimes() {
        final List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
        topicsInUse.put(TEST_TOPIC, new KafkaMessageChannelBinder.TopicInformation("group", partitions));
        org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);

        indicator.health();
        Health health = indicator.health();

        assertThat(health.getStatus()).isEqualTo(Status.UP);
        org.mockito.Mockito.verify(this.consumerFactory).createConsumer();
    }

    @Test
    public void consumerCreationFailsFirstTime() {
        org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willThrow(KafkaException.class)
                .willReturn(consumer);

        Health health = indicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.DOWN);

        health = indicator.health();
        assertThat(health.getStatus()).isEqualTo(Status.UP);

        org.mockito.Mockito.verify(this.consumerFactory, Mockito.times(2)).createConsumer();
    }

    private List<PartitionInfo> partitions(Node leader) {
        List<PartitionInfo> partitions = new ArrayList<>();
        partitions.add(new PartitionInfo(TEST_TOPIC, 0, leader, null, null));
        return partitions;
    }
}
@@ -0,0 +1,133 @@
/*
 * Copyright 2016-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.search.Search;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;

import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder.TopicInformation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Henryk Konsek
 */
public class KafkaBinderMetricsTest {

	private static final String TEST_TOPIC = "test";

	private KafkaBinderMetrics metrics;

	@Mock
	private DefaultKafkaConsumerFactory consumerFactory;

	@Mock
	private KafkaConsumer consumer;

	@Mock
	private KafkaMessageChannelBinder binder;

	private MeterRegistry meterRegistry = new SimpleMeterRegistry();

	private Map<String, TopicInformation> topicsInUse = new HashMap<>();

	@Mock
	private KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties;

	@Before
	public void setup() {
		MockitoAnnotations.initMocks(this);
		org.mockito.BDDMockito.given(consumerFactory.createConsumer()).willReturn(consumer);
		org.mockito.BDDMockito.given(binder.getTopicsInUse()).willReturn(topicsInUse);
		metrics = new KafkaBinderMetrics(binder, kafkaBinderConfigurationProperties, consumerFactory);
		org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class)))
				.willReturn(java.util.Collections.singletonMap(new TopicPartition(TEST_TOPIC, 0), 1000L));
	}

	@Test
	public void shouldIndicateLag() {
		org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
		List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
		org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
		metrics.bindTo(meterRegistry);
		assertThat(meterRegistry.getMeters()).hasSize(1);
		Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
		assertThat(group.gauge().value()).isEqualTo(500.0);
	}

	@Test
	public void shouldSumUpPartitionsLags() {
		Map<TopicPartition, Long> endOffsets = new HashMap<>();
		endOffsets.put(new TopicPartition(TEST_TOPIC, 0), 1000L);
		endOffsets.put(new TopicPartition(TEST_TOPIC, 1), 1000L);
		org.mockito.BDDMockito.given(consumer.endOffsets(org.mockito.Matchers.anyCollectionOf(TopicPartition.class))).willReturn(endOffsets);
		org.mockito.BDDMockito.given(consumer.committed(org.mockito.Matchers.any(TopicPartition.class))).willReturn(new OffsetAndMetadata(500));
		List<PartitionInfo> partitions = partitions(new Node(0, null, 0), new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
		org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
		metrics.bindTo(meterRegistry);
		assertThat(meterRegistry.getMeters()).hasSize(1);
		Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
		assertThat(group.gauge().value()).isEqualTo(1000.0);
	}

	@Test
	public void shouldIndicateFullLagForNotCommittedGroups() {
		List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation("group", partitions));
		org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC)).willReturn(partitions);
		metrics.bindTo(meterRegistry);
		assertThat(meterRegistry.getMeters()).hasSize(1);
		Search group = meterRegistry.find(String.format("%s.%s.%s.lag", KafkaBinderMetrics.METRIC_PREFIX, "group", TEST_TOPIC));
		assertThat(group.gauge().value()).isEqualTo(1000.0);
	}

	@Test
	public void shouldNotCalculateLagForProducerTopics() {
		List<PartitionInfo> partitions = partitions(new Node(0, null, 0));
		topicsInUse.put(TEST_TOPIC, new TopicInformation(null, partitions));
		metrics.bindTo(meterRegistry);
		assertThat(meterRegistry.getMeters()).isEmpty();
	}

	private List<PartitionInfo> partitions(Node... nodes) {
		List<PartitionInfo> partitions = new ArrayList<>();
		for (int i = 0; i < nodes.length; i++) {
			partitions.add(new PartitionInfo(TEST_TOPIC, i, nodes[i], null, null));
		}
		return partitions;
	}

}
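The tests encode the lag rule: per-partition lag is the end offset minus the committed offset, summed across a topic's partitions; a group that has never committed counts the full end offset; topics with a null group (producer-only) get no gauge. A self-contained sketch of that rule against the plain Kafka consumer and Micrometer APIs; the metric name here is a stand-in for KafkaBinderMetrics.METRIC_PREFIX, and this is an illustration, not the binder's code:

// Sketch: lag = sum over partitions of (endOffset - committedOffset),
// with a never-committed group counted from offset 0.
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

final class LagSketch {

	static long totalLag(Consumer<?, ?> consumer, String topic) {
		long lag = 0;
		List<PartitionInfo> infos = consumer.partitionsFor(topic);
		List<TopicPartition> partitions = infos.stream()
				.map(pi -> new TopicPartition(topic, pi.partition()))
				.collect(Collectors.toList());
		Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
		for (Map.Entry<TopicPartition, Long> end : endOffsets.entrySet()) {
			OffsetAndMetadata committed = consumer.committed(end.getKey());
			long committedOffset = (committed != null) ? committed.offset() : 0L;
			lag += end.getValue() - committedOffset;
		}
		return lag;
	}

	static void bindLagGauge(MeterRegistry registry, Consumer<?, ?> consumer, String group, String topic) {
		// Producer-only topics (group == null) get no gauge, mirroring
		// shouldNotCalculateLagForProducerTopics above.
		if (group == null) {
			return;
		}
		// "spring.cloud.stream.binder.kafka" is a placeholder for the real prefix.
		Gauge.builder("spring.cloud.stream.binder.kafka." + group + "." + topic + ".lag",
				consumer, c -> totalLag(c, topic)).register(registry);
	}
}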
File diff suppressed because it is too large
@@ -0,0 +1,79 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.lang.reflect.Method;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.integration.test.util.TestUtils;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Gary Russell
 * @since 1.2.2
 *
 */
public class KafkaBinderUnitTests {

	@Test
	public void testPropertyOverrides() throws Exception {
		KafkaBinderConfigurationProperties binderConfigurationProperties = new KafkaBinderConfigurationProperties();
		KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(binderConfigurationProperties, new KafkaProperties());
		KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfigurationProperties,
				provisioningProvider);
		KafkaConsumerProperties consumerProps = new KafkaConsumerProperties();
		ExtendedConsumerProperties<KafkaConsumerProperties> ecp =
				new ExtendedConsumerProperties<KafkaConsumerProperties>(consumerProps);
		Method method = KafkaMessageChannelBinder.class.getDeclaredMethod("createKafkaConsumerFactory", boolean.class,
				String.class, ExtendedConsumerProperties.class);
		method.setAccessible(true);

		// test default for anon
		Object factory = method.invoke(binder, true, "foo", ecp);
		Map<?, ?> configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
		assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("latest");

		// test default for named
		factory = method.invoke(binder, false, "foo", ecp);
		configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
		assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("earliest");

		// binder level setting
		binderConfigurationProperties.setConfiguration(
				Collections.singletonMap(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"));
		factory = method.invoke(binder, false, "foo", ecp);
		configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
		assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("latest");

		// consumer level setting
		consumerProps.setConfiguration(Collections.singletonMap(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"));
		factory = method.invoke(binder, false, "foo", ecp);
		configs = TestUtils.getPropertyValue(factory, "configs", Map.class);
		assertThat(configs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)).isEqualTo("earliest");
	}

}
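testPropertyOverrides pins down the precedence chain for consumer configuration: the built-in default (latest for anonymous consumers, earliest for named groups) is overridden by a binder-level configuration entry, which in turn is overridden by a binding-level consumer configuration entry. A hedged illustration of the same chain expressed as application properties; "input" is a hypothetical binding name, and the exact property prefixes are assumptions based on the binder's documented configuration map:

# Binder-wide default for every consumer created by this binder:
spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=latest
# Overrides the binder-wide value for the "input" binding only:
spring.cloud.stream.kafka.bindings.input.consumer.configuration.auto.offset.reset=earliest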
@@ -1,5 +1,5 @@
/*
 * Copyright 2015-2016 the original author or authors.
 * Copyright 2015-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -16,72 +16,64 @@

package org.springframework.cloud.stream.binder.kafka;

import java.util.List;

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;

import org.springframework.cloud.stream.binder.AbstractTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.codec.kryo.KryoRegistrar;
import org.springframework.integration.codec.kryo.PojoCodec;
import org.springframework.integration.tuple.TupleKryoRegistrar;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Configuration;
import org.springframework.integration.config.EnableIntegration;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;

/**
 * Test support class for {@link KafkaMessageChannelBinder}.
 *
 * @author Eric Bottard
 * @author Marius Bogoevici
 * @author David Turanski
 * @author Gary Russell
 * @author Soby Chacko
 */
public class KafkaTestBinder extends
		AbstractTestBinder<KafkaMessageChannelBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
public class KafkaTestBinder extends AbstractKafkaTestBinder {

	public KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
	@SuppressWarnings({"rawtypes", "unchecked"})
	KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration, KafkaTopicProvisioner kafkaTopicProvisioner) {
		try {
			KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
			binder.setCodec(getCodec());
			KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration,
					kafkaTopicProvisioner) {

				/*
				 * Some tests use multiple instance indexes for the same topic; we need to make
				 * the error infrastructure beans unique.
				 */
				@Override
				protected String errorsBaseName(ConsumerDestination destination, String group,
						ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
					return super.errorsBaseName(destination, group, consumerProperties) + "-"
							+ consumerProperties.getInstanceIndex();
				}

			};

			ProducerListener producerListener = new LoggingProducerListener();
			binder.setProducerListener(producerListener);
			GenericApplicationContext context = new GenericApplicationContext();
			context.refresh();
			AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(Config.class);
			setApplicationContext(context);
			binder.setApplicationContext(context);
			binder.afterPropertiesSet();
			this.setBinder(binder);
			this.setPollableConsumerBinder(binder);
		}
		catch (Exception e) {
			throw new RuntimeException(e);
		}
	}

	@Override
	public void cleanup() {
		// do nothing - the rule will take care of that
	}
	@Configuration
	@EnableIntegration
	static class Config {

	private static Codec getCodec() {
		return new PojoCodec(new TupleRegistrar());
	}

	private static class TupleRegistrar implements KryoRegistrar {
		private final TupleKryoRegistrar delegate = new TupleKryoRegistrar();

		@Override
		public void registerTypes(Kryo kryo) {
			this.delegate.registerTypes(kryo);
		}

		@Override
		public List<Registration> getRegistrations() {
			return this.delegate.getRegistrations();
		}
	}

	}
@@ -0,0 +1,97 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.Collections;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.junit.ClassRule;
import org.junit.Test;
import org.mockito.InOrder;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.retry.support.RetryTemplate;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.BDDMockito.willReturn;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;

/**
 * @author Gary Russell
 * @since 2.0
 *
 */
public class KafkaTransactionTests {

	@ClassRule
	public static final KafkaEmbedded embeddedKafka = new KafkaEmbedded(1);

	@SuppressWarnings({ "rawtypes", "unchecked" })
	@Test
	public void testProducerRunsInTx() {
		KafkaProperties kafkaProperties = new KafkaProperties();
		kafkaProperties.setBootstrapServers(Collections.singletonList(embeddedKafka.getBrokersAsString()));
		KafkaBinderConfigurationProperties configurationProperties = new KafkaBinderConfigurationProperties();
		configurationProperties.getTransaction().setTransactionIdPrefix("foo-");
		KafkaTopicProvisioner provisioningProvider = new KafkaTopicProvisioner(configurationProperties, kafkaProperties);
		provisioningProvider.setMetadataRetryOperations(new RetryTemplate());
		final Producer mockProducer = mock(Producer.class);
		willReturn(Collections.singletonList(new TopicPartition("foo", 0))).given(mockProducer).partitionsFor(anyString());
		KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties, provisioningProvider) {

			@Override
			protected DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(String transactionIdPrefix,
					ExtendedProducerProperties<KafkaProducerProperties> producerProperties) {
				DefaultKafkaProducerFactory<byte[], byte[]> producerFactory =
						spy(super.getProducerFactory(transactionIdPrefix, producerProperties));
				willReturn(mockProducer).given(producerFactory).createProducer();
				return producerFactory;
			}

		};
		GenericApplicationContext applicationContext = new GenericApplicationContext();
		applicationContext.refresh();
		binder.setApplicationContext(applicationContext);
		DirectChannel channel = new DirectChannel();
		KafkaProducerProperties extension = new KafkaProducerProperties();
		ExtendedProducerProperties<KafkaProducerProperties> properties = new ExtendedProducerProperties<>(extension);
		binder.bindProducer("foo", channel, properties);
		channel.send(new GenericMessage<>("foo".getBytes()));
		InOrder inOrder = inOrder(mockProducer);
		inOrder.verify(mockProducer).beginTransaction();
		inOrder.verify(mockProducer).send(any(ProducerRecord.class), any(Callback.class));
		inOrder.verify(mockProducer).commitTransaction();
		inOrder.verify(mockProducer).close();
		inOrder.verifyNoMoreInteractions();
	}

}
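The InOrder verification above captures the transactional protocol the binder drives: begin, send, commit, close. A minimal sketch of the same protocol written directly against the Kafka producer API; the bootstrap address, topic, and transactional id are placeholders, and the binder itself manages this flow internally rather than exposing it like this:

// Sketch of the begin/send/commit flow the test verifies.
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class TransactionalSendSketch {

	public static void main(String[] args) {
		Properties props = new Properties();
		props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
		props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
		props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
		// The transactional id plays the role of the binder's "foo-" prefix.
		props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "foo-0");

		try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
			producer.initTransactions();
			producer.beginTransaction();
			try {
				producer.send(new ProducerRecord<>("foo", "foo".getBytes()));
				producer.commitTransaction();
			}
			catch (Exception e) {
				producer.abortTransaction();
				throw e;
			}
		}
	}
}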
@@ -1,258 +0,0 @@
/*
 * Copyright 2015-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.Arrays;

import org.junit.Test;

import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.support.GenericMessage;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Marius Bogoevici
 * @author David Turanski
 * @author Gary Russell
 * @author Mark Fisher
 */
public class RawModeKafkaBinderTests extends KafkaBinderTests {

	@Test
	@Override
	public void testPartitionedModuleJava() throws Exception {
		KafkaTestBinder binder = getBinder();
		ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
		properties.setHeaderMode(HeaderMode.raw);
		properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
		properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
		properties.setPartitionCount(6);

		DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
		output.setBeanName("test.output");
		Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, properties);

		ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
		consumerProperties.setConcurrency(2);
		consumerProperties.setInstanceCount(3);
		consumerProperties.setInstanceIndex(0);
		consumerProperties.setPartitioned(true);
		consumerProperties.setHeaderMode(HeaderMode.raw);
		consumerProperties.getExtension().setAutoRebalanceEnabled(false);
		QueueChannel input0 = new QueueChannel();
		input0.setBeanName("test.input0J");
		Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
		consumerProperties.setInstanceIndex(1);
		QueueChannel input1 = new QueueChannel();
		input1.setBeanName("test.input1J");
		Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.0", "test", input1, consumerProperties);
		consumerProperties.setInstanceIndex(2);
		QueueChannel input2 = new QueueChannel();
		input2.setBeanName("test.input2J");
		Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);

		output.send(new GenericMessage<>(new byte[] {(byte) 0}));
		output.send(new GenericMessage<>(new byte[] {(byte) 1}));
		output.send(new GenericMessage<>(new byte[] {(byte) 2}));

		Message<?> receive0 = receive(input0);
		assertThat(receive0).isNotNull();
		Message<?> receive1 = receive(input1);
		assertThat(receive1).isNotNull();
		Message<?> receive2 = receive(input2);
		assertThat(receive2).isNotNull();

		assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
				((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);

		input0Binding.unbind();
		input1Binding.unbind();
		input2Binding.unbind();
		outputBinding.unbind();
	}

	@Test
	@Override
	public void testPartitionedModuleSpEL() throws Exception {
		KafkaTestBinder binder = getBinder();
		ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
		properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
		properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
		properties.setPartitionCount(6);
		properties.setHeaderMode(HeaderMode.raw);

		DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
		output.setBeanName("test.output");
		Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, properties);
		try {
			Object endpoint = extractEndpoint(outputBinding);
			assertThat(getEndpointRouting(endpoint))
					.contains(getExpectedRoutingBaseDestination("part.0", "test") + "-' + headers['partition']");
		}
		catch (UnsupportedOperationException ignored) {
		}

		ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
		consumerProperties.setConcurrency(2);
		consumerProperties.setInstanceIndex(0);
		consumerProperties.setInstanceCount(3);
		consumerProperties.setPartitioned(true);
		consumerProperties.setHeaderMode(HeaderMode.raw);
		consumerProperties.getExtension().setAutoRebalanceEnabled(false);
		QueueChannel input0 = new QueueChannel();
		input0.setBeanName("test.input0S");
		Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
		consumerProperties.setInstanceIndex(1);
		QueueChannel input1 = new QueueChannel();
		input1.setBeanName("test.input1S");
		Binding<MessageChannel> input1Binding = binder.bindConsumer("part.0", "test", input1, consumerProperties);
		consumerProperties.setInstanceIndex(2);
		QueueChannel input2 = new QueueChannel();
		input2.setBeanName("test.input2S");
		Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);

		Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] {2})
				.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
				.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
				.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
		output.send(message2);
		output.send(new GenericMessage<>(new byte[] {1}));
		output.send(new GenericMessage<>(new byte[] {0}));
		Message<?> receive0 = receive(input0);
		assertThat(receive0).isNotNull();
		Message<?> receive1 = receive(input1);
		assertThat(receive1).isNotNull();
		Message<?> receive2 = receive(input2);
		assertThat(receive2).isNotNull();
		assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
				((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
		input0Binding.unbind();
		input1Binding.unbind();
		input2Binding.unbind();
		outputBinding.unbind();
	}

	@Test
	@Override
	public void testSendAndReceive() throws Exception {
		KafkaTestBinder binder = getBinder();
		DirectChannel moduleOutputChannel = new DirectChannel();
		QueueChannel moduleInputChannel = new QueueChannel();
		ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
		producerProperties.setHeaderMode(HeaderMode.raw);
		Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
				producerProperties);
		ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
		consumerProperties.setHeaderMode(HeaderMode.raw);
		Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
				consumerProperties);
		Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
		// Let the consumer actually bind to the producer before sending a msg
		binderBindUnbindLatency();
		moduleOutputChannel.send(message);
		Message<?> inbound = receive(moduleInputChannel);
		assertThat(inbound).isNotNull();
		assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
		producerBinding.unbind();
		consumerBinding.unbind();
	}

	@Test
	public void testSendAndReceiveWithExplicitConsumerGroup() {
		KafkaTestBinder binder = getBinder();
		DirectChannel moduleOutputChannel = new DirectChannel();
		// Test pub/sub by emulating how StreamPlugin handles taps
		QueueChannel module1InputChannel = new QueueChannel();
		QueueChannel module2InputChannel = new QueueChannel();
		QueueChannel module3InputChannel = new QueueChannel();
		ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
		producerProperties.setHeaderMode(HeaderMode.raw);
		Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel,
				producerProperties);
		ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
		consumerProperties.setHeaderMode(HeaderMode.raw);
		consumerProperties.getExtension().setAutoRebalanceEnabled(false);
		Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.0", "test", module1InputChannel,
				consumerProperties);
		// A new module is using the tap as an input channel
		String fooTapName = "baz.0";
		Binding<MessageChannel> input2Binding = binder.bindConsumer(fooTapName, "tap1", module2InputChannel,
				consumerProperties);
		// Another new module is using tap as an input channel
		String barTapName = "baz.0";
		Binding<MessageChannel> input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel,
				consumerProperties);

		Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
		boolean success = false;
		boolean retried = false;
		while (!success) {
			moduleOutputChannel.send(message);
			Message<?> inbound = receive(module1InputChannel);
			assertThat(inbound).isNotNull();
			assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");

			Message<?> tapped1 = receive(module2InputChannel);
			Message<?> tapped2 = receive(module3InputChannel);
			if (tapped1 == null || tapped2 == null) {
				// listener may not have started
				assertThat(retried).isFalse().withFailMessage("Failed to receive tap after retry");
				retried = true;
				continue;
			}
			success = true;
			assertThat(new String((byte[]) tapped1.getPayload())).isEqualTo("foo");
			assertThat(new String((byte[]) tapped2.getPayload())).isEqualTo("foo");
		}
		// delete one tap stream is deleted
		input3Binding.unbind();
		Message<?> message2 = MessageBuilder.withPayload("bar".getBytes()).build();
		moduleOutputChannel.send(message2);

		// other tap still receives messages
		Message<?> tapped = receive(module2InputChannel);
		assertThat(tapped).isNotNull();

		// removed tap does not
		assertThat(receive(module3InputChannel)).isNull();

		// re-subscribed tap does receive the message
		input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel, createConsumerProperties());
		assertThat(receive(module3InputChannel)).isNotNull();

		// clean up
		input1Binding.unbind();
		input2Binding.unbind();
		input3Binding.unbind();
		producerBinding.unbind();
		assertThat(extractEndpoint(input1Binding).isRunning()).isFalse();
		assertThat(extractEndpoint(input2Binding).isRunning()).isFalse();
		assertThat(extractEndpoint(input3Binding).isRunning()).isFalse();
		assertThat(extractEndpoint(producerBinding).isRunning()).isFalse();
	}
}
@@ -0,0 +1,48 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.bootstrap;

import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.test.rule.KafkaEmbedded;

/**
 * @author Marius Bogoevici
 */
public class KafkaBinderBootstrapTest {

	@ClassRule
	public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);

	@Test
	public void testKafkaBinderConfiguration() throws Exception {
		ConfigurableApplicationContext applicationContext = new SpringApplicationBuilder(SimpleApplication.class)
				.web(false)
				.run("--spring.cloud.stream.kafka.binder.brokers=" + embeddedKafka.getBrokersAsString(),
						"--spring.cloud.stream.kafka.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
		applicationContext.close();
	}

	@SpringBootApplication
	static class SimpleApplication {
	}

}
@@ -0,0 +1,10 @@
spring.kafka.producer.keySerializer=org.apache.kafka.common.serialization.LongSerializer
spring.kafka.producer.valueSerializer=org.apache.kafka.common.serialization.LongSerializer
spring.kafka.consumer.keyDeserializer=org.apache.kafka.common.serialization.LongDeserializer
spring.kafka.consumer.valueDeserializer=org.apache.kafka.common.serialization.LongDeserializer
spring.kafka.producer.batchSize=10
spring.kafka.bootstrapServers=10.98.09.199:9092,10.98.09.196:9092
spring.kafka.producer.compressionType=snappy
# Test consumer properties
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.group-id=groupIdFromBootConfig
@@ -0,0 +1 @@
spring.cloud.stream.kafka.binder.brokers=10.98.09.199:9082
@@ -0,0 +1,7 @@
KafkaClient {
	com.sun.security.auth.module.Krb5LoginModule required
	useKeyTab=true
	storeKey=true
	keyTab="/etc/security/keytabs/kafka_client.keytab"
	principal="kafka-client-1@EXAMPLE.COM";
};
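A JAAS file like the one above only takes effect once the JVM is pointed at it. For a client run directly on a host, the standard system property does that; the path below is a hypothetical save location for this file:

	java -Djava.security.auth.login.config=/etc/kafka/kafka_client_jaas.conf -jar my-stream-app.jar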
@@ -0,0 +1,14 @@
KafkaClient {
	com.sun.security.auth.module.Krb5LoginModule required
	useKeyTab=true
	storeKey=true
	keyTab="/etc/security/keytabs/kafka_client.keytab"
	principal="kafka-client-1@EXAMPLE.COM";
};
Client {
	com.sun.security.auth.module.Krb5LoginModule required
	useKeyTab=true
	storeKey=true
	keyTab="/etc/security/keytabs/zk_client.keytab"
	principal="zk-client-1@EXAMPLE.COM";
};
@@ -0,0 +1,15 @@
<configuration>
	<appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
		<encoder>
			<pattern>%d{ISO8601} %5p %t %c{2}:%L - %m%n</pattern>
		</encoder>
	</appender>
	<logger name="org.apache.kafka" level="DEBUG"/>
	<logger name="org.springframework.integration.kafka" level="INFO"/>
	<logger name="org.springframework.kafka" level="INFO"/>
	<logger name="org.springframework.cloud.stream" level="INFO" />
	<logger name="org.springframework.integration.channel" level="INFO" />
	<root level="WARN">
		<appender-ref ref="stdout"/>
	</root>
</configuration>
0
spring-cloud-stream-binder-kstream/.jdk8
Normal file
@@ -0,0 +1,5 @@
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99
66
spring-cloud-stream-binder-kstream/pom.xml
Normal file
@@ -0,0 +1,66 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<artifactId>spring-cloud-stream-binder-kstream</artifactId>
	<packaging>jar</packaging>
	<name>spring-cloud-stream-binder-kstream</name>
	<description>Kafka Streams Binder Implementation</description>

	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>2.0.0.M4</version>
	</parent>

	<dependencies>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-configuration-processor</artifactId>
			<optional>true</optional>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-autoconfigure</artifactId>
			<optional>true</optional>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-streams</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-test</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka-test</artifactId>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.11</artifactId>
			<classifier>test</classifier>
		</dependency>
		<!-- Added back since Kafka still depends on it, but it has been removed by Boot due to EOL -->
		<dependency>
			<groupId>log4j</groupId>
			<artifactId>log4j</artifactId>
			<version>1.2.17</version>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-test</artifactId>
			<scope>test</scope>
		</dependency>
	</dependencies>
</project>
@@ -0,0 +1,170 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KeyValueMapper;
import org.apache.kafka.streams.kstream.Produced;

import org.springframework.cloud.stream.binder.AbstractBinder;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.DefaultBinding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kstream.config.KStreamConsumerProperties;
import org.springframework.cloud.stream.binder.kstream.config.KStreamExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kstream.config.KStreamProducerProperties;
import org.springframework.util.StringUtils;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KStreamBinder extends
		AbstractBinder<KStream<Object, Object>, ExtendedConsumerProperties<KStreamConsumerProperties>, ExtendedProducerProperties<KStreamProducerProperties>>
		implements ExtendedPropertiesBinder<KStream<Object, Object>, KStreamConsumerProperties, KStreamProducerProperties> {

	private final KafkaTopicProvisioner kafkaTopicProvisioner;

	private final KStreamExtendedBindingProperties kStreamExtendedBindingProperties;

	private final StreamsConfig streamsConfig;

	private final KafkaBinderConfigurationProperties binderConfigurationProperties;

	private final MessageConversionDelegate messageConversionDelegate;

	public KStreamBinder(KafkaBinderConfigurationProperties binderConfigurationProperties,
			KafkaTopicProvisioner kafkaTopicProvisioner,
			KStreamExtendedBindingProperties kStreamExtendedBindingProperties, StreamsConfig streamsConfig,
			MessageConversionDelegate messageConversionDelegate) {
		this.binderConfigurationProperties = binderConfigurationProperties;
		this.kafkaTopicProvisioner = kafkaTopicProvisioner;
		this.kStreamExtendedBindingProperties = kStreamExtendedBindingProperties;
		this.streamsConfig = streamsConfig;
		this.messageConversionDelegate = messageConversionDelegate;
	}

	@Override
	protected Binding<KStream<Object, Object>> doBindConsumer(String name, String group,
			KStream<Object, Object> inputTarget,
			ExtendedConsumerProperties<KStreamConsumerProperties> properties) {

		ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties = new ExtendedConsumerProperties<>(
				new KafkaConsumerProperties());
		this.kafkaTopicProvisioner.provisionConsumerDestination(name, group, extendedConsumerProperties);
		return new DefaultBinding<>(name, group, inputTarget, null);
	}

	@Override
	@SuppressWarnings("unchecked")
	protected Binding<KStream<Object, Object>> doBindProducer(String name, KStream<Object, Object> outboundBindTarget,
			ExtendedProducerProperties<KStreamProducerProperties> properties) {
		ExtendedProducerProperties<KafkaProducerProperties> extendedProducerProperties = new ExtendedProducerProperties<>(
				new KafkaProducerProperties());
		this.kafkaTopicProvisioner.provisionProducerDestination(name, extendedProducerProperties);

		Serde<?> keySerde = getKeySerde(properties);
		Serde<?> valueSerde = getValueSerde(properties);

		to(properties.isUseNativeEncoding(), name, outboundBindTarget, (Serde<Object>) keySerde, (Serde<Object>) valueSerde);

		return new DefaultBinding<>(name, null, outboundBindTarget, null);
	}

	private Serde<?> getKeySerde(ExtendedProducerProperties<KStreamProducerProperties> properties) {
		Serde<?> keySerde;
		try {
			if (StringUtils.hasText(properties.getExtension().getKeySerde())) {
				keySerde = Utils.newInstance(properties.getExtension().getKeySerde(), Serde.class);
				if (keySerde instanceof Configurable) {
					((Configurable) keySerde).configure(streamsConfig.originals());
				}
			}
			else {
				keySerde = this.binderConfigurationProperties.getConfiguration().containsKey("key.serde") ?
						Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("key.serde"), Serde.class) : Serdes.ByteArray();
			}

		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("Serde class not found: ", e);
		}
		return keySerde;
	}

	private Serde<?> getValueSerde(ExtendedProducerProperties<KStreamProducerProperties> properties) {
		Serde<?> valueSerde;
		try {
			if (properties.isUseNativeEncoding()) {
				if (StringUtils.hasText(properties.getExtension().getValueSerde())) {
					valueSerde = Utils.newInstance(properties.getExtension().getValueSerde(), Serde.class);
					if (valueSerde instanceof Configurable) {
						((Configurable) valueSerde).configure(streamsConfig.originals());
					}
				}
				else {
					valueSerde = this.binderConfigurationProperties.getConfiguration().containsKey("value.serde") ?
							Utils.newInstance(this.binderConfigurationProperties.getConfiguration().get("value.serde"), Serde.class) : Serdes.ByteArray();
				}
			}
			else {
				valueSerde = Serdes.ByteArray();
			}
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("Serde class not found: ", e);
		}
		return valueSerde;
	}

	@SuppressWarnings("unchecked")
	private void to(boolean isNativeEncoding, String name, KStream<Object, Object> outboundBindTarget,
			Serde<Object> keySerde, Serde<Object> valueSerde) {
		KeyValueMapper<Object, Object, KeyValue<Object, Object>> keyValueMapper = null;
		if (!isNativeEncoding) {
			keyValueMapper = messageConversionDelegate.outboundKeyValueMapper(name);
		}
		if (!isNativeEncoding) {
			outboundBindTarget.map(keyValueMapper).to(name, Produced.with(keySerde, valueSerde));
		}
		else {
			outboundBindTarget.to(name, Produced.with(keySerde, valueSerde));
		}
	}

	@Override
	public KStreamConsumerProperties getExtendedConsumerProperties(String channelName) {
		return this.kStreamExtendedBindingProperties.getExtendedConsumerProperties(channelName);
	}

	@Override
	public KStreamProducerProperties getExtendedProducerProperties(String channelName) {
		return this.kStreamExtendedBindingProperties.getExtendedProducerProperties(channelName);
	}
}
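The getKeySerde/getValueSerde methods above resolve Serdes in a fixed order: a keySerde/valueSerde set on the producer binding's extension wins; otherwise a "key.serde"/"value.serde" entry in the binder's configuration map is used; otherwise byte-array Serdes are the fallback (and the value Serde is always byte-array unless native encoding is on). A hedged illustration of that order as application properties; "output" is a hypothetical binding name and the kstream property prefixes are assumptions about how the extension and configuration map are exposed:

# Binding-level Serde, checked first by getKeySerde:
spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$StringSerde
# Binder-wide fallback via the "key.serde" configuration key:
spring.cloud.stream.kafka.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
# If neither is set, the binder falls back to Serdes.ByteArray().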
@@ -0,0 +1,110 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;

import org.springframework.aop.framework.ProxyFactory;
import org.springframework.cloud.stream.binding.AbstractBindingTargetFactory;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KStreamBoundElementFactory extends AbstractBindingTargetFactory<KStream> {

	private final StreamsBuilder kStreamBuilder;

	private final BindingServiceProperties bindingServiceProperties;

	public KStreamBoundElementFactory(StreamsBuilder kStreamBuilder, BindingServiceProperties bindingServiceProperties) {
		super(KStream.class);
		this.bindingServiceProperties = bindingServiceProperties;
		this.kStreamBuilder = kStreamBuilder;
	}

	@Override
	public KStream createInput(String name) {
		KStream<Object, Object> stream = kStreamBuilder.stream(bindingServiceProperties.getBindingDestination(name));
		stream = stream.map((key, value) -> {
			KeyValue<Object, Object> keyValue;
			BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(name);
			String contentType = bindingProperties.getContentType();
			if (!StringUtils.isEmpty(contentType) && !bindingProperties.getConsumer().isUseNativeDecoding()) {
				Message<Object> message = MessageBuilder.withPayload(value)
						.setHeader(MessageHeaders.CONTENT_TYPE, contentType).build();
				keyValue = new KeyValue<>(key, message);
			}
			else {
				keyValue = new KeyValue<>(key, value);
			}
			return keyValue;
		});
		return stream;
	}

	@Override
	@SuppressWarnings("unchecked")
	public KStream createOutput(final String name) {
		KStreamWrapperHandler wrapper = new KStreamWrapperHandler();
		ProxyFactory proxyFactory = new ProxyFactory(KStreamWrapper.class, KStream.class);
		proxyFactory.addAdvice(wrapper);
		return (KStream) proxyFactory.getProxy();
	}

	public interface KStreamWrapper {

		void wrap(KStream<Object, Object> delegate);
	}

	private static class KStreamWrapperHandler implements KStreamWrapper, MethodInterceptor {

		private KStream<Object, Object> delegate;

		public void wrap(KStream<Object, Object> delegate) {
			Assert.notNull(delegate, "delegate cannot be null");
			Assert.isNull(this.delegate, "delegate already set to " + this.delegate);
			this.delegate = delegate;
		}

		@Override
		public Object invoke(MethodInvocation methodInvocation) throws Throwable {
			if (methodInvocation.getMethod().getDeclaringClass().equals(KStream.class)) {
				Assert.notNull(delegate, "Trying to invoke " + methodInvocation
						.getMethod() + " but no delegate has been set.");
				return methodInvocation.getMethod().invoke(delegate, methodInvocation.getArguments());
			}
			else if (methodInvocation.getMethod().getDeclaringClass().equals(KStreamWrapper.class)) {
				return methodInvocation.getMethod().invoke(this, methodInvocation.getArguments());
			}
			else {
				throw new IllegalStateException("Only KStream method invocations are permitted");
			}
		}
	}
}
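createOutput above hands out a KStream proxy before the real stream exists; the MethodInterceptor routes calls to the delegate only after wrap(...) is invoked during binding. A self-contained sketch of the same late-binding proxy trick with Spring AOP, using a made-up Greeter interface in place of KStream to keep it runnable on its own:

// Generic sketch of the late-binding proxy pattern used by createOutput.
import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
import org.springframework.aop.framework.ProxyFactory;

public class LateBindingProxySketch {

	interface Greeter {
		String greet(String name);
	}

	interface Wrapper {
		void wrap(Greeter delegate);
	}

	static class Handler implements Wrapper, MethodInterceptor {

		private Greeter delegate;

		@Override
		public void wrap(Greeter delegate) {
			this.delegate = delegate;
		}

		@Override
		public Object invoke(MethodInvocation invocation) throws Throwable {
			// Wrapper calls are handled by this object; everything else goes
			// to the delegate that was wired in later.
			if (invocation.getMethod().getDeclaringClass().equals(Wrapper.class)) {
				return invocation.getMethod().invoke(this, invocation.getArguments());
			}
			return invocation.getMethod().invoke(this.delegate, invocation.getArguments());
		}
	}

	public static void main(String[] args) {
		Handler handler = new Handler();
		ProxyFactory proxyFactory = new ProxyFactory(Wrapper.class, Greeter.class);
		proxyFactory.addAdvice(handler);
		Greeter proxy = (Greeter) proxyFactory.getProxy();
		// The proxy can be handed out now and wired to a real target later:
		((Wrapper) proxy).wrap(name -> "hello " + name);
		System.out.println(proxy.greet("kafka")); // prints "hello kafka"
	}
}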
@@ -0,0 +1,53 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
import org.springframework.core.MethodParameter;
import org.springframework.core.ResolvableType;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KStreamListenerParameterAdapter implements StreamListenerParameterAdapter<KStream<?,?>, KStream<?, ?>> {

	private final MessageConversionDelegate messageConversionDelegate;

	public KStreamListenerParameterAdapter(MessageConversionDelegate messageConversionDelegate) {
		this.messageConversionDelegate = messageConversionDelegate;
	}

	@Override
	public boolean supports(Class bindingTargetType, MethodParameter methodParameter) {
		return KStream.class.isAssignableFrom(bindingTargetType)
				&& KStream.class.isAssignableFrom(methodParameter.getParameterType());
	}

	@Override
	@SuppressWarnings("unchecked")
	public KStream adapt(KStream<?, ?> bindingTarget, MethodParameter parameter) {
		ResolvableType resolvableType = ResolvableType.forMethodParameter(parameter);
		final Class<?> valueClass = (resolvableType.getGeneric(1).getRawClass() != null)
				? (resolvableType.getGeneric(1).getRawClass()) : Object.class;

		return bindingTarget.map(messageConversionDelegate.inboundKeyValueMapper(valueClass));
	}

}
@@ -0,0 +1,183 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.lang.reflect.Method;
import java.util.Collection;

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.beans.BeansException;
import org.springframework.beans.factory.BeanInitializationException;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binding.StreamListenerErrorMessages;
import org.springframework.cloud.stream.binding.StreamListenerParameterAdapter;
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
import org.springframework.cloud.stream.binding.StreamListenerSetupMethodOrchestrator;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.core.MethodParameter;
import org.springframework.core.annotation.AnnotationUtils;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

/**
 * Kafka Streams-specific implementation of {@link StreamListenerSetupMethodOrchestrator}
 * that overrides the default mechanism for invoking StreamListener adapters.
 *
 * @author Soby Chacko
 */
public class KStreamListenerSetupMethodOrchestrator implements StreamListenerSetupMethodOrchestrator, ApplicationContextAware {

    private ConfigurableApplicationContext applicationContext;

    private StreamListenerParameterAdapter streamListenerParameterAdapter;

    private Collection<StreamListenerResultAdapter> streamListenerResultAdapters;

    public KStreamListenerSetupMethodOrchestrator(StreamListenerParameterAdapter streamListenerParameterAdapter,
                                                Collection<StreamListenerResultAdapter> streamListenerResultAdapters) {
        this.streamListenerParameterAdapter = streamListenerParameterAdapter;
        this.streamListenerResultAdapters = streamListenerResultAdapters;
    }

    @Override
    public boolean supports(Method method) {
        return methodParameterSupports(method) && methodReturnTypeSupports(method);
    }

    private boolean methodReturnTypeSupports(Method method) {
        Class<?> returnType = method.getReturnType();
        return returnType.equals(KStream.class)
                || (returnType.isArray() && returnType.getComponentType().equals(KStream.class));
    }

    private boolean methodParameterSupports(Method method) {
        MethodParameter methodParameter = MethodParameter.forExecutable(method, 0);
        Class<?> parameterType = methodParameter.getParameterType();
        return parameterType.equals(KStream.class);
    }

    @Override
    @SuppressWarnings({"rawtypes", "unchecked"})
    public void orchestrateStreamListenerSetupMethod(StreamListener streamListener, Method method, Object bean) {
        String[] methodAnnotatedOutboundNames = getOutboundBindingTargetNames(method);
        validateStreamListenerMethod(streamListener, method, methodAnnotatedOutboundNames);

        // Adapt the raw bound targets into the KStream types declared on the listener method.
        String methodAnnotatedInboundName = streamListener.value();
        Object[] adaptedInboundArguments = adaptAndRetrieveInboundArguments(method, methodAnnotatedInboundName,
                this.applicationContext,
                this.streamListenerParameterAdapter);

        try {
            Object result = method.invoke(bean, adaptedInboundArguments);

            if (result.getClass().isArray()) {
                Assert.isTrue(methodAnnotatedOutboundNames.length == ((Object[]) result).length,
                        "The number of KStream results must match the number of outbound destinations declared on @SendTo");
            }
            else {
                Assert.isTrue(methodAnnotatedOutboundNames.length == 1,
                        "A single KStream result requires exactly one outbound destination on @SendTo");
            }
            if (result.getClass().isArray()) {
                // Route each returned KStream to its positional @SendTo destination.
                Object[] outboundKStreams = (Object[]) result;
                int i = 0;
                for (Object outboundKStream : outboundKStreams) {
                    Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[i++]);
                    for (StreamListenerResultAdapter streamListenerResultAdapter : streamListenerResultAdapters) {
                        if (streamListenerResultAdapter.supports(outboundKStream.getClass(), targetBean.getClass())) {
                            streamListenerResultAdapter.adapt(outboundKStream, targetBean);
                            break;
                        }
                    }
                }
            }
            else {
                Object targetBean = this.applicationContext.getBean(methodAnnotatedOutboundNames[0]);
                for (StreamListenerResultAdapter streamListenerResultAdapter : streamListenerResultAdapters) {
                    if (streamListenerResultAdapter.supports(result.getClass(), targetBean.getClass())) {
                        streamListenerResultAdapter.adapt(result, targetBean);
                        break;
                    }
                }
            }
        }
        catch (Exception e) {
            throw new BeanInitializationException("Cannot setup StreamListener for " + method, e);
        }
    }

    @Override
    public final void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = (ConfigurableApplicationContext) applicationContext;
    }

    private void validateStreamListenerMethod(StreamListener streamListener, Method method, String[] methodAnnotatedOutboundNames) {
        String methodAnnotatedInboundName = streamListener.value();
        for (String s : methodAnnotatedOutboundNames) {
            if (StringUtils.hasText(s)) {
                Assert.isTrue(isDeclarativeOutput(method, s), "Method must be declarative");
            }
        }
        if (StringUtils.hasText(methodAnnotatedInboundName)) {
            int methodArgumentsLength = method.getParameterTypes().length;

            for (int parameterIndex = 0; parameterIndex < methodArgumentsLength; parameterIndex++) {
                MethodParameter methodParameter = MethodParameter.forExecutable(method, parameterIndex);
                Assert.isTrue(isDeclarativeInput(methodAnnotatedInboundName, methodParameter), "Method must be declarative");
            }
        }
    }

    @SuppressWarnings("unchecked")
    private boolean isDeclarativeOutput(Method m, String targetBeanName) {
        // An array return type is declarative if its component type is supported by an adapter;
        // otherwise the return type itself must be supported.
        Class<?> returnType = m.getReturnType();
        Class<?> candidateType = returnType.isArray() ? returnType.getComponentType() : returnType;
        Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
        return this.streamListenerResultAdapters.stream()
                .anyMatch(slpa -> slpa.supports(candidateType, targetBeanClass));
    }

    @SuppressWarnings("unchecked")
    private boolean isDeclarativeInput(String targetBeanName, MethodParameter methodParameter) {
        if (!methodParameter.getParameterType().isAssignableFrom(Object.class) && this.applicationContext.containsBean(targetBeanName)) {
            Class<?> targetBeanClass = this.applicationContext.getType(targetBeanName);
            return this.streamListenerParameterAdapter.supports(targetBeanClass, methodParameter);
        }
        return false;
    }

    private static String[] getOutboundBindingTargetNames(Method method) {
        SendTo sendTo = AnnotationUtils.findAnnotation(method, SendTo.class);
        if (sendTo != null) {
            Assert.isTrue(!ObjectUtils.isEmpty(sendTo.value()), StreamListenerErrorMessages.ATLEAST_ONE_OUTPUT);
            return sendTo.value();
        }
        return null;
    }
}
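The array handling above is what lets a listener fan out to several outputs. A sketch, under the assumption of two hypothetical output bindings named english and french declared on a custom binding interface:

    import org.apache.kafka.streams.kstream.KStream;
    import org.springframework.cloud.stream.annotation.StreamListener;
    import org.springframework.messaging.handler.annotation.SendTo;

    public class BranchingListener {

        // The KStream[] returned by branch() is matched positionally against the
        // @SendTo names: element 0 goes to "english", element 1 to "french". The
        // orchestrator looks up each named binding target bean and adapts the
        // corresponding stream onto it via the result adapters.
        @StreamListener("input")
        @SendTo({"english", "french"})
        public KStream<Object, String>[] route(KStream<Object, String> input) {
            return input.branch(
                    (key, value) -> value.startsWith("en:"),
                    (key, value) -> value.startsWith("fr:"));
        }
    }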
@@ -0,0 +1,52 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.io.Closeable;
import java.io.IOException;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;

/**
 * @author Marius Bogoevici
 */
public class KStreamStreamListenerResultAdapter implements StreamListenerResultAdapter<KStream, KStreamBoundElementFactory.KStreamWrapper> {

    @Override
    public boolean supports(Class<?> resultType, Class<?> boundElement) {
        return KStream.class.isAssignableFrom(resultType) && KStream.class.isAssignableFrom(boundElement);
    }

    @Override
    @SuppressWarnings("unchecked")
    public Closeable adapt(KStream streamListenerResult, KStreamBoundElementFactory.KStreamWrapper boundElement) {
        boundElement.wrap(streamListenerResult.map(KeyValue::new));
        return new NoOpCloseable();
    }

    private static final class NoOpCloseable implements Closeable {

        @Override
        public void close() throws IOException {

        }

    }
}
@@ -0,0 +1,96 @@
/*
 * Copyright 2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KeyValueMapper;

import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.converter.MessageConverter;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.MimeType;
import org.springframework.util.StringUtils;

/**
 * @author Soby Chacko
 */
public class MessageConversionDelegate {

    private final BindingServiceProperties bindingServiceProperties;

    private final CompositeMessageConverterFactory compositeMessageConverterFactory;

    public MessageConversionDelegate(BindingServiceProperties bindingServiceProperties,
                                    CompositeMessageConverterFactory compositeMessageConverterFactory) {
        this.bindingServiceProperties = bindingServiceProperties;
        this.compositeMessageConverterFactory = compositeMessageConverterFactory;
    }

    public KeyValueMapper<Object, Object, KeyValue<Object, Object>> outboundKeyValueMapper(String name) {
        BindingProperties bindingProperties = bindingServiceProperties.getBindingProperties(name);
        String contentType = bindingProperties.getContentType();
        MessageConverter messageConverter = StringUtils.hasText(contentType)
                ? compositeMessageConverterFactory.getMessageConverterForType(MimeType.valueOf(contentType))
                : null;

        return (k, v) -> {
            Message<?> message = v instanceof Message<?> ? (Message<?>) v
                    : MessageBuilder.withPayload(v).build();
            Map<String, Object> headers = new HashMap<>(message.getHeaders());
            if (!StringUtils.isEmpty(contentType)) {
                headers.put(MessageHeaders.CONTENT_TYPE, contentType);
            }
            MessageHeaders messageHeaders = new MessageHeaders(headers);
            return new KeyValue<>(k,
                    messageConverter.toMessage(message.getPayload(), messageHeaders).getPayload());
        };
    }

    @SuppressWarnings("unchecked")
    public KeyValueMapper<Object, Object, KeyValue<Object, Object>> inboundKeyValueMapper(Class<?> valueClass) {
        MessageConverter messageConverter = compositeMessageConverterFactory.getMessageConverterForAllRegistered();
        return (KeyValueMapper) (o, o2) -> {
            KeyValue<Object, Object> keyValue;
            if (valueClass.isAssignableFrom(o2.getClass())) {
                keyValue = new KeyValue<>(o, o2);
            }
            else if (o2 instanceof Message) {
                if (valueClass.isAssignableFrom(((Message) o2).getPayload().getClass())) {
                    keyValue = new KeyValue<>(o, ((Message) o2).getPayload());
                }
                else {
                    keyValue = new KeyValue<>(o, messageConverter.fromMessage((Message) o2, valueClass));
                }
            }
            else if (o2 instanceof String || o2 instanceof byte[]) {
                Message<Object> message = MessageBuilder.withPayload(o2).build();
                keyValue = new KeyValue<>(o, messageConverter.fromMessage(message, valueClass));
            }
            else {
                keyValue = new KeyValue<>(o, o2);
            }
            return keyValue;
        };
    }
}
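The outbound mapper above picks its converter from the binding's contentType; the integration tests later in this changeset exercise exactly that path with a property of the form:

    --spring.cloud.stream.bindings.output.contentType=application/json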
@@ -0,0 +1,34 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.annotations;

import org.apache.kafka.streams.kstream.KStream;

import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;

/**
 * @author Marius Bogoevici
 */
public interface KStreamProcessor {

    @Input("input")
    KStream<?, ?> input();

    @Output("output")
    KStream<?, ?> output();
}
@@ -0,0 +1,40 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

import org.apache.kafka.streams.kstream.TimeWindows;

import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * @author Soby Chacko
 */
@Configuration
@EnableConfigurationProperties(KStreamApplicationSupportProperties.class)
public class KStreamApplicationSupportAutoConfiguration {

    @Bean
    @ConditionalOnProperty("spring.cloud.stream.kstream.timeWindow.length")
    public TimeWindows configuredTimeWindow(KStreamApplicationSupportProperties processorProperties) {
        return processorProperties.getTimeWindow().getAdvanceBy() > 0
                ? TimeWindows.of(processorProperties.getTimeWindow().getLength()).advanceBy(processorProperties.getTimeWindow().getAdvanceBy())
                : TimeWindows.of(processorProperties.getTimeWindow().getLength());
    }
}
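The conditional bean above is only created when the window length property is present; the WordCount integration test later in this changeset activates it with:

    --spring.cloud.stream.kstream.timeWindow.length=5000
    --spring.cloud.stream.kstream.timeWindow.advanceBy=0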
@@ -0,0 +1,64 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * {@link ConfigurationProperties} for end-user Kafka Streams applications. This class provides
 * convenient access to commonly used Kafka Streams properties from the user application. For example,
 * windowing operations are a common stream-processing use case; window-specific properties can be
 * provided at runtime and consumed by the application through this class.
 *
 * @author Soby Chacko
 */
@ConfigurationProperties("spring.cloud.stream.kstream")
public class KStreamApplicationSupportProperties {

    private TimeWindow timeWindow;

    public TimeWindow getTimeWindow() {
        return timeWindow;
    }

    public void setTimeWindow(TimeWindow timeWindow) {
        this.timeWindow = timeWindow;
    }

    public static class TimeWindow {

        private int length;

        private int advanceBy;

        public int getLength() {
            return length;
        }

        public void setLength(int length) {
            this.length = length;
        }

        public int getAdvanceBy() {
            return advanceBy;
        }

        public void setAdvanceBy(int advanceBy) {
            this.advanceBy = advanceBy;
        }
    }
}
@@ -0,0 +1,62 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.streams.StreamsConfig;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.binder.kstream.KStreamBinder;
import org.springframework.cloud.stream.binder.kstream.MessageConversionDelegate;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * @author Marius Bogoevici
 * @author Gary Russell
 * @author Soby Chacko
 */
@Configuration
@EnableConfigurationProperties(KStreamExtendedBindingProperties.class)
public class KStreamBinderConfiguration {

    private static final Log logger = LogFactory.getLog(KStreamBinderConfiguration.class);

    @Autowired
    private KafkaProperties kafkaProperties;

    @Bean
    public KafkaTopicProvisioner provisioningProvider(KafkaBinderConfigurationProperties binderConfigurationProperties) {
        return new KafkaTopicProvisioner(binderConfigurationProperties, kafkaProperties);
    }

    @Bean
    public KStreamBinder kStreamBinder(KafkaBinderConfigurationProperties binderConfigurationProperties,
                                    KafkaTopicProvisioner kafkaTopicProvisioner,
                                    KStreamExtendedBindingProperties kStreamExtendedBindingProperties, StreamsConfig streamsConfig,
                                    MessageConversionDelegate messageConversionDelegate) {
        return new KStreamBinder(binderConfigurationProperties, kafkaTopicProvisioner,
                kStreamExtendedBindingProperties, streamsConfig, messageConversionDelegate);
    }

}
@@ -0,0 +1,115 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

import java.util.Collection;
import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.UnsatisfiedDependencyException;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kstream.KStreamBoundElementFactory;
import org.springframework.cloud.stream.binder.kstream.KStreamListenerParameterAdapter;
import org.springframework.cloud.stream.binder.kstream.KStreamListenerSetupMethodOrchestrator;
import org.springframework.cloud.stream.binder.kstream.KStreamStreamListenerResultAdapter;
import org.springframework.cloud.stream.binder.kstream.MessageConversionDelegate;
import org.springframework.cloud.stream.binding.StreamListenerResultAdapter;
import org.springframework.cloud.stream.config.BindingServiceProperties;
import org.springframework.cloud.stream.converter.CompositeMessageConverterFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.KafkaStreamsDefaultConfiguration;
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
import org.springframework.util.ObjectUtils;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KStreamBinderSupportAutoConfiguration {

    @Bean
    @ConfigurationProperties(prefix = "spring.cloud.stream.kstream.binder")
    public KafkaBinderConfigurationProperties binderConfigurationProperties() {
        return new KafkaBinderConfigurationProperties();
    }

    @Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_BUILDER_BEAN_NAME)
    public StreamsBuilderFactoryBean defaultKafkaStreamBuilder(
            @Qualifier(KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME) ObjectProvider<StreamsConfig> streamsConfigProvider) {
        StreamsConfig streamsConfig = streamsConfigProvider.getIfAvailable();
        if (streamsConfig != null) {
            StreamsBuilderFactoryBean kStreamBuilderFactoryBean = new StreamsBuilderFactoryBean(streamsConfig);
            kStreamBuilderFactoryBean.setPhase(Integer.MAX_VALUE - 500);
            return kStreamBuilderFactoryBean;
        }
        else {
            throw new UnsatisfiedDependencyException(KafkaStreamsDefaultConfiguration.class.getName(),
                    KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_BUILDER_BEAN_NAME, "streamsConfig",
                    "There is no '" + KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME
                            + "' StreamsConfig bean in the application context.\n");
        }
    }

    @Bean(KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
    public StreamsConfig streamsConfig(KafkaBinderConfigurationProperties binderConfigurationProperties) {
        Properties props = new Properties();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, binderConfigurationProperties.getKafkaConnectionString());
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class.getName());
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "default");
        if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConfiguration())) {
            props.putAll(binderConfigurationProperties.getConfiguration());
        }
        return new StreamsConfig(props);
    }

    @Bean
    public KStreamStreamListenerResultAdapter kafkaStreamStreamListenerResultAdapter() {
        return new KStreamStreamListenerResultAdapter();
    }

    @Bean
    public KStreamListenerParameterAdapter kafkaStreamListenerParameterAdapter(
            MessageConversionDelegate messageConversionDelegate) {
        return new KStreamListenerParameterAdapter(messageConversionDelegate);
    }

    @Bean
    public KStreamListenerSetupMethodOrchestrator kStreamListenerSetupMethodOrchestrator(
            KStreamListenerParameterAdapter kafkaStreamListenerParameterAdapter,
            Collection<StreamListenerResultAdapter> streamListenerResultAdapters) {
        return new KStreamListenerSetupMethodOrchestrator(kafkaStreamListenerParameterAdapter, streamListenerResultAdapters);
    }

    @Bean
    public MessageConversionDelegate messageConversionDelegate(BindingServiceProperties bindingServiceProperties,
                                                            CompositeMessageConverterFactory compositeMessageConverterFactory) {
        return new MessageConversionDelegate(bindingServiceProperties, compositeMessageConverterFactory);
    }

    @Bean
    public KStreamBoundElementFactory kafkaStreamBindableTargetFactory(StreamsBuilder kStreamBuilder,
                                                                    BindingServiceProperties bindingServiceProperties) {
        return new KStreamBoundElementFactory(kStreamBuilder, bindingServiceProperties);
    }

}
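The streamsConfig bean above seeds byte-array serdes and a placeholder application id; anything placed under the binder's configuration map overrides those defaults, as the integration tests in this changeset do with properties such as:

    --spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000
    --spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
    --spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde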
@@ -0,0 +1,43 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

/**
 * @author Marius Bogoevici
 */
public class KStreamBindingProperties {

    private KStreamConsumerProperties consumer = new KStreamConsumerProperties();

    private KStreamProducerProperties producer = new KStreamProducerProperties();

    public KStreamConsumerProperties getConsumer() {
        return consumer;
    }

    public void setConsumer(KStreamConsumerProperties consumer) {
        this.consumer = consumer;
    }

    public KStreamProducerProperties getProducer() {
        return producer;
    }

    public void setProducer(KStreamProducerProperties producer) {
        this.producer = producer;
    }
}
@@ -0,0 +1,43 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

/**
 * @author Soby Chacko
 */
public class KStreamCommonProperties {

    private String keySerde;

    private String valueSerde;

    public String getKeySerde() {
        return keySerde;
    }

    public void setKeySerde(String keySerde) {
        this.keySerde = keySerde;
    }

    public String getValueSerde() {
        return valueSerde;
    }

    public void setValueSerde(String valueSerde) {
        this.valueSerde = valueSerde;
    }
}
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 the original author or authors.
+ * Copyright 2017 the original author or authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,7 +14,11 @@
  * limitations under the License.
  */

+package org.springframework.cloud.stream.binder.kstream.config;
+
 /**
- * This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
  * @author Marius Bogoevici
  */
-package org.springframework.cloud.stream.binder.kafka;
+public class KStreamConsumerProperties extends KStreamCommonProperties {
+
+}
@@ -0,0 +1,61 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

import java.util.HashMap;
import java.util.Map;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;

/**
 * @author Marius Bogoevici
 */
@ConfigurationProperties("spring.cloud.stream.kstream")
public class KStreamExtendedBindingProperties
        implements ExtendedBindingProperties<KStreamConsumerProperties, KStreamProducerProperties> {

    private Map<String, KStreamBindingProperties> bindings = new HashMap<>();

    public Map<String, KStreamBindingProperties> getBindings() {
        return this.bindings;
    }

    public void setBindings(Map<String, KStreamBindingProperties> bindings) {
        this.bindings = bindings;
    }

    @Override
    public KStreamConsumerProperties getExtendedConsumerProperties(String binding) {
        if (this.bindings.containsKey(binding) && this.bindings.get(binding).getConsumer() != null) {
            return this.bindings.get(binding).getConsumer();
        }
        else {
            return new KStreamConsumerProperties();
        }
    }

    @Override
    public KStreamProducerProperties getExtendedProducerProperties(String binding) {
        if (this.bindings.containsKey(binding) && this.bindings.get(binding).getProducer() != null) {
            return this.bindings.get(binding).getProducer();
        }
        else {
            return new KStreamProducerProperties();
        }
    }
}
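Per-binding overrides resolved by this class follow the pattern spring.cloud.stream.kstream.bindings.<bindingName>.consumer|producer.*; for example, the primitive-type output test later in this changeset sets:

    --spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde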
@@ -0,0 +1,25 @@
/*
 * Copyright 2017-2018 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream.config;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class KStreamProducerProperties extends KStreamCommonProperties {

}
@@ -0,0 +1,4 @@
kstream:\
org.springframework.cloud.stream.binder.kstream.config.KStreamBinderConfiguration
@@ -0,0 +1,5 @@
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
org.springframework.cloud.stream.binder.kstream.config.KStreamBinderSupportAutoConfiguration,\
org.springframework.cloud.stream.binder.kstream.config.KStreamApplicationSupportAutoConfiguration
@@ -0,0 +1,140 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.serializer.JsonSerde;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KStreamBinderPojoInputAndPrimitiveTypeOutputTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

    private static Consumer<Integer, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @Test
    public void testKstreamBinderWithPojoInputAndPrimitiveTypeOutput() throws Exception {
        SpringApplication app = new SpringApplication(ProductCountApplication.class);
        app.setWebEnvironment(false);
        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.cloud.stream.bindings.input.destination=foos",
                "--spring.cloud.stream.bindings.output.destination=counts-id",
                "--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidateFoo(context);
        }
        finally {
            context.close();
        }
    }

    private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("foos");
        template.sendDefault("{\"id\":\"123\"}");
        ConsumerRecord<Integer, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");

        assertThat(cr.key()).isEqualTo(123);
        ObjectMapper om = new ObjectMapper();
        Long aLong = om.readValue(cr.value(), Long.class);
        assertThat(aLong).isEqualTo(1L);
    }

    @EnableBinding(KStreamProcessor.class)
    @EnableAutoConfiguration
    public static class ProductCountApplication {

        @StreamListener("input")
        @SendTo("output")
        public KStream<Integer, Long> process(KStream<Object, Product> input) {
            return input
                    .filter((key, product) -> product.getId() == 123)
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
                    .count(TimeWindows.of(5000), "id-count-store")
                    .toStream()
                    .map((key, value) -> new KeyValue<>(key.key().id, value));
        }
    }

    static class Product {

        Integer id;

        public Integer getId() {
            return id;
        }

        public void setId(Integer id) {
            this.id = id;
        }
    }
}
@@ -0,0 +1,187 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.util.Arrays;
import java.util.Date;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
import org.springframework.cloud.stream.binder.kstream.config.KStreamApplicationSupportProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KStreamBinderWordCountIntegrationTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts");

    private static Consumer<String, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @Test
    public void testKstreamWordCountWithStringInputAndPojoOutput() throws Exception {
        SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
        app.setWebEnvironment(false);

        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.cloud.stream.bindings.input.destination=words",
                "--spring.cloud.stream.bindings.output.destination=counts",
                "--spring.cloud.stream.bindings.output.contentType=application/json",
                "--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kstream.timeWindow.length=5000",
                "--spring.cloud.stream.kstream.timeWindow.advanceBy=0",
                "--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidate(context);
        }
        finally {
            context.close();
        }
    }

    private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("words");
        template.sendDefault("foobar");
        ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
        assertThat(cr.value().contains("\"word\":\"foobar\",\"count\":1")).isTrue();
    }

    @EnableBinding(KStreamProcessor.class)
    @EnableAutoConfiguration
    @EnableConfigurationProperties(KStreamApplicationSupportProperties.class)
    public static class WordCountProcessorApplication {

        @Autowired
        private TimeWindows timeWindows;

        @StreamListener("input")
        @SendTo("output")
        public KStream<?, WordCount> process(KStream<Object, String> input) {

            return input
                    .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(Serdes.String(), Serdes.String())
                    .count(timeWindows, "foo-WordCounts")
                    .toStream()
                    .map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))));
        }

    }

    static class WordCount {

        private String word;

        private long count;

        private Date start;

        private Date end;

        WordCount(String word, long count, Date start, Date end) {
            this.word = word;
            this.count = count;
            this.start = start;
            this.end = end;
        }

        public String getWord() {
            return word;
        }

        public void setWord(String word) {
            this.word = word;
        }

        public long getCount() {
            return count;
        }

        public void setCount(long count) {
            this.count = count;
        }

        public Date getStart() {
            return start;
        }

        public void setStart(Date start) {
            this.start = start;
        }

        public Date getEnd() {
            return end;
        }

        public void setEnd(Date end) {
            this.end = end;
        }
    }

}
@@ -0,0 +1,167 @@
|
||||
/*
|
||||
* Copyright 2017 the original author or authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.springframework.cloud.stream.binder.kstream;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.serialization.Serdes;
|
||||
import org.apache.kafka.streams.KafkaStreams;
|
||||
import org.apache.kafka.streams.KeyValue;
|
||||
import org.apache.kafka.streams.kstream.KStream;
|
||||
import org.apache.kafka.streams.state.QueryableStoreTypes;
|
||||
import org.apache.kafka.streams.state.ReadOnlyKeyValueStore;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.ClassRule;
|
||||
import org.junit.Test;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
|
||||
import org.springframework.cloud.stream.annotation.EnableBinding;
|
||||
import org.springframework.cloud.stream.annotation.StreamListener;
|
||||
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
|
||||
import org.springframework.context.ConfigurableApplicationContext;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
|
||||
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
|
||||
import org.springframework.kafka.core.KafkaTemplate;
|
||||
import org.springframework.kafka.core.StreamsBuilderFactoryBean;
|
||||
import org.springframework.kafka.support.serializer.JsonSerde;
|
||||
import org.springframework.kafka.test.rule.KafkaEmbedded;
|
||||
import org.springframework.kafka.test.utils.KafkaTestUtils;
|
||||
import org.springframework.messaging.handler.annotation.SendTo;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
/**
|
||||
* @author Soby Chacko
|
||||
* @author Gary Russell
|
||||
*/
|
||||
public class KStreamInteractiveQueryIntegrationTests {
|
||||
|
||||
@ClassRule
|
||||
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");
|
||||
|
||||
private static Consumer<String, String> consumer;
|
||||
|
||||
@BeforeClass
|
||||
public static void setUp() throws Exception {
|
||||
Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
|
||||
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
|
||||
DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
|
||||
consumer = cf.createConsumer();
|
||||
embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void tearDown() {
|
||||
consumer.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testKstreamBinderWithPojoInputAndStringOuput() throws Exception {
|
||||
        SpringApplication app = new SpringApplication(ProductCountApplication.class);
        app.setWebEnvironment(false);
        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.cloud.stream.bindings.input.destination=foos",
                "--spring.cloud.stream.bindings.output.destination=counts-id",
                "--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidateFoo(context);
        }
        finally {
            context.close();
        }
    }

    private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("foos");
        template.sendDefault("{\"id\":\"123\"}");
        ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
        assertThat(cr.value().contains("Count for product with ID 123: 1")).isTrue();

        // Query the local state store through the interactive-query helper bean.
        ProductCountApplication.Foo foo = context.getBean(ProductCountApplication.Foo.class);
        assertThat(foo.getProductStock(123)).isEqualTo(1L);
    }

    @EnableBinding(KStreamProcessor.class)
    @EnableAutoConfiguration
    public static class ProductCountApplication {

        @Autowired
        private StreamsBuilderFactoryBean kStreamBuilderFactoryBean;

        @StreamListener("input")
        @SendTo("output")
        public KStream<?, String> process(KStream<Object, Product> input) {

            return input
                    .filter((key, product) -> product.getId() == 123)
                    .map((key, value) -> new KeyValue<>(value.id, value))
                    .groupByKey(new Serdes.IntegerSerde(), new JsonSerde<>(Product.class))
                    .count("prod-id-count-store")
                    .toStream()
                    .map((key, value) -> new KeyValue<>(null, "Count for product with ID 123: " + value));
        }

        @Bean
        public Foo foo(StreamsBuilderFactoryBean kStreamBuilderFactoryBean) {
            return new Foo(kStreamBuilderFactoryBean);
        }

        static class Foo {

            StreamsBuilderFactoryBean kStreamBuilderFactoryBean;

            Foo(StreamsBuilderFactoryBean kStreamBuilderFactoryBean) {
                this.kStreamBuilderFactoryBean = kStreamBuilderFactoryBean;
            }

            public Long getProductStock(Integer id) {
                // Interactive query against the store materialized by count() above.
                KafkaStreams streams = kStreamBuilderFactoryBean.getKafkaStreams();
                ReadOnlyKeyValueStore<Object, Object> keyValueStore =
                        streams.store("prod-id-count-store", QueryableStoreTypes.keyValueStore());
                return (Long) keyValueStore.get(id);
            }
        }
    }

    static class Product {

        Integer id;

        public Integer getId() {
            return id;
        }

        public void setId(Integer id) {
            this.id = id;
        }
    }
}
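An aside on the Kafka Streams API used above: groupByKey(Serde, Serde) and count(String) are the pre-1.0 overloads. On Kafka 1.0+ (which WordCountMultipleBranchesIntegrationTests below already targets via Materialized), the same grouping and count would look roughly like this sketch; the store name is taken from the test above, everything else is illustrative:

    input.filter((key, product) -> product.getId() == 123)
            .map((key, value) -> new KeyValue<>(value.id, value))
            // Serialized/Materialized replace the Serde and store-name overloads in Kafka 1.0+
            .groupByKey(Serialized.with(Serdes.Integer(), new JsonSerde<>(Product.class)))
            .count(Materialized.as("prod-id-count-store"));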
@@ -0,0 +1,136 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kstream.annotations.KStreamProcessor;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.serializer.JsonSerde;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 */
public class KstreamBinderPojoInputStringOutputIntegrationTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts-id");

    private static Consumer<String, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group-id", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, "counts-id");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @Test
    public void testKstreamBinderWithPojoInputAndStringOutput() throws Exception {
        SpringApplication app = new SpringApplication(ProductCountApplication.class);
        app.setWebEnvironment(false);
        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.cloud.stream.bindings.input.destination=foos",
                "--spring.cloud.stream.bindings.output.destination=counts-id",
                "--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.kstream.bindings.output.producer.keySerde=org.apache.kafka.common.serialization.Serdes$IntegerSerde",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidateFoo(context);
        }
        finally {
            context.close();
        }
    }

    private void receiveAndValidateFoo(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("foos");
        template.sendDefault("{\"id\":\"123\"}");
        ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts-id");
        assertThat(cr.value().contains("Count for product with ID 123: 1")).isTrue();
    }

    @EnableBinding(KStreamProcessor.class)
    @EnableAutoConfiguration
    public static class ProductCountApplication {

        @StreamListener("input")
        @SendTo("output")
        public KStream<Integer, String> process(KStream<Object, Product> input) {

            return input
                    .filter((key, product) -> product.getId() == 123)
                    .map((key, value) -> new KeyValue<>(value, value))
                    .groupByKey(new JsonSerde<>(Product.class), new JsonSerde<>(Product.class))
                    .count(TimeWindows.of(5000), "id-count-store")
                    .toStream()
                    .map((key, value) -> new KeyValue<>(key.key().id, "Count for product with ID 123: " + value));
        }
    }

    static class Product {

        Integer id;

        public Integer getId() {
            return id;
        }

        public void setId(Integer id) {
            this.id = id;
        }
    }
}
@@ -0,0 +1,224 @@
/*
 * Copyright 2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kstream;

import java.util.Arrays;
import java.util.Date;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Predicate;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.Input;
import org.springframework.cloud.stream.annotation.Output;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.binder.kstream.config.KStreamApplicationSupportProperties;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.messaging.handler.annotation.SendTo;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 */
public class WordCountMultipleBranchesIntegrationTests {

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, "counts", "foo", "bar");

    private static Consumer<String, String> consumer;

    @BeforeClass
    public static void setUp() throws Exception {
        Map<String, Object> consumerProps = KafkaTestUtils.consumerProps("group", "false", embeddedKafka);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        consumer = cf.createConsumer();
        embeddedKafka.consumeFromEmbeddedTopics(consumer, "counts", "foo", "bar");
    }

    @AfterClass
    public static void tearDown() {
        consumer.close();
    }

    @Test
    public void testKstreamWordCountWithStringInputAndPojoOutput() throws Exception {
        SpringApplication app = new SpringApplication(WordCountProcessorApplication.class);
        app.setWebEnvironment(false);

        ConfigurableApplicationContext context = app.run("--server.port=0",
                "--spring.cloud.stream.bindings.input.destination=words",
                "--spring.cloud.stream.bindings.output1.destination=counts",
                "--spring.cloud.stream.bindings.output1.contentType=application/json",
                "--spring.cloud.stream.bindings.output2.destination=foo",
                "--spring.cloud.stream.bindings.output2.contentType=application/json",
                "--spring.cloud.stream.bindings.output3.destination=bar",
                "--spring.cloud.stream.bindings.output3.contentType=application/json",
                "--spring.cloud.stream.kstream.binder.configuration.commit.interval.ms=1000",
                "--spring.cloud.stream.kstream.binder.configuration.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.kstream.binder.configuration.value.serde=org.apache.kafka.common.serialization.Serdes$StringSerde",
                "--spring.cloud.stream.bindings.output.producer.headerMode=raw",
                "--spring.cloud.stream.bindings.input.consumer.headerMode=raw",
                "--spring.cloud.stream.kstream.timeWindow.length=5000",
                "--spring.cloud.stream.kstream.timeWindow.advanceBy=0",
                "--spring.cloud.stream.kstream.binder.brokers=" + embeddedKafka.getBrokersAsString(),
                "--spring.cloud.stream.kstream.binder.zkNodes=" + embeddedKafka.getZookeeperConnectionString());
        try {
            receiveAndValidate(context);
        }
        finally {
            context.close();
        }
    }

    private void receiveAndValidate(ConfigurableApplicationContext context) throws Exception {
        Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
        DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf, true);
        template.setDefaultTopic("words");
        template.sendDefault("english");
        ConsumerRecord<String, String> cr = KafkaTestUtils.getSingleRecord(consumer, "counts");
        assertThat(cr.value().contains("\"word\":\"english\",\"count\":1")).isTrue();

        template.sendDefault("french");
        template.sendDefault("french");
        cr = KafkaTestUtils.getSingleRecord(consumer, "foo");
        assertThat(cr.value().contains("\"word\":\"french\",\"count\":2")).isTrue();

        template.sendDefault("spanish");
        template.sendDefault("spanish");
        template.sendDefault("spanish");
        cr = KafkaTestUtils.getSingleRecord(consumer, "bar");
        assertThat(cr.value().contains("\"word\":\"spanish\",\"count\":3")).isTrue();
    }

    @EnableBinding(KStreamProcessorX.class)
    @EnableAutoConfiguration
    @EnableConfigurationProperties(KStreamApplicationSupportProperties.class)
    public static class WordCountProcessorApplication {

        @Autowired
        private TimeWindows timeWindows;

        @StreamListener("input")
        @SendTo({"output1", "output2", "output3"})
        @SuppressWarnings("unchecked")
        public KStream<?, WordCount>[] process(KStream<Object, String> input) {

            Predicate<Object, WordCount> isEnglish = (k, v) -> v.word.equals("english");
            Predicate<Object, WordCount> isFrench = (k, v) -> v.word.equals("french");
            Predicate<Object, WordCount> isSpanish = (k, v) -> v.word.equals("spanish");

            return input
                    .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
                    .groupBy((key, value) -> value)
                    .windowedBy(timeWindows)
                    .count(Materialized.as("WordCounts-multi"))
                    .toStream()
                    .map((key, value) -> new KeyValue<>(null, new WordCount(key.key(), value, new Date(key.window().start()), new Date(key.window().end()))))
                    .branch(isEnglish, isFrench, isSpanish);
        }
    }

    interface KStreamProcessorX {

        @Input("input")
        KStream<?, ?> input();

        @Output("output1")
        KStream<?, ?> output1();

        @Output("output2")
        KStream<?, ?> output2();

        @Output("output3")
        KStream<?, ?> output3();
    }

    static class WordCount {

        private String word;

        private long count;

        private Date start;

        private Date end;

        WordCount(String word, long count, Date start, Date end) {
            this.word = word;
            this.count = count;
            this.start = start;
            this.end = end;
        }

        public String getWord() {
            return word;
        }

        public void setWord(String word) {
            this.word = word;
        }

        public long getCount() {
            return count;
        }

        public void setCount(long count) {
            this.count = count;
        }

        public Date getStart() {
            return start;
        }

        public void setStart(Date start) {
            this.start = start;
        }

        public Date getEnd() {
            return end;
        }

        public void setEnd(Date end) {
            this.end = end;
        }
    }

}
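A note on the multi-branch topology above: the KStream array returned by branch(...) is bound positionally to the destinations listed in @SendTo({"output1", "output2", "output3"}). Without the binder, the same routing would be written roughly as the following sketch, where "counts" here stands for the stream produced by the map(...) step; topic names are the ones used in the test, and serde configuration is omitted:

    KStream<Object, WordCount>[] branches = counts.branch(isEnglish, isFrench, isSpanish);
    branches[0].to("counts"); // records matching isEnglish
    branches[1].to("foo");    // records matching isFrench
    branches[2].to("bar");    // records matching isSpanish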
@@ -0,0 +1,15 @@
<configuration>
    <appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{ISO8601} %5p %t %c{2}:%L - %m%n</pattern>
        </encoder>
    </appender>
    <logger name="org.springframework.integration.kafka" level="INFO"/>
    <logger name="org.springframework.kafka" level="INFO"/>
    <logger name="org.springframework.cloud.stream" level="INFO"/>
    <logger name="org.springframework.integration.channel" level="INFO"/>
    <root level="WARN">
        <appender-ref ref="stdout"/>
    </root>
</configuration>
29
update-version.sh
Executable file
@@ -0,0 +1,29 @@
#!/bin/bash

# Run this script from the root of a local Spring Cloud Stream checkout.
# Usage: update-version.sh <new-version> <max-parent-version>

./mvnw versions:update-parent -DparentVersion="[0.0.1,$2]" -Pspring -DgenerateBackupPoms=false -DallowSnapshots=true
./mvnw versions:set -DnewVersion="$1" -DgenerateBackupPoms=false

# Sanity check: report any pre-release markers (SNAPSHOT, M*, RC*) left in any pom.
lines=$(find . -name 'pom.xml' | xargs egrep "SNAPSHOT|M[0-9]|RC[0-9]" | grep -v regex | wc -l)
if [ "$lines" -eq 0 ]; then
    echo "No snapshots, milestones, or release candidates found"
else
    echo "Snapshots, milestones, or release candidates found."
fi

lines=$(find . -name 'pom.xml' | xargs egrep "M[0-9]" | grep -v regex | wc -l)
if [ "$lines" -eq 0 ]; then
    echo "No milestones found"
else
    echo "Milestones found."
fi

lines=$(find . -name 'pom.xml' | xargs egrep "RC[0-9]" | grep -v regex | wc -l)
if [ "$lines" -eq 0 ]; then
    echo "No release candidates found"
else
    echo "Release candidates found."
fi
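A hypothetical invocation of the script above, run from the root of a spring-cloud-stream checkout; both version arguments are illustrative:

    # Set every module to 2.0.0.RC1 and allow the parent to resolve up to 2.0.0.RC1
    ./update-version.sh 2.0.0.RC1 2.0.0.RC1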