Compare commits


9 Commits

Author SHA1 Message Date
Oleg Zhurakousky
ea87792c76 Merge pull request #609 from spring-operator/polish-urls-xml-kafka-0.9-upgrade
URL Cleanup
2019-03-26 14:21:35 +01:00
Spring Operator
c3c8935014 URL Cleanup
This commit updates URLs to prefer the https protocol. Redirects are not followed, to avoid accidentally expanding intentionally shortened URLs (i.e., URLs produced by a URL shortener).

# Fixed URLs

## Fixed Success
These URLs were switched to an https URL; the response status for each is recorded below. Even though the checks succeeded, your review is still recommended.

* [ ] http://repo.spring.io/libs-milestone-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-milestone-local ([https](https://repo.spring.io/libs-milestone-local) result 302).
* [ ] http://repo.spring.io/libs-snapshot-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-snapshot-local ([https](https://repo.spring.io/libs-snapshot-local) result 302).
* [ ] http://repo.spring.io/release with 1 occurrence migrated to:
  https://repo.spring.io/release ([https](https://repo.spring.io/release) result 302).

# Ignored
These URLs were intentionally ignored: they are XML namespace identifiers, which are matched character-for-character and never dereferenced, so rewriting them would break the files that declare them.

* http://maven.apache.org/POM/4.0.0 with 14 occurrences
* http://www.w3.org/2001/XMLSchema-instance with 7 occurrences
2019-03-26 00:24:04 -05:00
Oleg Zhurakousky
92da0fe3e9 Merge pull request #584 from spring-operator/polish-urls-apache-license-kafka-0.9-upgrade
URL Cleanup
2019-03-25 14:55:34 +01:00
Spring Operator
6a60c76dd9 URL Cleanup
This commit updates URLs to prefer the https protocol. Redirects are not followed, to avoid accidentally expanding intentionally shortened URLs (i.e., URLs produced by a URL shortener).

# Fixed URLs

## Fixed Success
These URLs were switched to an https URL; the response status for each is recorded below. Even though the checks succeeded, your review is still recommended.

* [ ] http://www.apache.org/licenses/ with 1 occurrence migrated to:
  https://www.apache.org/licenses/ ([https](https://www.apache.org/licenses/) result 200).
* [ ] http://www.apache.org/licenses/LICENSE-2.0 with 26 occurrences migrated to:
  https://www.apache.org/licenses/LICENSE-2.0 ([https](https://www.apache.org/licenses/LICENSE-2.0) result 200).
2019-03-21 13:24:01 -05:00
Spring Operator
b3c26cf93f URL Cleanup
This commit updates URLs to prefer the https protocol. Redirects are not followed, to avoid accidentally expanding intentionally shortened URLs (i.e., URLs produced by a URL shortener).

# Fixed URLs

## Fixed Success
These URLs were switched to an https URL; the response status for each is recorded below. Even though the checks succeeded, your review is still recommended.

* http://maven.apache.org/xsd/maven-4.0.0.xsd with 7 occurrences migrated to:
  https://maven.apache.org/xsd/maven-4.0.0.xsd ([https](https://maven.apache.org/xsd/maven-4.0.0.xsd) result 200).
* http://www.apache.org/licenses/LICENSE-2.0 with 2 occurrences migrated to:
  https://www.apache.org/licenses/LICENSE-2.0 ([https](https://www.apache.org/licenses/LICENSE-2.0) result 200).
* http://projects.spring.io/spring-cloud with 2 occurrences migrated to:
  https://projects.spring.io/spring-cloud ([https](https://projects.spring.io/spring-cloud) result 301).
* http://www.spring.io with 2 occurrences migrated to:
  https://www.spring.io ([https](https://www.spring.io) result 301).
* http://repo.spring.io/libs-milestone-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-milestone-local ([https](https://repo.spring.io/libs-milestone-local) result 302).
* http://repo.spring.io/libs-release-local with 1 occurrence migrated to:
  https://repo.spring.io/libs-release-local ([https](https://repo.spring.io/libs-release-local) result 302).
* http://repo.spring.io/libs-snapshot-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-snapshot-local ([https](https://repo.spring.io/libs-snapshot-local) result 302).
* http://repo.spring.io/release with 1 occurrence migrated to:
  https://repo.spring.io/release ([https](https://repo.spring.io/release) result 302).

# Ignored
These URLs were intentionally ignored: they are XML namespace identifiers, which are matched character-for-character and never dereferenced, so rewriting them would break the files that declare them.

* http://maven.apache.org/POM/4.0.0 with 14 occurrences
* http://www.w3.org/2001/XMLSchema-instance with 7 occurrences
2019-03-20 09:48:06 -04:00
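
The three URL Cleanup commits above all describe the same procedure: rewrite http:// to https://, issue a request against the https variant, and record the raw status code without following redirects, so shortened URLs are never expanded. A minimal sketch of such a check in plain Java might look like the following; the HttpsProbe class name and the use of a HEAD request are illustrative assumptions, not the actual spring-operator tooling.

```java
import java.net.HttpURLConnection;
import java.net.URL;

/**
 * Hypothetical sketch of the check described in the URL Cleanup commits:
 * probe the https variant of a URL and record the raw status code,
 * never following redirects so shortened URLs are not expanded.
 */
public class HttpsProbe {

    static int probe(String httpUrl) throws Exception {
        String httpsUrl = httpUrl.replaceFirst("^http://", "https://");
        HttpURLConnection conn =
                (HttpURLConnection) new URL(httpsUrl).openConnection();
        conn.setInstanceFollowRedirects(false); // record 301/302 as-is, do not chase them
        conn.setRequestMethod("HEAD");          // assumption: a HEAD request suffices here
        return conn.getResponseCode();          // e.g. 200 for apache.org, 302 for repo.spring.io
    }

    public static void main(String[] args) throws Exception {
        // The commit messages above recorded 302 for this URL at the time.
        System.out.println(probe("http://repo.spring.io/release"));
    }
}
```
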
Soby Chacko
3b0bf53896 cleanup 2016-07-08 18:07:55 -04:00
Soby Chacko
d9670e040b Kafka 0.9 upgrade changes
Incorporating refactorings that occurred in SCS into both the 0.8 and 0.9 binders

cleanup
2016-07-08 18:04:00 -04:00
Ilayaperumal Gopinathan
fd28764e39 Initial refactoring to move classes across 2016-06-28 23:01:46 +05:30
Soby Chacko
079592363d Initial project structure changes for the 0.9 upgrade 2016-06-22 18:02:57 -04:00
120 changed files with 4625 additions and 9973 deletions

.gitignore (vendored, 1 line changed)

@@ -18,7 +18,6 @@ _site/
*.ipr
*.iws
.idea/*
*/.idea
.factorypath
dump.rdb
.apt_generated


@@ -21,7 +21,7 @@
<repository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -29,7 +29,7 @@
<repository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -37,7 +37,7 @@
<repository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/release</url>
<url>https://repo.spring.io/release</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -47,7 +47,7 @@
<pluginRepository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -55,7 +55,7 @@
<pluginRepository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>


@@ -1,6 +1,6 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@@ -192,7 +192,7 @@
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,


@@ -1 +1,3 @@
Spring Cloud Stream Binder for Apache Kafka
# spring-cloud-stream-binder-kafka
Spring Cloud Stream Binder implementation for Kafka

mvnw (vendored, 27 lines changed)

@@ -8,7 +8,7 @@
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
@@ -57,27 +57,27 @@ case "`uname`" in
#
# Look for the Apple JDKs first to preserve the existing behaviour, and then look
# for the new JDKs provided by Oracle.
#
#
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
#
# Oracle JDKs
#
export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
fi
if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
#
@@ -219,27 +219,16 @@ concat_lines() {
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
# Provide a "standardized" way to retrieve the CLI args that will
# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
export MAVEN_CMD_LINE_ARGS
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
echo "Running version check"
VERSION=$( sed '\!<parent!,\!</parent!d' `dirname $0`/pom.xml | grep '<version' | head -1 | sed -e 's/.*<version>//' -e 's!</version>.*$!!' )
echo "The found version is [${VERSION}]"
if echo $VERSION | egrep -q 'M|RC'; then
echo Activating \"milestone\" profile for version=\"$VERSION\"
echo $MAVEN_ARGS | grep -q milestone || MAVEN_ARGS="$MAVEN_ARGS -Pmilestone"
else
echo Deactivating \"milestone\" profile for version=\"$VERSION\"
echo $MAVEN_ARGS | grep -q milestone && MAVEN_ARGS=$(echo $MAVEN_ARGS | sed -e 's/-Pmilestone//')
fi
exec "$JAVACMD" \
$MAVEN_OPTS \
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
${WRAPPER_LAUNCHER} ${MAVEN_ARGS} "$@"
${WRAPPER_LAUNCHER} "$@"

mvnw.cmd (vendored, 321 lines changed)

@@ -1,145 +1,234 @@
@REM ----------------------------------------------------------------------------
@REM Licensed to the Apache Software Foundation (ASF) under one
@REM or more contributor license agreements. See the NOTICE file
@REM distributed with this work for additional information
@REM regarding copyright ownership. The ASF licenses this file
@REM to you under the Apache License, Version 2.0 (the
@REM "License"); you may not use this file except in compliance
@REM with the License. You may obtain a copy of the License at
@REM
@REM http://www.apache.org/licenses/LICENSE-2.0
@REM
@REM Unless required by applicable law or agreed to in writing,
@REM software distributed under the License is distributed on an
@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@REM KIND, either express or implied. See the License for the
@REM specific language governing permissions and limitations
@REM under the License.
@REM ----------------------------------------------------------------------------
#!/bin/sh
# ----------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ----------------------------------------------------------------------------
@REM ----------------------------------------------------------------------------
@REM Maven2 Start Up Batch script
@REM
@REM Required ENV vars:
@REM JAVA_HOME - location of a JDK home dir
@REM
@REM Optional ENV vars
@REM M2_HOME - location of maven2's installed home dir
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
@REM e.g. to debug Maven itself, use
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files
@REM ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Maven2 Start Up Batch script
#
# Required ENV vars:
# ------------------
# JAVA_HOME - location of a JDK home dir
#
# Optional ENV vars
# -----------------
# M2_HOME - location of maven2's installed home dir
# MAVEN_OPTS - parameters passed to the Java VM when running Maven
# e.g. to debug Maven itself, use
# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
# MAVEN_SKIP_RC - flag to disable loading of mavenrc files
# ----------------------------------------------------------------------------
@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
@echo off
@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on'
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%
if [ -z "$MAVEN_SKIP_RC" ] ; then
@REM set %HOME% to equivalent of $HOME
if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%")
if [ -f /etc/mavenrc ] ; then
. /etc/mavenrc
fi
@REM Execute a user defined script before this one
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre
@REM check for pre script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat"
if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd"
:skipRcPre
if [ -f "$HOME/.mavenrc" ] ; then
. "$HOME/.mavenrc"
fi
@setlocal
fi
set ERROR_CODE=0
# OS specific support. $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
mingw=false
case "`uname`" in
CYGWIN*) cygwin=true ;;
MINGW*) mingw=true;;
Darwin*) darwin=true
#
# Look for the Apple JDKs first to preserve the existing behaviour, and then look
# for the new JDKs provided by Oracle.
#
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
#
# Apple JDKs
#
export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
#
# Oracle JDKs
#
export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
fi
@REM To isolate internal variables from possible post scripts, we use another setlocal
@setlocal
if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
#
# Apple JDKs
#
export JAVA_HOME=`/usr/libexec/java_home`
fi
;;
esac
@REM ==== START VALIDATION ====
if not "%JAVA_HOME%" == "" goto OkJHome
if [ -z "$JAVA_HOME" ] ; then
if [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
fi
fi
echo.
echo Error: JAVA_HOME not found in your environment. >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error
if [ -z "$M2_HOME" ] ; then
## resolve links - $0 may be a link to maven's home
PRG="$0"
:OkJHome
if exist "%JAVA_HOME%\bin\java.exe" goto init
# need this for relative symlinks
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG="`dirname "$PRG"`/$link"
fi
done
echo.
echo Error: JAVA_HOME is set to an invalid directory. >&2
echo JAVA_HOME = "%JAVA_HOME%" >&2
echo Please set the JAVA_HOME variable in your environment to match the >&2
echo location of your Java installation. >&2
echo.
goto error
saveddir=`pwd`
@REM ==== END VALIDATION ====
M2_HOME=`dirname "$PRG"`/..
:init
# make it fully qualified
M2_HOME=`cd "$M2_HOME" && pwd`
set MAVEN_CMD_LINE_ARGS=%*
cd "$saveddir"
# echo Using m2 at $M2_HOME
fi
@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
@REM Fallback to current working directory if not found.
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --unix "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR%
IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir
# For Migwn, ensure paths are in UNIX format before anything is touched
if $mingw ; then
[ -n "$M2_HOME" ] &&
M2_HOME="`(cd "$M2_HOME"; pwd)`"
[ -n "$JAVA_HOME" ] &&
JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
# TODO classpath?
fi
set EXEC_DIR=%CD%
set WDIR=%EXEC_DIR%
:findBaseDir
IF EXIST "%WDIR%"\.mvn goto baseDirFound
cd ..
IF "%WDIR%"=="%CD%" goto baseDirNotFound
set WDIR=%CD%
goto findBaseDir
if [ -z "$JAVA_HOME" ]; then
javaExecutable="`which javac`"
if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then
# readlink(1) is not available as standard on Solaris 10.
readLink=`which readlink`
if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then
if $darwin ; then
javaHome="`dirname \"$javaExecutable\"`"
javaExecutable="`cd \"$javaHome\" && pwd -P`/javac"
else
javaExecutable="`readlink -f \"$javaExecutable\"`"
fi
javaHome="`dirname \"$javaExecutable\"`"
javaHome=`expr "$javaHome" : '\(.*\)/bin'`
JAVA_HOME="$javaHome"
export JAVA_HOME
fi
fi
fi
:baseDirFound
set MAVEN_PROJECTBASEDIR=%WDIR%
cd "%EXEC_DIR%"
goto endDetectBaseDir
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD="`which java`"
fi
fi
:baseDirNotFound
set MAVEN_PROJECTBASEDIR=%EXEC_DIR%
cd "%EXEC_DIR%"
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly." >&2
echo " We cannot execute $JAVACMD" >&2
exit 1
fi
:endDetectBaseDir
if [ -z "$JAVA_HOME" ] ; then
echo "Warning: JAVA_HOME environment variable is not set."
fi
IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig
CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher
@setlocal EnableExtensions EnableDelayedExpansion
for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a
@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS%
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
[ -n "$M2_HOME" ] &&
M2_HOME=`cygpath --path --windows "$M2_HOME"`
[ -n "$JAVA_HOME" ] &&
JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
[ -n "$CLASSPATH" ] &&
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
fi
:endReadAdditionalConfig
# traverses directory structure from process work directory to filesystem root
# first directory with .mvn subdirectory is considered project base directory
find_maven_basedir() {
local basedir=$(pwd)
local wdir=$(pwd)
while [ "$wdir" != '/' ] ; do
if [ -d "$wdir"/.mvn ] ; then
basedir=$wdir
break
fi
wdir=$(cd "$wdir/.."; pwd)
done
echo "${basedir}"
}
SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
# concatenates all lines of a file
concat_lines() {
if [ -f "$1" ]; then
echo "$(tr -s '\n' ' ' < "$1")"
fi
}
set WRAPPER_JAR="".\.mvn\wrapper\maven-wrapper.jar""
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CMD_LINE_ARGS%
if ERRORLEVEL 1 goto error
goto end
# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
export MAVEN_CMD_LINE_ARGS
:error
set ERROR_CODE=1
WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain
:end
@endlocal & set ERROR_CODE=%ERROR_CODE%
exec "$JAVACMD" \
$MAVEN_OPTS \
-classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
"-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
${WRAPPER_LAUNCHER} "$@"
if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
@REM check for post script, once with legacy .bat ending and once with .cmd ending
if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
:skipRcPost
@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
if "%MAVEN_BATCH_PAUSE%" == "on" pause
if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
exit /B %ERROR_CODE%

pom.xml (166 lines changed)

@@ -1,159 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>2.0.0.M1</version>
<version>1.1.0.BUILD-SNAPSHOT</version>
<packaging>pom</packaging>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-build</artifactId>
<version>2.0.0.M1</version>
<version>1.1.1.RELEASE</version>
<relativePath />
</parent>
<properties>
<java.version>1.7</java.version>
<kafka.version>0.10.2.0</kafka.version>
<spring-kafka.version>2.0.0.M2</spring-kafka.version>
<spring-integration-kafka.version>3.0.0.M1</spring-integration-kafka.version>
<spring-cloud-stream.version>2.0.0.M1</spring-cloud-stream.version>
<spring-boot.version>1.4.0.BUILD-SNAPSHOT</spring-boot.version>
</properties>
<modules>
<module>spring-cloud-stream-binder-kafka</module>
<module>spring-cloud-starter-stream-kafka</module>
<module>spring-cloud-stream-binder-kafka-docs</module>
<module>spring-cloud-stream-binder-kafka-0.10.1-test</module>
<module>spring-cloud-stream-binder-kafka-core</module>
</modules>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream</artifactId>
<version>${spring-cloud-stream.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-codec</artifactId>
<version>${spring-cloud-stream.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>${kafka.version}</version>
<exclusions>
<exclusion>
<groupId>jline</groupId>
<artifactId>jline</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
<version>${spring-integration-kafka.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
<version>${spring-cloud-stream.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<scope>test</scope>
<version>${spring-kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<classifier>test</classifier>
<version>${kafka.version}</version>
<artifactId>spring-cloud-stream-dependencies</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.7</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>2.17</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>7.1</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<configuration>
<quiet>true</quiet>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<redirectTestOutputToFile>true</redirectTestOutputToFile>
</configuration>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-build-tools</artifactId>
<version>2.0.0.M1</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>checkstyle-validation</id>
<phase>validate</phase>
<configuration>
<configLocation>checkstyle.xml</configLocation>
<encoding>UTF-8</encoding>
<consoleOutput>true</consoleOutput>
<failsOnError>true</failsOnError>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
</configuration>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<modules>
<module>spring-cloud-stream-binder-kafka-common</module>
<module>spring-cloud-stream-binder-kafka</module>
<module>spring-cloud-starter-stream-kafka</module>
<module>spring-cloud-stream-binder-kafka-0.8</module>
<module>spring-cloud-starter-stream-kafka-0.8</module>
<module>spring-cloud-stream-binder-kafka-test-support</module>
</modules>
<profiles>
<profile>
<id>spring</id>
@@ -161,7 +39,7 @@
<repository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -172,7 +50,7 @@
<repository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -180,7 +58,7 @@
<repository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/release</url>
<url>https://repo.spring.io/release</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -190,7 +68,7 @@
<pluginRepository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -201,7 +79,7 @@
<pluginRepository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -209,7 +87,7 @@
<pluginRepository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/libs-release-local</url>
<url>https://repo.spring.io/libs-release-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>


@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<artifactId>spring-cloud-starter-stream-kafka-0.8</artifactId>
<description>Spring Cloud Starter Stream Kafka for 0.8</description>
<url>https://projects.spring.io/spring-cloud</url>
<organization>
<name>Pivotal Software, Inc.</name>
<url>https://www.spring.io</url>
</organization>
<properties>
<main.basedir>${basedir}/../..</main.basedir>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-0.8</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>


@@ -0,0 +1 @@
provides: spring-cloud-starter-stream-kafka-0.8


@@ -1,17 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>2.0.0.M1</version>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
<description>Spring Cloud Starter Stream Kafka</description>
<url>http://projects.spring.io/spring-cloud</url>
<url>https://projects.spring.io/spring-cloud</url>
<organization>
<name>Pivotal Software, Inc.</name>
<url>http://www.spring.io</url>
<url>https://www.spring.io</url>
</organization>
<properties>
<main.basedir>${basedir}/../..</main.basedir>


@@ -1,120 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>2.0.0.M1</version>
</parent>
<artifactId>spring-cloud-stream-binder-kafka-0.10.1-test</artifactId>
<description>Spring Cloud Stream Kafka Binder 0.10.1 Tests</description>
<url>http://projects.spring.io/spring-cloud</url>
<organization>
<name>Pivotal Software, Inc.</name>
<url>http://www.spring.io</url>
</organization>
<properties>
<main.basedir>${basedir}/../..</main.basedir>
<kafka.version>0.10.1.1</kafka.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<version>1.1.6.RELEASE</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-schema</artifactId>
<version>${spring-cloud-stream.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-avro-serializer</artifactId>
<version>3.1.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
<artifactId>kafka-schema-registry</artifactId>
<version>3.1.2</version>
<scope>test</scope>
</dependency>
</dependencies>
<repositories>
<repository>
<id>confluent</id>
<url>http://packages.confluent.io/maven/</url>
</repository>
</repositories>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>3.0.2</version>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>


@@ -1,60 +0,0 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
/**
* Test support class for {@link KafkaMessageChannelBinder}.
* @author Eric Bottard
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Soby Chacko
*/
public class Kafka10TestBinder extends AbstractKafkaTestBinder {
public Kafka10TestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
try {
AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
KafkaTopicProvisioner provisioningProvider =
new KafkaTopicProvisioner(binderConfiguration, adminUtilsOperation);
provisioningProvider.afterPropertiesSet();
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration, provisioningProvider);
binder.setCodec(AbstractKafkaTestBinder.getCodec());
ProducerListener producerListener = new LoggingProducerListener();
binder.setProducerListener(producerListener);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
this.setBinder(binder);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
}


@@ -1,239 +0,0 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.assertj.core.api.Assertions;
import org.eclipse.jetty.server.Server;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.MessageBuilder;
import static org.junit.Assert.assertTrue;
/**
* Integration tests for the {@link KafkaMessageChannelBinder}.
*
* @author Eric Bottard
* @author Marius Bogoevici
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
public class Kafka_0_10_1_BinderTests extends KafkaBinderTests {
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
@ClassRule
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
private Kafka10TestBinder binder;
private Kafka10AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
@Override
protected void binderBindUnbindLatency() throws InterruptedException {
Thread.sleep(500);
}
@Override
protected Kafka10TestBinder getBinder() {
if (binder == null) {
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binder = new Kafka10TestBinder(binderConfiguration);
}
return binder;
}
protected KafkaBinderConfigurationProperties createConfigurationProperties() {
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
List<String> bAddresses = new ArrayList<>();
for (BrokerAddress bAddress : brokerAddresses) {
bAddresses.add(bAddress.toString());
}
String[] foo = new String[bAddresses.size()];
binderConfiguration.setBrokers(bAddresses.toArray(foo));
binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
return binderConfiguration;
}
@Override
protected int partitionSize(String topic) {
return consumerFactory().createConsumer().partitionsFor(topic).size();
}
@Override
protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
kafkaBinderConfigurationProperties.getZkSessionTimeout(), kafkaBinderConfigurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
return new ZkUtils(zkClient, null, false);
}
@Override
protected void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions, int replicationFactor, Properties topicConfig) {
adminUtilsOperation.invokeCreateTopic(zkUtils, topic, partitions, replicationFactor, new Properties());
}
@Override
protected int invokePartitionSize(String topic, ZkUtils zkUtils) {
return adminUtilsOperation.partitionSize(topic, zkUtils);
}
@Override
public String getKafkaOffsetHeaderKey() {
return KafkaHeaders.OFFSET;
}
@Override
protected Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
return new Kafka10TestBinder(kafkaBinderConfigurationProperties);
}
@Before
public void init() {
String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
if (multiplier != null) {
timeoutMultiplier = Double.parseDouble(multiplier);
}
}
@Override
protected boolean usesExplicitRouting() {
return false;
}
@Override
protected String getClassUnderTestName() {
return CLASS_UNDER_TEST_NAME;
}
@Override
public Spy spyOn(final String name) {
throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
}
private ConsumerFactory<byte[], byte[]> consumerFactory() {
Map<String, Object> props = new HashMap<>();
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
return new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
}
@Test
@SuppressWarnings("unchecked")
public void testCustomAvroSerialization() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
Map<String, Object> schemaRegistryProps = new HashMap<>();
schemaRegistryProps.put("kafkastore.connection.url", configurationProperties.getZkConnectionString());
schemaRegistryProps.put("listeners", "http://0.0.0.0:8082");
schemaRegistryProps.put("port", "8082");
schemaRegistryProps.put("kafkastore.topic", "_schemas");
SchemaRegistryConfig config = new SchemaRegistryConfig(schemaRegistryProps);
SchemaRegistryRestApplication app = new SchemaRegistryRestApplication(config);
Server server = app.createServer();
server.start();
long endTime = System.currentTimeMillis() + 5000;
while(true) {
if (server.isRunning()) {
break;
}
else if (System.currentTimeMillis() > endTime) {
Assertions.fail("Kafka Schema Registry Server failed to start");
}
}
User1 firstOutboundFoo = new User1();
String userName1 = "foo-name" + UUID.randomUUID().toString();
String favColor1 = "foo-color" + UUID.randomUUID().toString();
firstOutboundFoo.setName(userName1);
firstOutboundFoo.setFavoriteColor(favColor1);
Message<?> message = MessageBuilder.withPayload(firstOutboundFoo).build();
SubscribableChannel moduleOutputChannel = new DirectChannel();
String testTopicName = "existing" + System.currentTimeMillis();
invokeCreateTopic(zkUtils, testTopicName, 6, 1, new Properties());
configurationProperties.setAutoAddPartitions(true);
Binder binder = getBinder(configurationProperties);
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.getExtension().getConfiguration().put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
producerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
producerProperties.setUseNativeEncoding(true);
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
consumerProperties.getExtension().getConfiguration().put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
consumerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "test", moduleInputChannel, consumerProperties);
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
Assertions.assertThat(inbound).isNotNull();
assertTrue(message.getPayload() instanceof User1);
User1 receivedUser = (User1) message.getPayload();
Assertions.assertThat(receivedUser.getName()).isEqualTo(userName1);
Assertions.assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
producerBinding.unbind();
consumerBinding.unbind();
}
}


@@ -1,85 +0,0 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.reflect.Nullable;
import org.apache.avro.specific.SpecificRecordBase;
import org.springframework.core.io.ClassPathResource;
/**
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
*/
public class User1 extends SpecificRecordBase {
@Nullable
private String name;
@Nullable
private String favoriteColor;
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
public String getFavoriteColor() {
return this.favoriteColor;
}
public void setFavoriteColor(String favoriteColor) {
this.favoriteColor = favoriteColor;
}
@Override
public Schema getSchema() {
try {
return new Schema.Parser().parse(new ClassPathResource("schemas/users_v1.schema").getInputStream());
}
catch (IOException e) {
throw new IllegalStateException(e);
}
}
@Override
public Object get(int i) {
if (i == 0) {
return getName().toString();
}
if (i == 1) {
return getFavoriteColor().toString();
}
return null;
}
@Override
public void put(int i, Object o) {
if (i == 0) {
setName((String) o);
}
if (i == 1) {
setFavoriteColor((String) o);
}
}
}


@@ -0,0 +1,136 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka-0.8</artifactId>
<packaging>jar</packaging>
<name>spring-cloud-stream-binder-kafka-0.8</name>
<description>Kafka binder implementation</description>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<properties>
<kafka.version>0.8.2.2</kafka.version>
<curator.version>2.6.0</curator.version>
<spring-integration-kafka.version>1.3.1.BUILD-SNAPSHOT</spring-integration-kafka.version>
<rxjava-math.version>1.0.0</rxjava-math.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
<scope>test</scope>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
<version>${spring-integration-kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro-compiler</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava-math</artifactId>
<version>${rxjava-math.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>${kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>


@@ -0,0 +1,95 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import kafka.cluster.Broker;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils$;
import org.I0Itec.zkclient.ZkClient;
import scala.collection.JavaConversions;
import scala.collection.Seq;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.integration.kafka.core.BrokerAddress;
import org.springframework.integration.kafka.core.Partition;
/**
* Health indicator for Kafka.
*
* @author Ilayaperumal Gopinathan
*/
public class KafkaBinderHealthIndicator implements HealthIndicator {
private final KafkaMessageChannelBinder binder;
private final KafkaBinderConfigurationProperties configurationProperties;
public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
KafkaBinderConfigurationProperties configurationProperties) {
this.binder = binder;
this.configurationProperties = configurationProperties;
}
@Override
public Health health() {
ZkClient zkClient = null;
try {
zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(),
configurationProperties.getZkConnectionTimeout(), ZKStringSerializer$.MODULE$);
Set<String> brokersInClusterSet = new HashSet<>();
Seq<Broker> allBrokersInCluster = ZkUtils$.MODULE$.getAllBrokersInCluster(zkClient);
Collection<Broker> brokersInCluster = JavaConversions.asJavaCollection(allBrokersInCluster);
for (Broker broker : brokersInCluster) {
brokersInClusterSet.add(broker.connectionString());
}
Set<String> downMessages = new HashSet<>();
for (Map.Entry<String, Collection<Partition>> entry : binder.getTopicsInUse().entrySet()) {
for (Partition partition : entry.getValue()) {
BrokerAddress address = binder.getConnectionFactory().getLeader(partition);
if (!brokersInClusterSet.contains(address.toString())) {
downMessages.add(address.toString());
}
}
}
if (downMessages.isEmpty()) {
return Health.up().build();
}
return Health.down().withDetail("Following brokers are down: ", downMessages.toString()).build();
}
catch (Exception e) {
return Health.down(e).build();
}
finally {
if (zkClient != null) {
try {
zkClient.close();
}
catch (Exception e) {
// ignore
}
}
}
}
}


@@ -0,0 +1,713 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.utils.Utils;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.context.Lifecycle;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.kafka.core.ConnectionFactory;
import org.springframework.integration.kafka.core.DefaultConnectionFactory;
import org.springframework.integration.kafka.core.KafkaMessage;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.core.ZookeeperConfiguration;
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
import org.springframework.integration.kafka.listener.AcknowledgingMessageListener;
import org.springframework.integration.kafka.listener.Acknowledgment;
import org.springframework.integration.kafka.listener.ErrorHandler;
import org.springframework.integration.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.integration.kafka.listener.KafkaNativeOffsetManager;
import org.springframework.integration.kafka.listener.MessageListener;
import org.springframework.integration.kafka.listener.OffsetManager;
import org.springframework.integration.kafka.support.KafkaHeaders;
import org.springframework.integration.kafka.support.KafkaProducerContext;
import org.springframework.integration.kafka.support.ProducerConfiguration;
import org.springframework.integration.kafka.support.ProducerFactoryBean;
import org.springframework.integration.kafka.support.ProducerListener;
import org.springframework.integration.kafka.support.ProducerMetadata;
import org.springframework.integration.kafka.support.ZookeeperConnect;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.MessagingException;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import kafka.admin.AdminUtils;
import kafka.api.OffsetRequest;
import kafka.api.TopicMetadata;
import kafka.common.ErrorMapping;
import kafka.serializer.DefaultDecoder;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import scala.collection.Seq;
/**
* A {@link Binder} that uses Kafka as the underlying middleware.
* @author Eric Bottard
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
* @author David Turanski
* @author Gary Russell
* @author Mark Fisher
* @author Soby Chacko
*/
public class KafkaMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
ExtendedProducerProperties<KafkaProducerProperties>>
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties>,
DisposableBean {
private static final ByteArraySerializer BYTE_ARRAY_SERIALIZER = new ByteArraySerializer();
private static final ThreadFactory DAEMON_THREAD_FACTORY;
static {
CustomizableThreadFactory threadFactory = new CustomizableThreadFactory("kafka-binder-");
threadFactory.setDaemon(true);
DAEMON_THREAD_FACTORY = threadFactory;
}
private final KafkaBinderConfigurationProperties configurationProperties;
private RetryOperations metadataRetryOperations;
private final Map<String, Collection<Partition>> topicsInUse = new HashMap<>();
// -------- Default values for properties -------
private ConnectionFactory connectionFactory;
private ProducerListener producerListener;
private volatile Producer<byte[], byte[]> dlqProducer;
private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties) {
super(false, headersToMap(configurationProperties));
this.configurationProperties = configurationProperties;
}
private static String[] headersToMap(KafkaBinderConfigurationProperties configurationProperties) {
String[] headersToMap;
if (ObjectUtils.isEmpty(configurationProperties.getHeaders())) {
headersToMap = BinderHeaders.STANDARD_HEADERS;
}
else {
String[] combinedHeadersToMap = Arrays.copyOfRange(BinderHeaders.STANDARD_HEADERS, 0,
BinderHeaders.STANDARD_HEADERS.length + configurationProperties.getHeaders().length);
System.arraycopy(configurationProperties.getHeaders(), 0, combinedHeadersToMap,
BinderHeaders.STANDARD_HEADERS.length,
configurationProperties.getHeaders().length);
headersToMap = combinedHeadersToMap;
}
return headersToMap;
}
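// Illustration (not in the original source): if the binder is configured with two custom
// headers, the array built above is BinderHeaders.STANDARD_HEADERS followed by those two
// entries; with no custom headers configured, only the standard headers are mapped.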
ConnectionFactory getConnectionFactory() {
return this.connectionFactory;
}
public void setProducerListener(ProducerListener producerListener) {
this.producerListener = producerListener;
}
/**
* Retry configuration for operations such as validating topic creation
* @param metadataRetryOperations the retry configuration
*/
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
this.metadataRetryOperations = metadataRetryOperations;
}
public void setExtendedBindingProperties(KafkaExtendedBindingProperties extendedBindingProperties) {
this.extendedBindingProperties = extendedBindingProperties;
}
@Override
public void onInit() throws Exception {
ZookeeperConfiguration configuration = new ZookeeperConfiguration(
new ZookeeperConnect(this.configurationProperties.getZkConnectionString()));
configuration.setBufferSize(this.configurationProperties.getSocketBufferSize());
configuration.setMaxWait(this.configurationProperties.getMaxWait());
DefaultConnectionFactory defaultConnectionFactory = new DefaultConnectionFactory(configuration);
defaultConnectionFactory.afterPropertiesSet();
this.connectionFactory = defaultConnectionFactory;
if (this.metadataRetryOperations == null) {
RetryTemplate retryTemplate = new RetryTemplate();
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
simpleRetryPolicy.setMaxAttempts(10);
retryTemplate.setRetryPolicy(simpleRetryPolicy);
ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
backOffPolicy.setInitialInterval(100);
backOffPolicy.setMultiplier(2);
backOffPolicy.setMaxInterval(1000);
retryTemplate.setBackOffPolicy(backOffPolicy);
this.metadataRetryOperations = retryTemplate;
}
}
@Override
public void destroy() throws Exception {
if (this.dlqProducer != null) {
this.dlqProducer.close();
this.dlqProducer = null;
}
}
/**
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
*/
static void validateTopicName(String topicName) {
try {
byte[] utf8 = topicName.getBytes("UTF-8");
for (byte b : utf8) {
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|| (b == '-') || (b == '_'))) {
throw new IllegalArgumentException(
"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
}
}
}
catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Can't happen
}
}
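// Example (not in the original source): validateTopicName("error.myTopic.myGroup") passes,
// while validateTopicName("foo:bar") throws IllegalArgumentException, as exercised by
// testValidateKafkaTopicName in the test class further down.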
@Override
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
return this.extendedBindingProperties.getExtendedConsumerProperties(channelName);
}
@Override
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
return this.extendedBindingProperties.getExtendedProducerProperties(channelName);
}
Map<String, Collection<Partition>> getTopicsInUse() {
return this.topicsInUse;
}
@Override
protected Object createConsumerDestinationIfNecessary(String name, String group,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
validateTopicName(name);
if (properties.getInstanceCount() == 0) {
throw new IllegalArgumentException("Instance count cannot be zero");
}
Collection<Partition> allPartitions = ensureTopicCreated(name,
properties.getInstanceCount() * properties.getConcurrency());
Collection<Partition> listenedPartitions;
if (properties.getInstanceCount() == 1) {
listenedPartitions = allPartitions;
}
else {
listenedPartitions = new ArrayList<>();
for (Partition partition : allPartitions) {
// divide partitions across modules
if ((partition.getId() % properties.getInstanceCount()) == properties.getInstanceIndex()) {
listenedPartitions.add(partition);
}
}
}
this.topicsInUse.put(name, listenedPartitions);
return listenedPartitions;
}
@Override
@SuppressWarnings("unchecked")
protected AbstractEndpoint createConsumerEndpoint(String name, String group, Object queue,
MessageChannel inputChannel, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
Collection<Partition> listenedPartitions = (Collection<Partition>) queue;
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
int concurrency = Math.min(properties.getConcurrency(), listenedPartitions.size());
final ExecutorService dispatcherTaskExecutor =
Executors.newFixedThreadPool(concurrency, DAEMON_THREAD_FACTORY);
final KafkaMessageListenerContainer messageListenerContainer = new KafkaMessageListenerContainer(
this.connectionFactory, listenedPartitions.toArray(new Partition[listenedPartitions.size()])) {
@Override
public void stop(Runnable callback) {
super.stop(callback);
if (getOffsetManager() instanceof DisposableBean) {
try {
((DisposableBean) getOffsetManager()).destroy();
}
catch (Exception e) {
KafkaMessageChannelBinder.this.logger.error("Error while closing the offset manager", e);
}
}
dispatcherTaskExecutor.shutdown();
}
};
if (this.logger.isDebugEnabled()) {
this.logger.debug(
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
}
boolean anonymous = !StringUtils.hasText(group);
Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
"DLQ support is not available for anonymous subscriptions");
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
long referencePoint = properties.getExtension().getStartOffset() != null
? properties.getExtension().getStartOffset().getReferencePoint()
: (anonymous ? OffsetRequest.LatestTime() : OffsetRequest.EarliestTime());
OffsetManager offsetManager = createOffsetManager(consumerGroup, referencePoint);
if (properties.getExtension().isResetOffsets()) {
offsetManager.resetOffsets(listenedPartitions);
}
messageListenerContainer.setOffsetManager(offsetManager);
messageListenerContainer.setQueueSize(this.configurationProperties.getQueueSize());
messageListenerContainer.setMaxFetch(this.configurationProperties.getFetchSize());
boolean autoCommitOnError = properties.getExtension().getAutoCommitOnError() != null
? properties.getExtension().getAutoCommitOnError()
: properties.getExtension().isAutoCommitOffset() && properties.getExtension().isEnableDlq();
messageListenerContainer.setAutoCommitOnError(autoCommitOnError);
messageListenerContainer.setRecoveryInterval(properties.getExtension().getRecoveryInterval());
messageListenerContainer.setConcurrency(concurrency);
messageListenerContainer.setDispatcherTaskExecutor(dispatcherTaskExecutor);
final KafkaMessageDrivenChannelAdapter kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter(
messageListenerContainer);
kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
kafkaMessageDrivenChannelAdapter.setKeyDecoder(new DefaultDecoder(null));
kafkaMessageDrivenChannelAdapter.setPayloadDecoder(new DefaultDecoder(null));
kafkaMessageDrivenChannelAdapter.setOutputChannel(inputChannel);
kafkaMessageDrivenChannelAdapter.setAutoCommitOffset(properties.getExtension().isAutoCommitOffset());
kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
// we need to wrap the adapter listener into a retrying listener so that the retry
// logic is applied before the ErrorHandler is executed
final RetryTemplate retryTemplate = buildRetryTemplateIfRetryEnabled(properties);
if (retryTemplate != null) {
if (properties.getExtension().isAutoCommitOffset()) {
final MessageListener originalMessageListener = (MessageListener) messageListenerContainer
.getMessageListener();
messageListenerContainer.setMessageListener(new MessageListener() {
@Override
public void onMessage(final KafkaMessage message) {
try {
retryTemplate.execute(new RetryCallback<Object, Throwable>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message);
return null;
}
});
}
catch (Throwable throwable) {
if (throwable instanceof RuntimeException) {
throw (RuntimeException) throwable;
}
else {
throw new RuntimeException(throwable);
}
}
}
});
}
else {
messageListenerContainer.setMessageListener(new AcknowledgingMessageListener() {
final AcknowledgingMessageListener originalMessageListener =
(AcknowledgingMessageListener) messageListenerContainer
.getMessageListener();
@Override
public void onMessage(final KafkaMessage message, final Acknowledgment acknowledgment) {
retryTemplate.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message, acknowledgment);
return null;
}
});
}
});
}
}
if (properties.getExtension().isEnableDlq()) {
final String dlqTopic = "error." + name + "." + consumerGroup;
initDlqProducer();
messageListenerContainer.setErrorHandler(new ErrorHandler() {
@Override
public void handle(Exception thrownException, final KafkaMessage message) {
final byte[] key = message.getMessage().key() != null ? Utils.toArray(message.getMessage().key())
: null;
final byte[] payload = message.getMessage().payload() != null
? Utils.toArray(message.getMessage().payload()) : null;
KafkaMessageChannelBinder.this.dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload),
new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
StringBuilder messageLog = new StringBuilder();
messageLog.append(" a message with key='"
+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
messageLog.append(" and payload='"
+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
messageLog.append(" received from " + message.getMetadata().getPartition());
if (exception != null) {
KafkaMessageChannelBinder.this.logger.error(
"Error sending to DLQ" + messageLog.toString(), exception);
}
else {
if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
KafkaMessageChannelBinder.this.logger.debug(
"Sent to DLQ " + messageLog.toString());
}
}
}
});
}
});
}
kafkaMessageDrivenChannelAdapter.start();
return kafkaMessageDrivenChannelAdapter;
}
@Override
protected MessageHandler createProducerMessageHandler(final String name,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>(name, byte[].class, byte[].class,
BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
producerMetadata.setSync(producerProperties.getExtension().isSync());
KafkaProducerProperties.CompressionType compressionType = producerProperties.getExtension().getCompressionType();
producerMetadata.setCompressionType(fromKafkaProducerPropertiesCompressionType(compressionType));
producerMetadata.setBatchBytes(producerProperties.getExtension().getBufferSize());
Properties additional = new Properties();
additional.put(ProducerConfig.ACKS_CONFIG, String.valueOf(this.configurationProperties.getRequiredAcks()));
additional.put(ProducerConfig.LINGER_MS_CONFIG,
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
ProducerFactoryBean<byte[], byte[]> producerFB = new ProducerFactoryBean<>(producerMetadata,
this.configurationProperties.getKafkaConnectionString(), additional);
final ProducerConfiguration<byte[], byte[]> producerConfiguration = new ProducerConfiguration<>(
producerMetadata, producerFB.getObject());
producerConfiguration.setProducerListener(this.producerListener);
KafkaProducerContext kafkaProducerContext = new KafkaProducerContext();
kafkaProducerContext.setProducerConfigurations(
Collections.<String, ProducerConfiguration<?, ?>>singletonMap(name, producerConfiguration));
return new ProducerConfigurationMessageHandler(producerConfiguration, name);
}
ProducerMetadata.CompressionType fromKafkaProducerPropertiesCompressionType(
KafkaProducerProperties.CompressionType compressionType) {
switch (compressionType) {
case snappy : return ProducerMetadata.CompressionType.snappy;
case gzip : return ProducerMetadata.CompressionType.gzip;
default: return ProducerMetadata.CompressionType.none;
}
}
@Override
protected void createProducerDestinationIfNecessary(String name,
ExtendedProducerProperties<KafkaProducerProperties> properties) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
validateTopicName(name);
Collection<Partition> partitions = ensureTopicCreated(name, properties.getPartitionCount());
if (properties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + name + " is "
+ properties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
this.topicsInUse.put(name, partitions);
}
/**
* Creates a Kafka topic if needed, or tries to increase its partition count to the
* desired number.
*/
private Collection<Partition> ensureTopicCreated(final String topicName, final int partitionCount) {
final ZkClient zkClient = new ZkClient(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
try {
final Properties topicConfig = new Properties();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkClient);
if (topicMetadata.errorCode() == ErrorMapping.NoError()) {
// only consider minPartitionCount for resizing if autoAddPartitions is true
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
? Math.max(this.configurationProperties.getMinPartitionCount(),
partitionCount) : partitionCount;
if (topicMetadata.partitionsMetadata().size() < effectivePartitionCount) {
if (this.configurationProperties.isAutoAddPartitions()) {
AdminUtils.addPartitions(zkClient, topicName, effectivePartitionCount, null, false,
new Properties());
}
else {
int topicSize = topicMetadata.partitionsMetadata().size();
throw new BinderException("The number of expected partitions was: " + partitionCount + ", but "
+ topicSize + (topicSize > 1 ? " have " : " has ") + "been found instead."
+ "Consider either increasing the partition count of the topic or enabling " +
"`autoAddPartitions`");
}
}
}
else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
if (this.configurationProperties.isAutoCreateTopics()) {
Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
// always consider minPartitionCount for topic creation
int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
partitionCount);
final scala.collection.Map<Object, Seq<Object>> replicaAssignment = AdminUtils
.assignReplicasToBrokers(brokerList, effectivePartitionCount,
this.configurationProperties.getReplicationFactor(), -1, -1);
this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) throws RuntimeException {
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topicName,
replicaAssignment, topicConfig, true);
return null;
}
});
}
else {
throw new BinderException("Topic " + topicName + " does not exist");
}
}
else {
throw new BinderException("Error fetching Kafka topic metadata: ",
ErrorMapping.exceptionFor(topicMetadata.errorCode()));
}
try {
Collection<Partition> partitions = this.metadataRetryOperations
.execute(new RetryCallback<Collection<Partition>, Exception>() {
@Override
public Collection<Partition> doWithRetry(RetryContext context) throws Exception {
KafkaMessageChannelBinder.this.connectionFactory.refreshMetadata(
Collections.singleton(topicName));
Collection<Partition> partitions =
KafkaMessageChannelBinder.this.connectionFactory.getPartitions(topicName);
// do a sanity check on the partition set
if (partitions.size() < partitionCount) {
throw new IllegalStateException("The number of expected partitions was: "
+ partitionCount + ", but " + partitions.size()
+ (partitions.size() > 1 ? " have " : " has ") + "been found instead");
}
KafkaMessageChannelBinder.this.connectionFactory.getLeaders(partitions);
return partitions;
}
});
return partitions;
}
catch (Exception e) {
this.logger.error("Cannot initialize Binder", e);
throw new BinderException("Cannot initialize binder:", e);
}
}
finally {
zkClient.close();
}
}
private synchronized void initDlqProducer() {
try {
if (this.dlqProducer == null) {
// the method itself is synchronized, so no inner double-checked locking is needed;
// we can use the producer defaults as we do not need to tune performance
ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>("dlqKafkaProducer",
byte[].class, byte[].class, BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
producerMetadata.setSync(false);
producerMetadata.setCompressionType(ProducerMetadata.CompressionType.none);
producerMetadata.setBatchBytes(16384);
Properties additionalProps = new Properties();
additionalProps.put(ProducerConfig.ACKS_CONFIG,
String.valueOf(this.configurationProperties.getRequiredAcks()));
additionalProps.put(ProducerConfig.LINGER_MS_CONFIG, String.valueOf(0));
ProducerFactoryBean<byte[], byte[]> producerFactoryBean = new ProducerFactoryBean<>(
producerMetadata, this.configurationProperties.getKafkaConnectionString(),
additionalProps);
this.dlqProducer = producerFactoryBean.getObject();
}
}
catch (Exception e) {
throw new RuntimeException("Cannot initialize DLQ producer", e);
}
}
private OffsetManager createOffsetManager(String group, long referencePoint) {
try {
KafkaNativeOffsetManager kafkaOffsetManager = new KafkaNativeOffsetManager(this.connectionFactory,
new ZookeeperConnect(this.configurationProperties.getZkConnectionString()),
Collections.<Partition, Long>emptyMap());
kafkaOffsetManager.setConsumerId(group);
kafkaOffsetManager.setReferenceTimestamp(referencePoint);
kafkaOffsetManager.afterPropertiesSet();
WindowingOffsetManager windowingOffsetManager = new WindowingOffsetManager(kafkaOffsetManager);
windowingOffsetManager.setTimespan(this.configurationProperties.getOffsetUpdateTimeWindow());
windowingOffsetManager.setCount(this.configurationProperties.getOffsetUpdateCount());
windowingOffsetManager.setShutdownTimeout(this.configurationProperties.getOffsetUpdateShutdownTimeout());
windowingOffsetManager.afterPropertiesSet();
return windowingOffsetManager;
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
private String toDisplayString(String original, int maxCharacters) {
if (original.length() <= maxCharacters) {
return original;
}
return original.substring(0, maxCharacters) + "...";
}
@Override
public void doManualAck(LinkedList<MessageHeaders> messageHeadersList) {
Iterator<MessageHeaders> iterator = messageHeadersList.iterator();
while (iterator.hasNext()) {
MessageHeaders headers = iterator.next();
Acknowledgment acknowledgment = (Acknowledgment) headers.get(KafkaHeaders.ACKNOWLEDGMENT);
Assert.notNull(acknowledgment,
"Acknowledgment must not be null when acknowledging a Kafka message manually.");
acknowledgment.acknowledge();
}
}
public enum StartOffset {
earliest(OffsetRequest.EarliestTime()), latest(OffsetRequest.LatestTime());
private final long referencePoint;
StartOffset(long referencePoint) {
this.referencePoint = referencePoint;
}
public long getReferencePoint() {
return this.referencePoint;
}
}
private static final class ProducerConfigurationMessageHandler implements MessageHandler, Lifecycle {
private ProducerConfiguration<byte[], byte[]> delegate;
private String targetTopic;
private boolean running = true;
private ProducerConfigurationMessageHandler(
ProducerConfiguration<byte[], byte[]> delegate, String targetTopic) {
Assert.notNull(delegate, "Delegate cannot be null");
Assert.hasText(targetTopic, "Target topic must not be empty");
this.delegate = delegate;
this.targetTopic = targetTopic;
}
@Override
public void start() {
}
@Override
public void stop() {
this.delegate.stop();
this.running = false;
}
@Override
public boolean isRunning() {
return this.running;
}
@Override
public void handleMessage(Message<?> message) throws MessagingException {
this.delegate.send(this.targetTopic,
message.getHeaders().get(BinderHeaders.PARTITION_HEADER, Integer.class), null,
(byte[]) message.getPayload());
}
}
}
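
A minimal sketch (not part of the diff) of bootstrapping the binder programmatically, mirroring the setup used in testDefaultConsumerStartsAtEarliest further down; the broker and ZooKeeper addresses are placeholders:

KafkaBinderConfigurationProperties props = new KafkaBinderConfigurationProperties();
props.setBrokers("localhost:9092"); // placeholder broker address
props.setZkNodes("localhost:2181"); // placeholder ZooKeeper address
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(props);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet(); // runs onInit(): connection factory plus metadata retry template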


@@ -0,0 +1,262 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.observables.GroupedObservable;
import rx.observables.MathObservable;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.listener.OffsetManager;
import org.springframework.util.Assert;
/**
* An {@link OffsetManager} that aggregates writes over a time or count window, using an underlying delegate to
* do the actual operations. Its purpose is to reduce the performance impact of frequent offset writes.
*
* Either a time window or a number of writes can be specified, but not both.
*
* @author Marius Bogoevici
*/
public class WindowingOffsetManager implements OffsetManager, InitializingBean, DisposableBean {
private final CreatePartitionAndOffsetFunction createPartitionAndOffsetFunction = new CreatePartitionAndOffsetFunction();
private final GetOffsetFunction getOffsetFunction = new GetOffsetFunction();
private final ComputeMaximumOffsetByPartitionFunction findHighestOffsetInPartitionGroup = new ComputeMaximumOffsetByPartitionFunction();
private final GetPartitionFunction getPartition = new GetPartitionFunction();
private final FindHighestOffsetsByPartitionFunction findHighestOffsetsByPartition = new FindHighestOffsetsByPartitionFunction();
private final DelegateUpdateOffsetAction delegateUpdateOffsetAction = new DelegateUpdateOffsetAction();
private final NotifyObservableClosedAction notifyObservableClosed = new NotifyObservableClosedAction();
private final OffsetManager delegate;
private long timespan = 10 * 1000;
private int count;
private Subject<PartitionAndOffset, PartitionAndOffset> offsets;
private Subscription subscription;
private int shutdownTimeout = 2000;
private CountDownLatch shutdownLatch;
public WindowingOffsetManager(OffsetManager offsetManager) {
this.delegate = offsetManager;
}
/**
* The timespan for aggregating write operations, before invoking the underlying {@link OffsetManager}.
*
* @param timespan duration in milliseconds
*/
public void setTimespan(long timespan) {
Assert.isTrue(timespan >= 0, "Timespan must not be negative");
this.timespan = timespan;
}
/**
* How many writes should be aggregated, before invoking the underlying {@link OffsetManager}. Setting this value
* to 1 effectively disables windowing.
*
* @param count number of writes
*/
public void setCount(int count) {
Assert.isTrue(count >= 0, "Count must not be negative");
this.count = count;
}
/**
* The timeout for which the {@link #close()} and {@link #destroy()} operations will wait to receive
* confirmation that the underlying writes have been processed.
*
* @param shutdownTimeout duration in milliseconds
*/
public void setShutdownTimeout(int shutdownTimeout) {
this.shutdownTimeout = shutdownTimeout;
}
@Override
public void afterPropertiesSet() throws Exception {
Assert.isTrue(timespan > 0 ^ count > 0, "Exactly one of timespan or count must be set");
// create the windowing stream if a time window is set, or count is higher than 1
if (timespan > 0 || count > 1) {
offsets = new SerializedSubject<>(PublishSubject.<PartitionAndOffset>create());
// window by either count or time
Observable<Observable<PartitionAndOffset>> window =
timespan > 0 ? offsets.window(timespan, TimeUnit.MILLISECONDS) : offsets.window(count);
Observable<PartitionAndOffset> maximumOffsetsByWindow = window
.flatMap(findHighestOffsetsByPartition)
.doOnCompleted(notifyObservableClosed);
subscription = maximumOffsetsByWindow.subscribe(delegateUpdateOffsetAction);
}
else {
offsets = null;
}
}
@Override
public void destroy() throws Exception {
this.flush();
this.close();
if (delegate instanceof DisposableBean) {
((DisposableBean) delegate).destroy();
}
}
@Override
public void updateOffset(Partition partition, long offset) {
if (offsets != null) {
offsets.onNext(new PartitionAndOffset(partition, offset));
}
else {
delegate.updateOffset(partition, offset);
}
}
@Override
public long getOffset(Partition partition) {
return delegate.getOffset(partition);
}
@Override
public void deleteOffset(Partition partition) {
delegate.deleteOffset(partition);
}
@Override
public void resetOffsets(Collection<Partition> partition) {
delegate.resetOffsets(partition);
}
@Override
public void close() throws IOException {
if (offsets != null) {
shutdownLatch = new CountDownLatch(1);
offsets.onCompleted();
try {
shutdownLatch.await(shutdownTimeout, TimeUnit.MILLISECONDS);
}
catch (InterruptedException e) {
// ignore
}
subscription.unsubscribe();
}
delegate.close();
}
@Override
public void flush() throws IOException {
delegate.flush();
}
private final class PartitionAndOffset {
private final Partition partition;
private final Long offset;
private PartitionAndOffset(Partition partition, Long offset) {
this.partition = partition;
this.offset = offset;
}
public Partition getPartition() {
return partition;
}
public Long getOffset() {
return offset;
}
}
private class DelegateUpdateOffsetAction implements Action1<PartitionAndOffset> {
@Override
public void call(PartitionAndOffset partitionAndOffset) {
delegate.updateOffset(partitionAndOffset.getPartition(), partitionAndOffset.getOffset());
}
}
private class NotifyObservableClosedAction implements Action0 {
@Override
public void call() {
if (shutdownLatch != null) {
shutdownLatch.countDown();
}
}
}
private class CreatePartitionAndOffsetFunction implements Func2<Partition, Long, PartitionAndOffset> {
@Override
public PartitionAndOffset call(Partition partition, Long offset) {
return new PartitionAndOffset(partition, offset);
}
}
private class GetOffsetFunction implements Func1<PartitionAndOffset, Long> {
@Override
public Long call(PartitionAndOffset partitionAndOffset) {
return partitionAndOffset.getOffset();
}
}
private class ComputeMaximumOffsetByPartitionFunction implements Func1<GroupedObservable<Partition, PartitionAndOffset>, Observable<PartitionAndOffset>> {
@Override
public Observable<PartitionAndOffset> call(GroupedObservable<Partition, PartitionAndOffset> group) {
return Observable.zip(Observable.just(group.getKey()),
MathObservable.max(group.map(getOffsetFunction)),
createPartitionAndOffsetFunction);
}
}
private class GetPartitionFunction implements Func1<PartitionAndOffset, Partition> {
@Override
public Partition call(PartitionAndOffset partitionAndOffset) {
return partitionAndOffset.getPartition();
}
}
private class FindHighestOffsetsByPartitionFunction implements Func1<Observable<PartitionAndOffset>, Observable<PartitionAndOffset>> {
@Override
public Observable<PartitionAndOffset> call(Observable<PartitionAndOffset> windowBuffer) {
return windowBuffer.groupBy(getPartition).flatMap(findHighestOffsetInPartitionGroup);
}
}
}
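
A sketch (not part of the diff) of wrapping a delegate with this class, following the usage in KafkaMessageChannelBinder.createOffsetManager above; kafkaOffsetManager and partition stand in for real instances:

WindowingOffsetManager windowing = new WindowingOffsetManager(kafkaOffsetManager);
windowing.setTimespan(10000); // aggregate writes over a 10-second window (count stays 0)
windowing.afterPropertiesSet();
windowing.updateOffset(partition, 42L); // buffered; only the highest offset per partition reaches the delegate
windowing.close(); // completes the stream, waiting up to shutdownTimeout for the final flush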


@@ -0,0 +1,79 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.kafka.support.LoggingProducerListener;
import org.springframework.integration.kafka.support.ProducerListener;
/**
* @author David Turanski
* @author Marius Bogoevici
* @author Soby Chacko
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class })
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {
@Autowired
private Codec codec;
@Autowired
private KafkaBinderConfigurationProperties configurationProperties;
@Autowired
private KafkaExtendedBindingProperties kafkaExtendedBindingProperties;
@Autowired
private ProducerListener producerListener;
@Bean
KafkaMessageChannelBinder kafkaMessageChannelBinder() {
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(configurationProperties);
kafkaMessageChannelBinder.setCodec(codec);
kafkaMessageChannelBinder.setProducerListener(producerListener);
kafkaMessageChannelBinder.setExtendedBindingProperties(kafkaExtendedBindingProperties);
return kafkaMessageChannelBinder;
}
@Bean
@ConditionalOnMissingBean(ProducerListener.class)
ProducerListener producerListener() {
return new LoggingProducerListener();
}
@Bean
KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, configurationProperties);
}
}
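
Since producerListener() is guarded by @ConditionalOnMissingBean, an application can swap in its own listener; a hypothetical override:

@Bean
public ProducerListener customProducerListener() {
return new LoggingProducerListener(); // or any other ProducerListener implementation
}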


@@ -0,0 +1,20 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
*/
package org.springframework.cloud.stream.binder.kafka;


@@ -0,0 +1,2 @@
kafka:\
org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration


@@ -0,0 +1,820 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.springframework.beans.DirectFieldAccessor;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.PartitionCapableBinderTests;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.TestUtils;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.test.junit.kafka.KafkaTestSupport;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.core.TopicNotFoundException;
import org.springframework.integration.kafka.support.KafkaHeaders;
import org.springframework.integration.kafka.support.ProducerConfiguration;
import org.springframework.integration.kafka.support.ProducerMetadata;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessagingException;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
/**
* Integration tests for the {@link KafkaMessageChannelBinder}.
* @author Eric Bottard
* @author Marius Bogoevici
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
public class KafkaBinderTests extends
PartitionCapableBinderTests<KafkaTestBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
private static final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
@ClassRule
public static KafkaTestSupport kafkaTestSupport = new KafkaTestSupport();
private KafkaTestBinder binder;
@Override
protected void binderBindUnbindLatency() throws InterruptedException {
Thread.sleep(500);
}
@Override
protected KafkaTestBinder getBinder() {
if (binder == null) {
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binder = new KafkaTestBinder(binderConfiguration);
}
return binder;
}
private KafkaBinderConfigurationProperties createConfigurationProperties() {
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
binderConfiguration.setBrokers(kafkaTestSupport.getBrokerAddress());
binderConfiguration.setZkNodes(kafkaTestSupport.getZkConnectString());
return binderConfiguration;
}
@Override
protected ExtendedConsumerProperties<KafkaConsumerProperties> createConsumerProperties() {
return new ExtendedConsumerProperties<>(new KafkaConsumerProperties());
}
@Override
protected ExtendedProducerProperties<KafkaProducerProperties> createProducerProperties() {
return new ExtendedProducerProperties<>(new KafkaProducerProperties());
}
@Before
public void init() {
String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
if (multiplier != null) {
timeoutMultiplier = Double.parseDouble(multiplier);
}
}
@Override
protected boolean usesExplicitRouting() {
return false;
}
@Override
protected String getClassUnderTestName() {
return CLASS_UNDER_TEST_NAME;
}
@Override
public Spy spyOn(final String name) {
throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
}
@Test
public void testDlqAndRetry() {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
QueueChannel dlqChannel = new QueueChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(3);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
dlqConsumerProperties.setMaxAttempts(1);
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
Message<?> receivedMessage = receive(dlqChannel, 3);
assertThat(receivedMessage).isNotNull();
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
dlqConsumerBinding.unbind();
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testDefaultAutoCommitOnErrorWithoutDlq() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(1);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
assertThat(handler.getLatch().await((int) (timeoutMultiplier * 1000), TimeUnit.MILLISECONDS)).isTrue();
// first attempt fails
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
Message<?> receivedMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
assertThat(receivedMessage).isNotNull();
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
consumerBinding.unbind();
// on the second attempt the message is redelivered
QueueChannel successfulInputChannel = new QueueChannel();
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
successfulInputChannel, consumerProperties);
String testMessage2Payload = "test." + UUID.randomUUID().toString();
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
moduleOutputChannel.send(testMessage2);
Message<?> firstReceived = receive(successfulInputChannel);
assertThat(firstReceived.getPayload()).isEqualTo(testMessagePayload);
Message<?> secondReceived = receive(successfulInputChannel);
assertThat(secondReceived.getPayload()).isEqualTo(testMessage2Payload);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testDefaultAutoCommitOnErrorWithDlq() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(3);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
dlqConsumerProperties.setMaxAttempts(1);
QueueChannel dlqChannel = new QueueChannel();
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
Message<?> dlqMessage = receive(dlqChannel, 3);
assertThat(dlqMessage).isNotNull();
assertThat(dlqMessage.getPayload()).isEqualTo(testMessagePayload);
// first attempt fails
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
Message<?> handledMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
assertThat(handledMessage).isNotNull();
assertThat(handledMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
dlqConsumerBinding.unbind();
consumerBinding.unbind();
// on the second attempt the message is not redelivered because the DLQ is set
QueueChannel successfulInputChannel = new QueueChannel();
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
successfulInputChannel, consumerProperties);
String testMessage2Payload = "test." + UUID.randomUUID().toString();
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
moduleOutputChannel.send(testMessage2);
Message<?> receivedMessage = receive(successfulInputChannel);
assertThat(receivedMessage.getPayload()).isEqualTo(testMessage2Payload);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test(expected = IllegalArgumentException.class)
public void testValidateKafkaTopicName() {
KafkaMessageChannelBinder.validateTopicName("foo:bar");
}
@Test
public void testCompression() throws Exception {
final ProducerMetadata.CompressionType[] codecs = new ProducerMetadata.CompressionType[] {
ProducerMetadata.CompressionType.none, ProducerMetadata.CompressionType.gzip,
ProducerMetadata.CompressionType.snappy };
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaTestBinder binder = getBinder();
for (ProducerMetadata.CompressionType codec : codecs) {
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.getExtension().setCompressionType(fromProducerMetadataCompressionType(codec));
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
createConsumerProperties());
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
producerBinding.unbind();
consumerBinding.unbind();
}
}
@Test
public void testCustomPartitionCountOverridesDefaultIfLarger() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(10);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(10);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
public void testCustomPartitionCountDoesNotOverridePartitioningIfSmaller() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(6);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(6);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
public void testCustomPartitionCountOverridesPartitioningIfLarger() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(4);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(5);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
@SuppressWarnings("unchecked")
public void testDefaultConsumerStartsAtEarliest() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload1);
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
}
@Test
@SuppressWarnings("unchecked")
public void testEarliest() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
binder.bindConsumer(testTopicName, "startOffsets", input1, properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
}
@Test
@SuppressWarnings("unchecked")
public void testReset() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setResetOffsets(true);
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties2 = createConsumerProperties();
properties2.getExtension().setResetOffsets(true);
properties2.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, properties2);
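// resetOffsets + earliest makes the rebound consumer replay the topic from the beginning, so all three payloads arrive in order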
Message<byte[]> receivedMessage4 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage4).isNotNull();
assertThat(new String(receivedMessage4.getPayload())).isEqualTo(testPayload1);
Message<byte[]> receivedMessage5 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage5).isNotNull();
assertThat(new String(receivedMessage5.getPayload())).isEqualTo(testPayload2);
Message<byte[]> receivedMessage6 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage6).isNotNull();
assertThat(new String(receivedMessage6.getPayload())).isEqualTo(testPayload3);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
@SuppressWarnings("unchecked")
public void testResume() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> firstConsumerProperties = createConsumerProperties();
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
firstConsumerProperties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
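// without resetting offsets, the rebound consumer resumes from the committed offset and sees only the third payload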
Message<byte[]> receivedMessage3 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage3).isNotNull();
assertThat(new String(receivedMessage3.getPayload())).isEqualTo(testPayload3);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testSyncProducerMetadata() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
String testTopicName = UUID.randomUUID().toString();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.getExtension().setSync(true);
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output, properties);
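// dig into the binding's endpoint to verify that the sync flag propagated to the producer configuration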
DirectFieldAccessor accessor = new DirectFieldAccessor(extractEndpoint(producerBinding));
MessageHandler handler = (MessageHandler) accessor.getPropertyValue("handler");
DirectFieldAccessor accessor1 = new DirectFieldAccessor(handler);
ProducerConfiguration producerConfiguration = (ProducerConfiguration) accessor1
.getPropertyValue("producerConfiguration");
assertThat(producerConfiguration.getProducerMetadata().isSync())
.withFailMessage("Kafka Sync Producer should have been enabled.").isTrue();
producerBinding.unbind();
}
@Test
public void testAutoCreateTopicsDisabledFailsIfTopicMissing() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
String testTopicName = "nonexisting" + System.currentTimeMillis();
try {
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(BinderException.class);
assertThat(e).hasMessageContaining("Topic " + testTopicName + " does not exist");
}
try {
binder.getConnectionFactory().getPartitions(testTopicName);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(TopicNotFoundException.class);
}
}
@Test
public void testAutoConfigureTopicsDisabledSucceedsIfTopicExisting() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 5, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<MessageChannel> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
}
@Test
public void testAutoAddPartitionsDisabledFailsIfTopicUnderpartitioned() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
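// binding must fail: the topic has a single partition and auto-add-partitions is disabled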
try {
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(BinderException.class);
assertThat(e)
.hasMessageContaining("The number of expected partitions was: 3, but 1 has been found instead");
}
}
@Test
public void testAutoAddPartitionsDisabledSucceedsIfTopicPartitionedCorrectly() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
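// with 6 partitions and 3 instances, instance 2 should listen to partitions 2 and 5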
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
Partition[] listenedPartitions = TestUtils.getPropertyValue(binding,
"endpoint.val$messageListenerContainer.partitions", Partition[].class);
assertThat(listenedPartitions).hasSize(2);
assertThat(listenedPartitions).contains(new Partition(testTopicName, 2), new Partition(testTopicName, 5));
Collection<Partition> partitions = binder.getConnectionFactory().getPartitions(testTopicName);
assertThat(partitions).hasSize(6);
binding.unbind();
}
@Test
public void testAutoCreateTopicsEnabledSucceeds() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
String testTopicName = "nonexisting" + System.currentTimeMillis();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
}
@Test
public void testPartitionCountNotReduced() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
kafkaTestSupport.getZkClient());
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
}
@Test
public void testPartitionCountIncreasedIfAutoAddPartitionsSet() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setMinPartitionCount(6);
configurationProperties.setAutoAddPartitions(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
kafkaTestSupport.getZkClient());
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
}
KafkaProducerProperties.CompressionType fromProducerMetadataCompressionType(
ProducerMetadata.CompressionType compressionType) {
switch (compressionType) {
case snappy: return KafkaProducerProperties.CompressionType.snappy;
case gzip: return KafkaProducerProperties.CompressionType.gzip;
default: return KafkaProducerProperties.CompressionType.none;
}
}
private static final class FailingInvocationCountingMessageHandler implements MessageHandler {
private int invocationCount;
private final LinkedHashMap<Long, Message<?>> receivedMessages = new LinkedHashMap<>();
private final CountDownLatch latch;
private FailingInvocationCountingMessageHandler(int latchSize) {
latch = new CountDownLatch(latchSize);
}
private FailingInvocationCountingMessageHandler() {
this(1);
}
@Override
public void handleMessage(Message<?> message) throws MessagingException {
invocationCount++;
Long offset = message.getHeaders().get(KafkaHeaders.OFFSET, Long.class);
// using the offset as the key ensures that we don't store duplicate
// messages on retry
if (!receivedMessages.containsKey(offset)) {
receivedMessages.put(offset, message);
latch.countDown();
}
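// always throw so the binder redelivers; invocationCount vs. receivedMessages then exposes the retry behavior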
throw new RuntimeException();
}
public LinkedHashMap<Long, Message<?>> getReceivedMessages() {
return receivedMessages;
}
public int getInvocationCount() {
return invocationCount;
}
public CountDownLatch getLatch() {
return latch;
}
}
}

View File

@@ -0,0 +1,91 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.List;
import org.springframework.cloud.stream.binder.AbstractTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.test.junit.kafka.TestKafkaCluster;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.codec.kryo.KryoRegistrar;
import org.springframework.integration.codec.kryo.PojoCodec;
import org.springframework.integration.kafka.support.LoggingProducerListener;
import org.springframework.integration.kafka.support.ProducerListener;
import org.springframework.integration.tuple.TupleKryoRegistrar;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;
/**
* Test support class for {@link KafkaMessageChannelBinder}. Creates a binder that uses a
* test {@link TestKafkaCluster kafka cluster}.
* @author Eric Bottard
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Soby Chacko
*/
public class KafkaTestBinder extends
AbstractTestBinder<KafkaMessageChannelBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
public KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
try {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
binder.setCodec(getCodec());
ProducerListener producerListener = new LoggingProducerListener();
binder.setProducerListener(producerListener);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
this.setBinder(binder);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void cleanup() {
// do nothing - the rule will take care of that
}
private static Codec getCodec() {
return new PojoCodec(new TupleRegistrar());
}
private static class TupleRegistrar implements KryoRegistrar {
private final TupleKryoRegistrar delegate = new TupleKryoRegistrar();
@Override
public void registerTypes(Kryo kryo) {
this.delegate.registerTypes(kryo);
}
@Override
public List<Registration> getRegistrations() {
return this.delegate.getRegistrations();
}
}
}
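For orientation, a minimal usage sketch (not part of this commit) that mirrors how the tests above drive this binder; all names come from the classes shown in this diff:
// Sketch only: construct the test binder and bind a producer, as the tests above do.
KafkaBinderConfigurationProperties configuration = new KafkaBinderConfigurationProperties();
KafkaTestBinder binder = new KafkaTestBinder(configuration);
DirectChannel output = new DirectChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties =
new ExtendedProducerProperties<>(new KafkaProducerProperties());
Binding<MessageChannel> producerBinding = binder.bindProducer("demo.0", output, producerProperties);
output.send(new GenericMessage<>("hello".getBytes()));
producerBinding.unbind();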

View File

@@ -0,0 +1,38 @@
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy;
import org.springframework.cloud.stream.binder.PartitionSelectorStrategy;
import org.springframework.messaging.Message;
/**
* @author Marius Bogoevici
*/
public class RawKafkaPartitionTestSupport implements PartitionKeyExtractorStrategy, PartitionSelectorStrategy {
@Override
public int selectPartition(Object key, int divisor) {
return ((byte[]) key)[0] % divisor;
}
@Override
public Object extractKey(Message<?> message) {
return message.getPayload();
}
}
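Illustrative only (not part of this commit): how the two strategies compose for a raw byte[] payload; the divisor stands in for the configured partition count:
// Sketch: the key is the raw payload; the partition is its first byte modulo the divisor.
RawKafkaPartitionTestSupport support = new RawKafkaPartitionTestSupport();
Message<?> message = new GenericMessage<>(new byte[] { (byte) 7 });
Object key = support.extractKey(message); // returns the byte[] payload itself
int partition = support.selectPartition(key, 6); // 7 % 6 == 1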

View File

@@ -0,0 +1,264 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Arrays;
import org.junit.Ignore;
import org.junit.Test;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.support.GenericMessage;
/**
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Mark Fisher
*/
public class RawModeKafkaBinderTests extends KafkaBinderTests {
@Test
@Override
public void testPartitionedModuleJava() throws Exception {
KafkaTestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setHeaderMode(HeaderMode.raw);
properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionCount(6);
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, properties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(0);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0J");
Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1J");
Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2J");
Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);
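// the first payload byte is the partition key (byte % partitionCount), so 0, 1 and 2 land on different instances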
output.send(new GenericMessage<>(new byte[] { (byte) 0 }));
output.send(new GenericMessage<>(new byte[] { (byte) 1 }));
output.send(new GenericMessage<>(new byte[] { (byte) 2 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
@Test
@Override
public void testPartitionedModuleSpEL() throws Exception {
KafkaTestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
properties.setPartitionCount(6);
properties.setHeaderMode(HeaderMode.raw);
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, properties);
try {
AbstractEndpoint endpoint = (AbstractEndpoint) extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint)).contains("part.0-' + headers['partition']");
}
catch (UnsupportedOperationException ignored) {
}
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceIndex(0);
consumerProperties.setInstanceCount(3);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0S");
Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1S");
Binding<MessageChannel> input1Binding = binder.bindConsumer("part.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2S");
Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);
Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] { 2 })
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
output.send(message2);
output.send(new GenericMessage<>(new byte[] { 1 }));
output.send(new GenericMessage<>(new byte[] { 0 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
@Test
@Override
public void testSendAndReceive() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
consumerProperties);
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
producerBinding.unbind();
consumerBinding.unbind();
}
// Ignored, since raw mode does not support headers
@Test
@Override
@Ignore
public void testSendAndReceiveNoOriginalContentType() throws Exception {
}
@Test
public void testSendAndReceiveWithExplicitConsumerGroup() {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
// Test pub/sub by emulating how StreamPlugin handles taps
QueueChannel module1InputChannel = new QueueChannel();
QueueChannel module2InputChannel = new QueueChannel();
QueueChannel module3InputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.0", "test", module1InputChannel,
consumerProperties);
// A new module is using the tap as an input channel
String fooTapName = "baz.0";
Binding<MessageChannel> input2Binding = binder.bindConsumer(fooTapName, "tap1", module2InputChannel,
consumerProperties);
// Another new module is using the tap as an input channel
String barTapName = "baz.0";
Binding<MessageChannel> input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel,
consumerProperties);
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
boolean success = false;
boolean retried = false;
while (!success) {
moduleOutputChannel.send(message);
Message<?> inbound = receive(module1InputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
Message<?> tapped1 = receive(module2InputChannel);
Message<?> tapped2 = receive(module3InputChannel);
if (tapped1 == null || tapped2 == null) {
// listener may not have started
assertThat(retried).withFailMessage("Failed to receive tap after retry").isFalse();
retried = true;
continue;
}
success = true;
assertThat(new String((byte[]) tapped1.getPayload())).isEqualTo("foo");
assertThat(new String((byte[]) tapped2.getPayload())).isEqualTo("foo");
}
// unbind one tap; its stream should stop receiving
input3Binding.unbind();
Message<?> message2 = MessageBuilder.withPayload("bar".getBytes()).build();
moduleOutputChannel.send(message2);
// other tap still receives messages
Message<?> tapped = receive(module2InputChannel);
assertThat(tapped).isNotNull();
// removed tap does not
assertThat(receive(module3InputChannel)).isNull();
// re-subscribed tap does receive the message
input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel, createConsumerProperties());
assertThat(receive(module3InputChannel)).isNotNull();
// clean up
input1Binding.unbind();
input2Binding.unbind();
input3Binding.unbind();
producerBinding.unbind();
assertThat(extractEndpoint(input1Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input2Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input3Binding).isRunning()).isFalse();
assertThat(extractEndpoint(producerBinding).isRunning()).isFalse();
}
}

View File

@@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
<packaging>jar</packaging>
<name>spring-cloud-stream-binder-kafka-common</name>
<description>Kafka binder common classes</description>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-codec</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,7 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.properties;
import java.util.HashMap;
import java.util.Map;
package org.springframework.cloud.stream.binder.kafka.config;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.util.StringUtils;
@@ -31,13 +28,11 @@ import org.springframework.util.StringUtils;
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
public class KafkaBinderConfigurationProperties {
private String[] zkNodes = new String[] {"localhost"};
private Map<String, Object> configuration = new HashMap<>();
private String[] zkNodes = new String[] { "localhost" };
private String defaultZkPort = "2181";
private String[] brokers = new String[] {"localhost"};
private String[] brokers = new String[] { "localhost" };
private String defaultBrokerPort = "9092";
@@ -77,7 +72,11 @@ public class KafkaBinderConfigurationProperties {
private int queueSize = 8192;
private JaasLoginModuleConfiguration jaas;
private String consumerGroup;
public String getConsumerGroup() {
return consumerGroup;
}
public String getZkConnectionString() {
return toConnectionString(this.zkNodes, this.defaultZkPort);
@@ -88,7 +87,7 @@ public class KafkaBinderConfigurationProperties {
}
public String[] getHeaders() {
return this.headers;
return headers;
}
public int getOffsetUpdateTimeWindow() {
@@ -104,7 +103,7 @@ public class KafkaBinderConfigurationProperties {
}
public String[] getZkNodes() {
return this.zkNodes;
return zkNodes;
}
public void setZkNodes(String... zkNodes) {
@@ -116,7 +115,7 @@ public class KafkaBinderConfigurationProperties {
}
public String[] getBrokers() {
return this.brokers;
return brokers;
}
public void setBrokers(String... brokers) {
@@ -178,7 +177,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getMaxWait() {
return this.maxWait;
return maxWait;
}
public void setMaxWait(int maxWait) {
@@ -186,7 +185,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getRequiredAcks() {
return this.requiredAcks;
return requiredAcks;
}
public void setRequiredAcks(int requiredAcks) {
@@ -194,7 +193,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getReplicationFactor() {
return this.replicationFactor;
return replicationFactor;
}
public void setReplicationFactor(int replicationFactor) {
@@ -202,7 +201,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getFetchSize() {
return this.fetchSize;
return fetchSize;
}
public void setFetchSize(int fetchSize) {
@@ -210,7 +209,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getMinPartitionCount() {
return this.minPartitionCount;
return minPartitionCount;
}
public void setMinPartitionCount(int minPartitionCount) {
@@ -218,7 +217,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getQueueSize() {
return this.queueSize;
return queueSize;
}
public void setQueueSize(int queueSize) {
@@ -226,7 +225,7 @@ public class KafkaBinderConfigurationProperties {
}
public boolean isAutoCreateTopics() {
return this.autoCreateTopics;
return autoCreateTopics;
}
public void setAutoCreateTopics(boolean autoCreateTopics) {
@@ -234,7 +233,7 @@ public class KafkaBinderConfigurationProperties {
}
public boolean isAutoAddPartitions() {
return this.autoAddPartitions;
return autoAddPartitions;
}
public void setAutoAddPartitions(boolean autoAddPartitions) {
@@ -242,27 +241,15 @@ public class KafkaBinderConfigurationProperties {
}
public int getSocketBufferSize() {
return this.socketBufferSize;
return socketBufferSize;
}
public void setSocketBufferSize(int socketBufferSize) {
this.socketBufferSize = socketBufferSize;
}
public Map<String, Object> getConfiguration() {
return configuration;
}
public void setConfiguration(Map<String, Object> configuration) {
this.configuration = configuration;
}
public JaasLoginModuleConfiguration getJaas() {
return jaas;
}
public void setJaas(JaasLoginModuleConfiguration jaas) {
this.jaas = jaas;
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
}

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.properties;
package org.springframework.cloud.stream.binder.kafka.config;
/**
* @author Marius Bogoevici

View File

@@ -1,11 +1,11 @@
/*
* Copyright 2016-2017 the original author or authors.
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,21 +14,13 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.properties;
import java.util.HashMap;
import java.util.Map;
package org.springframework.cloud.stream.binder.kafka.config;
/**
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
*
* <p>Thanks to Laszlo Szabo for providing the initial patch for generic property support.</p>
*/
public class KafkaConsumerProperties {
private boolean autoRebalanceEnabled = true;
private boolean autoCommitOffset = true;
private Boolean autoCommitOnError;
@@ -39,14 +31,10 @@ public class KafkaConsumerProperties {
private boolean enableDlq;
private String dlqName;
private int recoveryInterval = 5000;
private Map<String, String> configuration = new HashMap<>();
public boolean isAutoCommitOffset() {
return this.autoCommitOffset;
return autoCommitOffset;
}
public void setAutoCommitOffset(boolean autoCommitOffset) {
@@ -54,7 +42,7 @@ public class KafkaConsumerProperties {
}
public boolean isResetOffsets() {
return this.resetOffsets;
return resetOffsets;
}
public void setResetOffsets(boolean resetOffsets) {
@@ -62,7 +50,7 @@ public class KafkaConsumerProperties {
}
public StartOffset getStartOffset() {
return this.startOffset;
return startOffset;
}
public void setStartOffset(StartOffset startOffset) {
@@ -70,7 +58,7 @@ public class KafkaConsumerProperties {
}
public boolean isEnableDlq() {
return this.enableDlq;
return enableDlq;
}
public void setEnableDlq(boolean enableDlq) {
@@ -78,7 +66,7 @@ public class KafkaConsumerProperties {
}
public Boolean getAutoCommitOnError() {
return this.autoCommitOnError;
return autoCommitOnError;
}
public void setAutoCommitOnError(Boolean autoCommitOnError) {
@@ -86,21 +74,13 @@ public class KafkaConsumerProperties {
}
public int getRecoveryInterval() {
return this.recoveryInterval;
return recoveryInterval;
}
public void setRecoveryInterval(int recoveryInterval) {
this.recoveryInterval = recoveryInterval;
}
public boolean isAutoRebalanceEnabled() {
return this.autoRebalanceEnabled;
}
public void setAutoRebalanceEnabled(boolean autoRebalanceEnabled) {
this.autoRebalanceEnabled = autoRebalanceEnabled;
}
public enum StartOffset {
earliest(-2L), latest(-1L);
@@ -111,23 +91,7 @@ public class KafkaConsumerProperties {
}
public long getReferencePoint() {
return this.referencePoint;
return referencePoint;
}
}
public Map<String, String> getConfiguration() {
return this.configuration;
}
public void setConfiguration(Map<String, String> configuration) {
this.configuration = configuration;
}
public String getDlqName() {
return dlqName;
}
public void setDlqName(String dlqName) {
this.dlqName = dlqName;
}
}

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.properties;
package org.springframework.cloud.stream.binder.kafka.config;
import java.util.HashMap;
import java.util.Map;
@@ -26,13 +26,12 @@ import org.springframework.cloud.stream.binder.ExtendedBindingProperties;
* @author Marius Bogoevici
*/
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties
implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {
public class KafkaExtendedBindingProperties implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {
private Map<String, KafkaBindingProperties> bindings = new HashMap<>();
public Map<String, KafkaBindingProperties> getBindings() {
return this.bindings;
return bindings;
}
public void setBindings(Map<String, KafkaBindingProperties> bindings) {
@@ -41,8 +40,8 @@ public class KafkaExtendedBindingProperties
@Override
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getConsumer() != null) {
return this.bindings.get(channelName).getConsumer();
if (bindings.containsKey(channelName) && bindings.get(channelName).getConsumer() != null) {
return bindings.get(channelName).getConsumer();
}
else {
return new KafkaConsumerProperties();
@@ -51,8 +50,8 @@ public class KafkaExtendedBindingProperties
@Override
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getProducer() != null) {
return this.bindings.get(channelName).getProducer();
if (bindings.containsKey(channelName) && bindings.get(channelName).getProducer() != null) {
return bindings.get(channelName).getProducer();
}
else {
return new KafkaProducerProperties();

View File

@@ -1,11 +1,11 @@
/*
* Copyright 2016-2017 the original author or authors.
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,35 +14,25 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.properties;
import org.springframework.expression.Expression;
import java.util.HashMap;
import java.util.Map;
package org.springframework.cloud.stream.binder.kafka.config;
import javax.validation.constraints.NotNull;
/**
* @author Marius Bogoevici
* @author Henryk Konsek
*/
public class KafkaProducerProperties {
private int bufferSize = 16384;
private CompressionType compressionType = CompressionType.none;
private CompressionType compressionType = CompressionType.none;
private boolean sync;
private int batchTimeout;
private Expression messageKeyExpression;
private Map<String, String> configuration = new HashMap<>();
public int getBufferSize() {
return this.bufferSize;
return bufferSize;
}
public void setBufferSize(int bufferSize) {
@@ -51,7 +41,7 @@ public class KafkaProducerProperties {
@NotNull
public CompressionType getCompressionType() {
return this.compressionType;
return compressionType;
}
public void setCompressionType(CompressionType compressionType) {
@@ -59,7 +49,7 @@ public class KafkaProducerProperties {
}
public boolean isSync() {
return this.sync;
return sync;
}
public void setSync(boolean sync) {
@@ -67,29 +57,13 @@ public class KafkaProducerProperties {
}
public int getBatchTimeout() {
return this.batchTimeout;
return batchTimeout;
}
public void setBatchTimeout(int batchTimeout) {
this.batchTimeout = batchTimeout;
}
public Expression getMessageKeyExpression() {
return messageKeyExpression;
}
public void setMessageKeyExpression(Expression messageKeyExpression) {
this.messageKeyExpression = messageKeyExpression;
}
public Map<String, String> getConfiguration() {
return this.configuration;
}
public void setConfiguration(Map<String, String> configuration) {
this.configuration = configuration;
}
public enum CompressionType {
none,
gzip,

View File

@@ -0,0 +1,20 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
*/
package org.springframework.cloud.stream.binder.kafka;

View File

@@ -1,46 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>2.0.0.M1</version>
</parent>
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
<description>Spring Cloud Stream Kafka Binder Core</description>
<url>http://projects.spring.io/spring-cloud</url>
<organization>
<name>Pivotal Software, Inc.</name>
<url>http://www.spring.io</url>
</organization>
<properties>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
<version>${spring-integration-kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro-compiler</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</project>

View File

@@ -1,75 +0,0 @@
/*
* Copyright 2002-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.admin;
import java.util.Properties;
import kafka.utils.ZkUtils;
/**
* API around {@link kafka.admin.AdminUtils} to support
* various versions of Kafka brokers.
*
* Note: implementations that support Kafka brokers other than 0.10 need to rely on
* a strategy that involves reflection around {@link kafka.admin.AdminUtils}.
*
* @author Soby Chacko
*/
public interface AdminUtilsOperation {
/**
* Invoke {@link kafka.admin.AdminUtils#addPartitions}
*
* @param zkUtils Zookeeper utils
* @param topic name of the topic
* @param numPartitions target number of partitions
* @param replicaAssignmentStr manual replica assignment, if any
* @param checkBrokerAvailable whether to check broker availability first
*/
void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
String replicaAssignmentStr, boolean checkBrokerAvailable);
/**
* Invoke {@link kafka.admin.AdminUtils#fetchTopicMetadataFromZk}
*
* @param topic name
* @param zkUtils zookeeper utils
* @return error code
*/
short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils);
/**
* Find partition size from Kafka broker using {@link kafka.admin.AdminUtils}
*
* @param topic name
* @param zkUtils zookeeper utils
* @return partition size
*/
int partitionSize(String topic, ZkUtils zkUtils);
/**
* Invoke {@link kafka.admin.AdminUtils#createTopic}
*
* @param zkUtils zookeeper utils
* @param topic name
* @param partitions number of partitions
* @param replicationFactor replication factor
* @param topicConfig topic-level configuration properties
*/
void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
int replicationFactor, Properties topicConfig);
}
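How a concrete implementation gets selected is not shown in this diff; a plausible sketch, assuming detection via the inner TopicMetadata type that only the 0.10 client provides, would be:
// Hypothetical wiring (not in this diff): pick the operation matching the kafka client on the classpath.
AdminUtilsOperation adminUtilsOperation =
ClassUtils.isPresent("org.apache.kafka.common.requests.MetadataResponse$TopicMetadata", null)
? new Kafka10AdminUtilsOperation()
: new Kafka09AdminUtilsOperation();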

View File

@@ -1,145 +0,0 @@
/*
* Copyright 2002-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.admin;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Properties;
import kafka.utils.ZkUtils;
import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;
/**
* @author Soby Chacko
*/
public class Kafka09AdminUtilsOperation implements AdminUtilsOperation {
private static Class<?> ADMIN_UTIL_CLASS;
static {
try {
ADMIN_UTIL_CLASS = ClassUtils.forName("kafka.admin.AdminUtils", null);
}
catch (ClassNotFoundException e) {
throw new IllegalStateException("AdminUtils class not found", e);
}
}
public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
String replicaAssignmentStr, boolean checkBrokerAvailable) {
try {
Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
Method addPartitions = null;
for (Method m : declaredMethods) {
if (m.getName().equals("addPartitions")) {
addPartitions = m;
}
}
if (addPartitions != null) {
addPartitions.invoke(null, zkUtils, topic, numPartitions,
replicaAssignmentStr, checkBrokerAvailable);
}
else {
throw new InvocationTargetException(
new RuntimeException("method not found"));
}
}
catch (InvocationTargetException e) {
ReflectionUtils.handleInvocationTargetException(e);
}
catch (IllegalAccessException e) {
ReflectionUtils.handleReflectionException(e);
}
}
public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
try {
Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);
Method errorCodeMethod = ReflectionUtils.findMethod(topicMetadataClass, "errorCode");
return (short) errorCodeMethod.invoke(result);
}
catch (ClassNotFoundException e) {
throw new IllegalStateException("AdminUtils class not found", e);
}
catch (InvocationTargetException e) {
ReflectionUtils.handleInvocationTargetException(e);
}
catch (IllegalAccessException e) {
ReflectionUtils.handleReflectionException(e);
}
return 0;
}
@SuppressWarnings("unchecked")
public int partitionSize(String topic, ZkUtils zkUtils) {
try {
Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);
Method partitionsMetadata = ReflectionUtils.findMethod(topicMetadataClass, "partitionsMetadata");
scala.collection.Seq<kafka.api.PartitionMetadata> partitionSize =
(scala.collection.Seq<kafka.api.PartitionMetadata>)partitionsMetadata.invoke(result);
return partitionSize.size();
}
catch (ClassNotFoundException e) {
throw new IllegalStateException("AdminUtils class not found", e);
}
catch (InvocationTargetException e) {
ReflectionUtils.handleInvocationTargetException(e);
}
catch (IllegalAccessException e) {
ReflectionUtils.handleReflectionException(e);
}
return 0;
}
public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
int replicationFactor, Properties topicConfig) {
try {
Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
Method createTopic = null;
for (Method m : declaredMethods) {
if (m.getName().equals("createTopic")) {
createTopic = m;
break;
}
}
if (createTopic != null) {
createTopic.invoke(null, zkUtils, topic, partitions,
replicationFactor, topicConfig);
}
else {
throw new InvocationTargetException(
new RuntimeException("method not found"));
}
}
catch (InvocationTargetException e) {
ReflectionUtils.handleInvocationTargetException(e);
}
catch (IllegalAccessException e) {
ReflectionUtils.handleReflectionException(e);
}
}
}

View File

@@ -1,54 +0,0 @@
/*
* Copyright 2002-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.admin;
import java.util.Properties;
import kafka.admin.AdminUtils;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.requests.MetadataResponse;
/**
* @author Soby Chacko
*/
public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {
public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
String replicaAssignmentStr, boolean checkBrokerAvailable) {
AdminUtils.addPartitions(zkUtils, topic, numPartitions, replicaAssignmentStr, checkBrokerAvailable, null);
}
public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
return topicMetadata.error().code();
}
@SuppressWarnings("unchecked")
public int partitionSize(String topic, ZkUtils zkUtils) {
MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
return topicMetadata.partitionMetadata().size();
}
public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
int replicationFactor, Properties topicConfig) {
AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor,
topicConfig, null);
}
}


@@ -1,82 +0,0 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.properties;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import org.springframework.util.Assert;
/**
* Contains properties for setting up an {@link AppConfigurationEntry} that can be used
* for the Kafka or Zookeeper client.
*
* @author Marius Bogoevici
*/
public class JaasLoginModuleConfiguration {
private String loginModule = "com.sun.security.auth.module.Krb5LoginModule";
private AppConfigurationEntry.LoginModuleControlFlag controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
private Map<String,String> options = new HashMap<>();
public String getLoginModule() {
return loginModule;
}
public void setLoginModule(String loginModule) {
Assert.notNull(loginModule, "loginModule cannot be null");
this.loginModule = loginModule;
}
public String getControlFlag() {
return controlFlag.toString();
}
public AppConfigurationEntry.LoginModuleControlFlag getControlFlagValue() {
return controlFlag;
}
public void setControlFlag(String controlFlag) {
Assert.notNull(controlFlag, "controlFlag cannot be null");
// Compare as strings: LoginModuleControlFlag.equals(String) is always false.
if ("OPTIONAL".equalsIgnoreCase(controlFlag)) {
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL;
}
else if ("REQUIRED".equalsIgnoreCase(controlFlag)) {
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
}
else if ("REQUISITE".equalsIgnoreCase(controlFlag)) {
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUISITE;
}
else if ("SUFFICIENT".equalsIgnoreCase(controlFlag)) {
this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT;
}
else {
throw new IllegalArgumentException(controlFlag + " is not a supported control flag");
}
}
public Map<String, String> getOptions() {
return options;
}
public void setOptions(Map<String, String> options) {
this.options = options;
}
}
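As a usage sketch (the wrapper class is hypothetical and not part of this diff), these properties translate into the `AppConfigurationEntry` mentioned in the Javadoc above:

import java.util.HashMap;

import javax.security.auth.login.AppConfigurationEntry;

// Illustrative only; not part of the original sources.
public class JaasEntryExample {

	public static void main(String[] args) {
		JaasLoginModuleConfiguration jaas = new JaasLoginModuleConfiguration();
		jaas.setControlFlag("requisite");
		jaas.getOptions().put("useKeyTab", "true");

		// AppConfigurationEntry is what the Kafka/Zookeeper JAAS machinery consumes.
		AppConfigurationEntry entry = new AppConfigurationEntry(
				jaas.getLoginModule(),
				jaas.getControlFlagValue(),
				new HashMap<String, Object>(jaas.getOptions()));

		System.out.println(entry.getLoginModuleName());
	}
}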


@@ -1,345 +0,0 @@
/*
* Copyright 2014-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.provisioning;
import java.util.Collection;
import java.util.Properties;
import java.util.concurrent.Callable;
import kafka.common.ErrorMapping;
import kafka.utils.ZkUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.security.JaasUtils;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
import org.springframework.cloud.stream.provisioning.ProducerDestination;
import org.springframework.cloud.stream.provisioning.ProvisioningException;
import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* Kafka implementation for {@link ProvisioningProvider}
*
* @author Soby Chacko
* @author Gary Russell
* @author Ilayaperumal Gopinathan
* @author Simon Flandergan
*/
public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {
private final Log logger = LogFactory.getLog(getClass());
private final KafkaBinderConfigurationProperties configurationProperties;
private final AdminUtilsOperation adminUtilsOperation;
private RetryOperations metadataRetryOperations;
public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
AdminUtilsOperation adminUtilsOperation) {
this.configurationProperties = kafkaBinderConfigurationProperties;
this.adminUtilsOperation = adminUtilsOperation;
}
/**
* @param metadataRetryOperations the retry configuration
*/
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
this.metadataRetryOperations = metadataRetryOperations;
}
@Override
public void afterPropertiesSet() throws Exception {
if (this.metadataRetryOperations == null) {
RetryTemplate retryTemplate = new RetryTemplate();
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
simpleRetryPolicy.setMaxAttempts(10);
retryTemplate.setRetryPolicy(simpleRetryPolicy);
ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
backOffPolicy.setInitialInterval(100);
backOffPolicy.setMultiplier(2);
backOffPolicy.setMaxInterval(1000);
retryTemplate.setBackOffPolicy(backOffPolicy);
this.metadataRetryOperations = retryTemplate;
}
}
@Override
public ProducerDestination provisionProducerDestination(final String name, ExtendedProducerProperties<KafkaProducerProperties> properties) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
KafkaTopicUtils.validateTopicName(name);
createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, properties.getPartitionCount(), false);
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
JaasUtils.isZkSecurityEnabled());
int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
return new KafkaProducerDestination(name, partitions);
}
else {
return new KafkaProducerDestination(name);
}
}
@Override
public ConsumerDestination provisionConsumerDestination(final String name, final String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
KafkaTopicUtils.validateTopicName(name);
boolean anonymous = !StringUtils.hasText(group);
Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
"DLQ support is not available for anonymous subscriptions");
if (properties.getInstanceCount() == 0) {
throw new IllegalArgumentException("Instance count cannot be zero");
}
int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, partitionCount, properties.getExtension().isAutoRebalanceEnabled());
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
JaasUtils.isZkSecurityEnabled());
int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
if (properties.getExtension().isEnableDlq() && !anonymous) {
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
properties.getExtension().getDlqName() : "error." + name + "." + group;
createTopicAndPartitions(dlqTopic, partitions, properties.getExtension().isAutoRebalanceEnabled());
return new KafkaConsumerDestination(name, partitions, dlqTopic);
}
return new KafkaConsumerDestination(name, partitions);
}
return new KafkaConsumerDestination(name);
}
private void createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(final String topicName, final int partitionCount,
boolean tolerateLowerPartitionsOnBroker) {
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
createTopicAndPartitions(topicName, partitionCount, tolerateLowerPartitionsOnBroker);
}
else if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation == null) {
this.logger.warn("Auto creation of topics is enabled, but Kafka AdminUtils class is not present on the classpath. " +
"No topic will be created by the binder");
}
else if (!this.configurationProperties.isAutoCreateTopics()) {
this.logger.info("Auto creation of topics is disabled.");
}
}
/**
* Creates a Kafka topic if needed, or tries to increase its partition count to the
* desired number.
*/
private void createTopicAndPartitions(final String topicName, final int partitionCount,
boolean tolerateLowerPartitionsOnBroker) {
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
JaasUtils.isZkSecurityEnabled());
try {
short errorCode = adminUtilsOperation.errorCodeFromTopicMetadata(topicName, zkUtils);
if (errorCode == ErrorMapping.NoError()) {
// only consider minPartitionCount for resizing if autoAddPartitions is true
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
: partitionCount;
int partitionSize = adminUtilsOperation.partitionSize(topicName, zkUtils);
if (partitionSize < effectivePartitionCount) {
if (this.configurationProperties.isAutoAddPartitions()) {
adminUtilsOperation.invokeAddPartitions(zkUtils, topicName, effectivePartitionCount, null, false);
}
else if (tolerateLowerPartitionsOnBroker) {
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
}
else {
throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
+ "Consider either increasing the partition count of the topic or enabling " +
"`autoAddPartitions`");
}
}
}
else if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) {
// always consider minPartitionCount for topic creation
final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
partitionCount);
this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) throws RuntimeException {
try {
adminUtilsOperation.invokeCreateTopic(zkUtils, topicName, effectivePartitionCount,
configurationProperties.getReplicationFactor(), new Properties());
}
catch (Exception e) {
String exceptionClass = e.getClass().getName();
if (exceptionClass.equals("kafka.common.TopicExistsException")
|| exceptionClass.equals("org.apache.kafka.common.errors.TopicExistsException")) {
if (logger.isWarnEnabled()) {
logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
}
}
else {
throw e;
}
}
return null;
}
});
}
else {
throw new ProvisioningException("Error fetching Kafka topic metadata: ",
ErrorMapping.exceptionFor(errorCode));
}
}
finally {
zkUtils.close();
}
}
public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
final boolean tolerateLowerPartitionsOnBroker,
final Callable<Collection<PartitionInfo>> callable) {
try {
return this.metadataRetryOperations
.execute(new RetryCallback<Collection<PartitionInfo>, Exception>() {
@Override
public Collection<PartitionInfo> doWithRetry(RetryContext context) throws Exception {
Collection<PartitionInfo> partitions = callable.call();
// do a sanity check on the partition set
int partitionSize = partitions.size();
if (partitionSize < partitionCount) {
if (tolerateLowerPartitionsOnBroker) {
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
+ "There will be " + (partitionCount - partitionSize) + " idle consumers");
}
else {
throw new IllegalStateException("The number of expected partitions was: "
+ partitionCount + ", but " + partitionSize
+ (partitionSize > 1 ? " have " : " has ") + "been found instead");
}
}
return partitions;
}
});
}
catch (Exception e) {
this.logger.error("Cannot initialize Binder", e);
throw new BinderException("Cannot initialize binder:", e);
}
}
private static final class KafkaProducerDestination implements ProducerDestination {
private final String producerDestinationName;
private final int partitions;
KafkaProducerDestination(String destinationName) {
this(destinationName, 0);
}
KafkaProducerDestination(String destinationName, Integer partitions) {
this.producerDestinationName = destinationName;
this.partitions = partitions;
}
@Override
public String getName() {
return producerDestinationName;
}
@Override
public String getNameForPartition(int partition) {
return producerDestinationName;
}
@Override
public String toString() {
return "KafkaProducerDestination{" +
"producerDestinationName='" + producerDestinationName + '\'' +
", partitions=" + partitions +
'}';
}
}
private static final class KafkaConsumerDestination implements ConsumerDestination {
private final String consumerDestinationName;
private final int partitions;
private final String dlqName;
KafkaConsumerDestination(String consumerDestinationName) {
this(consumerDestinationName, 0, null);
}
KafkaConsumerDestination(String consumerDestinationName, int partitions) {
this(consumerDestinationName, partitions, null);
}
KafkaConsumerDestination(String consumerDestinationName, Integer partitions, String dlqName) {
this.consumerDestinationName = consumerDestinationName;
this.partitions = partitions;
this.dlqName = dlqName;
}
@Override
public String getName() {
return this.consumerDestinationName;
}
@Override
public String toString() {
return "KafkaConsumerDestination{" +
"consumerDestinationName='" + consumerDestinationName + '\'' +
", partitions=" + partitions +
", dlqName='" + dlqName + '\'' +
'}';
}
}
}
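As a wiring sketch (illustrative; it assumes `KafkaBinderConfigurationProperties` has a no-arg constructor and that `Kafka10AdminUtilsOperation` from this diff can be instantiated directly), a custom `RetryTemplate` can replace the 10-attempt exponential back-off that `afterPropertiesSet()` installs by default:

import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;

// Illustrative only; not part of the original sources.
public class ProvisionerWiringExample {

	public static void main(String[] args) throws Exception {
		KafkaBinderConfigurationProperties binderProperties = new KafkaBinderConfigurationProperties();
		AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();

		KafkaTopicProvisioner provisioner =
				new KafkaTopicProvisioner(binderProperties, adminUtilsOperation);

		// Replace the default exponential back-off with a flat five-attempt policy.
		RetryTemplate retryTemplate = new RetryTemplate();
		retryTemplate.setRetryPolicy(new SimpleRetryPolicy(5));
		provisioner.setMetadataRetryOperations(retryTemplate);

		// afterPropertiesSet() only installs the default template when none was set.
		provisioner.afterPropertiesSet();
	}
}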


@@ -1,49 +0,0 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.utils;
import java.io.UnsupportedEncodingException;
/**
* @author Soby Chacko
*/
public final class KafkaTopicUtils {
private KafkaTopicUtils() {
}
/**
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
*/
public static void validateTopicName(String topicName) {
try {
byte[] utf8 = topicName.getBytes("UTF-8");
for (byte b : utf8) {
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|| (b == '-') || (b == '_'))) {
throw new IllegalArgumentException(
"Topic name can only have ASCII alphanumerics, '.', '_' and '-', but was: '" + topicName
+ "'");
}
}
}
catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Can't happen
}
}
}
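For example (an illustrative call, not part of the original file):

KafkaTopicUtils.validateTopicName("orders.v1_raw-data"); // passes: only allowed characters
KafkaTopicUtils.validateTopicName("orders/v1"); // throws IllegalArgumentException: '/' is not allowed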


@@ -1,337 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>2.0.0.M1</version>
</parent>
<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
<name>spring-cloud-stream-binder-kafka-docs</name>
<description>Spring Cloud Stream Kafka Binder Docs</description>
<properties>
<main.basedir>${basedir}/..</main.basedir>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<profiles>
<profile>
<id>full</id>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>xml-maven-plugin</artifactId>
<version>1.0</version>
<executions>
<execution>
<goals>
<goal>transform</goal>
</goals>
</execution>
</executions>
<configuration>
<transformationSets>
<transformationSet>
<dir>${project.build.directory}/external-resources</dir>
<stylesheet>src/main/xslt/dependencyVersions.xsl</stylesheet>
<fileMappers>
<fileMapper implementation="org.codehaus.plexus.components.io.filemappers.FileExtensionMapper">
<targetExtension>.adoc</targetExtension>
</fileMapper>
</fileMappers>
<outputDir>${project.build.directory}/generated-resources</outputDir>
</transformationSet>
</transformationSets>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<includeDependencySources>true</includeDependencySources>
<dependencySourceIncludes>
<dependencySourceInclude>${project.groupId}:*</dependencySourceInclude>
</dependencySourceIncludes>
<attach>false</attach>
<quiet>true</quiet>
<stylesheetfile>${basedir}/src/main/javadoc/spring-javadoc.css</stylesheetfile>
<links>
<link>http://docs.spring.io/spring-framework/docs/${spring.version}/javadoc-api/</link>
<link>http://docs.spring.io/spring-shell/docs/current/api/</link>
</links>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.asciidoctor</groupId>
<artifactId>asciidoctor-maven-plugin</artifactId>
<version>1.5.0</version>
<executions>
<execution>
<id>generate-docbook</id>
<phase>generate-resources</phase>
<goals>
<goal>process-asciidoc</goal>
</goals>
<configuration>
<sourceDocumentName>index.adoc</sourceDocumentName>
<backend>docbook5</backend>
<doctype>book</doctype>
<attributes>
<docinfo>true</docinfo>
<spring-cloud-stream-binder-kafka-version>${project.version}</spring-cloud-stream-binder-kafka-version>
<spring-cloud-stream-binder-kafka-docs-version>${project.version}</spring-cloud-stream-binder-kafka-docs-version>
<github-tag>${github-tag}</github-tag>
</attributes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.agilejava.docbkx</groupId>
<artifactId>docbkx-maven-plugin</artifactId>
<version>2.0.15</version>
<configuration>
<sourceDirectory>${basedir}/target/generated-docs</sourceDirectory>
<includes>index.xml</includes>
<xincludeSupported>true</xincludeSupported>
<chunkedOutput>false</chunkedOutput>
<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
<useExtensions>1</useExtensions>
<highlightSource>1</highlightSource>
<highlightXslthlConfig>${basedir}/src/main/docbook/xsl/xslthl-config.xml</highlightXslthlConfig>
</configuration>
<dependencies>
<dependency>
<groupId>net.sf.xslthl</groupId>
<artifactId>xslthl</artifactId>
<version>2.1.0</version>
</dependency>
<dependency>
<groupId>net.sf.docbook</groupId>
<artifactId>docbook-xml</artifactId>
<version>5.0-all</version>
<classifier>resources</classifier>
<type>zip</type>
<scope>runtime</scope>
</dependency>
</dependencies>
<executions>
<execution>
<id>html-single</id>
<goals>
<goal>generate-html</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<htmlCustomization>${basedir}/src/main/docbook/xsl/html-singlepage.xsl</htmlCustomization>
<targetDirectory>${basedir}/target/docbook/htmlsingle</targetDirectory>
<postProcess>
<copy todir="${basedir}/target/contents/reference/htmlsingle">
<fileset dir="${basedir}/target/docbook/htmlsingle">
<include name="**/*.html" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/htmlsingle">
<fileset dir="${basedir}/src/main/docbook">
<include name="**/*.css" />
<include name="**/*.png" />
<include name="**/*.gif" />
<include name="**/*.jpg" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/htmlsingle">
<fileset dir="${basedir}/src/main/asciidoc">
<include name="images/*.css" />
<include name="images/*.png" />
<include name="images/*.gif" />
<include name="images/*.jpg" />
</fileset>
</copy>
</postProcess>
</configuration>
</execution>
<execution>
<id>html</id>
<goals>
<goal>generate-html</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<htmlCustomization>${basedir}/src/main/docbook/xsl/html-multipage.xsl</htmlCustomization>
<targetDirectory>${basedir}/target/docbook/html</targetDirectory>
<chunkedOutput>true</chunkedOutput>
<postProcess>
<copy todir="${basedir}/target/contents/reference/html">
<fileset dir="${basedir}/target/docbook/html">
<include name="**/*.html" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/html">
<fileset dir="${basedir}/src/main/docbook">
<include name="**/*.css" />
<include name="**/*.png" />
<include name="**/*.gif" />
<include name="**/*.jpg" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/html">
<fileset dir="${basedir}/src/main/asciidoc">
<include name="images/*.css" />
<include name="images/*.png" />
<include name="images/*.gif" />
<include name="images/*.jpg" />
</fileset>
</copy>
</postProcess>
</configuration>
</execution>
<execution>
<id>pdf</id>
<goals>
<goal>generate-pdf</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
<targetDirectory>${basedir}/target/docbook/pdf</targetDirectory>
<postProcess>
<copy todir="${basedir}/target/contents/reference">
<fileset dir="${basedir}/target/docbook">
<include name="**/*.pdf" />
</fileset>
</copy>
<move file="${basedir}/target/contents/reference/pdf/index.pdf" tofile="${basedir}/target/contents/reference/pdf/spring-cloud-stream-reference.pdf" />
</postProcess>
</configuration>
</execution>
<execution>
<id>epub</id>
<goals>
<goal>generate-epub3</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<epubCustomization>${basedir}/src/main/docbook/xsl/epub.xsl</epubCustomization>
<targetDirectory>${basedir}/target/docbook/epub</targetDirectory>
<postProcess>
<copy todir="${basedir}/target/contents/reference/epub">
<fileset dir="${basedir}/target/docbook">
<include name="**/*.epub" />
</fileset>
</copy>
<move file="${basedir}/target/contents/reference/epub/index.epub" tofile="${basedir}/target/contents/reference/epub/spring-cloud-stream-reference.epub" />
</postProcess>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<dependencies>
<dependency>
<groupId>ant-contrib</groupId>
<artifactId>ant-contrib</artifactId>
<version>1.0b3</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant-nodeps</artifactId>
<version>1.8.1</version>
</dependency>
<dependency>
<groupId>org.tigris.antelope</groupId>
<artifactId>antelopetasks</artifactId>
<version>3.2.10</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>package-and-attach-docs-zip</id>
<phase>package</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<zip destfile="${project.build.directory}/${project.artifactId}-${project.version}.zip">
<zipfileset src="${project.build.directory}/${project.artifactId}-${project.version}-javadoc.jar" prefix="api" />
<fileset dir="${project.build.directory}/contents" />
</zip>
</target>
</configuration>
</execution>
<execution>
<id>setup-maven-properties</id>
<phase>validate</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<exportAntProperties>true</exportAntProperties>
<target>
<taskdef resource="net/sf/antcontrib/antcontrib.properties" />
<taskdef name="stringutil" classname="ise.antelope.tasks.StringUtilTask" />
<var name="version-type" value="${project.version}" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp=".*\.(.*)" replace="\1" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp="(M)\d+" replace="MILESTONE" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp="(RC)\d+" replace="MILESTONE" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp="BUILD-(.*)" replace="SNAPSHOT" />
<stringutil string="${version-type}" property="spring-boot-repo">
<lowercase />
</stringutil>
<var name="github-tag" value="v${project.version}" />
<propertyregex property="github-tag" override="true" input="${github-tag}" regexp=".*SNAPSHOT" replace="master" />
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>attach-zip</id>
<goals>
<goal>attach-artifact</goal>
</goals>
<configuration>
<artifacts>
<artifact>
<file>${project.build.directory}/${project.artifactId}-${project.version}.zip</file>
<type>zip;zip.type=docs;zip.deployed=false</type>
</artifact>
</artifacts>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>


@@ -1,2 +0,0 @@
*.html
*.css


@@ -1,20 +0,0 @@
require 'asciidoctor'
require 'erb'
guard 'shell' do
watch(/.*\.adoc$/) {|m|
Asciidoctor.render_file('index.adoc', \
:in_place => true, \
:safe => Asciidoctor::SafeMode::UNSAFE, \
:attributes=> { \
'source-highlighter' => 'prettify', \
'icons' => 'font', \
'linkcss'=> 'true', \
'copycss' => 'true', \
'doctype' => 'book'})
}
end
guard 'livereload' do
watch(%r{^.+\.(css|js|html)$})
end


@@ -1,5 +0,0 @@
[[appendix]]
= Appendices


@@ -1,78 +0,0 @@
[[building]]
== Building
:jdkversion: 1.7
=== Basic Compile and Test
To build the source you will need to install JDK {jdkversion}.
The build uses the Maven wrapper so you don't have to install a specific
version of Maven. To enable the tests, you should have a Kafka server, 0.9 or above, running
before building. See below for more information on running the servers.
The main build command is
----
$ ./mvnw clean install
----
You can also add `-DskipTests` if you like, to avoid running the tests.
NOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command
in place of `./mvnw` in the examples below. If you do that you also
might need to add `-P spring` if your local Maven settings do not
contain repository declarations for spring pre-release artifacts.
NOTE: Be aware that you might need to increase the amount of memory
available to Maven by setting a `MAVEN_OPTS` environment variable with
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
the `.mvn` configuration, so if you find you have to do it to make a
build succeed, please raise a ticket to get the settings added to
source control.
The projects that require middleware generally include a
`docker-compose.yml`, so consider using
http://compose.docker.io/[Docker Compose] to run the middleware servers
in Docker containers.
=== Documentation
There is a "full" profile that will generate documentation.
=== Working with the code
If you don't have an IDE preference we would recommend that you use
http://www.springsource.com/developer/sts[Spring Tools Suite] or
http://eclipse.org[Eclipse] when working with the code. We use the
http://eclipse.org/m2e/[m2eclipse] eclipse plugin for Maven support. Other IDEs and tools
should also work without issue.
==== Importing into eclipse with m2eclipse
We recommend the http://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
marketplace".
Unfortunately m2e does not yet support Maven 3.3, so once the projects
are imported into Eclipse you will also need to tell m2eclipse to use
the `.settings.xml` file for the projects. If you do not do this you
may see many different errors related to the POMs in the
projects. Open your Eclipse preferences, expand the Maven
preferences, and select User Settings. In the User Settings field
click Browse and navigate to the Spring Cloud project you imported
selecting the `.settings.xml` file in that project. Click Apply and
then OK to save the preference changes.
NOTE: Alternatively you can copy the repository settings from https://github.com/spring-cloud/spring-cloud-build/blob/master/.settings.xml[`.settings.xml`] into your own `~/.m2/settings.xml`.
==== Importing into eclipse without m2eclipse
If you prefer not to use m2eclipse you can generate eclipse project metadata using the
following command:
[indent=0]
----
$ ./mvnw eclipse:eclipse
----
The generated eclipse projects can be imported by selecting `import existing projects`
from the `file` menu.


@@ -1,42 +0,0 @@
[[contributing]]
== Contributing
Spring Cloud is released under the non-restrictive Apache 2.0 license,
and follows a very standard Github development process, using the Github
tracker for issues and merging pull requests into master. If you want
to contribute even something trivial please do not hesitate, but
follow the guidelines below.
=== Sign the Contributor License Agreement
Before we accept a non-trivial patch or pull request we will need you to sign the
https://support.springsource.com/spring_committer_signup[contributor's agreement].
Signing the contributor's agreement does not grant anyone commit rights to the main
repository, but it does mean that we can accept your contributions, and you will get an
author credit if we do. Active contributors might be asked to join the core team, and
given the ability to merge pull requests.
=== Code Conventions and Housekeeping
None of these is essential for a pull request, but they will all help. They can also be
added after the original pull request but before a merge.
* Use the Spring Framework code format conventions. If you use Eclipse
you can import formatter settings using the
`eclipse-code-formatter.xml` file from the
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
Cloud Build] project. If using IntelliJ, you can use the
http://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
Plugin] to import the same file.
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
`@author` tag identifying you, and preferably at least a paragraph on what the class is
for.
* Add the ASF license header comment to all new `.java` files (copy from existing files
in the project)
* Add yourself as an `@author` to the .java files that you modify substantially (more
than cosmetic changes).
* Add some Javadocs and, if you change the namespace, some XSD doc elements.
* A few unit tests would help a lot as well -- someone has to do it.
* If no-one else is using your branch, please rebase it against the current master (or
other target branch in the main project).
* When writing a commit message please follow http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
message (where XXXX is the issue number).


@@ -1,109 +0,0 @@
[[kafka-dlq-processing]]
== Dead-Letter Topic Processing
Because the framework cannot anticipate how users will want to dispose of dead-lettered messages, it does not provide any standard mechanism to handle them.
If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
However, if the problem is a permanent issue, that could cause an infinite loop.
The following `spring-boot` application is an example of how to route those messages back to the original topic, but moves them to a third "parking lot" topic after three attempts.
The application is simply another spring-cloud-stream application that reads from the dead-letter topic.
It terminates when no messages are received for 5 seconds.
The examples assume the original destination is `so8400out` and the consumer group is `so8400`.
There are several considerations.
- Consider only running the rerouting when the main application is not running.
Otherwise, the retries for transient errors will be used up very quickly.
- Alternatively, use a two-stage approach: use this application to route to a third topic, and another to route from there back to the main topic.
- Since this technique uses a message header to keep track of retries, it won't work with `headerMode=raw`.
In that case, consider adding some data to the payload (that can be ignored by the main application).
- `x-retries` has to be added to the `headers` property (`spring.cloud.stream.kafka.binder.headers=x-retries`) on both this application and the main application, so that the header is transported between them.
- Since Kafka is publish/subscribe, replayed messages will be sent to each consumer group, even those that successfully processed a message the first time around.
.application.properties
[source]
----
spring.cloud.stream.bindings.input.group=so8400replay
spring.cloud.stream.bindings.input.destination=error.so8400out.so8400
spring.cloud.stream.bindings.output.destination=so8400out
spring.cloud.stream.bindings.output.producer.partitioned=true
spring.cloud.stream.bindings.parkingLot.destination=so8400in.parkingLot
spring.cloud.stream.bindings.parkingLot.producer.partitioned=true
spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=earliest
spring.cloud.stream.kafka.binder.headers=x-retries
----
.Application
[source, java]
----
@SpringBootApplication
@EnableBinding(TwoOutputProcessor.class)
public class ReRouteDlqKApplication implements CommandLineRunner {
private static final String X_RETRIES_HEADER = "x-retries";
public static void main(String[] args) {
SpringApplication.run(ReRouteDlqKApplication.class, args).close();
}
private final AtomicInteger processed = new AtomicInteger();
@Autowired
private MessageChannel parkingLot;
@StreamListener(Processor.INPUT)
@SendTo(Processor.OUTPUT)
public Message<?> reRoute(Message<?> failed) {
processed.incrementAndGet();
Integer retries = failed.getHeaders().get(X_RETRIES_HEADER, Integer.class);
if (retries == null) {
System.out.println("First retry for " + failed);
return MessageBuilder.fromMessage(failed)
.setHeader(X_RETRIES_HEADER, 1)
.setHeader(BinderHeaders.PARTITION_OVERRIDE,
failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
.build();
}
else if (retries < 3) {
System.out.println("Another retry for " + failed);
return MessageBuilder.fromMessage(failed)
.setHeader(X_RETRIES_HEADER, retries + 1)
.setHeader(BinderHeaders.PARTITION_OVERRIDE,
failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
.build();
}
else {
System.out.println("Retries exhausted for " + failed);
parkingLot.send(MessageBuilder.fromMessage(failed)
.setHeader(BinderHeaders.PARTITION_OVERRIDE,
failed.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
.build());
}
return null;
}
@Override
public void run(String... args) throws Exception {
while (true) {
int count = this.processed.get();
Thread.sleep(5000);
if (count == this.processed.get()) {
System.out.println("Idle, terminating");
return;
}
}
}
public interface TwoOutputProcessor extends Processor {
@Output("parkingLot")
MessageChannel parkingLot();
}
}
----

Binary file not shown (deleted image, 9.2 KiB).


@@ -1,14 +0,0 @@
<productname>Spring Cloud Stream Kafka Binder</productname>
<releaseinfo>{spring-cloud-stream-binder-kafka-version}</releaseinfo>
<copyright>
<year>2013-2016</year>
<holder>Pivotal Software, Inc.</holder>
</copyright>
<legalnotice>
<para>
Copies of this document may be made for your own use and for distribution to
others, provided that you do not charge any fee for such copies and further
provided that each copy contains this Copyright Notice, whether distributed in
print or electronically.
</para>
</legalnotice>


@@ -1,34 +0,0 @@
[[spring-cloud-stream-binder-kafka-reference]]
= Spring Cloud Stream Kafka Binder Reference Guide
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek
:doctype: book
:toc:
:toclevels: 4
:source-highlighter: prettify
:numbered:
:icons: font
:hide-uri-scheme:
:spring-cloud-stream-binder-kafka-repo: snapshot
:github-tag: master
:spring-cloud-stream-binder-kafka-docs-version: current
:spring-cloud-stream-binder-kafka-docs: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
:spring-cloud-stream-binder-kafka-docs-current: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
:github-raw: http://raw.github.com/{github-repo}/{github-tag}
:github-code: http://github.com/{github-repo}/tree/{github-tag}
:github-wiki: http://github.com/{github-repo}/wiki
:github-master-code: http://github.com/{github-repo}/tree/master
:sc-ext: java
// ======================================================================================
= Reference Guide
include::overview.adoc[]
include::dlq.adoc[]
include::metrics.adoc[]
= Appendices
[appendix]
include::building.adoc[]
include::contributing.adoc[]
// ======================================================================================


@@ -1,10 +0,0 @@
[[kafka-metrics]]
== Kafka metrics
The Kafka binder module exposes the following metric:
`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag` - this metric indicates how many messages
have not yet been consumed from a given binder's topic (`someTopic`) by a given consumer group (`someGroup`).
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, then
consumer group `myGroup` has `1000` messages waiting to be consumed from topic `myTopic`. This metric is
particularly useful for providing auto-scaling feedback to the PaaS platform of your choice.


@@ -1,436 +0,0 @@
[partintro]
--
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
It contains information about its design, usage and configuration options, as well as information on how the Spring Cloud Stream concepts map into Apache Kafka specific constructs.
--
== Usage
To use the Apache Kafka binder, you just need to add it to your Spring Cloud Stream application, using the following Maven coordinates:
[source,xml]
----
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
----
Alternatively, you can use the Spring Cloud Stream Kafka Starter:
[source,xml]
----
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
</dependency>
----
== Apache Kafka Binder Overview
A simplified diagram of how the Apache Kafka binder operates can be seen below.
.Kafka Binder
image::kafka-binder.png[width=300,scaledwidth="50%"]
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
The consumer group maps directly to the same Apache Kafka concept.
Partitioning maps directly to Apache Kafka partitions as well.
== Configuration Options
This section contains the configuration options used by the Apache Kafka binder.
For common configuration options and properties pertaining to binders, refer to the <<binding-properties,core documentation>>.
=== Kafka Binder Properties
spring.cloud.stream.kafka.binder.brokers::
A list of brokers to which the Kafka binder will connect.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultBrokerPort::
`brokers` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the broker list.
+
Default: `9092`.
spring.cloud.stream.kafka.binder.zkNodes::
A list of ZooKeeper nodes to which the Kafka binder can connect.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultZkPort::
`zkNodes` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the node list.
+
Default: `2181`.
spring.cloud.stream.kafka.binder.configuration::
Key/Value map of client properties (both producers and consumers) passed to all clients created by the binder.
Because these properties will be used by both producers and consumers, usage should be restricted to common properties, especially security settings.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.headers::
The list of custom headers that will be transported by the binder.
+
Default: empty.
spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
The frequency, in milliseconds, with which offsets are saved.
Ignored if `0`.
+
Default: `10000`.
spring.cloud.stream.kafka.binder.offsetUpdateCount::
The frequency, in number of updates, with which consumed offsets are persisted.
Ignored if `0`.
Mutually exclusive with `offsetUpdateTimeWindow`.
+
Default: `0`.
spring.cloud.stream.kafka.binder.requiredAcks::
The number of required acks on the broker.
+
Default: `1`.
spring.cloud.stream.kafka.binder.minPartitionCount::
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
The global minimum number of partitions that the binder will configure on topics on which it produces/consumes data.
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount` * `concurrency` settings of the producer (if either is larger).
+
Default: `1`.
spring.cloud.stream.kafka.binder.replicationFactor::
The replication factor of auto-created topics if `autoCreateTopics` is active.
+
Default: `1`.
spring.cloud.stream.kafka.binder.autoCreateTopics::
If set to `true`, the binder will create new topics automatically.
If set to `false`, the binder will rely on the topics being already configured.
In the latter case, if the topics do not exist, the binder will fail to start.
Of note, this setting is independent of the `auto.create.topics.enable` setting of the broker and it does not influence it: if the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
+
Default: `true`.
spring.cloud.stream.kafka.binder.autoAddPartitions::
If set to `true`, the binder will add new partitions if required.
If set to `false`, the binder will rely on the partition size of the topic being already configured.
If the partition count of the target topic is smaller than the expected value, the binder will fail to start.
+
Default: `false`.
spring.cloud.stream.kafka.binder.socketBufferSize::
Size (in bytes) of the socket buffer to be used by the Kafka consumers.
+
Default: `2097152`.
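For example, the following settings (the host names and values are illustrative) point the binder at two brokers and a ZooKeeper node, and raise the global minimum partition count:

[source]
----
spring.cloud.stream.kafka.binder.brokers=broker1,broker2:9093
spring.cloud.stream.kafka.binder.zkNodes=zk1:2181
spring.cloud.stream.kafka.binder.minPartitionCount=2
spring.cloud.stream.kafka.binder.replicationFactor=1
----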
=== Kafka Consumer Properties
The following properties are available for Kafka consumers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
autoRebalanceEnabled::
When `true`, topic partitions will be automatically rebalanced between the members of a consumer group.
When `false`, each consumer will be assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
This requires both `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
The property `spring.cloud.stream.instanceCount` must typically be greater than 1 in this case.
+
Default: `true`.
autoCommitOffset::
Whether to autocommit offsets when a message has been processed.
If set to `false`, a header with the key `kafka_acknowledgment` and the type `org.springframework.kafka.support.Acknowledgment` will be present in the inbound message.
Applications may use this header for acknowledging messages.
See the examples section for details.
When this property is set to `false`, Kafka binder will set the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL`.
+
Default: `true`.
autoCommitOnError::
Effective only if `autoCommitOffset` is set to `true`.
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only successful messages; this allows a stream to automatically replay from the last successfully processed message in case of persistent failures.
If set to `true`, it will always auto-commit (if auto-commit is enabled).
If not set (default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ, and not committing them otherwise.
+
Default: not set.
recoveryInterval::
The interval between connection recovery attempts, in milliseconds.
+
Default: `5000`.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by `startOffset`.
+
Default: `false`.
startOffset::
The starting offset for new groups, or when `resetOffsets` is `true`.
Allowed values: `earliest`, `latest`.
If the consumer group is set explicitly for the consumer binding (via `spring.cloud.stream.bindings.<channelName>.group`), then `startOffset` is set to `earliest`; otherwise it is set to `latest` for the `anonymous` consumer group.
+
Default: null (equivalent to `earliest`).
enableDlq::
When set to `true`, it enables DLQ behavior for the consumer.
By default, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
The DLQ topic name is configurable via the property `dlqName`.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
+
Default: `false`.
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
+
Default: Empty map.
dlqName::
The name of the DLQ topic to receive the error messages.
+
Default: null (If not specified, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`).
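For example, the following settings (the binding name and DLQ name are illustrative) enable a DLQ with a custom name for the `input` binding:

[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOnError=false
spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
spring.cloud.stream.kafka.bindings.input.consumer.dlqName=orders-dlq
----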
=== Kafka Producer Properties
The following properties are available for Kafka producers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
bufferSize::
Upper limit, in bytes, of how much data the Kafka producer will attempt to batch before sending.
+
Default: `16384`.
sync::
Whether the producer is synchronous.
+
Default: `false`.
batchTimeout::
How long the producer will wait before sending in order to allow more messages to accumulate in the same batch.
(Normally the producer does not wait at all, and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
messageKeyExpression::
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
For example `headers.key` or `payload.myKey`.
+
Default: `none`.
configuration::
Map with a key/value pair containing generic Kafka producer properties.
+
Default: Empty map.
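For example, the following settings (the binding name and values are illustrative) configure a synchronous producer that keys messages by a header:

[source]
----
spring.cloud.stream.kafka.bindings.output.producer.sync=true
spring.cloud.stream.kafka.bindings.output.producer.batchTimeout=50
spring.cloud.stream.kafka.bindings.output.producer.messageKeyExpression=headers.key
----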
[NOTE]
====
The Kafka binder will use the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value will be used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), then the binder will fail to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions will be added.
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` and `partitionCount`), the existing partition count will be used.
====
=== Usage examples
In this section, we illustrate the use of the above properties for specific scenarios.
==== Example: Setting `autoCommitOffset` to `false` and relying on manual acking
This example illustrates how one may manually acknowledge offsets in a consumer application.
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` is set to `false`.
Use the corresponding input channel name for your example.
[source]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class ManuallyAcknowledgingConsumer {
public static void main(String[] args) {
SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
}
@StreamListener(Sink.INPUT)
public void process(Message<?> message) {
Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
if (acknowledgment != null) {
System.out.println("Acknowledgment provided");
acknowledgment.acknowledge();
}
}
}
----
==== Example: security configuration
Apache Kafka 0.9 supports secure connections between client and brokers.
To take advantage of this feature, follow the guidelines in the http://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 http://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.
For example, for setting `security.protocol` to `SASL_SSL`, set:
[source]
----
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
----
All the other security properties can be set in a similar manner.
When using Kerberos, follow the instructions in the http://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
Spring Cloud Stream supports passing JAAS configuration information to the application using a JAAS configuration file and using Spring Boot properties.
===== Using JAAS configuration files
The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos using a JAAS configuration file:
[source]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
--spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
--spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----
===== Using Spring Boot properties
As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications using Spring Boot properties.
The following properties can be used for configuring the login context of the Kafka client.
spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. It does not need to be set in normal cases.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
Map with a key/value pair containing the login module options.
+
Default: Empty map.
Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos using Spring Boot configuration properties:
[source]
----
java -jar log.jar --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
--spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
--spring.cloud.stream.kafka.binder.autoCreateTopics=false \
--spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
--spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
--spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
--spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
--spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----
This represents the equivalent of the following JAAS file:
[source]
----
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/security/keytabs/kafka_client.keytab"
principal="kafka-client-1@EXAMPLE.COM";
};
----
If the topics required already exist on the broker, or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be set. As an alternative to setting `spring.cloud.stream.kafka.binder.autoCreateTopics` you can simply remove the broker dependency from the application. See <<exclude-admin-utils>> for details.
[NOTE]
====
Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream will ignore the Spring Boot properties.
====
[NOTE]
====
Exercise caution when using `autoCreateTopics` and `autoAddPartitions` with Kerberos.
Applications usually use principals that do not have administrative rights in Kafka and Zookeeper, so relying on Spring Cloud Stream to create/modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively using Kafka tooling.
====
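For example, a suitably privileged administrator can provision a topic up front with the standard Kafka tooling. Here is a sketch that reuses the placeholder names from the examples above; the partition count and replication factor are assumptions chosen for illustration:
[source]
----
kafka-topics.sh --create --zookeeper secure.zookeeper:2181 \
--topic stream.ticktock --partitions 3 --replication-factor 2
----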
==== Using the binder with Apache Kafka 0.10
The Spring Cloud Stream Kafka binder supports Kafka version 0.10.1.1 by default. The binder also supports connecting to other 0.10-based versions and to 0.9 clients.
To do this, when you create the project that contains your application, include `spring-cloud-starter-stream-kafka` as you normally would for the default binder.
Then add the following dependencies at the top of the `<dependencies>` section in the pom.xml file to override the defaults.
Here is an example of downgrading your application to 0.10.0.1. Since this version is still on the 0.10 line, the default `spring-kafka` and `spring-integration-kafka` versions can be retained.
[source,xml]
----
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.10.0.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.0.1</version>
</dependency>
----
Here is another example, this time using version 0.9.0.1. Since 0.9 is not on the 0.10 line, compatible `spring-kafka` and `spring-integration-kafka` versions must be specified as well.
[source,xml]
----
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.0.5.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.springframework.integration</groupId>
    <artifactId>spring-integration-kafka</artifactId>
    <version>2.0.1.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.9.0.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.9.0.1</version>
</dependency>
----
[NOTE]
====
The versions above are provided only for the sake of the example.
For best results, we recommend using the most recent 0.10-compatible versions of the projects.
====
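To confirm which Kafka client version actually ends up on the classpath after overriding the dependencies, you can inspect the Maven dependency tree from the application's project directory:
[source]
----
mvn dependency:tree -Dincludes=org.apache.kafka
----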
[[exclude-admin-utils]]
==== Excluding the Kafka broker jar from the classpath of the binder-based application
The Apache Kafka binder uses the administrative utilities that are part of the Apache Kafka server library to create and reconfigure topics.
If the Apache Kafka server library and its dependencies are not needed at runtime because the application relies on topics being configured administratively, the Kafka binder allows the Apache Kafka server dependency to be excluded from the application.
If you use non-default versions of the Kafka dependencies, as advised above, all you have to do is omit the Kafka broker dependency.
If you use the default Kafka version, ensure that you exclude the Kafka broker jar from the `spring-cloud-starter-stream-kafka` dependency, as follows:
[source,xml]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-stream-kafka</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
        </exclusion>
    </exclusions>
</dependency>
----
If you exclude the Apache Kafka server dependency and the topic is not already present, the broker creates it, provided that automatic topic creation is enabled on the server.
Keep in mind that, if you rely on this, the Kafka server uses its default number of partitions and replication factor.
On the other hand, if automatic topic creation is disabled on the server, take care to create the topic with the desired number of partitions before running the application.
If you want full control over how partitions are allocated, leave the default settings as they are: do not exclude the Kafka broker jar, and ensure that `spring.cloud.stream.kafka.binder.autoCreateTopics` is set to `true`, which is the default.
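In that case, the number of partitions the binder provisions can be influenced with its partition-related properties. Here is a minimal sketch; the binding name `output` and the count of `4` are assumptions chosen for illustration:
[source]
----
java -jar log.jar \
--spring.cloud.stream.kafka.binder.autoCreateTopics=true \
--spring.cloud.stream.kafka.binder.minPartitionCount=4 \
--spring.cloud.stream.bindings.output.producer.partitionCount=4
----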


@@ -1,35 +0,0 @@
/*
code highlight CSS resembling the Eclipse IDE default color scheme
@author Costin Leau
*/
.hl-keyword {
color: #7F0055;
font-weight: bold;
}
.hl-comment {
color: #3F5F5F;
font-style: italic;
}
.hl-multiline-comment {
color: #3F5FBF;
font-style: italic;
}
.hl-tag {
color: #3F7F7F;
}
.hl-attribute {
color: #7F007F;
}
.hl-value {
color: #2A00FF;
}
.hl-string {
color: #2A00FF;
}


@@ -1,9 +0,0 @@
@IMPORT url("manual.css");
body.firstpage {
background: url("../images/background.png") no-repeat center top;
}
div.part h1 {
border-top: none;
}


@@ -1,6 +0,0 @@
@IMPORT url("manual.css");
body {
background: url("../images/background.png") no-repeat center top;
}


@@ -1,344 +0,0 @@
@IMPORT url("highlight.css");
html {
padding: 0pt;
margin: 0pt;
}
body {
color: #333333;
margin: 15px 30px;
font-family: Helvetica, Arial, Freesans, Clean, Sans-serif;
line-height: 1.6;
-webkit-font-smoothing: antialiased;
}
code {
font-size: 16px;
font-family: Consolas, "Liberation Mono", Courier, monospace;
}
:not(a)>code {
color: #6D180B;
}
:not(pre)>code {
background-color: #F2F2F2;
border: 1px solid #CCCCCC;
border-radius: 4px;
padding: 1px 3px 0;
text-shadow: none;
white-space: nowrap;
}
body>*:first-child {
margin-top: 0 !important;
}
div {
margin: 0pt;
}
hr {
border: 1px solid #CCCCCC;
background: #CCCCCC;
}
h1,h2,h3,h4,h5,h6 {
color: #000000;
cursor: text;
font-weight: bold;
margin: 30px 0 10px;
padding: 0;
}
h1,h2,h3 {
margin: 40px 0 10px;
}
h1 {
margin: 70px 0 30px;
padding-top: 20px;
}
div.part h1 {
border-top: 1px dotted #CCCCCC;
}
h1,h1 code {
font-size: 32px;
}
h2,h2 code {
font-size: 24px;
}
h3,h3 code {
font-size: 20px;
}
h4,h1 code,h5,h5 code,h6,h6 code {
font-size: 18px;
}
div.book,div.chapter,div.appendix,div.part,div.preface {
min-width: 300px;
max-width: 1200px;
margin: 0 auto;
}
p.releaseinfo {
font-weight: bold;
margin-bottom: 40px;
margin-top: 40px;
}
div.authorgroup {
line-height: 1;
}
p.copyright {
line-height: 1;
margin-bottom: -5px;
}
.legalnotice p {
font-style: italic;
font-size: 14px;
line-height: 1;
}
div.titlepage+p,div.titlepage+p {
margin-top: 0;
}
pre {
line-height: 1.0;
color: black;
}
a {
color: #4183C4;
text-decoration: none;
}
p {
margin: 15px 0;
text-align: left;
}
ul,ol {
padding-left: 30px;
}
li p {
margin: 0;
}
div.table {
margin: 1em;
padding: 0.5em;
text-align: center;
}
div.table table,div.informaltable table {
display: table;
width: 100%;
}
div.table td {
padding-left: 7px;
padding-right: 7px;
}
.sidebar {
line-height: 1.4;
padding: 0 20px;
background-color: #F8F8F8;
border: 1px solid #CCCCCC;
border-radius: 3px 3px 3px 3px;
}
.sidebar p.title {
color: #6D180B;
}
pre.programlisting,pre.screen {
font-size: 15px;
padding: 6px 10px;
background-color: #F8F8F8;
border: 1px solid #CCCCCC;
border-radius: 3px 3px 3px 3px;
clear: both;
overflow: auto;
line-height: 1.4;
font-family: Consolas, "Liberation Mono", Courier, monospace;
}
table {
border-collapse: collapse;
border-spacing: 0;
border: 1px solid #DDDDDD !important;
border-radius: 4px !important;
border-collapse: separate !important;
line-height: 1.6;
}
table thead {
background: #F5F5F5;
}
table tr {
border: none;
border-bottom: none;
}
table th {
font-weight: bold;
}
table th,table td {
border: none !important;
padding: 6px 13px;
}
table tr:nth-child(2n) {
background-color: #F8F8F8;
}
td p {
margin: 0 0 15px 0;
}
div.table-contents td p {
margin: 0;
}
div.important *,div.note *,div.tip *,div.warning *,div.navheader *,div.navfooter *,div.calloutlist *
{
border: none !important;
background: none !important;
margin: 0;
}
div.important p,div.note p,div.tip p,div.warning p {
color: #6F6F6F;
line-height: 1.6;
}
div.important code,div.note code,div.tip code,div.warning code {
background-color: #F2F2F2 !important;
border: 1px solid #CCCCCC !important;
border-radius: 4px !important;
padding: 1px 3px 0 !important;
text-shadow: none !important;
white-space: nowrap !important;
}
.note th,.tip th,.warning th {
display: none;
}
.note tr:first-child td,.tip tr:first-child td,.warning tr:first-child td
{
border-right: 1px solid #CCCCCC !important;
padding-top: 10px;
}
div.calloutlist p,div.calloutlist td {
padding: 0;
margin: 0;
}
div.calloutlist>table>tbody>tr>td:first-child {
padding-left: 10px;
width: 30px !important;
}
div.important,div.note,div.tip,div.warning {
margin-left: 0px !important;
margin-right: 20px !important;
margin-top: 20px;
margin-bottom: 20px;
padding-top: 10px;
padding-bottom: 10px;
}
div.toc {
line-height: 1.2;
}
dl,dt {
margin-top: 1px;
margin-bottom: 0;
}
div.toc>dl>dt {
font-size: 32px;
font-weight: bold;
margin: 30px 0 10px 0;
display: block;
}
div.toc>dl>dd>dl>dt {
font-size: 24px;
font-weight: bold;
margin: 20px 0 10px 0;
display: block;
}
div.toc>dl>dd>dl>dd>dl>dt {
font-weight: bold;
font-size: 20px;
margin: 10px 0 0 0;
}
tbody.footnotes * {
border: none !important;
}
div.footnote p {
margin: 0;
line-height: 1;
}
div.footnote p sup {
margin-right: 6px;
vertical-align: middle;
}
div.navheader {
border-bottom: 1px solid #CCCCCC;
}
div.navfooter {
border-top: 1px solid #CCCCCC;
}
.title {
margin-left: -1em;
padding-left: 1em;
}
.title>a {
position: absolute;
visibility: hidden;
display: block;
font-size: 0.85em;
margin-top: 0.05em;
margin-left: -1em;
vertical-align: text-top;
color: black;
}
.title>a:before {
content: "\00A7";
}
.title:hover>a,.title>a:hover,.title:hover>a:hover {
visibility: visible;
}
.title:focus>a,.title>a:focus,.title:focus>a:focus {
outline: 0;
}

Binary file not shown (before: 11 KiB)
Binary file not shown (before: 2.0 KiB)
Binary file not shown (before: 2.0 KiB)
Binary file not shown (before: 45 KiB)
Binary file not shown (before: 2.2 KiB)
Binary file not shown (before: 931 B)
Binary file not shown (before: 2.1 KiB)


@@ -1,45 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:d="http://docbook.org/ns/docbook"
exclude-result-prefixes="xslthl d"
version='1.0'>
<!-- Extensions -->
<xsl:param name="use.extensions">1</xsl:param>
<xsl:param name="tablecolumns.extension">0</xsl:param>
<xsl:param name="callout.extensions">1</xsl:param>
<!-- Graphics -->
<xsl:param name="admon.graphics" select="1"/>
<xsl:param name="admon.graphics.path">images/</xsl:param>
<xsl:param name="admon.graphics.extension">.png</xsl:param>
<!-- Table of Contents -->
<xsl:param name="generate.toc">book toc,title</xsl:param>
<xsl:param name="toc.section.depth">3</xsl:param>
<!-- Hide revhistory -->
<xsl:template match="d:revhistory" mode="titlepage.mode"/>
</xsl:stylesheet>


@@ -1,31 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:d="http://docbook.org/ns/docbook"
exclude-result-prefixes="xslthl d"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="common.xsl"/>
</xsl:stylesheet>


@@ -1,73 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="html.xsl"/>
<xsl:param name="html.stylesheet">css/manual-multipage.css</xsl:param>
<xsl:param name="chunk.section.depth">'5'</xsl:param>
<xsl:param name="use.id.as.filename">'1'</xsl:param>
<!-- Replace chunk-element-content from chunk-common to add firstpage class to body -->
<xsl:template name="chunk-element-content">
<xsl:param name="prev"/>
<xsl:param name="next"/>
<xsl:param name="nav.context"/>
<xsl:param name="content">
<xsl:apply-imports/>
</xsl:param>
<xsl:call-template name="user.preroot"/>
<html>
<xsl:call-template name="html.head">
<xsl:with-param name="prev" select="$prev"/>
<xsl:with-param name="next" select="$next"/>
</xsl:call-template>
<body>
<xsl:if test="count($prev) = 0">
<xsl:attribute name="class">firstpage</xsl:attribute>
</xsl:if>
<xsl:call-template name="body.attributes"/>
<xsl:call-template name="user.header.navigation"/>
<xsl:call-template name="header.navigation">
<xsl:with-param name="prev" select="$prev"/>
<xsl:with-param name="next" select="$next"/>
<xsl:with-param name="nav.context" select="$nav.context"/>
</xsl:call-template>
<xsl:call-template name="user.header.content"/>
<xsl:copy-of select="$content"/>
<xsl:call-template name="user.footer.content"/>
<xsl:call-template name="footer.navigation">
<xsl:with-param name="prev" select="$prev"/>
<xsl:with-param name="next" select="$next"/>
<xsl:with-param name="nav.context" select="$nav.context"/>
</xsl:call-template>
<xsl:call-template name="user.footer.navigation"/>
</body>
</html>
<xsl:value-of select="$chunk.append"/>
</xsl:template>
</xsl:stylesheet>


@@ -1,30 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="html.xsl"/>
<xsl:param name="html.stylesheet">css/manual-singlepage.css</xsl:param>
</xsl:stylesheet>


@@ -1,141 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:d="http://docbook.org/ns/docbook"
exclude-result-prefixes="xslthl"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
<xsl:import href="common.xsl"/>
<!-- Only use scaling in FO -->
<xsl:param name="ignore.image.scaling">1</xsl:param>
<!-- Use code syntax highlighting -->
<xsl:param name="highlight.source">1</xsl:param>
<!-- Activate Graphics -->
<xsl:param name="callout.graphics" select="1" />
<xsl:param name="callout.defaultcolumn">120</xsl:param>
<xsl:param name="callout.graphics.path">images/callouts/</xsl:param>
<xsl:param name="callout.graphics.extension">.png</xsl:param>
<xsl:param name="table.borders.with.css" select="1"/>
<xsl:param name="html.stylesheet.type">text/css</xsl:param>
<xsl:param name="admonition.title.properties">text-align: left</xsl:param>
<!-- Leave image paths as relative when navigating XInclude -->
<xsl:param name="keep.relative.image.uris" select="1"/>
<!-- Label Chapters and Sections (numbering) -->
<xsl:param name="chapter.autolabel" select="1"/>
<xsl:param name="section.autolabel" select="1"/>
<xsl:param name="section.autolabel.max.depth" select="2"/>
<xsl:param name="section.label.includes.component.label" select="1"/>
<xsl:param name="table.footnote.number.format" select="'1'"/>
<!-- Remove "Chapter" from the Chapter titles... -->
<xsl:param name="local.l10n.xml" select="document('')"/>
<l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
<l:l10n language="en">
<l:context name="title-numbered">
<l:template name="chapter" text="%n.&#160;%t"/>
<l:template name="section" text="%n&#160;%t"/>
</l:context>
</l:l10n>
</l:i18n>
<!-- Syntax Highlighting -->
<xsl:template match='xslthl:keyword' mode="xslthl">
<span class="hl-keyword"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:comment' mode="xslthl">
<span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:oneline-comment' mode="xslthl">
<span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:multiline-comment' mode="xslthl">
<span class="hl-multiline-comment"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:tag' mode="xslthl">
<span class="hl-tag"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:attribute' mode="xslthl">
<span class="hl-attribute"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:value' mode="xslthl">
<span class="hl-value"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:string' mode="xslthl">
<span class="hl-string"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<!-- Custom Title Page -->
<xsl:template match="d:author" mode="titlepage.mode">
<xsl:if test="name(preceding-sibling::*[1]) = 'author'">
<xsl:text>, </xsl:text>
</xsl:if>
<span class="{name(.)}">
<xsl:call-template name="person.name"/>
<xsl:apply-templates mode="titlepage.mode" select="./contrib"/>
</span>
</xsl:template>
<xsl:template match="d:authorgroup" mode="titlepage.mode">
<div class="{name(.)}">
<h2>Authors</h2>
<xsl:apply-templates mode="titlepage.mode"/>
</div>
</xsl:template>
<!-- Title Links -->
<xsl:template name="anchor">
<xsl:param name="node" select="."/>
<xsl:param name="conditional" select="1"/>
<xsl:variable name="id">
<xsl:call-template name="object.id">
<xsl:with-param name="object" select="$node"/>
</xsl:call-template>
</xsl:variable>
<xsl:if test="$conditional = 0 or $node/@id or $node/@xml:id">
<xsl:element name="a">
<xsl:attribute name="name">
<xsl:value-of select="$id"/>
</xsl:attribute>
<xsl:attribute name="href">
<xsl:text>#</xsl:text>
<xsl:value-of select="$id"/>
</xsl:attribute>
</xsl:element>
</xsl:if>
</xsl:template>
</xsl:stylesheet>


@@ -1,582 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:d="http://docbook.org/ns/docbook"
xmlns:fo="http://www.w3.org/1999/XSL/Format"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:xlink='http://www.w3.org/1999/xlink'
xmlns:exsl="http://exslt.org/common"
exclude-result-prefixes="exsl xslthl d xlink"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
<xsl:import href="common.xsl"/>
<!-- Extensions -->
<xsl:param name="fop1.extensions" select="1"/>
<xsl:param name="paper.type" select="'A4'"/>
<xsl:param name="page.margin.top" select="'1cm'"/>
<xsl:param name="region.before.extent" select="'1cm'"/>
<xsl:param name="body.margin.top" select="'1.5cm'"/>
<xsl:param name="body.margin.bottom" select="'1.5cm'"/>
<xsl:param name="region.after.extent" select="'1cm'"/>
<xsl:param name="page.margin.bottom" select="'1cm'"/>
<xsl:param name="title.margin.left" select="'0cm'"/>
<!-- allow break across pages -->
<xsl:attribute-set name="formal.object.properties">
<xsl:attribute name="keep-together.within-column">auto</xsl:attribute>
</xsl:attribute-set>
<!-- use color links and sensible rendering -->
<xsl:attribute-set name="xref.properties">
<xsl:attribute name="text-decoration">underline</xsl:attribute>
<xsl:attribute name="color">#204060</xsl:attribute>
</xsl:attribute-set>
<xsl:param name="ulink.show" select="0"></xsl:param>
<xsl:param name="ulink.footnotes" select="0"></xsl:param>
<!-- TITLE PAGE -->
<xsl:template name="book.titlepage.recto">
<fo:block>
<fo:table table-layout="fixed" width="175mm">
<fo:table-column column-width="175mm"/>
<fo:table-body>
<fo:table-row>
<fo:table-cell text-align="center">
<fo:block>
<fo:external-graphic src="images/logo.png" width="240px"
height="auto" content-width="scale-to-fit"
content-height="scale-to-fit"
content-type="content-type:image/png" text-align="center"
/>
</fo:block>
<fo:block font-family="Helvetica" font-size="20pt" font-weight="bold" padding="10mm">
<xsl:value-of select="d:info/d:title"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="14pt" padding-before="2mm">
<xsl:value-of select="d:info/d:subtitle"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="14pt" padding="2mm">
<xsl:value-of select="d:info/d:releaseinfo"/>
</fo:block>
</fo:table-cell>
</fo:table-row>
<fo:table-row>
<fo:table-cell text-align="center">
<fo:block font-family="Helvetica" font-size="14pt" padding="5mm">
<xsl:value-of select="d:info/d:pubdate"/>
</fo:block>
</fo:table-cell>
</fo:table-row>
<fo:table-row>
<fo:table-cell text-align="center">
<fo:block font-family="Helvetica" font-size="10pt" padding="10mm">
<xsl:for-each select="d:info/d:authorgroup/d:author">
<xsl:if test="position() > 1">
<xsl:text>, </xsl:text>
</xsl:if>
<xsl:value-of select="."/>
</xsl:for-each>
</fo:block>
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm">
<xsl:value-of select="d:info/d:pubdate"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm" padding-before="25em">
<xsl:text>Copyright &#xA9; </xsl:text><xsl:value-of select="d:info/d:copyright"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="8pt" padding="1mm">
<xsl:value-of select="d:info/d:legalnotice"/>
</fo:block>
</fo:table-cell>
</fo:table-row>
</fo:table-body>
</fo:table>
</fo:block>
</xsl:template>
<!-- Prevent blank pages in output -->
<xsl:template name="book.titlepage.before.verso">
</xsl:template>
<xsl:template name="book.titlepage.verso">
</xsl:template>
<xsl:template name="book.titlepage.separator">
</xsl:template>
<!-- HEADER -->
<!-- More space in the center header for long text -->
<xsl:attribute-set name="header.content.properties">
<xsl:attribute name="font-family">
<xsl:value-of select="$body.font.family"/>
</xsl:attribute>
<xsl:attribute name="margin-left">-5em</xsl:attribute>
<xsl:attribute name="margin-right">-5em</xsl:attribute>
<xsl:attribute name="font-size">8pt</xsl:attribute>
</xsl:attribute-set>
<xsl:template name="header.content">
<xsl:param name="pageclass" select="''"/>
<xsl:param name="sequence" select="''"/>
<xsl:param name="position" select="''"/>
<xsl:param name="gentext-key" select="''"/>
<xsl:variable name="Version">
<xsl:choose>
<xsl:when test="//d:title">
<xsl:value-of select="//d:title"/><xsl:text> </xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>please define title in your docbook file!</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<xsl:when test="$sequence='blank'">
<xsl:choose>
<xsl:when test="$position='center'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$pageclass='titlepage'">
</xsl:when>
<xsl:when test="$position='center'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- FOOTER-->
<xsl:attribute-set name="footer.content.properties">
<xsl:attribute name="font-family">
<xsl:value-of select="$body.font.family"/>
</xsl:attribute>
<xsl:attribute name="font-size">8pt</xsl:attribute>
</xsl:attribute-set>
<xsl:template name="footer.content">
<xsl:param name="pageclass" select="''"/>
<xsl:param name="sequence" select="''"/>
<xsl:param name="position" select="''"/>
<xsl:param name="gentext-key" select="''"/>
<xsl:variable name="Version">
<xsl:choose>
<xsl:when test="//d:releaseinfo">
<xsl:value-of select="//d:releaseinfo"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="Title">
<xsl:choose>
<xsl:when test="//d:productname">
<xsl:value-of select="//d:productname"/><xsl:text> </xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>please define title in your docbook file!</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<xsl:when test="$sequence='blank'">
<xsl:choose>
<xsl:when test="$double.sided != 0 and $position = 'left'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$double.sided = 0 and $position = 'center'">
</xsl:when>
<xsl:otherwise>
<fo:page-number/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$pageclass='titlepage'">
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='left'">
<fo:page-number/>
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='right'">
<fo:page-number/>
</xsl:when>
<xsl:when test="$double.sided = 0 and $position='right'">
<fo:page-number/>
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='left'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='right'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$double.sided = 0 and $position='left'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$position='center'">
<xsl:value-of select="$Title"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="processing-instruction('hard-pagebreak')">
<fo:block break-before='page'/>
</xsl:template>
<!-- PAPER & PAGE SIZE -->
<!-- Paper type, no headers on blank pages, no double sided printing -->
<xsl:param name="double.sided">0</xsl:param>
<xsl:param name="headers.on.blank.pages">0</xsl:param>
<xsl:param name="footers.on.blank.pages">0</xsl:param>
<!-- FONTS & STYLES -->
<xsl:param name="hyphenate">false</xsl:param>
<!-- Default Font size -->
<xsl:param name="body.font.family">Helvetica</xsl:param>
<xsl:param name="body.font.master">10</xsl:param>
<xsl:param name="body.font.small">8</xsl:param>
<xsl:param name="title.font.family">Helvetica</xsl:param>
<!-- Line height in body text -->
<xsl:param name="line-height">1.4</xsl:param>
<!-- Chapter title size -->
<xsl:attribute-set name="chapter.titlepage.recto.style">
<xsl:attribute name="text-align">left</xsl:attribute>
<xsl:attribute name="font-weight">bold</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.8"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
</xsl:attribute-set>
<!-- Why is the font-size for chapters hardcoded in the XSL FO templates?
Let's remove it, so this sucker can use our attribute-set only... -->
<xsl:template match="d:title" mode="chapter.titlepage.recto.auto.mode">
<fo:block xmlns:fo="http://www.w3.org/1999/XSL/Format"
xsl:use-attribute-sets="chapter.titlepage.recto.style">
<xsl:call-template name="component.title">
<xsl:with-param name="node" select="ancestor-or-self::d:chapter[1]"/>
</xsl:call-template>
</fo:block>
</xsl:template>
<!-- Sections 1, 2 and 3 titles have a small bump factor and padding -->
<xsl:attribute-set name="section.title.level1.properties">
<xsl:attribute name="space-before.optimum">0.6em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.6em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.6em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.5"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="section.title.level2.properties">
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.25"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="section.title.level3.properties">
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.0"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="section.title.level4.properties">
<xsl:attribute name="space-before.optimum">0.3em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.3em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.3em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 0.9"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<!-- TABLES -->
<!-- Some padding inside tables -->
<xsl:attribute-set name="table.cell.padding">
<xsl:attribute name="padding-left">4pt</xsl:attribute>
<xsl:attribute name="padding-right">4pt</xsl:attribute>
<xsl:attribute name="padding-top">4pt</xsl:attribute>
<xsl:attribute name="padding-bottom">4pt</xsl:attribute>
</xsl:attribute-set>
<!-- Only hairlines as frame and cell borders in tables -->
<xsl:param name="table.frame.border.thickness">0.1pt</xsl:param>
<xsl:param name="table.cell.border.thickness">0.1pt</xsl:param>
<!-- LABELS -->
<!-- Label Chapters and Sections (numbering) -->
<xsl:param name="chapter.autolabel" select="1"/>
<xsl:param name="section.autolabel" select="1"/>
<xsl:param name="section.autolabel.max.depth" select="1"/>
<xsl:param name="section.label.includes.component.label" select="1"/>
<xsl:param name="table.footnote.number.format" select="'1'"/>
<!-- PROGRAMLISTINGS -->
<!-- Verbatim text formatting (programlistings) -->
<xsl:attribute-set name="monospace.verbatim.properties">
<xsl:attribute name="font-size">7pt</xsl:attribute>
<xsl:attribute name="wrap-option">wrap</xsl:attribute>
<xsl:attribute name="keep-together.within-column">1</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="verbatim.properties">
<xsl:attribute name="space-before.minimum">1em</xsl:attribute>
<xsl:attribute name="space-before.optimum">1em</xsl:attribute>
<xsl:attribute name="space-before.maximum">1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
<xsl:attribute name="border-color">#444444</xsl:attribute>
<xsl:attribute name="border-style">solid</xsl:attribute>
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
<xsl:attribute name="padding-top">0.5em</xsl:attribute>
<xsl:attribute name="padding-left">0.5em</xsl:attribute>
<xsl:attribute name="padding-right">0.5em</xsl:attribute>
<xsl:attribute name="padding-bottom">0.5em</xsl:attribute>
<xsl:attribute name="margin-left">0.5em</xsl:attribute>
<xsl:attribute name="margin-right">0.5em</xsl:attribute>
</xsl:attribute-set>
<!-- Shade (background) programlistings -->
<xsl:param name="shade.verbatim">1</xsl:param>
<xsl:attribute-set name="shade.verbatim.style">
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="list.block.spacing">
<xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="example.properties">
<xsl:attribute name="space-before.minimum">0.5em</xsl:attribute>
<xsl:attribute name="space-before.optimum">0.5em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.5em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="sidebar.properties">
<xsl:attribute name="border-color">#444444</xsl:attribute>
<xsl:attribute name="border-style">solid</xsl:attribute>
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
</xsl:attribute-set>
<!-- TITLE INFORMATION FOR FIGURES, EXAMPLES ETC. -->
<xsl:attribute-set name="formal.title.properties" use-attribute-sets="normal.para.spacing">
<xsl:attribute name="font-weight">normal</xsl:attribute>
<xsl:attribute name="font-style">italic</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="hyphenate">false</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<!-- CALLOUTS -->
<!-- don't use images for callouts -->
<xsl:param name="callout.graphics">0</xsl:param>
<xsl:param name="callout.unicode">1</xsl:param>
<!-- Place callout marks at this column in annotated areas -->
<xsl:param name="callout.defaultcolumn">90</xsl:param>
<!-- MISC -->
<!-- Placement of titles -->
<xsl:param name="formal.title.placement">
figure after
example after
equation before
table before
procedure before
</xsl:param>
<!-- Format Variable Lists as Blocks (prevents horizontal overflow) -->
<xsl:param name="variablelist.as.blocks">1</xsl:param>
<xsl:param name="body.start.indent">0pt</xsl:param>
<!-- Remove "Chapter" from the Chapter titles... -->
<xsl:param name="local.l10n.xml" select="document('')"/>
<l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
<l:l10n language="en">
<l:context name="title-numbered">
<l:template name="chapter" text="%n.&#160;%t"/>
<l:template name="section" text="%n&#160;%t"/>
</l:context>
<l:context name="title">
<l:template name="example" text="Example&#160;%n&#160;%t"/>
</l:context>
</l:l10n>
</l:i18n>
<!-- admon -->
<xsl:param name="admon.graphics" select="0"/>
<xsl:attribute-set name="nongraphical.admonition.properties">
<xsl:attribute name="margin-left">0.1em</xsl:attribute>
<xsl:attribute name="margin-right">2em</xsl:attribute>
<xsl:attribute name="border-left-width">.75pt</xsl:attribute>
<xsl:attribute name="border-left-style">solid</xsl:attribute>
<xsl:attribute name="border-left-color">#5c5c4f</xsl:attribute>
<xsl:attribute name="padding-left">0.5em</xsl:attribute>
<xsl:attribute name="space-before.optimum">1.5em</xsl:attribute>
<xsl:attribute name="space-before.minimum">1.5em</xsl:attribute>
<xsl:attribute name="space-before.maximum">1.5em</xsl:attribute>
<xsl:attribute name="space-after.optimum">1.5em</xsl:attribute>
<xsl:attribute name="space-after.minimum">1.5em</xsl:attribute>
<xsl:attribute name="space-after.maximum">1.5em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="admonition.title.properties">
<xsl:attribute name="font-size">10pt</xsl:attribute>
<xsl:attribute name="font-weight">bold</xsl:attribute>
<xsl:attribute name="hyphenate">false</xsl:attribute>
<xsl:attribute name="keep-with-next.within-column">always</xsl:attribute>
<xsl:attribute name="margin-left">0</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="admonition.properties">
<xsl:attribute name="space-before.optimum">0em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0em</xsl:attribute>
</xsl:attribute-set>
<!-- Asciidoc -->
<xsl:template match="processing-instruction('asciidoc-br')">
<fo:block/>
</xsl:template>
<xsl:template match="processing-instruction('asciidoc-hr')">
<fo:block space-after="1em">
<fo:leader leader-pattern="rule" rule-thickness="0.5pt" rule-style="solid" leader-length.minimum="100%"/>
</fo:block>
</xsl:template>
<xsl:template match="processing-instruction('asciidoc-pagebreak')">
<fo:block break-after='page'/>
</xsl:template>
<!-- SYNTAX HIGHLIGHT -->
<xsl:template match='xslthl:keyword' mode="xslthl">
<fo:inline font-weight="bold" color="#7F0055"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:string' mode="xslthl">
<fo:inline font-weight="bold" font-style="italic" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:comment' mode="xslthl">
<fo:inline font-style="italic" color="#3F5FBF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:tag' mode="xslthl">
<fo:inline font-weight="bold" color="#3F7F7F"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:attribute' mode="xslthl">
<fo:inline font-weight="bold" color="#7F007F"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:value' mode="xslthl">
<fo:inline font-weight="bold" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
</xsl:stylesheet>


@@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<xslthl-config>
<highlighter id="java" file="./xslthl/java-hl.xml" />
<highlighter id="groovy" file="./xslthl/java-hl.xml" />
<highlighter id="html" file="./xslthl/html-hl.xml" />
<highlighter id="ini" file="./xslthl/ini-hl.xml" />
<highlighter id="php" file="./xslthl/php-hl.xml" />
<highlighter id="c" file="./xslthl/c-hl.xml" />
<highlighter id="cpp" file="./xslthl/cpp-hl.xml" />
<highlighter id="csharp" file="./xslthl/csharp-hl.xml" />
<highlighter id="python" file="./xslthl/python-hl.xml" />
<highlighter id="ruby" file="./xslthl/ruby-hl.xml" />
<highlighter id="perl" file="./xslthl/perl-hl.xml" />
<highlighter id="javascript" file="./xslthl/javascript-hl.xml" />
<highlighter id="bash" file="./xslthl/bourne-hl.xml" />
<highlighter id="css" file="./xslthl/css-hl.xml" />
<highlighter id="sql" file="./xslthl/sql2003-hl.xml" />
<highlighter id="asciidoc" file="./xslthl/asciidoc-hl.xml" />
<highlighter id="properties" file="./xslthl/properties-hl.xml" />
<highlighter id="json" file="./xslthl/json-hl.xml" />
<highlighter id="yaml" file="./xslthl/yaml-hl.xml" />
<namespace prefix="xslthl" uri="http://xslthl.sf.net" />
</xslthl-config>


@@ -1,41 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for AsciiDoc files
-->
<highlighters>
<highlighter type="multiline-comment">
<start>////</start>
<end>////</end>
</highlighter>
<highlighter type="oneline-comment">
<start>//</start>
<solitary/>
</highlighter>
<highlighter type="regex">
<pattern>^(={1,6} .+)$</pattern>
<style>heading</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(\.[^\.\s].+)$</pattern>
<style>title</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(:!?\w.*?:)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(-|\*{1,5}|\d*\.{1,5})(?= .+$)</pattern>
<style>bullet</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(\[.+\])$</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>


@@ -1,95 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Syntax highlighting definition for SH
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2010 Mathieu Malaterre
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;</start>
<quote>'</quote>
<quote>"</quote>
<flag>-</flag>
<noWhiteSpace />
<looseTerminator />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<!-- reserved words -->
<keyword>if</keyword>
<keyword>then</keyword>
<keyword>else</keyword>
<keyword>elif</keyword>
<keyword>fi</keyword>
<keyword>case</keyword>
<keyword>esac</keyword>
<keyword>for</keyword>
<keyword>while</keyword>
<keyword>until</keyword>
<keyword>do</keyword>
<keyword>done</keyword>
<!-- built-ins -->
<keyword>exec</keyword>
<keyword>shift</keyword>
<keyword>exit</keyword>
<keyword>times</keyword>
<keyword>break</keyword>
<keyword>export</keyword>
<keyword>trap</keyword>
<keyword>continue</keyword>
<keyword>readonly</keyword>
<keyword>wait</keyword>
<keyword>eval</keyword>
<keyword>return</keyword>
<!-- other commands -->
<keyword>cd</keyword>
<keyword>echo</keyword>
<keyword>hash</keyword>
<keyword>pwd</keyword>
<keyword>read</keyword>
<keyword>set</keyword>
<keyword>test</keyword>
<keyword>type</keyword>
<keyword>ulimit</keyword>
<keyword>umask</keyword>
<keyword>unset</keyword>
</highlighter>
</highlighters>


@@ -1,117 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for C
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start><![CDATA[/// ]]></start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="oneline-comment">
<!-- use the online-comment highlighter to detect directives -->
<start>#</start>
<lineBreakEscape>\</lineBreakEscape>
<style>directive</style>
<solitary />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>f</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>auto</keyword>
<keyword>_Bool</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>char</keyword>
<keyword>_Complex</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>enum</keyword>
<keyword>extern</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>_Imaginary</keyword>
<keyword>inline</keyword>
<keyword>int</keyword>
<keyword>long</keyword>
<keyword>register</keyword>
<keyword>restrict</keyword>
<keyword>return</keyword>
<keyword>short</keyword>
<keyword>signed</keyword>
<keyword>sizeof</keyword>
<keyword>static</keyword>
<keyword>struct</keyword>
<keyword>switch</keyword>
<keyword>typedef</keyword>
<keyword>union</keyword>
<keyword>unsigned</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
</highlighter>
</highlighters>


@@ -1,151 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for C++
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start><![CDATA[/// ]]></start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="oneline-comment">
<!-- use the online-comment highlighter to detect directives -->
<start>#</start>
<lineBreakEscape>\</lineBreakEscape>
<style>directive</style>
<solitary/>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>f</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<!-- C keywords -->
<keyword>auto</keyword>
<keyword>_Bool</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>char</keyword>
<keyword>_Complex</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>enum</keyword>
<keyword>extern</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>_Imaginary</keyword>
<keyword>inline</keyword>
<keyword>int</keyword>
<keyword>long</keyword>
<keyword>register</keyword>
<keyword>restrict</keyword>
<keyword>return</keyword>
<keyword>short</keyword>
<keyword>signed</keyword>
<keyword>sizeof</keyword>
<keyword>static</keyword>
<keyword>struct</keyword>
<keyword>switch</keyword>
<keyword>typedef</keyword>
<keyword>union</keyword>
<keyword>unsigned</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
<!-- C++ keywords -->
<keyword>asm</keyword>
<keyword>dynamic_cast</keyword>
<keyword>namespace</keyword>
<keyword>reinterpret_cast</keyword>
<keyword>try</keyword>
<keyword>bool</keyword>
<keyword>explicit</keyword>
<keyword>new</keyword>
<keyword>static_cast</keyword>
<keyword>typeid</keyword>
<keyword>catch</keyword>
<keyword>false</keyword>
<keyword>operator</keyword>
<keyword>template</keyword>
<keyword>typename</keyword>
<keyword>class</keyword>
<keyword>friend</keyword>
<keyword>private</keyword>
<keyword>this</keyword>
<keyword>using</keyword>
<keyword>const_cast</keyword>
<keyword>inline</keyword>
<keyword>public</keyword>
<keyword>throw</keyword>
<keyword>virtual</keyword>
<keyword>delete</keyword>
<keyword>mutable</keyword>
<keyword>protected</keyword>
<keyword>true</keyword>
<keyword>wchar_t</keyword>
</highlighter>
</highlighters>


@@ -1,194 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for C#
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start>///</start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="annotation">
<!-- annotations are called (custom) "attributes" in .NET -->
<start>[</start>
<end>]</end>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="oneline-comment">
<!-- C# supports a couple of directives -->
<start>#</start>
<lineBreakEscape>\</lineBreakEscape>
<style>directive</style>
<solitary/>
</highlighter>
<highlighter type="string">
<!-- strings starting with an "@" can span multiple lines -->
<string>@"</string>
<endString>"</endString>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>m</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>abstract</keyword>
<keyword>as</keyword>
<keyword>base</keyword>
<keyword>bool</keyword>
<keyword>break</keyword>
<keyword>byte</keyword>
<keyword>case</keyword>
<keyword>catch</keyword>
<keyword>char</keyword>
<keyword>checked</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>decimal</keyword>
<keyword>default</keyword>
<keyword>delegate</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>enum</keyword>
<keyword>event</keyword>
<keyword>explicit</keyword>
<keyword>extern</keyword>
<keyword>false</keyword>
<keyword>finally</keyword>
<keyword>fixed</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>foreach</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>implicit</keyword>
<keyword>in</keyword>
<keyword>int</keyword>
<keyword>interface</keyword>
<keyword>internal</keyword>
<keyword>is</keyword>
<keyword>lock</keyword>
<keyword>long</keyword>
<keyword>namespace</keyword>
<keyword>new</keyword>
<keyword>null</keyword>
<keyword>object</keyword>
<keyword>operator</keyword>
<keyword>out</keyword>
<keyword>override</keyword>
<keyword>params</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>public</keyword>
<keyword>readonly</keyword>
<keyword>ref</keyword>
<keyword>return</keyword>
<keyword>sbyte</keyword>
<keyword>sealed</keyword>
<keyword>short</keyword>
<keyword>sizeof</keyword>
<keyword>stackalloc</keyword>
<keyword>static</keyword>
<keyword>string</keyword>
<keyword>struct</keyword>
<keyword>switch</keyword>
<keyword>this</keyword>
<keyword>throw</keyword>
<keyword>true</keyword>
<keyword>try</keyword>
<keyword>typeof</keyword>
<keyword>uint</keyword>
<keyword>ulong</keyword>
<keyword>unchecked</keyword>
<keyword>unsafe</keyword>
<keyword>ushort</keyword>
<keyword>using</keyword>
<keyword>virtual</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
</highlighter>
<highlighter type="keywords">
<!-- special words, not really keywords -->
<keyword>add</keyword>
<keyword>alias</keyword>
<keyword>from</keyword>
<keyword>get</keyword>
<keyword>global</keyword>
<keyword>group</keyword>
<keyword>into</keyword>
<keyword>join</keyword>
<keyword>orderby</keyword>
<keyword>partial</keyword>
<keyword>remove</keyword>
<keyword>select</keyword>
<keyword>set</keyword>
<keyword>value</keyword>
<keyword>where</keyword>
<keyword>yield</keyword>
</highlighter>
</highlighters>
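
The `@"` rule above is the notable one: thanks to `<spanNewLines />`, a C# verbatim string may cross line boundaries, while the plain `"` rule may not. A hedged sketch of the markup xslthl would emit (token elements are named after their style, with the xslthl prefix assumed bound to http://xslthl.sf.net):

<!-- input:  string s = @"line one
                         line two";  -->
<xslthl:keyword>string</xslthl:keyword> s = <xslthl:string>@"line one
line two"</xslthl:string>;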


@@ -1,176 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for CSS files
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2011-2012 Martin Hujer, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Martin Hujer <mhujer at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
Reference: http://www.w3.org/TR/CSS21/propidx.html
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
<spanNewLines/>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines/>
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
</highlighter>
<highlighter type="word">
<word>@charset</word>
<word>@import</word>
<word>@media</word>
<word>@page</word>
<style>directive</style>
</highlighter>
<highlighter type="keywords">
<partChars>-</partChars>
<keyword>azimuth</keyword>
<keyword>background-attachment</keyword>
<keyword>background-color</keyword>
<keyword>background-image</keyword>
<keyword>background-position</keyword>
<keyword>background-repeat</keyword>
<keyword>background</keyword>
<keyword>border-collapse</keyword>
<keyword>border-color</keyword>
<keyword>border-spacing</keyword>
<keyword>border-style</keyword>
<keyword>border-top</keyword>
<keyword>border-right</keyword>
<keyword>border-bottom</keyword>
<keyword>border-left</keyword>
<keyword>border-top-color</keyword>
<keyword>border-right-color</keyword>
<keyword>border-bottom-color</keyword>
<keyword>border-left-color</keyword>
<keyword>border-top-style</keyword>
<keyword>border-right-style</keyword>
<keyword>border-bottom-style</keyword>
<keyword>border-left-style</keyword>
<keyword>border-top-width</keyword>
<keyword>border-right-width</keyword>
<keyword>border-bottom-width</keyword>
<keyword>border-left-width</keyword>
<keyword>border-width</keyword>
<keyword>border</keyword>
<keyword>bottom</keyword>
<keyword>caption-side</keyword>
<keyword>clear</keyword>
<keyword>clip</keyword>
<keyword>color</keyword>
<keyword>content</keyword>
<keyword>counter-increment</keyword>
<keyword>counter-reset</keyword>
<keyword>cue-after</keyword>
<keyword>cue-before</keyword>
<keyword>cue</keyword>
<keyword>cursor</keyword>
<keyword>direction</keyword>
<keyword>display</keyword>
<keyword>elevation</keyword>
<keyword>empty-cells</keyword>
<keyword>float</keyword>
<keyword>font-family</keyword>
<keyword>font-size</keyword>
<keyword>font-style</keyword>
<keyword>font-variant</keyword>
<keyword>font-weight</keyword>
<keyword>font</keyword>
<keyword>height</keyword>
<keyword>left</keyword>
<keyword>letter-spacing</keyword>
<keyword>line-height</keyword>
<keyword>list-style-image</keyword>
<keyword>list-style-position</keyword>
<keyword>list-style-type</keyword>
<keyword>list-style</keyword>
<keyword>margin-right</keyword>
<keyword>margin-left</keyword>
<keyword>margin-top</keyword>
<keyword>margin-bottom</keyword>
<keyword>margin</keyword>
<keyword>max-height</keyword>
<keyword>max-width</keyword>
<keyword>min-height</keyword>
<keyword>min-width</keyword>
<keyword>orphans</keyword>
<keyword>outline-color</keyword>
<keyword>outline-style</keyword>
<keyword>outline-width</keyword>
<keyword>outline</keyword>
<keyword>overflow</keyword>
<keyword>padding-top</keyword>
<keyword>padding-right</keyword>
<keyword>padding-bottom</keyword>
<keyword>padding-left</keyword>
<keyword>padding</keyword>
<keyword>page-break-after</keyword>
<keyword>page-break-before</keyword>
<keyword>page-break-inside</keyword>
<keyword>pause-after</keyword>
<keyword>pause-before</keyword>
<keyword>pause</keyword>
<keyword>pitch-range</keyword>
<keyword>pitch</keyword>
<keyword>play-during</keyword>
<keyword>position</keyword>
<keyword>quotes</keyword>
<keyword>richness</keyword>
<keyword>right</keyword>
<keyword>speak-header</keyword>
<keyword>speak-numeral</keyword>
<keyword>speak-punctuation</keyword>
<keyword>speak</keyword>
<keyword>speech-rate</keyword>
<keyword>stress</keyword>
<keyword>table-layout</keyword>
<keyword>text-align</keyword>
<keyword>text-decoration</keyword>
<keyword>text-indent</keyword>
<keyword>text-transform</keyword>
<keyword>top</keyword>
<keyword>unicode-bidi</keyword>
<keyword>vertical-align</keyword>
<keyword>visibility</keyword>
<keyword>voice-family</keyword>
<keyword>volume</keyword>
<keyword>white-space</keyword>
<keyword>widows</keyword>
<keyword>width</keyword>
<keyword>word-spacing</keyword>
<keyword>z-index</keyword>
</highlighter>
</highlighters>


@@ -1,122 +0,0 @@
<?xml version='1.0'?>
<!--
Bachelor's thesis: Syntax highlighting in XSLT
Michal Molhanec 2005
myxml-hl.xml - configuration of an XML highlighter that highlights
HTML elements and XSL elements separately
This file has been customized for the Asciidoctor project (http://asciidoctor.org).
-->
<highlighters>
<highlighter type="xml">
<elementSet>
<style>htmltag</style>
<element>a</element>
<element>abbr</element>
<element>address</element>
<element>area</element>
<element>article</element>
<element>aside</element>
<element>audio</element>
<element>b</element>
<element>base</element>
<element>bdi</element>
<element>blockquote</element>
<element>body</element>
<element>br</element>
<element>button</element>
<element>caption</element>
<element>canvas</element>
<element>cite</element>
<element>code</element>
<element>command</element>
<element>col</element>
<element>colgroup</element>
<element>dd</element>
<element>del</element>
<element>dialog</element>
<element>div</element>
<element>dl</element>
<element>dt</element>
<element>em</element>
<element>embed</element>
<element>fieldset</element>
<element>figcaption</element>
<element>figure</element>
<element>font</element>
<element>form</element>
<element>footer</element>
<element>h1</element>
<element>h2</element>
<element>h3</element>
<element>h4</element>
<element>h5</element>
<element>h6</element>
<element>head</element>
<element>header</element>
<element>hr</element>
<element>html</element>
<element>i</element>
<element>iframe</element>
<element>img</element>
<element>input</element>
<element>ins</element>
<element>kbd</element>
<element>label</element>
<element>legend</element>
<element>li</element>
<element>link</element>
<element>map</element>
<element>mark</element>
<element>menu</element>
<element>meta</element>
<element>nav</element>
<element>noscript</element>
<element>object</element>
<element>ol</element>
<element>optgroup</element>
<element>option</element>
<element>p</element>
<element>param</element>
<element>pre</element>
<element>q</element>
<element>samp</element>
<element>script</element>
<element>section</element>
<element>select</element>
<element>small</element>
<element>source</element>
<element>span</element>
<element>strong</element>
<element>style</element>
<element>sub</element>
<element>summary</element>
<element>sup</element>
<element>table</element>
<element>tbody</element>
<element>td</element>
<element>textarea</element>
<element>tfoot</element>
<element>th</element>
<element>thead</element>
<element>time</element>
<element>title</element>
<element>tr</element>
<element>track</element>
<element>u</element>
<element>ul</element>
<element>var</element>
<element>video</element>
<element>wbr</element>
<element>xmp</element>
<ignoreCase/>
</elementSet>
<elementPrefix>
<style>namespace</style>
<prefix>xsl:</prefix>
</elementPrefix>
</highlighter>
</highlighters>
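
The two rules above split element names into styles: names from the HTML set take the `htmltag` style, and anything with the `xsl:` prefix takes the `namespace` style. A hedged sketch of the effect (the output element names are assumed to follow the configured style names):

<!-- input:  <div><xsl:value-of select="."/></div>  -->
&lt;<xslthl:htmltag>div</xslthl:htmltag>&gt;&lt;<xslthl:namespace>xsl:value-of</xslthl:namespace> select="."/&gt;&lt;/<xslthl:htmltag>div</xslthl:htmltag>&gt;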


@@ -1,45 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for ini files
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">;</highlighter>
<highlighter type="regex">
<!-- ini sections -->
<pattern>^(\[.+\]\s*)$</pattern>
<style>keyword</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<!-- the keys in an ini section -->
<pattern>^(.+)(?==)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>
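
Concretely, the first regex styles any whole line of the form `[...]` as a keyword, and the second uses the lookahead `(?==)` to capture a key up to, but not including, its `=`; both run with MULTILINE so `^` and `$` anchor at line boundaries. A hedged sketch of the output (element names assumed from the style names):

<!-- input:
     ; connection settings
     [database]
     url=localhost
-->
<xslthl:comment>; connection settings</xslthl:comment>
<xslthl:keyword>[database]</xslthl:keyword>
<xslthl:attribute>url</xslthl:attribute>=localhost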


@@ -1,117 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Java
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="annotation">
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>abstract</keyword>
<keyword>boolean</keyword>
<keyword>break</keyword>
<keyword>byte</keyword>
<keyword>case</keyword>
<keyword>catch</keyword>
<keyword>char</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>extends</keyword>
<keyword>final</keyword>
<keyword>finally</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>implements</keyword>
<keyword>import</keyword>
<keyword>instanceof</keyword>
<keyword>int</keyword>
<keyword>interface</keyword>
<keyword>long</keyword>
<keyword>native</keyword>
<keyword>new</keyword>
<keyword>package</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>public</keyword>
<keyword>return</keyword>
<keyword>short</keyword>
<keyword>static</keyword>
<keyword>strictfp</keyword>
<keyword>super</keyword>
<keyword>switch</keyword>
<keyword>synchronized</keyword>
<keyword>this</keyword>
<keyword>throw</keyword>
<keyword>throws</keyword>
<keyword>transient</keyword>
<keyword>try</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
</highlighter>
</highlighters>
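
The annotation rule pairs a bare `@` start with optional `(`...`)` value delimiters, so marker annotations and annotations with arguments are both caught. A hedged sketch (the output element name, and whether the parenthesized value is included in the span, are assumptions):

<!-- input:  @Override  and  @SuppressWarnings("unchecked")  -->
<xslthl:annotation>@Override</xslthl:annotation>
<xslthl:annotation>@SuppressWarnings("unchecked")</xslthl:annotation>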


@@ -1,147 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for JavaScript
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>catch</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>delete</keyword>
<keyword>do</keyword>
<keyword>else</keyword>
<keyword>finally</keyword>
<keyword>for</keyword>
<keyword>function</keyword>
<keyword>if</keyword>
<keyword>in</keyword>
<keyword>instanceof</keyword>
<keyword>new</keyword>
<keyword>return</keyword>
<keyword>switch</keyword>
<keyword>this</keyword>
<keyword>throw</keyword>
<keyword>try</keyword>
<keyword>typeof</keyword>
<keyword>var</keyword>
<keyword>void</keyword>
<keyword>while</keyword>
<keyword>with</keyword>
<!-- future keywords -->
<keyword>abstract</keyword>
<keyword>boolean</keyword>
<keyword>byte</keyword>
<keyword>char</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>debugger</keyword>
<keyword>double</keyword>
<keyword>enum</keyword>
<keyword>export</keyword>
<keyword>extends</keyword>
<keyword>final</keyword>
<keyword>float</keyword>
<keyword>goto</keyword>
<keyword>implements</keyword>
<keyword>import</keyword>
<keyword>int</keyword>
<keyword>interface</keyword>
<keyword>long</keyword>
<keyword>native</keyword>
<keyword>package</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>public</keyword>
<keyword>short</keyword>
<keyword>static</keyword>
<keyword>super</keyword>
<keyword>synchronized</keyword>
<keyword>throws</keyword>
<keyword>transient</keyword>
<keyword>volatile</keyword>
</highlighter>
<highlighter type="keywords">
<keyword>prototype</keyword>
<!-- Global Objects -->
<keyword>Array</keyword>
<keyword>Boolean</keyword>
<keyword>Date</keyword>
<keyword>Error</keyword>
<keyword>EvalError</keyword>
<keyword>Function</keyword>
<keyword>Math</keyword>
<keyword>Number</keyword>
<keyword>Object</keyword>
<keyword>RangeError</keyword>
<keyword>ReferenceError</keyword>
<keyword>RegExp</keyword>
<keyword>String</keyword>
<keyword>SyntaxError</keyword>
<keyword>TypeError</keyword>
<keyword>URIError</keyword>
<!-- Global functions -->
<keyword>decodeURI</keyword>
<keyword>decodeURIComponent</keyword>
<keyword>encodeURI</keyword>
<keyword>encodeURIComponent</keyword>
<keyword>eval</keyword>
<keyword>isFinite</keyword>
<keyword>isNaN</keyword>
<keyword>parseFloat</keyword>
<keyword>parseInt</keyword>
<!-- Global properties -->
<keyword>Infinity</keyword>
<keyword>NaN</keyword>
<keyword>undefined</keyword>
</highlighter>
</highlighters>


@@ -1,37 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="annotation">
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>true</keyword>
<keyword>false</keyword>
</highlighter>
<highlighter type="word">
<word>{</word>
<word>}</word>
<word>,</word>
<word>[</word>
<word>]</word>
<style>keyword</style>
</highlighter>
</highlighters>


@@ -1,120 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Perl
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;</start>
<quote>'</quote>
<quote>"</quote>
<noWhiteSpace/>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines/>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>if</keyword>
<keyword>unless</keyword>
<keyword>while</keyword>
<keyword>until</keyword>
<keyword>foreach</keyword>
<keyword>else</keyword>
<keyword>elsif</keyword>
<keyword>for</keyword>
<keyword>when</keyword>
<keyword>default</keyword>
<keyword>given</keyword>
<!-- Keywords related to the control flow of your perl program -->
<keyword>caller</keyword>
<keyword>continue</keyword>
<keyword>die</keyword>
<keyword>do</keyword>
<keyword>dump</keyword>
<keyword>eval</keyword>
<keyword>exit</keyword>
<keyword>goto</keyword>
<keyword>last</keyword>
<keyword>next</keyword>
<keyword>redo</keyword>
<keyword>return</keyword>
<keyword>sub</keyword>
<keyword>wantarray</keyword>
<!-- Keywords related to scoping -->
<keyword>caller</keyword>
<keyword>import</keyword>
<keyword>local</keyword>
<keyword>my</keyword>
<keyword>package</keyword>
<keyword>use</keyword>
<!-- Keywords related to perl modules -->
<keyword>do</keyword>
<keyword>import</keyword>
<keyword>no</keyword>
<keyword>package</keyword>
<keyword>require</keyword>
<keyword>use</keyword>
<!-- Keywords related to classes and object-orientedness -->
<keyword>bless</keyword>
<keyword>dbmclose</keyword>
<keyword>dbmopen</keyword>
<keyword>package</keyword>
<keyword>ref</keyword>
<keyword>tie</keyword>
<keyword>tied</keyword>
<keyword>untie</keyword>
<keyword>use</keyword>
<!-- operators -->
<keyword>and</keyword>
<keyword>or</keyword>
<keyword>not</keyword>
<keyword>eq</keyword>
<keyword>ne</keyword>
<keyword>lt</keyword>
<keyword>gt</keyword>
<keyword>le</keyword>
<keyword>ge</keyword>
<keyword>cmp</keyword>
</highlighter>
</highlighters>


@@ -1,154 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for PHP
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start><![CDATA[/// ]]></start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;&lt;</start>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>and</keyword>
<keyword>or</keyword>
<keyword>xor</keyword>
<keyword>__FILE__</keyword>
<keyword>exception</keyword>
<keyword>__LINE__</keyword>
<keyword>array</keyword>
<keyword>as</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>declare</keyword>
<keyword>default</keyword>
<keyword>die</keyword>
<keyword>do</keyword>
<keyword>echo</keyword>
<keyword>else</keyword>
<keyword>elseif</keyword>
<keyword>empty</keyword>
<keyword>enddeclare</keyword>
<keyword>endfor</keyword>
<keyword>endforeach</keyword>
<keyword>endif</keyword>
<keyword>endswitch</keyword>
<keyword>endwhile</keyword>
<keyword>eval</keyword>
<keyword>exit</keyword>
<keyword>extends</keyword>
<keyword>for</keyword>
<keyword>foreach</keyword>
<keyword>function</keyword>
<keyword>global</keyword>
<keyword>if</keyword>
<keyword>include</keyword>
<keyword>include_once</keyword>
<keyword>isset</keyword>
<keyword>list</keyword>
<keyword>new</keyword>
<keyword>print</keyword>
<keyword>require</keyword>
<keyword>require_once</keyword>
<keyword>return</keyword>
<keyword>static</keyword>
<keyword>switch</keyword>
<keyword>unset</keyword>
<keyword>use</keyword>
<keyword>var</keyword>
<keyword>while</keyword>
<keyword>__FUNCTION__</keyword>
<keyword>__CLASS__</keyword>
<keyword>__METHOD__</keyword>
<keyword>final</keyword>
<keyword>php_user_filter</keyword>
<keyword>interface</keyword>
<keyword>implements</keyword>
<keyword>extends</keyword>
<keyword>public</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>abstract</keyword>
<keyword>clone</keyword>
<keyword>try</keyword>
<keyword>catch</keyword>
<keyword>throw</keyword>
<keyword>cfunction</keyword>
<keyword>old_function</keyword>
<keyword>true</keyword>
<keyword>false</keyword>
<!-- PHP 5.3 -->
<keyword>namespace</keyword>
<keyword>__NAMESPACE__</keyword>
<keyword>goto</keyword>
<keyword>__DIR__</keyword>
<ignoreCase />
</highlighter>
<highlighter type="word">
<!-- highlight the php open and close tags as directives -->
<word>?&gt;</word>
<word>&lt;?php</word>
<word>&lt;?=</word>
<style>directive</style>
</highlighter>
</highlighters>
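
Decoded, the three `<word>` entries above are `<?php`, `<?=` and `?>`: the word highlighter matches those exact tokens and styles them as directives. A hedged sketch (output element name assumed):

<!-- input:  <?php echo $x; ?>  -->
<xslthl:directive>&lt;?php</xslthl:directive> echo $x; <xslthl:directive>?&gt;</xslthl:directive>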


@@ -1,38 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for properties files
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="regex">
<pattern>^(.+?)(?==|:)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>
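
Here `^(.+?)(?==|:)` is a non-greedy match ending at a lookahead, so a key is styled up to the first `=` or `:` and the delimiter itself stays unstyled. A hedged sketch with a made-up property (output element name assumed):

<!-- input:  spring.kafka.bootstrap-servers=localhost:9092  -->
<xslthl:attribute>spring.kafka.bootstrap-servers</xslthl:attribute>=localhost:9092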


@@ -1,100 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Python
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="annotation">
<!-- these are actually called decorators -->
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"""</string>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>'''</string>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>and</keyword>
<keyword>del</keyword>
<keyword>from</keyword>
<keyword>not</keyword>
<keyword>while</keyword>
<keyword>as</keyword>
<keyword>elif</keyword>
<keyword>global</keyword>
<keyword>or</keyword>
<keyword>with</keyword>
<keyword>assert</keyword>
<keyword>else</keyword>
<keyword>if</keyword>
<keyword>pass</keyword>
<keyword>yield</keyword>
<keyword>break</keyword>
<keyword>except</keyword>
<keyword>import</keyword>
<keyword>print</keyword>
<keyword>class</keyword>
<keyword>exec</keyword>
<keyword>in</keyword>
<keyword>raise</keyword>
<keyword>continue</keyword>
<keyword>finally</keyword>
<keyword>is</keyword>
<keyword>return</keyword>
<keyword>def</keyword>
<keyword>for</keyword>
<keyword>lambda</keyword>
<keyword>try</keyword>
</highlighter>
</highlighters>


@@ -1,109 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Ruby
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;</start>
<noWhiteSpace/>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%Q{</string>
<endString>}</endString>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%/</string>
<endString>/</endString>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%q{</string>
<endString>}</endString>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>alias</keyword>
<keyword>and</keyword>
<keyword>BEGIN</keyword>
<keyword>begin</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>class</keyword>
<keyword>def</keyword>
<keyword>defined</keyword>
<keyword>do</keyword>
<keyword>else</keyword>
<keyword>elsif</keyword>
<keyword>END</keyword>
<keyword>end</keyword>
<keyword>ensure</keyword>
<keyword>false</keyword>
<keyword>for</keyword>
<keyword>if</keyword>
<keyword>in</keyword>
<keyword>module</keyword>
<keyword>next</keyword>
<keyword>nil</keyword>
<keyword>not</keyword>
<keyword>or</keyword>
<keyword>redo</keyword>
<keyword>rescue</keyword>
<keyword>retry</keyword>
<keyword>return</keyword>
<keyword>self</keyword>
<keyword>super</keyword>
<keyword>then</keyword>
<keyword>true</keyword>
<keyword>undef</keyword>
<keyword>unless</keyword>
<keyword>until</keyword>
<keyword>when</keyword>
<keyword>while</keyword>
<keyword>yield</keyword>
</highlighter>
</highlighters>


@@ -1,565 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Syntax highlighting definition for SQL:1999
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2012 Michiel Hendriks, Martin Hujer, k42b3
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
-->
<highlighters>
<highlighter type="oneline-comment">--</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="string">
<string>'</string>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>U'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>B'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>N'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>X'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<ignoreCase />
<!-- non-reserved -->
<keyword>A</keyword>
<keyword>ABS</keyword>
<keyword>ABSOLUTE</keyword>
<keyword>ACTION</keyword>
<keyword>ADA</keyword>
<keyword>ADMIN</keyword>
<keyword>AFTER</keyword>
<keyword>ALWAYS</keyword>
<keyword>ASC</keyword>
<keyword>ASSERTION</keyword>
<keyword>ASSIGNMENT</keyword>
<keyword>ATTRIBUTE</keyword>
<keyword>ATTRIBUTES</keyword>
<keyword>AVG</keyword>
<keyword>BEFORE</keyword>
<keyword>BERNOULLI</keyword>
<keyword>BREADTH</keyword>
<keyword>C</keyword>
<keyword>CARDINALITY</keyword>
<keyword>CASCADE</keyword>
<keyword>CATALOG_NAME</keyword>
<keyword>CATALOG</keyword>
<keyword>CEIL</keyword>
<keyword>CEILING</keyword>
<keyword>CHAIN</keyword>
<keyword>CHAR_LENGTH</keyword>
<keyword>CHARACTER_LENGTH</keyword>
<keyword>CHARACTER_SET_CATALOG</keyword>
<keyword>CHARACTER_SET_NAME</keyword>
<keyword>CHARACTER_SET_SCHEMA</keyword>
<keyword>CHARACTERISTICS</keyword>
<keyword>CHARACTERS</keyword>
<keyword>CHECKED</keyword>
<keyword>CLASS_ORIGIN</keyword>
<keyword>COALESCE</keyword>
<keyword>COBOL</keyword>
<keyword>CODE_UNITS</keyword>
<keyword>COLLATION_CATALOG</keyword>
<keyword>COLLATION_NAME</keyword>
<keyword>COLLATION_SCHEMA</keyword>
<keyword>COLLATION</keyword>
<keyword>COLLECT</keyword>
<keyword>COLUMN_NAME</keyword>
<keyword>COMMAND_FUNCTION_CODE</keyword>
<keyword>COMMAND_FUNCTION</keyword>
<keyword>COMMITTED</keyword>
<keyword>CONDITION_NUMBER</keyword>
<keyword>CONDITION</keyword>
<keyword>CONNECTION_NAME</keyword>
<keyword>CONSTRAINT_CATALOG</keyword>
<keyword>CONSTRAINT_NAME</keyword>
<keyword>CONSTRAINT_SCHEMA</keyword>
<keyword>CONSTRAINTS</keyword>
<keyword>CONSTRUCTORS</keyword>
<keyword>CONTAINS</keyword>
<keyword>CONVERT</keyword>
<keyword>CORR</keyword>
<keyword>COUNT</keyword>
<keyword>COVAR_POP</keyword>
<keyword>COVAR_SAMP</keyword>
<keyword>CUME_DIST</keyword>
<keyword>CURRENT_COLLATION</keyword>
<keyword>CURSOR_NAME</keyword>
<keyword>DATA</keyword>
<keyword>DATETIME_INTERVAL_CODE</keyword>
<keyword>DATETIME_INTERVAL_PRECISION</keyword>
<keyword>DEFAULTS</keyword>
<keyword>DEFERRABLE</keyword>
<keyword>DEFERRED</keyword>
<keyword>DEFINED</keyword>
<keyword>DEFINER</keyword>
<keyword>DEGREE</keyword>
<keyword>DENSE_RANK</keyword>
<keyword>DEPTH</keyword>
<keyword>DERIVED</keyword>
<keyword>DESC</keyword>
<keyword>DESCRIPTOR</keyword>
<keyword>DIAGNOSTICS</keyword>
<keyword>DISPATCH</keyword>
<keyword>DOMAIN</keyword>
<keyword>DYNAMIC_FUNCTION_CODE</keyword>
<keyword>DYNAMIC_FUNCTION</keyword>
<keyword>EQUALS</keyword>
<keyword>EVERY</keyword>
<keyword>EXCEPTION</keyword>
<keyword>EXCLUDE</keyword>
<keyword>EXCLUDING</keyword>
<keyword>EXP</keyword>
<keyword>EXTRACT</keyword>
<keyword>FINAL</keyword>
<keyword>FIRST</keyword>
<keyword>FLOOR</keyword>
<keyword>FOLLOWING</keyword>
<keyword>FORTRAN</keyword>
<keyword>FOUND</keyword>
<keyword>FUSION</keyword>
<keyword>G</keyword>
<keyword>GENERAL</keyword>
<keyword>GO</keyword>
<keyword>GOTO</keyword>
<keyword>GRANTED</keyword>
<keyword>HIERARCHY</keyword>
<keyword>IMPLEMENTATION</keyword>
<keyword>INCLUDING</keyword>
<keyword>INCREMENT</keyword>
<keyword>INITIALLY</keyword>
<keyword>INSTANCE</keyword>
<keyword>INSTANTIABLE</keyword>
<keyword>INTERSECTION</keyword>
<keyword>INVOKER</keyword>
<keyword>ISOLATION</keyword>
<keyword>K</keyword>
<keyword>KEY_MEMBER</keyword>
<keyword>KEY_TYPE</keyword>
<keyword>KEY</keyword>
<keyword>LAST</keyword>
<keyword>LENGTH</keyword>
<keyword>LEVEL</keyword>
<keyword>LN</keyword>
<keyword>LOCATOR</keyword>
<keyword>LOWER</keyword>
<keyword>M</keyword>
<keyword>MAP</keyword>
<keyword>MATCHED</keyword>
<keyword>MAX</keyword>
<keyword>MAXVALUE</keyword>
<keyword>MESSAGE_LENGTH</keyword>
<keyword>MESSAGE_OCTET_LENGTH</keyword>
<keyword>MESSAGE_TEXT</keyword>
<keyword>MIN</keyword>
<keyword>MINVALUE</keyword>
<keyword>MOD</keyword>
<keyword>MORE</keyword>
<keyword>MUMPS</keyword>
<keyword>NAME</keyword>
<keyword>NAMES</keyword>
<keyword>NESTING</keyword>
<keyword>NEXT</keyword>
<keyword>NORMALIZE</keyword>
<keyword>NORMALIZED</keyword>
<keyword>NULLABLE</keyword>
<keyword>NULLIF</keyword>
<keyword>NULLS</keyword>
<keyword>NUMBER</keyword>
<keyword>OBJECT</keyword>
<keyword>OCTET_LENGTH</keyword>
<keyword>OCTETS</keyword>
<keyword>OPTION</keyword>
<keyword>OPTIONS</keyword>
<keyword>ORDERING</keyword>
<keyword>ORDINALITY</keyword>
<keyword>OTHERS</keyword>
<keyword>OVERLAY</keyword>
<keyword>OVERRIDING</keyword>
<keyword>PAD</keyword>
<keyword>PARAMETER_MODE</keyword>
<keyword>PARAMETER_NAME</keyword>
<keyword>PARAMETER_ORDINAL_POSITION</keyword>
<keyword>PARAMETER_SPECIFIC_CATALOG</keyword>
<keyword>PARAMETER_SPECIFIC_NAME</keyword>
<keyword>PARAMETER_SPECIFIC_SCHEMA</keyword>
<keyword>PARTIAL</keyword>
<keyword>PASCAL</keyword>
<keyword>PATH</keyword>
<keyword>PERCENT_RANK</keyword>
<keyword>PERCENTILE_CONT</keyword>
<keyword>PERCENTILE_DISC</keyword>
<keyword>PLACING</keyword>
<keyword>PLI</keyword>
<keyword>POSITION</keyword>
<keyword>POWER</keyword>
<keyword>PRECEDING</keyword>
<keyword>PRESERVE</keyword>
<keyword>PRIOR</keyword>
<keyword>PRIVILEGES</keyword>
<keyword>PUBLIC</keyword>
<keyword>RANK</keyword>
<keyword>READ</keyword>
<keyword>RELATIVE</keyword>
<keyword>REPEATABLE</keyword>
<keyword>RESTART</keyword>
<keyword>RETURNED_CARDINALITY</keyword>
<keyword>RETURNED_LENGTH</keyword>
<keyword>RETURNED_OCTET_LENGTH</keyword>
<keyword>RETURNED_SQLSTATE</keyword>
<keyword>ROLE</keyword>
<keyword>ROUTINE_CATALOG</keyword>
<keyword>ROUTINE_NAME</keyword>
<keyword>ROUTINE_SCHEMA</keyword>
<keyword>ROUTINE</keyword>
<keyword>ROW_COUNT</keyword>
<keyword>ROW_NUMBER</keyword>
<keyword>SCALE</keyword>
<keyword>SCHEMA_NAME</keyword>
<keyword>SCHEMA</keyword>
<keyword>SCOPE_CATALOG</keyword>
<keyword>SCOPE_NAME</keyword>
<keyword>SCOPE_SCHEMA</keyword>
<keyword>SECTION</keyword>
<keyword>SECURITY</keyword>
<keyword>SELF</keyword>
<keyword>SEQUENCE</keyword>
<keyword>SERIALIZABLE</keyword>
<keyword>SERVER_NAME</keyword>
<keyword>SESSION</keyword>
<keyword>SETS</keyword>
<keyword>SIMPLE</keyword>
<keyword>SIZE</keyword>
<keyword>SOURCE</keyword>
<keyword>SPACE</keyword>
<keyword>SPECIFIC_NAME</keyword>
<keyword>SQRT</keyword>
<keyword>STATE</keyword>
<keyword>STATEMENT</keyword>
<keyword>STDDEV_POP</keyword>
<keyword>STDDEV_SAMP</keyword>
<keyword>STRUCTURE</keyword>
<keyword>STYLE</keyword>
<keyword>SUBCLASS_ORIGIN</keyword>
<keyword>SUBSTRING</keyword>
<keyword>SUM</keyword>
<keyword>TABLE_NAME</keyword>
<keyword>TABLESAMPLE</keyword>
<keyword>TEMPORARY</keyword>
<keyword>TIES</keyword>
<keyword>TOP_LEVEL_COUNT</keyword>
<keyword>TRANSACTION_ACTIVE</keyword>
<keyword>TRANSACTION</keyword>
<keyword>TRANSACTIONS_COMMITTED</keyword>
<keyword>TRANSACTIONS_ROLLED_BACK</keyword>
<keyword>TRANSFORM</keyword>
<keyword>TRANSFORMS</keyword>
<keyword>TRANSLATE</keyword>
<keyword>TRIGGER_CATALOG</keyword>
<keyword>TRIGGER_NAME</keyword>
<keyword>TRIGGER_SCHEMA</keyword>
<keyword>TRIM</keyword>
<keyword>TYPE</keyword>
<keyword>UNBOUNDED</keyword>
<keyword>UNCOMMITTED</keyword>
<keyword>UNDER</keyword>
<keyword>UNNAMED</keyword>
<keyword>USAGE</keyword>
<keyword>USER_DEFINED_TYPE_CATALOG</keyword>
<keyword>USER_DEFINED_TYPE_CODE</keyword>
<keyword>USER_DEFINED_TYPE_NAME</keyword>
<keyword>USER_DEFINED_TYPE_SCHEMA</keyword>
<keyword>VIEW</keyword>
<keyword>WORK</keyword>
<keyword>WRITE</keyword>
<keyword>ZONE</keyword>
<!-- reserved -->
<keyword>ADD</keyword>
<keyword>ALL</keyword>
<keyword>ALLOCATE</keyword>
<keyword>ALTER</keyword>
<keyword>AND</keyword>
<keyword>ANY</keyword>
<keyword>ARE</keyword>
<keyword>ARRAY</keyword>
<keyword>AS</keyword>
<keyword>ASENSITIVE</keyword>
<keyword>ASYMMETRIC</keyword>
<keyword>AT</keyword>
<keyword>ATOMIC</keyword>
<keyword>AUTHORIZATION</keyword>
<keyword>BEGIN</keyword>
<keyword>BETWEEN</keyword>
<keyword>BIGINT</keyword>
<keyword>BINARY</keyword>
<keyword>BLOB</keyword>
<keyword>BOOLEAN</keyword>
<keyword>BOTH</keyword>
<keyword>BY</keyword>
<keyword>CALL</keyword>
<keyword>CALLED</keyword>
<keyword>CASCADED</keyword>
<keyword>CASE</keyword>
<keyword>CAST</keyword>
<keyword>CHAR</keyword>
<keyword>CHARACTER</keyword>
<keyword>CHECK</keyword>
<keyword>CLOB</keyword>
<keyword>CLOSE</keyword>
<keyword>COLLATE</keyword>
<keyword>COLUMN</keyword>
<keyword>COMMIT</keyword>
<keyword>CONNECT</keyword>
<keyword>CONSTRAINT</keyword>
<keyword>CONTINUE</keyword>
<keyword>CORRESPONDING</keyword>
<keyword>CREATE</keyword>
<keyword>CROSS</keyword>
<keyword>CUBE</keyword>
<keyword>CURRENT_DATE</keyword>
<keyword>CURRENT_DEFAULT_TRANSFORM_GROUP</keyword>
<keyword>CURRENT_PATH</keyword>
<keyword>CURRENT_ROLE</keyword>
<keyword>CURRENT_TIME</keyword>
<keyword>CURRENT_TIMESTAMP</keyword>
<keyword>CURRENT_TRANSFORM_GROUP_FOR_TYPE</keyword>
<keyword>CURRENT_USER</keyword>
<keyword>CURRENT</keyword>
<keyword>CURSOR</keyword>
<keyword>CYCLE</keyword>
<keyword>DATE</keyword>
<keyword>DAY</keyword>
<keyword>DEALLOCATE</keyword>
<keyword>DEC</keyword>
<keyword>DECIMAL</keyword>
<keyword>DECLARE</keyword>
<keyword>DEFAULT</keyword>
<keyword>DELETE</keyword>
<keyword>DEREF</keyword>
<keyword>DESCRIBE</keyword>
<keyword>DETERMINISTIC</keyword>
<keyword>DISCONNECT</keyword>
<keyword>DISTINCT</keyword>
<keyword>DOUBLE</keyword>
<keyword>DROP</keyword>
<keyword>DYNAMIC</keyword>
<keyword>EACH</keyword>
<keyword>ELEMENT</keyword>
<keyword>ELSE</keyword>
<keyword>END</keyword>
<keyword>END-EXEC</keyword>
<keyword>ESCAPE</keyword>
<keyword>EXCEPT</keyword>
<keyword>EXEC</keyword>
<keyword>EXECUTE</keyword>
<keyword>EXISTS</keyword>
<keyword>EXTERNAL</keyword>
<keyword>FALSE</keyword>
<keyword>FETCH</keyword>
<keyword>FILTER</keyword>
<keyword>FLOAT</keyword>
<keyword>FOR</keyword>
<keyword>FOREIGN</keyword>
<keyword>FREE</keyword>
<keyword>FROM</keyword>
<keyword>FULL</keyword>
<keyword>FUNCTION</keyword>
<keyword>GET</keyword>
<keyword>GLOBAL</keyword>
<keyword>GRANT</keyword>
<keyword>GROUP</keyword>
<keyword>GROUPING</keyword>
<keyword>HAVING</keyword>
<keyword>HOLD</keyword>
<keyword>HOUR</keyword>
<keyword>IDENTITY</keyword>
<keyword>IMMEDIATE</keyword>
<keyword>IN</keyword>
<keyword>INDICATOR</keyword>
<keyword>INNER</keyword>
<keyword>INOUT</keyword>
<keyword>INPUT</keyword>
<keyword>INSENSITIVE</keyword>
<keyword>INSERT</keyword>
<keyword>INT</keyword>
<keyword>INTEGER</keyword>
<keyword>INTERSECT</keyword>
<keyword>INTERVAL</keyword>
<keyword>INTO</keyword>
<keyword>IS</keyword>
<keyword>ISOLATION</keyword>
<keyword>JOIN</keyword>
<keyword>LANGUAGE</keyword>
<keyword>LARGE</keyword>
<keyword>LATERAL</keyword>
<keyword>LEADING</keyword>
<keyword>LEFT</keyword>
<keyword>LIKE</keyword>
<keyword>LOCAL</keyword>
<keyword>LOCALTIME</keyword>
<keyword>LOCALTIMESTAMP</keyword>
<keyword>MATCH</keyword>
<keyword>MEMBER</keyword>
<keyword>MERGE</keyword>
<keyword>METHOD</keyword>
<keyword>MINUTE</keyword>
<keyword>MODIFIES</keyword>
<keyword>MODULE</keyword>
<keyword>MONTH</keyword>
<keyword>MULTISET</keyword>
<keyword>NATIONAL</keyword>
<keyword>NATURAL</keyword>
<keyword>NCHAR</keyword>
<keyword>NCLOB</keyword>
<keyword>NEW</keyword>
<keyword>NO</keyword>
<keyword>NONE</keyword>
<keyword>NOT</keyword>
<keyword>NULL</keyword>
<keyword>NUMERIC</keyword>
<keyword>OF</keyword>
<keyword>OLD</keyword>
<keyword>ON</keyword>
<keyword>ONLY</keyword>
<keyword>OPEN</keyword>
<keyword>OR</keyword>
<keyword>ORDER</keyword>
<keyword>OUT</keyword>
<keyword>OUTER</keyword>
<keyword>OUTPUT</keyword>
<keyword>OVER</keyword>
<keyword>OVERLAPS</keyword>
<keyword>PARAMETER</keyword>
<keyword>PARTITION</keyword>
<keyword>PRECISION</keyword>
<keyword>PREPARE</keyword>
<keyword>PRIMARY</keyword>
<keyword>PROCEDURE</keyword>
<keyword>RANGE</keyword>
<keyword>READS</keyword>
<keyword>REAL</keyword>
<keyword>RECURSIVE</keyword>
<keyword>REF</keyword>
<keyword>REFERENCES</keyword>
<keyword>REFERENCING</keyword>
<keyword>REGR_AVGX</keyword>
<keyword>REGR_AVGY</keyword>
<keyword>REGR_COUNT</keyword>
<keyword>REGR_INTERCEPT</keyword>
<keyword>REGR_R2</keyword>
<keyword>REGR_SLOPE</keyword>
<keyword>REGR_SXX</keyword>
<keyword>REGR_SXY</keyword>
<keyword>REGR_SYY</keyword>
<keyword>RELEASE</keyword>
<keyword>RESULT</keyword>
<keyword>RETURN</keyword>
<keyword>RETURNS</keyword>
<keyword>REVOKE</keyword>
<keyword>RIGHT</keyword>
<keyword>ROLLBACK</keyword>
<keyword>ROLLUP</keyword>
<keyword>ROW</keyword>
<keyword>ROWS</keyword>
<keyword>SAVEPOINT</keyword>
<keyword>SCROLL</keyword>
<keyword>SEARCH</keyword>
<keyword>SECOND</keyword>
<keyword>SELECT</keyword>
<keyword>SENSITIVE</keyword>
<keyword>SESSION_USER</keyword>
<keyword>SET</keyword>
<keyword>SIMILAR</keyword>
<keyword>SMALLINT</keyword>
<keyword>SOME</keyword>
<keyword>SPECIFIC</keyword>
<keyword>SPECIFICTYPE</keyword>
<keyword>SQL</keyword>
<keyword>SQLEXCEPTION</keyword>
<keyword>SQLSTATE</keyword>
<keyword>SQLWARNING</keyword>
<keyword>START</keyword>
<keyword>STATIC</keyword>
<keyword>SUBMULTISET</keyword>
<keyword>SYMMETRIC</keyword>
<keyword>SYSTEM_USER</keyword>
<keyword>SYSTEM</keyword>
<keyword>TABLE</keyword>
<keyword>THEN</keyword>
<keyword>TIME</keyword>
<keyword>TIMESTAMP</keyword>
<keyword>TIMEZONE_HOUR</keyword>
<keyword>TIMEZONE_MINUTE</keyword>
<keyword>TO</keyword>
<keyword>TRAILING</keyword>
<keyword>TRANSLATION</keyword>
<keyword>TREAT</keyword>
<keyword>TRIGGER</keyword>
<keyword>TRUE</keyword>
<keyword>UESCAPE</keyword>
<keyword>UNION</keyword>
<keyword>UNIQUE</keyword>
<keyword>UNKNOWN</keyword>
<keyword>UNNEST</keyword>
<keyword>UPDATE</keyword>
<keyword>UPPER</keyword>
<keyword>USER</keyword>
<keyword>USING</keyword>
<keyword>VALUE</keyword>
<keyword>VALUES</keyword>
<keyword>VAR_POP</keyword>
<keyword>VAR_SAMP</keyword>
<keyword>VARCHAR</keyword>
<keyword>VARYING</keyword>
<keyword>WHEN</keyword>
<keyword>WHENEVER</keyword>
<keyword>WHERE</keyword>
<keyword>WIDTH_BUCKET</keyword>
<keyword>WINDOW</keyword>
<keyword>WITH</keyword>
<keyword>WITHIN</keyword>
<keyword>WITHOUT</keyword>
<keyword>YEAR</keyword>
</highlighter>
</highlighters>


@@ -1,47 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="annotation">
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>true</keyword>
<keyword>false</keyword>
</highlighter>
<highlighter type="word">
<word>{</word>
<word>}</word>
<word>,</word>
<word>[</word>
<word>]</word>
<style>keyword</style>
</highlighter>
<highlighter type="regex">
<pattern>^(---)$</pattern>
<style>comment</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(.+?)(?==|:)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>
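
This definition extends the earlier brace-and-keyword one with two regex rules, which is what makes it usable for YAML: a line consisting solely of `---` (the document separator) is styled as a comment, and keys are captured up to the first `=` or `:`, exactly as in the properties definition above. A hedged sketch with made-up keys (output element names assumed; note that leading indentation is part of the key match):

<!-- input:
     ---
     spring:
       profiles: default
-->
<xslthl:comment>---</xslthl:comment>
<xslthl:attribute>spring</xslthl:attribute>:
<xslthl:attribute>  profiles</xslthl:attribute>: default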


@@ -1,599 +0,0 @@
/* Javadoc style sheet */
/*
Overall document style
*/
@import url('resources/fonts/dejavu.css');
body {
background-color:#ffffff;
color:#353833;
font-family:'DejaVu Sans', Arial, Helvetica, sans-serif;
font-size:14px;
margin:0;
}
a:link, a:visited {
text-decoration:none;
color:#4A6782;
}
a:hover, a:focus {
text-decoration:none;
color:#bb7a2a;
}
a:active {
text-decoration:none;
color:#4A6782;
}
a[name] {
color:#353833;
}
a[name]:hover {
text-decoration:none;
color:#353833;
}
pre {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
}
h1 {
font-size:20px;
}
h2 {
font-size:18px;
}
h3 {
font-size:16px;
font-style:italic;
}
h4 {
font-size:13px;
}
h5 {
font-size:12px;
}
h6 {
font-size:11px;
}
ul {
list-style-type:disc;
}
code, tt {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
padding-top:4px;
margin-top:8px;
line-height:1.4em;
}
dt code {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
padding-top:4px;
}
table tr td dt code {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
vertical-align:top;
padding-top:4px;
}
sup {
font-size:8px;
}
/*
Document title and Copyright styles
*/
.clear {
clear:both;
height:0px;
overflow:hidden;
}
.aboutLanguage {
float:right;
padding:0px 21px;
font-size:11px;
z-index:200;
margin-top:-9px;
}
.legalCopy {
margin-left:.5em;
}
.bar a, .bar a:link, .bar a:visited, .bar a:active {
color:#FFFFFF;
text-decoration:none;
}
.bar a:hover, .bar a:focus {
color:#bb7a2a;
}
.tab {
background-color:#0066FF;
color:#ffffff;
padding:8px;
width:5em;
font-weight:bold;
}
/*
Navigation bar styles
*/
.bar {
background-color:#4D7A97;
color:#FFFFFF;
padding:.8em .5em .4em .8em;
height:auto;/*height:1.8em;*/
font-size:11px;
margin:0;
}
.topNav {
background-color:#4D7A97;
color:#FFFFFF;
float:left;
padding:0;
width:100%;
clear:right;
height:2.8em;
padding-top:10px;
overflow:hidden;
font-size:12px;
}
.bottomNav {
margin-top:10px;
background-color:#4D7A97;
color:#FFFFFF;
float:left;
padding:0;
width:100%;
clear:right;
height:2.8em;
padding-top:10px;
overflow:hidden;
font-size:12px;
}
.subNav {
background-color:#dee3e9;
float:left;
width:100%;
overflow:hidden;
font-size:12px;
}
.subNav div {
clear:left;
float:left;
padding:0 0 5px 6px;
text-transform:uppercase;
}
ul.navList, ul.subNavList {
float:left;
margin:0 25px 0 0;
padding:0;
}
ul.navList li{
list-style:none;
float:left;
padding: 5px 6px;
text-transform:uppercase;
}
ul.subNavList li{
list-style:none;
float:left;
}
.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited {
color:#FFFFFF;
text-decoration:none;
text-transform:uppercase;
}
.topNav a:hover, .bottomNav a:hover {
text-decoration:none;
color:#bb7a2a;
text-transform:uppercase;
}
.navBarCell1Rev {
background-color:#F8981D;
color:#253441;
margin: auto 5px;
}
.skipNav {
position:absolute;
top:auto;
left:-9999px;
overflow:hidden;
}
/*
Page header and footer styles
*/
.header, .footer {
clear:both;
margin:0 20px;
padding:5px 0 0 0;
}
.indexHeader {
margin:10px;
position:relative;
}
.indexHeader span{
margin-right:15px;
}
.indexHeader h1 {
font-size:13px;
}
.title {
color:#2c4557;
margin:10px 0;
}
.subTitle {
margin:5px 0 0 0;
}
.header ul {
margin:0 0 15px 0;
padding:0;
}
.footer ul {
margin:20px 0 5px 0;
}
.header ul li, .footer ul li {
list-style:none;
font-size:13px;
}
/*
Heading styles
*/
div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 {
background-color:#dee3e9;
border:1px solid #d0d9e0;
margin:0 0 6px -8px;
padding:7px 5px;
}
ul.blockList ul.blockList ul.blockList li.blockList h3 {
background-color:#dee3e9;
border:1px solid #d0d9e0;
margin:0 0 6px -8px;
padding:7px 5px;
}
ul.blockList ul.blockList li.blockList h3 {
padding:0;
margin:15px 0;
}
ul.blockList li.blockList h2 {
padding:0px 0 20px 0;
}
/*
Page layout container styles
*/
.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer {
clear:both;
padding:10px 20px;
position:relative;
}
.indexContainer {
margin:10px;
position:relative;
font-size:12px;
}
.indexContainer h2 {
font-size:13px;
padding:0 0 3px 0;
}
.indexContainer ul {
margin:0;
padding:0;
}
.indexContainer ul li {
list-style:none;
padding-top:2px;
}
.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt {
font-size:12px;
font-weight:bold;
margin:10px 0 0 0;
color:#4E4E4E;
}
.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd {
margin:5px 0 10px 0px;
font-size:14px;
font-family:'DejaVu Sans Mono',monospace;
}
.serializedFormContainer dl.nameValue dt {
margin-left:1px;
font-size:1.1em;
display:inline;
font-weight:bold;
}
.serializedFormContainer dl.nameValue dd {
margin:0 0 0 1px;
font-size:1.1em;
display:inline;
}
/*
List styles
*/
ul.horizontal li {
display:inline;
font-size:0.9em;
}
ul.inheritance {
margin:0;
padding:0;
}
ul.inheritance li {
display:inline;
list-style:none;
}
ul.inheritance li ul.inheritance {
margin-left:15px;
padding-left:15px;
padding-top:1px;
}
ul.blockList, ul.blockListLast {
margin:10px 0 10px 0;
padding:0;
}
ul.blockList li.blockList, ul.blockListLast li.blockList {
list-style:none;
margin-bottom:15px;
line-height:1.4;
}
ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast li.blockList {
padding:0px 20px 5px 10px;
border:1px solid #ededed;
background-color:#f8f8f8;
}
ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList {
padding:0 0 5px 8px;
background-color:#ffffff;
border:none;
}
ul.blockList ul.blockList ul.blockList ul.blockList li.blockList {
margin-left:0;
padding-left:0;
padding-bottom:15px;
border:none;
}
ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast {
list-style:none;
border-bottom:none;
padding-bottom:0;
}
table tr td dl, table tr td dl dt, table tr td dl dd {
margin-top:0;
margin-bottom:1px;
}
/*
Table styles
*/
.overviewSummary, .memberSummary, .typeSummary, .useSummary, .constantsSummary, .deprecatedSummary {
width:100%;
border-left:1px solid #EEE;
border-right:1px solid #EEE;
border-bottom:1px solid #EEE;
}
.overviewSummary, .memberSummary {
padding:0px;
}
.overviewSummary caption, .memberSummary caption, .typeSummary caption,
.useSummary caption, .constantsSummary caption, .deprecatedSummary caption {
position:relative;
text-align:left;
background-repeat:no-repeat;
color:#253441;
font-weight:bold;
clear:none;
overflow:hidden;
padding:0px;
padding-top:10px;
padding-left:1px;
margin:0px;
white-space:pre;
}
.overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link,
.useSummary caption a:link, .constantsSummary caption a:link, .deprecatedSummary caption a:link,
.overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover,
.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover,
.overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active,
.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active,
.overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited,
.useSummary caption a:visited, .constantsSummary caption a:visited, .deprecatedSummary caption a:visited {
color:#FFFFFF;
}
.overviewSummary caption span, .memberSummary caption span, .typeSummary caption span,
.useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
padding-bottom:7px;
display:inline-block;
float:left;
background-color:#F8981D;
border: none;
height:16px;
}
.memberSummary caption span.activeTableTab span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
margin-right:3px;
display:inline-block;
float:left;
background-color:#F8981D;
height:16px;
}
.memberSummary caption span.tableTab span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
margin-right:3px;
display:inline-block;
float:left;
background-color:#4D7A97;
height:16px;
}
.memberSummary caption span.tableTab, .memberSummary caption span.activeTableTab {
padding-top:0px;
padding-left:0px;
padding-right:0px;
background-image:none;
float:none;
display:inline;
}
.overviewSummary .tabEnd, .memberSummary .tabEnd, .typeSummary .tabEnd,
.useSummary .tabEnd, .constantsSummary .tabEnd, .deprecatedSummary .tabEnd {
display:none;
width:5px;
position:relative;
float:left;
background-color:#F8981D;
}
.memberSummary .activeTableTab .tabEnd {
display:none;
width:5px;
margin-right:3px;
position:relative;
float:left;
background-color:#F8981D;
}
.memberSummary .tableTab .tabEnd {
display:none;
width:5px;
margin-right:3px;
position:relative;
background-color:#4D7A97;
float:left;
}
.overviewSummary td, .memberSummary td, .typeSummary td,
.useSummary td, .constantsSummary td, .deprecatedSummary td {
text-align:left;
padding:0px 0px 12px 10px;
width:100%;
}
th.colOne, th.colFirst, th.colLast, .useSummary th, .constantsSummary th,
td.colOne, td.colFirst, td.colLast, .useSummary td, .constantsSummary td{
vertical-align:top;
padding-right:0px;
padding-top:8px;
padding-bottom:3px;
}
th.colFirst, th.colLast, th.colOne, .constantsSummary th {
background:#dee3e9;
text-align:left;
padding:8px 3px 3px 7px;
}
td.colFirst, th.colFirst {
white-space:nowrap;
font-size:13px;
}
td.colLast, th.colLast {
font-size:13px;
}
td.colOne, th.colOne {
font-size:13px;
}
.overviewSummary td.colFirst, .overviewSummary th.colFirst,
.overviewSummary td.colOne, .overviewSummary th.colOne,
.memberSummary td.colFirst, .memberSummary th.colFirst,
.memberSummary td.colOne, .memberSummary th.colOne,
.typeSummary td.colFirst{
width:25%;
vertical-align:top;
}
td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover {
font-weight:bold;
}
.tableSubHeadingColor {
background-color:#EEEEFF;
}
.altColor {
background-color:#FFFFFF;
}
.rowColor {
background-color:#EEEEEF;
}
/*
Content styles
*/
.description pre {
margin-top:0;
}
.deprecatedContent {
margin:0;
padding:10px 0;
}
.docSummary {
padding:0;
}
ul.blockList ul.blockList ul.blockList li.blockList h3 {
font-style:normal;
}
div.block {
font-size:14px;
font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif;
}
td.colLast div {
padding-top:0px;
}
td.colLast a {
padding-bottom:3px;
}
/*
Formatting effect styles
*/
.sourceLineNo {
color:green;
padding:0 30px 0 0;
}
h1.hidden {
visibility:hidden;
overflow:hidden;
font-size:10px;
}
.block {
display:block;
margin:3px 10px 2px 0px;
color:#474747;
}
.deprecatedLabel, .descfrmTypeLabel, .memberNameLabel, .memberNameLink,
.overrideSpecifyLabel, .packageHierarchyLabel, .paramLabel, .returnLabel,
.seeLabel, .simpleTagLabel, .throwsLabel, .typeNameLabel, .typeNameLink {
font-weight:bold;
}
.deprecationComment, .emphasizedPhrase, .interfaceName {
font-style:italic;
}
div.block div.block span.deprecationComment, div.block div.block span.emphasizedPhrase,
div.block div.block span.interfaceName {
font-style:normal;
}
div.contentContainer ul.blockList li.blockList h2{
padding-bottom:0px;
}
/*
Spring
*/
pre.code {
background-color: #F8F8F8;
border: 1px solid #CCCCCC;
border-radius: 3px 3px 3px 3px;
overflow: auto;
padding: 10px;
margin: 4px 20px 2px 0px;
}
pre.code code, pre.code code * {
font-size: 1em;
}
pre.code code, pre.code code * {
padding: 0 !important;
margin: 0 !important;
}

View File

@@ -1,28 +0,0 @@
<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:mvn="http://maven.apache.org/POM/4.0.0"
version="1.0">
<xsl:output method="text" encoding="UTF-8" indent="no"/>
<xsl:template match="/">
<xsl:text>|===&#xa;</xsl:text>
<xsl:text>| Group ID | Artifact ID | Version&#xa;</xsl:text>
<xsl:for-each select="//mvn:dependency">
<xsl:sort select="mvn:groupId"/>
<xsl:sort select="mvn:artifactId"/>
<xsl:text>&#xa;</xsl:text>
<xsl:text>| `</xsl:text>
<xsl:copy-of select="mvn:groupId"/>
<xsl:text>`&#xa;</xsl:text>
<xsl:text>| `</xsl:text>
<xsl:copy-of select="mvn:artifactId"/>
<xsl:text>`&#xa;</xsl:text>
<xsl:text>| </xsl:text>
<xsl:copy-of select="mvn:version"/>
<xsl:text>&#xa;</xsl:text>
</xsl:for-each>
<xsl:text>|===</xsl:text>
</xsl:template>
</xsl:stylesheet>
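
The stylesheet above (deleted in this changeset) flattens a POM's dependency entries into an AsciiDoc |=== table. As a hedged sketch of how such a stylesheet can be applied with the standard JAXP API — the file names here are placeholders, not part of this build:

import java.io.File;

import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

public class PomDependencyTable {

    public static void main(String[] args) throws Exception {
        // Placeholder file names, for illustration only.
        Transformer transformer = TransformerFactory.newInstance()
                .newTransformer(new StreamSource(new File("dependencies.xsl")));
        // The stylesheet declares output method="text", so the result is the
        // plain AsciiDoc table shown above.
        transformer.transform(new StreamSource(new File("pom.xml")),
                new StreamResult(new File("dependencies.adoc")));
    }
}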

View File

@@ -0,0 +1,62 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
<description>Kafka related test classes</description>
<properties>
<kafka.version>0.8.2.1</kafka.version>
<curator.version>2.6.0</curator.version>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-test-support-internal</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-logging</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>${kafka.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<version>${kafka.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.version}</version>
<optional>true</optional>
</dependency>
<!-- rabbit -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-amqp</artifactId>
<optional>true</optional>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,108 @@
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.test.junit.kafka;
import java.io.File;
import java.net.InetSocketAddress;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer;
import kafka.utils.TestUtils$;
import kafka.utils.Utils$;
/**
* A port of kafka.zk.EmbeddedZookeeper, compatible with the ZooKeeper 3.4 API.
*
* @author Marius Bogoevici
*/
public class EmbeddedZookeeper {
private String connectString;
private File snapshotDir = TestUtils$.MODULE$.tempDir();
private File logDir = TestUtils$.MODULE$.tempDir();
private int tickTime = 500;
private final ZooKeeperServer zookeeper;
private int port;
private final NIOServerCnxnFactory factory;
public EmbeddedZookeeper(String connectString) throws Exception {
this.connectString = connectString;
port = Integer.parseInt(connectString.split(":")[1]);
zookeeper = new ZooKeeperServer(snapshotDir, logDir, tickTime);
factory = new NIOServerCnxnFactory();
factory.configure(new InetSocketAddress("127.0.0.1", port), 100);
factory.startup(zookeeper);
}
public String getConnectString() {
return connectString;
}
public File getSnapshotDir() {
return snapshotDir;
}
public File getLogDir() {
return logDir;
}
public int getTickTime() {
return tickTime;
}
public ZooKeeperServer getZookeeper() {
return zookeeper;
}
public int getPort() {
return port;
}
public void shutdown() {
try {
zookeeper.shutdown();
}
catch (Exception e) {
// ignore exception
}
try {
factory.shutdown();
}
catch (Exception e) {
// ignore exception
}
try {
Utils$.MODULE$.rm(logDir);
}
catch (Exception e) {
// ignore exception
}
try {
Utils$.MODULE$.rm(snapshotDir);
}
catch (Exception e) {
// ignore exception
}
}
}
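
A minimal usage sketch for the class above. The port is an assumption (callers in this changeset obtain a free one via SocketUtils); the constructor parses it from the connect string and starts the server immediately:

public class EmbeddedZookeeperDemo {

    public static void main(String[] args) throws Exception {
        // Illustrative port only; pick a free one in real tests.
        EmbeddedZookeeper zookeeper = new EmbeddedZookeeper("127.0.0.1:2181");
        try {
            System.out.println("ZooKeeper listening at " + zookeeper.getConnectString());
        }
        finally {
            // Stops the server and removes the temporary snapshot/log dirs.
            zookeeper.shutdown();
        }
    }
}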

View File

@@ -0,0 +1,203 @@
/*
* Copyright 2014-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.test.junit.kafka;
import java.util.Properties;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.exception.ZkInterruptedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Rule;
import org.springframework.cloud.stream.test.junit.AbstractExternalResourceTestSupport;
import org.springframework.util.SocketUtils;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.SystemTime$;
import kafka.utils.TestUtils;
import kafka.utils.Utils;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
/**
* JUnit {@link Rule} that starts an embedded Kafka server (with an associated ZooKeeper).
*
* @author Ilayaperumal Gopinathan
* @author Marius Bogoevici
* @since 1.1
*/
public class KafkaTestSupport extends AbstractExternalResourceTestSupport<String> {
private static final Log log = LogFactory.getLog(KafkaTestSupport.class);
private static final String SCS_KAFKA_TEST_EMBEDDED = "SCS_KAFKA_TEST_EMBEDDED";
public static final boolean defaultEmbedded;
private static final String DEFAULT_ZOOKEEPER_CONNECT = "localhost:2181";
private static final String DEFAULT_KAFKA_CONNECT = "localhost:9092";
private ZkClient zkClient;
private EmbeddedZookeeper zookeeper;
private KafkaServer kafkaServer;
public final boolean embedded;
private final Properties brokerConfig = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
// caches previous failures to reach the external server - preventing repeated retries
private static boolean hasFailedAlready;
static {
// check if either the environment or Java property is set to use embedded tests
// unless the property is explicitly set to false, default to embedded
defaultEmbedded = !("false".equals(System.getenv(SCS_KAFKA_TEST_EMBEDDED))
|| "false".equals(System.getProperty(SCS_KAFKA_TEST_EMBEDDED)));
}
public KafkaTestSupport() {
this(defaultEmbedded);
}
public KafkaTestSupport(boolean embedded) {
super("KAFKA");
this.embedded = embedded;
log.info(String.format("Testing with %s Kafka broker", embedded ? "embedded" : "external"));
}
public KafkaServer getKafkaServer() {
return kafkaServer;
}
public String getZkConnectString() {
if (embedded) {
return zookeeper.getConnectString();
}
else {
return DEFAULT_ZOOKEEPER_CONNECT;
}
}
public ZkClient getZkClient() {
return this.zkClient;
}
public String getBrokerAddress() {
if (embedded) {
return kafkaServer.config().hostName() + ":" + kafkaServer.config().port();
}
else {
return DEFAULT_KAFKA_CONNECT;
}
}
@Override
protected void obtainResource() throws Exception {
if (!hasFailedAlready) {
if (embedded) {
try {
log.debug("Starting Zookeeper");
zookeeper = new EmbeddedZookeeper("127.0.0.1:" + SocketUtils.findAvailableTcpPort());
log.debug("Started Zookeeper at " + zookeeper.getConnectString());
try {
int zkConnectionTimeout = 10000;
int zkSessionTimeout = 10000;
zkClient = new ZkClient(getZkConnectString(), zkSessionTimeout, zkConnectionTimeout,
ZKStringSerializer$.MODULE$);
}
catch (Exception e) {
zookeeper.shutdown();
throw e;
}
try {
log.debug("Creating Kafka server");
Properties brokerConfigProperties = brokerConfig;
brokerConfig.put("zookeeper.connect", zookeeper.getConnectString());
brokerConfig.put("auto.create.topics.enable", "false");
brokerConfig.put("delete.topic.enable", "true");
kafkaServer = TestUtils.createServer(new KafkaConfig(brokerConfigProperties),
SystemTime$.MODULE$);
log.debug("Created Kafka server at " + kafkaServer.config().hostName() + ":"
+ kafkaServer.config().port());
}
catch (Exception e) {
zookeeper.shutdown();
zkClient.close();
throw e;
}
}
catch (Exception e) {
hasFailedAlready = true;
throw e;
}
}
else {
this.zkClient = new ZkClient(DEFAULT_ZOOKEEPER_CONNECT, 10000, 10000, ZKStringSerializer$.MODULE$);
if (ZkUtils.getAllBrokersInCluster(zkClient).size() == 0) {
hasFailedAlready = true;
throw new RuntimeException("Kafka server not available");
}
}
}
else {
throw new RuntimeException("Kafka server not available");
}
}
@Override
protected void cleanupResource() throws Exception {
if (embedded) {
try {
kafkaServer.shutdown();
}
catch (Exception e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
try {
Utils.rm(kafkaServer.config().logDirs());
}
catch (Exception e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
}
try {
zkClient.close();
}
catch (ZkInterruptedException e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
if (embedded) {
try {
zookeeper.shutdown();
}
catch (Exception e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
}
}
}
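
Assuming the AbstractExternalResourceTestSupport base class acts as a JUnit TestRule (as its use with rule annotations elsewhere in Spring Cloud Stream suggests), a hedged usage sketch looks roughly like this:

import org.junit.ClassRule;
import org.junit.Test;

public class ExampleBinderTests {

    // Defaults to an embedded broker unless SCS_KAFKA_TEST_EMBEDDED is "false"
    // in the environment or as a system property.
    @ClassRule
    public static KafkaTestSupport kafkaTestSupport = new KafkaTestSupport();

    @Test
    public void brokerIsAvailable() {
        // Illustrative only: point producers/consumers at these addresses.
        System.out.println(kafkaTestSupport.getBrokerAddress());
        System.out.println(kafkaTestSupport.getZkConnectString());
    }
}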

View File

@@ -0,0 +1,76 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.test.junit.kafka;
import java.io.IOException;
import java.util.Properties;
import org.apache.curator.test.TestingServer;
import org.springframework.util.SocketUtils;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServerStartable;
import kafka.utils.TestUtils;
/**
* An embedded Kafka + ZooKeeper pair for use in tests.
*
* @author Eric Bottard
*/
public class TestKafkaCluster {
private KafkaServerStartable kafkaServer;
private TestingServer zkServer;
public TestKafkaCluster() {
try {
zkServer = new TestingServer(SocketUtils.findAvailableTcpPort());
}
catch (Exception e) {
throw new IllegalStateException(e);
}
KafkaConfig config = getKafkaConfig(zkServer.getConnectString());
kafkaServer = new KafkaServerStartable(config);
kafkaServer.startup();
}
private static KafkaConfig getKafkaConfig(final String zkConnectString) {
scala.collection.Iterator<Properties> propsI = TestUtils
.createBrokerConfigs(1, false).iterator();
assert propsI.hasNext();
Properties props = propsI.next();
assert props.containsKey("zookeeper.connect");
props.put("zookeeper.connect", zkConnectString);
return new KafkaConfig(props);
}
public String getKafkaBrokerString() {
return String.format("localhost:%d", kafkaServer.serverConfig().port());
}
public void stop() throws IOException {
kafkaServer.shutdown();
zkServer.stop();
}
public String getZkConnectString() {
return zkServer.getConnectString();
}
}
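
A minimal sketch of driving the cluster above; the usage comments are assumptions, not taken from this changeset:

public class TestKafkaClusterDemo {

    public static void main(String[] args) throws Exception {
        // The constructor starts ZooKeeper (on a free port) and one Kafka broker.
        TestKafkaCluster cluster = new TestKafkaCluster();
        try {
            // Placeholder usage: wire producers/consumers to these addresses.
            System.out.println("brokers: " + cluster.getKafkaBrokerString());
            System.out.println("zookeeper: " + cluster.getZkConnectString());
        }
        finally {
            cluster.stop();
        }
    }
}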

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
@@ -10,14 +10,21 @@
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>2.0.0.M1</version>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<properties>
<kafka.version>0.9.0.1</kafka.version>
<spring-kafka.version>1.0.0.RELEASE</spring-kafka.version>
<spring-integration-kafka.version>2.0.0.BUILD-SNAPSHOT</spring-integration-kafka.version>
<rxjava-math.version>1.0.0</rxjava-math.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
<version>2.0.0.M1</version>
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
@@ -42,6 +49,34 @@
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
<scope>test</scope>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
<version>${spring-integration-kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro-compiler</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>${spring-kafka.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<scope>test</scope>
<version>${spring-kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
@@ -51,24 +86,13 @@
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>${spring-kafka.version}</version>
<groupId>io.reactivex</groupId>
<artifactId>rxjava</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
<version>${spring-integration-kafka.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<scope>test</scope>
<groupId>io.reactivex</groupId>
<artifactId>rxjava-math</artifactId>
<version>${rxjava-math.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
@@ -76,27 +100,36 @@
<classifier>test</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>3.0.2</version>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<version>${kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>
<classifier>test</classifier>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>

View File

@@ -1,68 +0,0 @@
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.env.EnvironmentPostProcessor;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.MapPropertySource;
/**
* An {@link EnvironmentPostProcessor} that sets some common configuration properties (log config, etc.) for the
* Kafka binder.
*
* @author Ilayaperumal Gopinathan
*/
public class KafkaBinderEnvironmentPostProcessor implements EnvironmentPostProcessor {
public final static String SPRING_KAFKA = "spring.kafka";
public final static String SPRING_KAFKA_PRODUCER = SPRING_KAFKA + ".producer";
public final static String SPRING_KAFKA_CONSUMER = SPRING_KAFKA + ".consumer";
public final static String SPRING_KAFKA_PRODUCER_KEY_SERIALIZER = SPRING_KAFKA_PRODUCER + "." + "keySerializer";
public final static String SPRING_KAFKA_PRODUCER_VALUE_SERIALIZER = SPRING_KAFKA_PRODUCER + "." + "valueSerializer";
public final static String SPRING_KAFKA_CONSUMER_KEY_DESERIALIZER = SPRING_KAFKA_CONSUMER + "." + "keyDeserializer";
public final static String SPRING_KAFKA_CONSUMER_VALUE_DESERIALIZER = SPRING_KAFKA_CONSUMER + "." + "valueDeserializer";
private static final String KAFKA_BINDER_DEFAULT_PROPERTIES = "kafkaBinderDefaultProperties";
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (!environment.getPropertySources().contains(KAFKA_BINDER_DEFAULT_PROPERTIES)) {
Map<String, Object> kafkaBinderDefaultProperties = new HashMap<>();
kafkaBinderDefaultProperties.put("logging.level.org.I0Itec.zkclient", "ERROR");
kafkaBinderDefaultProperties.put("logging.level.kafka.server.KafkaConfig", "ERROR");
kafkaBinderDefaultProperties.put("logging.level.kafka.admin.AdminClient.AdminConfig", "ERROR");
kafkaBinderDefaultProperties.put(SPRING_KAFKA_PRODUCER_KEY_SERIALIZER, ByteArraySerializer.class.getName());
kafkaBinderDefaultProperties.put(SPRING_KAFKA_PRODUCER_VALUE_SERIALIZER, ByteArraySerializer.class.getName());
kafkaBinderDefaultProperties.put(SPRING_KAFKA_CONSUMER_KEY_DESERIALIZER, ByteArrayDeserializer.class.getName());
kafkaBinderDefaultProperties.put(SPRING_KAFKA_CONSUMER_VALUE_DESERIALIZER, ByteArrayDeserializer.class.getName());
environment.getPropertySources().addLast(new MapPropertySource(KAFKA_BINDER_DEFAULT_PROPERTIES, kafkaBinderDefaultProperties));
}
}
}
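
In a Boot application this post-processor is picked up via META-INF/spring.factories. The following hedged sketch drives it by hand to show the effect; passing null for the SpringApplication argument relies on the implementation above not touching it:

import org.springframework.core.env.StandardEnvironment;

public class PostProcessorDemo {

    public static void main(String[] args) {
        StandardEnvironment environment = new StandardEnvironment();
        // The SpringApplication argument is unused by this implementation.
        new KafkaBinderEnvironmentPostProcessor().postProcessEnvironment(environment, null);
        // The defaults are added last, so explicit user-provided properties still win.
        System.out.println(environment.getProperty("spring.kafka.consumer.keyDeserializer"));
    }
}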

View File

@@ -1,74 +0,0 @@
/*
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.PartitionInfo;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.kafka.core.ConsumerFactory;
/**
* Health indicator for Kafka.
*
* @author Ilayaperumal Gopinathan
* @author Marius Bogoevici
* @author Henryk Konsek
*/
public class KafkaBinderHealthIndicator implements HealthIndicator {
private final KafkaMessageChannelBinder binder;
private final ConsumerFactory<?, ?> consumerFactory;
public KafkaBinderHealthIndicator(KafkaMessageChannelBinder binder,
ConsumerFactory<?, ?> consumerFactory) {
this.binder = binder;
this.consumerFactory = consumerFactory;
}
@Override
public Health health() {
try (Consumer<?, ?> metadataConsumer = consumerFactory.createConsumer()) {
Set<String> downMessages = new HashSet<>();
for (String topic : this.binder.getTopicsInUse().keySet()) {
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
for (PartitionInfo partitionInfo : partitionInfos) {
if (this.binder.getTopicsInUse().get(topic).getPartitionInfos().contains(partitionInfo)
&& partitionInfo.leader()
.id() == -1) {
downMessages.add(partitionInfo.toString());
}
}
}
if (downMessages.isEmpty()) {
return Health.up().build();
}
return Health.down().withDetail("The following partitions in use have no leaders", downMessages.toString())
.build();
}
catch (Exception e) {
return Health.down(e).build();
}
}
}
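
A hedged sketch of how the indicator might be wired; the actual binder configuration lives elsewhere in this repository, and the broker address below is illustrative:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

@Configuration
public class KafkaBinderHealthConfig {

    // The health check only needs a consumer that can fetch partition
    // metadata, so plain byte-array deserializers are sufficient.
    @Bean
    public KafkaBinderHealthIndicator kafkaBinderHealthIndicator(KafkaMessageChannelBinder binder) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
        return new KafkaBinderHealthIndicator(binder, new DefaultKafkaConsumerFactory<>(props));
    }
}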

View File

@@ -1,118 +0,0 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import org.apache.kafka.common.security.JaasUtils;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ContextRefreshedEvent;
import org.springframework.util.Assert;
/**
* @author Marius Bogoevici
*/
public class KafkaBinderJaasInitializerListener implements ApplicationListener<ContextRefreshedEvent>,
ApplicationContextAware, DisposableBean {
public static final String DEFAULT_ZK_LOGIN_CONTEXT_NAME = "Client";
private ApplicationContext applicationContext;
private final boolean ignoreJavaLoginConfigParamSystemProperty;
private final File placeholderJaasConfiguration;
public KafkaBinderJaasInitializerListener() throws IOException {
// we ignore the system property if it wasn't originally set at launch
this.ignoreJavaLoginConfigParamSystemProperty =
(System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM) == null);
this.placeholderJaasConfiguration = File.createTempFile("kafka-client-jaas-config-placeholder", "conf");
this.placeholderJaasConfiguration.deleteOnExit();
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
@Override
public void destroy() throws Exception {
if (this.ignoreJavaLoginConfigParamSystemProperty) {
System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
}
}
@Override
public void onApplicationEvent(ContextRefreshedEvent event) {
if (event.getSource() == this.applicationContext) {
KafkaBinderConfigurationProperties binderConfigurationProperties =
applicationContext.getBean(KafkaBinderConfigurationProperties.class);
// only use programmatic support if a file is not set via system property
if (ignoreJavaLoginConfigParamSystemProperty
&& binderConfigurationProperties.getJaas() != null) {
Map<String, AppConfigurationEntry[]> configurationEntries = new HashMap<>();
AppConfigurationEntry kafkaClientConfigurationEntry = new AppConfigurationEntry
(binderConfigurationProperties.getJaas().getLoginModule(),
binderConfigurationProperties.getJaas().getControlFlagValue(),
binderConfigurationProperties.getJaas().getOptions() != null ?
binderConfigurationProperties.getJaas().getOptions() :
Collections.<String, Object>emptyMap());
configurationEntries.put(JaasUtils.LOGIN_CONTEXT_CLIENT,
new AppConfigurationEntry[]{ kafkaClientConfigurationEntry });
Configuration.setConfiguration(new InternalConfiguration(configurationEntries));
// Workaround for a 0.9 client issue where even if the Configuration is set
// a system property check is performed.
// Since the Configuration already exists, this will be ignored.
if (this.placeholderJaasConfiguration != null) {
System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, this.placeholderJaasConfiguration.getAbsolutePath());
}
}
}
}
/**
* A {@link Configuration} set up programmatically by the Kafka binder.
*/
public static class InternalConfiguration extends Configuration {
private final Map<String, AppConfigurationEntry[]> configurationEntries;
public InternalConfiguration(Map<String, AppConfigurationEntry[]> configurationEntries) {
Assert.notNull(configurationEntries, "configurationEntries cannot be null");
Assert.notEmpty(configurationEntries, "configurationEntries cannot be empty");
this.configurationEntries = configurationEntries;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
return configurationEntries.get(name);
}
}
}
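
For reference, the programmatic registration performed in onApplicationEvent is roughly equivalent to the sketch below. The login module, control flag, and context name are illustrative; in the binder they come from the jaas.* configuration properties:

import java.util.Collections;
import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;

public class JaasSketch {

    public static void main(String[] args) {
        // Illustrative entry; real values come from KafkaBinderConfigurationProperties.
        AppConfigurationEntry entry = new AppConfigurationEntry(
                "com.sun.security.auth.module.Krb5LoginModule",
                AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
                Collections.<String, Object>emptyMap());
        Map<String, AppConfigurationEntry[]> entries = Collections.singletonMap(
                "KafkaClient", new AppConfigurationEntry[] { entry });
        // Install the in-memory JAAS configuration instead of a -Djava.security.auth.login.config file.
        Configuration.setConfiguration(
                new KafkaBinderJaasInitializerListener.InternalConfiguration(entries));
    }
}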

View File

@@ -1,126 +0,0 @@
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.actuate.endpoint.PublicMetrics;
import org.springframework.boot.actuate.metrics.Metric;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.util.ObjectUtils;
/**
* Metrics for Kafka binder.
*
* @author Henryk Konsek
*/
public class KafkaBinderMetrics implements PublicMetrics {
private final static Logger LOG = LoggerFactory.getLogger(KafkaBinderMetrics.class);
static final String METRIC_PREFIX = "spring.cloud.stream.binder.kafka";
private final KafkaMessageChannelBinder binder;
private final KafkaBinderConfigurationProperties binderConfigurationProperties;
private ConsumerFactory<?, ?> defaultConsumerFactory;
public KafkaBinderMetrics(KafkaMessageChannelBinder binder,
KafkaBinderConfigurationProperties binderConfigurationProperties,
ConsumerFactory<?, ?> defaultConsumerFactory) {
this.binder = binder;
this.binderConfigurationProperties = binderConfigurationProperties;
this.defaultConsumerFactory = defaultConsumerFactory;
}
public KafkaBinderMetrics(KafkaMessageChannelBinder binder,
KafkaBinderConfigurationProperties binderConfigurationProperties) {
this(binder, binderConfigurationProperties, null);
}
@Override
public Collection<Metric<?>> metrics() {
List<Metric<?>> metrics = new LinkedList<>();
for (Map.Entry<String, KafkaMessageChannelBinder.TopicInformation> topicInfo : this.binder.getTopicsInUse()
.entrySet()) {
if (!topicInfo.getValue().isConsumerTopic()) {
continue;
}
String topic = topicInfo.getKey();
String group = topicInfo.getValue().getConsumerGroup();
try (Consumer<?, ?> metadataConsumer = createConsumerFactory(group).createConsumer()) {
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
List<TopicPartition> topicPartitions = new LinkedList<>();
for (PartitionInfo partitionInfo : partitionInfos) {
topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
}
Map<TopicPartition, Long> endOffsets = metadataConsumer.endOffsets(topicPartitions);
long lag = 0;
for (Map.Entry<TopicPartition, Long> endOffset : endOffsets.entrySet()) {
OffsetAndMetadata current = metadataConsumer.committed(endOffset.getKey());
if (current != null) {
lag += endOffset.getValue() - current.offset();
}
else {
lag += endOffset.getValue();
}
}
metrics.add(new Metric<>(String.format("%s.%s.%s.lag", METRIC_PREFIX, group, topic), lag));
}
catch (Exception e) {
LOG.debug("Cannot generate metric for topic: " + topic, e);
}
}
return metrics;
}
private ConsumerFactory<?, ?> createConsumerFactory(String group) {
if (defaultConsumerFactory != null) {
return defaultConsumerFactory;
}
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
if (!ObjectUtils.isEmpty(binderConfigurationProperties.getConfiguration())) {
props.putAll(binderConfigurationProperties.getConfiguration());
}
if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
this.binderConfigurationProperties.getKafkaConnectionString());
}
props.put("group.id", group);
return new DefaultKafkaConsumerFactory<>(props);
}
}
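
The lag reported above is, per partition, the end offset minus the committed offset, with the full end offset counted when the group has never committed. A hedged standalone sketch of that arithmetic (topic, group, and broker address are placeholders; endOffsets requires a 0.10.1+ kafka-clients, which this class already calls):

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class LagSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder connection details, for illustration only.
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "my-group");
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0);
            Map<TopicPartition, Long> end = consumer.endOffsets(Collections.singletonList(tp));
            OffsetAndMetadata committed = consumer.committed(tp);
            // Lag = latest offset minus committed position; an uncommitted
            // group counts the whole partition as lag.
            long lag = committed != null ? end.get(tp) - committed.offset() : end.get(tp);
            System.out.println("lag=" + lag);
        }
    }
}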

View File

@@ -1,11 +1,11 @@
/*
* Copyright 2014-2017 the original author or authors.
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,66 +16,93 @@
package org.springframework.cloud.stream.binder.kafka;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.utils.Utils;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
import org.springframework.cloud.stream.provisioning.ProducerDestination;
import org.springframework.cloud.stream.binder.PartitionHandler;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.context.Lifecycle;
import org.springframework.expression.common.LiteralExpression;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.integration.core.MessageProducer;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.handler.AbstractMessageHandler;
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
import org.springframework.integration.kafka.outbound.KafkaProducerMessageHandler;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.AcknowledgingMessageListener;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ErrorHandler;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.listener.config.ContainerProperties;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TopicPartitionInitialOffset;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.MessagingException;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
import kafka.common.ErrorMapping;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import scala.collection.Seq;
/**
* A {@link Binder} that uses Kafka as the underlying middleware.
*
* @author Eric Bottard
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
@@ -83,24 +110,41 @@ import org.springframework.util.concurrent.ListenableFutureCallback;
* @author Gary Russell
* @author Mark Fisher
* @author Soby Chacko
* @author Henryk Konsek
* @author Doug Saus
*/
public class KafkaMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>, KafkaTopicProvisioner>
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties> {
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
ExtendedProducerProperties<KafkaProducerProperties>>
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties>,
DisposableBean {
private static final ByteArraySerializer BYTE_ARRAY_SERIALIZER = new ByteArraySerializer();
private static final ThreadFactory DAEMON_THREAD_FACTORY;
static {
CustomizableThreadFactory threadFactory = new CustomizableThreadFactory("kafka-binder-");
threadFactory.setDaemon(true);
DAEMON_THREAD_FACTORY = threadFactory;
}
private final KafkaBinderConfigurationProperties configurationProperties;
private ProducerListener<byte[], byte[]> producerListener;
private RetryOperations metadataRetryOperations;
private final Map<String, Collection<PartitionInfo>> topicsInUse = new HashMap<>();
// -------- Default values for properties -------
private ConsumerFactory<byte[], byte[]> consumerFactory;
private ProducerListener producerListener;
private volatile Producer<byte[], byte[]> dlqProducer;
private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();
private final Map<String, TopicInformation> topicsInUse = new HashMap<>();
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties,
KafkaTopicProvisioner provisioningProvider) {
super(false, headersToMap(configurationProperties), provisioningProvider);
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties) {
super(false, headersToMap(configurationProperties));
this.configurationProperties = configurationProperties;
}
@@ -120,16 +164,76 @@ public class KafkaMessageChannelBinder extends
return headersToMap;
}
/**
* Retry configuration for operations such as validating topic creation
* @param metadataRetryOperations the retry configuration
*/
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
this.metadataRetryOperations = metadataRetryOperations;
}
public void setExtendedBindingProperties(KafkaExtendedBindingProperties extendedBindingProperties) {
this.extendedBindingProperties = extendedBindingProperties;
}
public void setProducerListener(ProducerListener<byte[], byte[]> producerListener) {
@Override
public void onInit() throws Exception {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
props.put(ConsumerConfig.GROUP_ID_CONFIG, configurationProperties.getConsumerGroup());
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
"earliest");
Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
consumerFactory = new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
if (metadataRetryOperations == null) {
RetryTemplate retryTemplate = new RetryTemplate();
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
simpleRetryPolicy.setMaxAttempts(10);
retryTemplate.setRetryPolicy(simpleRetryPolicy);
ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
backOffPolicy.setInitialInterval(100);
backOffPolicy.setMultiplier(2);
backOffPolicy.setMaxInterval(1000);
retryTemplate.setBackOffPolicy(backOffPolicy);
metadataRetryOperations = retryTemplate;
}
}
@Override
public void destroy() throws Exception {
if (this.dlqProducer != null) {
this.dlqProducer.close();
this.dlqProducer = null;
}
}
public void setProducerListener(ProducerListener producerListener) {
this.producerListener = producerListener;
}
Map<String, TopicInformation> getTopicsInUse() {
return this.topicsInUse;
/**
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
*/
static void validateTopicName(String topicName) {
try {
byte[] utf8 = topicName.getBytes("UTF-8");
for (byte b : utf8) {
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|| (b == '-') || (b == '_'))) {
throw new IllegalArgumentException(
"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
}
}
}
catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Can't happen
}
}
@Override
@@ -142,235 +246,342 @@ public class KafkaMessageChannelBinder extends
return this.extendedBindingProperties.getExtendedProducerProperties(channelName);
}
@Override
protected MessageHandler createProducerMessageHandler(final ProducerDestination destination,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
final DefaultKafkaProducerFactory<byte[], byte[]> producerFB = getProducerFactory(producerProperties);
Collection<PartitionInfo> partitions = provisioningProvider.getPartitionsForTopic(
producerProperties.getPartitionCount(),
false,
new Callable<Collection<PartitionInfo>>() {
@Override
public Collection<PartitionInfo> call() throws Exception {
return producerFB.createProducer().partitionsFor(destination.getName());
}
});
this.topicsInUse.put(destination.getName(), new TopicInformation(null, partitions));
if (producerProperties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + destination.getName() + " is "
+ producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFB);
if (this.producerListener != null) {
kafkaTemplate.setProducerListener(this.producerListener);
}
return new ProducerConfigurationMessageHandler(kafkaTemplate, destination.getName(), producerProperties,
producerFB);
}
private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) {
Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.RETRIES_CONFIG, 0);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.ACKS_CONFIG, String.valueOf(this.configurationProperties.getRequiredAcks()));
if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
props.putAll(configurationProperties.getConfiguration());
}
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
}
if (ObjectUtils.isEmpty(props.get(ProducerConfig.BATCH_SIZE_CONFIG))) {
props.put(ProducerConfig.BATCH_SIZE_CONFIG,
String.valueOf(producerProperties.getExtension().getBufferSize()));
}
if (ObjectUtils.isEmpty(props.get(ProducerConfig.LINGER_MS_CONFIG))) {
props.put(ProducerConfig.LINGER_MS_CONFIG,
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
}
if (ObjectUtils.isEmpty(props.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))) {
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
producerProperties.getExtension().getCompressionType().toString());
}
if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
props.putAll(producerProperties.getExtension().getConfiguration());
}
return new DefaultKafkaProducerFactory<>(props);
Map<String, Collection<PartitionInfo>> getTopicsInUse() {
return this.topicsInUse;
}
@Override
@SuppressWarnings("unchecked")
protected MessageProducer createConsumerEndpoint(final ConsumerDestination destination, final String group,
final ExtendedConsumerProperties<KafkaConsumerProperties> extendedConsumerProperties) {
boolean anonymous = !StringUtils.hasText(group);
Assert.isTrue(!anonymous || !extendedConsumerProperties.getExtension().isEnableDlq(),
"DLQ support is not available for anonymous subscriptions");
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
final ConsumerFactory<?, ?> consumerFactory = createKafkaConsumerFactory(anonymous, consumerGroup,
extendedConsumerProperties);
int partitionCount = extendedConsumerProperties.getInstanceCount()
* extendedConsumerProperties.getConcurrency();
Collection<PartitionInfo> allPartitions = provisioningProvider.getPartitionsForTopic(partitionCount,
extendedConsumerProperties.getExtension().isAutoRebalanceEnabled(),
new Callable<Collection<PartitionInfo>>() {
@Override
public Collection<PartitionInfo> call() throws Exception {
return consumerFactory.createConsumer().partitionsFor(destination.getName());
}
});
protected Object createConsumerDestinationIfNecessary(String name, String group,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
validateTopicName(name);
if (properties.getInstanceCount() == 0) {
throw new IllegalArgumentException("Instance count cannot be zero");
}
Collection<PartitionInfo> allPartitions = ensureTopicCreated(name,
properties.getInstanceCount() * properties.getConcurrency());
Collection<PartitionInfo> listenedPartitions;
if (extendedConsumerProperties.getExtension().isAutoRebalanceEnabled() ||
extendedConsumerProperties.getInstanceCount() == 1) {
if (properties.getInstanceCount() == 1) {
listenedPartitions = allPartitions;
}
else {
listenedPartitions = new ArrayList<>();
for (PartitionInfo partition : allPartitions) {
// divide partitions across modules
if ((partition.partition()
% extendedConsumerProperties.getInstanceCount()) == extendedConsumerProperties
.getInstanceIndex()) {
if ((partition.partition() % properties.getInstanceCount()) == properties.getInstanceIndex()) {
listenedPartitions.add(partition);
}
}
}
this.topicsInUse.put(destination.getName(), new TopicInformation(group, listenedPartitions));
this.topicsInUse.put(name, listenedPartitions);
return listenedPartitions;
}
@Override
@SuppressWarnings("unchecked")
protected AbstractEndpoint createConsumerEndpoint(String name, String group, Object queue,
MessageChannel inputChannel, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
Collection<PartitionInfo> listenedPartitions = (Collection<PartitionInfo>) queue;
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
final TopicPartitionInitialOffset[] topicPartitionInitialOffsets = getTopicPartitionInitialOffsets(
listenedPartitions);
final ContainerProperties containerProperties = anonymous
|| extendedConsumerProperties.getExtension().isAutoRebalanceEnabled()
? new ContainerProperties(destination.getName())
: new ContainerProperties(topicPartitionInitialOffsets);
int concurrency = Math.min(extendedConsumerProperties.getConcurrency(), listenedPartitions.size());
final ConcurrentMessageListenerContainer<?, ?> messageListenerContainer = new ConcurrentMessageListenerContainer(
consumerFactory, containerProperties) {
@Override
public void stop(Runnable callback) {
super.stop(callback);
}
};
final TopicPartitionInitialOffset[] topicPartitionInitialOffsets =
new TopicPartitionInitialOffset[listenedPartitions.size()];
int i = 0;
for (PartitionInfo partition : listenedPartitions){
topicPartitionInitialOffsets[i++] = new TopicPartitionInitialOffset(partition.topic(),
partition.partition());
}
int concurrency = Math.min(properties.getConcurrency(), listenedPartitions.size());
final ExecutorService dispatcherTaskExecutor =
Executors.newFixedThreadPool(concurrency, DAEMON_THREAD_FACTORY);
final ContainerProperties containerProperties = new ContainerProperties(topicPartitionInitialOffsets);
//final ContainerProperties containerProperties = new ContainerProperties(name);
final ConcurrentMessageListenerContainer<byte[], byte[]> messageListenerContainer = new ConcurrentMessageListenerContainer<>(
consumerFactory, containerProperties);
messageListenerContainer.setConcurrency(concurrency);
	messageListenerContainer.getContainerProperties()
			.setAckOnError(isAutoCommitOnError(properties));
	if (!properties.getExtension().isAutoCommitOffset()) {
messageListenerContainer.getContainerProperties()
.setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL);
}
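	// With autoCommitOffset disabled, the container runs in MANUAL ack mode: offsets are
	// committed only when the Acknowledgment header carried by each message is
	// acknowledged (see doManualAck further below).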
if (this.logger.isDebugEnabled()) {
this.logger.debug(
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
}
final KafkaMessageDrivenChannelAdapter<byte[], byte[]> kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter<>(
messageListenerContainer);
kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
kafkaMessageDrivenChannelAdapter.setOutputChannel(inputChannel);
kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
// we need to wrap the adapter listener into a retrying listener so that the retry
// logic is applied before the ErrorHandler is executed
final RetryTemplate retryTemplate = buildRetryTemplateIfRetryEnabled(properties);
if (retryTemplate != null) {
if (properties.getExtension().isAutoCommitOffset()) {
final MessageListener originalMessageListener = (MessageListener) messageListenerContainer
.getContainerProperties().getMessageListener();
messageListenerContainer.getContainerProperties().setMessageListener(new MessageListener() {
@Override
public void onMessage(final ConsumerRecord message) {
try {
retryTemplate.execute(new RetryCallback<Object, Throwable>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message);
return null;
}
});
}
catch (Throwable throwable) {
if (throwable instanceof RuntimeException) {
throw (RuntimeException) throwable;
}
else {
throw new RuntimeException(throwable);
}
}
}
});
}
else {
		final AcknowledgingMessageListener originalMessageListener = (AcknowledgingMessageListener) messageListenerContainer
				.getContainerProperties().getMessageListener();
		messageListenerContainer.getContainerProperties().setMessageListener(new AcknowledgingMessageListener() {
@Override
public void onMessage(final ConsumerRecord message, final Acknowledgment acknowledgment) {
retryTemplate.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message, acknowledgment);
return null;
}
});
}
});
}
}
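	// The listener wrapping above ensures retries are exhausted in-memory, on the
	// listener thread, before a failure ever reaches the container's ErrorHandler, and
	// therefore before the record can be forwarded to the DLQ configured below.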
if (properties.getExtension().isEnableDlq()) {
final String dlqTopic = "error." + name + "." + group;
initDlqProducer();
messageListenerContainer.getContainerProperties().setErrorHandler(new ErrorHandler() {
@Override
public void handle(Exception thrownException, final ConsumerRecord message) {
			final byte[] key = message.key() != null
					? Utils.toArray(ByteBuffer.wrap((byte[]) message.key())) : null;
			final byte[] payload = message.value() != null
					? Utils.toArray(ByteBuffer.wrap((byte[]) message.value())) : null;
			dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload), new Callback() {
				@Override
				public void onCompletion(RecordMetadata metadata, Exception exception) {
StringBuffer messageLog = new StringBuffer();
messageLog.append(" a message with key='"
+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
messageLog.append(" and payload='"
+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
messageLog.append(" received from " + message.partition());
if (exception != null) {
logger.error("Error sending to DLQ" + messageLog.toString(), exception);
}
else {
if (logger.isDebugEnabled()) {
logger.debug("Sent to DLQ " + messageLog.toString());
}
}
}
});
}
});
}
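	// The DLQ record reuses the original key and payload (re-serialized as byte arrays);
	// the topic name follows the "error.<destination>.<group>" convention, so each
	// destination/group pair gets its own dead-letter topic.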
kafkaMessageDrivenChannelAdapter.start();
return kafkaMessageDrivenChannelAdapter;
}
private ConsumerFactory<?, ?> createKafkaConsumerFactory(boolean anonymous, String consumerGroup,
		ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties) {
	Map<String, Object> props = new HashMap<>();
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
	props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
	props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100);
	props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, anonymous ? "latest" : "earliest");
	props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
	if (ObjectUtils.isEmpty(props.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
	}
	if (!ObjectUtils.isEmpty(consumerProperties.getExtension().getConfiguration())) {
		props.putAll(consumerProperties.getExtension().getConfiguration());
	}
	if (!ObjectUtils.isEmpty(consumerProperties.getExtension().getStartOffset())) {
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
				consumerProperties.getExtension().getStartOffset().name());
	}
	return new DefaultKafkaConsumerFactory<>(props);
}

@Override
protected MessageHandler createProducerMessageHandler(final String name,
		ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
	validateTopicName(name);
	Collection<PartitionInfo> partitions = ensureTopicCreated(name, producerProperties.getPartitionCount());
	if (producerProperties.getPartitionCount() < partitions.size()) {
		if (logger.isInfoEnabled()) {
			logger.info("The `partitionCount` of the producer for topic " + name + " is "
					+ producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
					+ partitions.size() + " of the topic. The larger number will be used instead.");
		}
	}
	topicsInUse.put(name, partitions);
	Map<String, Object> props = new HashMap<>();
	props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
	props.put(ProducerConfig.RETRIES_CONFIG, 0);
	props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
	props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
	props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
	props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
	props.put(ProducerConfig.ACKS_CONFIG, String.valueOf(configurationProperties.getRequiredAcks()));
	props.put(ProducerConfig.LINGER_MS_CONFIG,
			String.valueOf(producerProperties.getExtension().getBatchTimeout()));
	props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
			producerProperties.getExtension().getCompressionType().toString());
	if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
		props.putAll(configurationProperties.getConfiguration());
	}
	ProducerFactory<byte[], byte[]> producerFB = new DefaultKafkaProducerFactory<>(props);
	// final ProducerConfiguration<byte[], byte[]> producerConfiguration = new ProducerConfiguration<>(
	// 		producerMetadata, producerFB.getObject());
	// producerConfiguration.setProducerListener(this.producerListener);
	// KafkaProducerContext kafkaProducerContext = new KafkaProducerContext();
	// kafkaProducerContext.setProducerConfigurations(
	// 		Collections.<String, ProducerConfiguration<?, ?>>singletonMap(name, producerConfiguration));
	// return new ProducerConfigurationMessageHandler(producerConfiguration, name);
	return new SendingHandler(producerFB, name, producerProperties, partitions.size());
}
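// A minimal sketch of binding properties that drive the two factory methods above; the
// binding names "input" and "output" are placeholders, not part of the binder:
//
//   spring.cloud.stream.bindings.input.group=myGroup
//   spring.cloud.stream.kafka.bindings.input.consumer.startOffset=earliest
//   spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset=false
//   spring.cloud.stream.kafka.bindings.output.producer.batchTimeout=100
//   spring.cloud.stream.kafka.bindings.output.producer.compressionType=gzip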
private boolean isAutoCommitOnError(ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
return properties.getExtension().getAutoCommitOnError() != null
? properties.getExtension().getAutoCommitOnError()
: properties.getExtension().isAutoCommitOffset() && properties.getExtension().isEnableDlq();
}

@Override
protected void createProducerDestinationIfNecessary(String name,
ExtendedProducerProperties<KafkaProducerProperties> properties) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
validateTopicName(name);
Collection<PartitionInfo> partitions = ensureTopicCreated(name, properties.getPartitionCount());
if (properties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + name + " is "
+ properties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
this.topicsInUse.put(name, partitions);
}
private TopicPartitionInitialOffset[] getTopicPartitionInitialOffsets(
		Collection<PartitionInfo> listenedPartitions) {
	final TopicPartitionInitialOffset[] topicPartitionInitialOffsets =
			new TopicPartitionInitialOffset[listenedPartitions.size()];
	int i = 0;
	for (PartitionInfo partition : listenedPartitions) {
		topicPartitionInitialOffsets[i++] = new TopicPartitionInitialOffset(partition.topic(),
				partition.partition());
	}
	return topicPartitionInitialOffsets;
}

/**
 * Creates a Kafka topic if needed, or tries to increase its partition count to the
 * desired number.
 */
private Collection<PartitionInfo> ensureTopicCreated(final String topicName, final int partitionCount) {
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
try {
final Properties topicConfig = new Properties();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
if (topicMetadata.errorCode() == ErrorMapping.NoError()) {
// only consider minPartitionCount for resizing if autoAddPartitions is
// true
int effectivePartitionCount = configurationProperties.isAutoAddPartitions()
? Math.max(configurationProperties.getMinPartitionCount(), partitionCount) : partitionCount;
if (topicMetadata.partitionsMetadata().size() < effectivePartitionCount) {
if (configurationProperties.isAutoAddPartitions()) {
AdminUtils.addPartitions(zkUtils, topicName, effectivePartitionCount, null, false);
}
else {
int topicSize = topicMetadata.partitionsMetadata().size();
throw new BinderException("The number of expected partitions was: " + partitionCount + ", but "
					+ topicSize + (topicSize > 1 ? " have " : " has ") + "been found instead. "
					+ "Consider either increasing the partition count of the topic or enabling `autoAddPartitions`.");
}
}
}
else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
if (configurationProperties.isAutoCreateTopics()) {
Seq<Object> brokerList = zkUtils.getSortedBrokerList();
// always consider minPartitionCount for topic creation
int effectivePartitionCount = Math.max(configurationProperties.getMinPartitionCount(),
partitionCount);
final scala.collection.Map<Object, Seq<Object>> replicaAssignment = AdminUtils
.assignReplicasToBrokers(brokerList, effectivePartitionCount,
configurationProperties.getReplicationFactor(), -1, -1);
metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) throws RuntimeException {
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topicName,
replicaAssignment, topicConfig, true);
return null;
}
});
}
else {
throw new BinderException("Topic " + topicName + " does not exist");
}
}
else {
throw new BinderException("Error fetching Kafka topic metadata: ",
ErrorMapping.exceptionFor(topicMetadata.errorCode()));
}
try {
Collection<PartitionInfo> partitions = metadataRetryOperations
.execute(new RetryCallback<Collection<PartitionInfo>, Exception>() {
@Override
public Collection<PartitionInfo> doWithRetry(RetryContext context) throws Exception {
Collection<PartitionInfo> partitions = consumerFactory.createConsumer().partitionsFor(topicName);
// do a sanity check on the partition set
if (partitions.size() < partitionCount) {
throw new IllegalStateException("The number of expected partitions was: "
+ partitionCount + ", but " + partitions.size()
+ (partitions.size() > 1 ? " have " : " has ") + "been found instead");
}
return partitions;
}
});
return partitions;
}
catch (Exception e) {
logger.error("Cannot initialize Binder", e);
throw new BinderException("Cannot initialize binder:", e);
}
	}
finally {
zkClient.close();
}
}
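// Provisioning summary: an existing topic is resized only when autoAddPartitions is
// enabled (minPartitionCount is honored in that case); a missing topic is created only
// when autoCreateTopics is enabled; otherwise a BinderException is thrown fast.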
private synchronized void initDlqProducer() {
try {
if (dlqProducer == null) {
synchronized (this) {
if (dlqProducer == null) {
// we can use the producer defaults as we do not need to tune
// performance
Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
props.put(ProducerConfig.RETRIES_CONFIG, 0);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
DefaultKafkaProducerFactory<byte[], byte[]> defaultKafkaProducerFactory = new DefaultKafkaProducerFactory<>(props);
dlqProducer = defaultKafkaProducerFactory.createProducer();
}
}
}
}
catch (Exception e) {
throw new RuntimeException("Cannot initialize DLQ producer:", e);
}
}
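// DLQ producer creation is lazy and shared. The method is synchronized, which makes the
// inner double-checked locking redundant but harmless; plain byte[] serializers and
// default batching are enough here since DLQ throughput is not performance-critical.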
private String toDisplayString(String original, int maxCharacters) {
	if (original.length() <= maxCharacters) {
		return original;
	}
	return original.substring(0, maxCharacters) + "...";
}
@Override
public void doManualAck(LinkedList<MessageHeaders> messageHeadersList) {
	Iterator<MessageHeaders> iterator = messageHeadersList.iterator();
	while (iterator.hasNext()) {
		MessageHeaders headers = iterator.next();
		Acknowledgment acknowledgment = (Acknowledgment) headers.get(KafkaHeaders.ACKNOWLEDGMENT);
		Assert.notNull(acknowledgment,
				"Acknowledgment must not be null when acknowledging a Kafka message manually.");
		acknowledgment.acknowledge();
	}
}

private final class ProducerConfigurationMessageHandler extends KafkaProducerMessageHandler<byte[], byte[]>
		implements Lifecycle {

	private boolean running = true;

	private final DefaultKafkaProducerFactory<byte[], byte[]> producerFactory;

	private ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate, String topic,
			ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
			DefaultKafkaProducerFactory<byte[], byte[]> producerFactory) {
		super(kafkaTemplate);
		setTopicExpression(new LiteralExpression(topic));
		setMessageKeyExpression(producerProperties.getExtension().getMessageKeyExpression());
		setBeanFactory(KafkaMessageChannelBinder.this.getBeanFactory());
		if (producerProperties.isPartitioned()) {
			SpelExpressionParser parser = new SpelExpressionParser();
			setPartitionIdExpression(parser.parseExpression("headers." + BinderHeaders.PARTITION_HEADER));
		}
		if (producerProperties.getExtension().isSync()) {
			setSync(true);
		}
		this.producerFactory = producerFactory;
	}

	@Override
	public void start() {
		try {
			super.onInit();
		}
		catch (Exception e) {
			this.logger.error("Initialization errors: ", e);
			throw new RuntimeException(e);
		}
	}

	@Override
	public void stop() {
		this.producerFactory.stop();
		this.running = false;
	}

	@Override
	public boolean isRunning() {
		return this.running;
	}
}

private final class SendingHandler extends AbstractMessageHandler implements Lifecycle {

	private final AtomicInteger roundRobinCount = new AtomicInteger();

	private ProducerFactory<byte[], byte[]> delegate;

	private String targetTopic;

	private final ExtendedProducerProperties<KafkaProducerProperties> producerProperties;

	private final PartitionHandler partitionHandler;

	private final KafkaTemplate<byte[], byte[]> kafkaTemplate;

	private final int numberOfKafkaPartitions;

	private SendingHandler(ProducerFactory<byte[], byte[]> delegate, String targetTopic,
			ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
			int numberOfPartitions) {
		Assert.notNull(delegate, "Delegate cannot be null");
		Assert.hasText(targetTopic, "Target topic cannot be null");
		this.delegate = delegate;
		this.targetTopic = targetTopic;
		this.producerProperties = producerProperties;
		ConfigurableListableBeanFactory beanFactory = KafkaMessageChannelBinder.this.getBeanFactory();
		this.setBeanFactory(beanFactory);
		this.numberOfKafkaPartitions = numberOfPartitions;
		this.kafkaTemplate = new KafkaTemplate<>(delegate);
		this.partitionHandler = new PartitionHandler(beanFactory, evaluationContext, partitionSelector,
				producerProperties);
	}
@Override
public void handleMessageInternal(Message<?> message) throws MessagingException {
int targetPartition;
if (producerProperties.isPartitioned()) {
targetPartition = this.partitionHandler.determinePartition(message);
}
else {
targetPartition = roundRobin() % numberOfKafkaPartitions;
}
// this.delegate.send(this.targetTopic,
// message.getHeaders().get(BinderHeaders.PARTITION_HEADER, Integer.class), null,
// (byte[]) message.getPayload());
kafkaTemplate.send(this.targetTopic, targetPartition, (byte[]) message.getPayload());
}
private int roundRobin() {
int result = roundRobinCount.incrementAndGet();
if (result == Integer.MAX_VALUE) {
roundRobinCount.set(0);
}
return result;
}
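	// For non-partitioned producers the counter above is mapped onto a physical partition
	// via the modulo in handleMessageInternal; resetting it at Integer.MAX_VALUE keeps the
	// value from overflowing into negative partition indexes.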
	@Override
	public void start() {
	}

	@Override
	public void stop() {
	}

	@Override
	public boolean isRunning() {
		return false;
	}
}
public static class TopicInformation {
private final String consumerGroup;
private final Collection<PartitionInfo> partitionInfos;
public TopicInformation(String consumerGroup, Collection<PartitionInfo> partitionInfos) {
this.consumerGroup = consumerGroup;
this.partitionInfos = partitionInfos;
}
public String getConsumerGroup() {
return consumerGroup;
}
public boolean isConsumerTopic() {
return consumerGroup != null;
}
public Collection<PartitionInfo> getPartitionInfos() {
return partitionInfos;
}
}
}

/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
package org.springframework.cloud.stream.binder.kafka.config;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.PostConstruct;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.utils.AppInfoParser;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.endpoint.PublicMetrics;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.context.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderJaasInitializerListener;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderMetrics;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.JaasLoginModuleConfiguration;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationListener;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.core.type.AnnotatedTypeMetadata;
import org.springframework.integration.codec.Codec;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.util.ObjectUtils;
/**
* @author David Turanski
* @author Soby Chacko
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
* @author Henryk Konsek
*/
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class,
KafkaBinderConfiguration.KafkaPropertiesConfiguration.class })
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {
protected static final Log logger = LogFactory.getLog(KafkaBinderConfiguration.class);
@Autowired
private Codec codec;
@Autowired
private KafkaBinderConfigurationProperties configurationProperties;

@Autowired
private KafkaExtendedBindingProperties kafkaExtendedBindingProperties;
@Autowired
private ProducerListener producerListener;
@Autowired
private ApplicationContext context;
@Autowired(required = false)
private AdminUtilsOperation adminUtilsOperation;
@Bean
KafkaTopicProvisioner provisioningProvider() {
return new KafkaTopicProvisioner(this.configurationProperties, this.adminUtilsOperation);
}
@Bean
KafkaMessageChannelBinder kafkaMessageChannelBinder() {
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
this.configurationProperties, provisioningProvider());
kafkaMessageChannelBinder.setCodec(this.codec);
kafkaMessageChannelBinder.setProducerListener(producerListener);
kafkaMessageChannelBinder.setExtendedBindingProperties(this.kafkaExtendedBindingProperties);
return kafkaMessageChannelBinder;
}
@Bean
ProducerListener producerListener() {
return new LoggingProducerListener();
}
@Bean
KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
props.putAll(configurationProperties.getConfiguration());
}
if (!props.containsKey(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
}
ConsumerFactory<?, ?> consumerFactory = new DefaultKafkaConsumerFactory<>(props);
return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, consumerFactory);
}
@Bean
public PublicMetrics kafkaBinderMetrics(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
return new KafkaBinderMetrics(kafkaMessageChannelBinder, configurationProperties);
}
@Bean(name = "adminUtilsOperation")
@Conditional(Kafka09Present.class)
@ConditionalOnClass(name = "kafka.admin.AdminUtils")
public AdminUtilsOperation kafka09AdminUtilsOperation() {
logger.info("AdminUtils selected: Kafka 0.9 AdminUtils");
return new Kafka09AdminUtilsOperation();
}
@Bean(name = "adminUtilsOperation")
@Conditional(Kafka10Present.class)
@ConditionalOnClass(name = "kafka.admin.AdminUtils")
public AdminUtilsOperation kafka10AdminUtilsOperation() {
logger.info("AdminUtils selected: Kafka 0.10 AdminUtils");
return new Kafka10AdminUtilsOperation();
}
@Bean
public ApplicationListener<?> jaasInitializer() throws IOException {
return new KafkaBinderJaasInitializerListener();
}
static class Kafka10Present implements Condition {
@Override
public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
return AppInfoParser.getVersion().startsWith("0.10");
}
}
static class Kafka09Present implements Condition {
@Override
public boolean matches(ConditionContext conditionContext, AnnotatedTypeMetadata annotatedTypeMetadata) {
return AppInfoParser.getVersion().startsWith("0.9");
}
}
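// Both conditions inspect the Kafka client version reported by AppInfoParser, so at
// most one AdminUtilsOperation bean is registered, matching whichever 0.9 or 0.10
// client (with kafka.admin.AdminUtils on the classpath) the application ships.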
public static class JaasConfigurationProperties {
private JaasLoginModuleConfiguration kafka;
private JaasLoginModuleConfiguration zookeeper;
}
@ConditionalOnClass(name = "org.springframework.boot.autoconfigure.kafka.KafkaProperties")
public static class KafkaPropertiesConfiguration {
// KafkaProperties can still be unavailable if KafkaAutoConfiguration is disabled.
@Autowired(required = false)
private KafkaProperties kafkaProperties;
@Autowired
private KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties;
@PostConstruct
public void init() {
Map<String, Object> configuration = this.kafkaBinderConfigurationProperties.getConfiguration();
if (this.kafkaProperties != null) {
for (Map.Entry<String, String> properties : this.kafkaProperties.getProperties().entrySet()) {
if (!configuration.containsKey(properties.getKey())) {
configuration.put(properties.getKey(), properties.getValue());
}
}
for (Map.Entry<String, Object> producerProperties : this.kafkaProperties.buildProducerProperties()
.entrySet()) {
if (!configuration.containsKey(producerProperties.getKey())) {
configuration.put(producerProperties.getKey(), producerProperties.getValue());
}
}
for (Map.Entry<String, Object> consumerProperties : this.kafkaProperties.buildConsumerProperties()
.entrySet()) {
if (!configuration.containsKey(consumerProperties.getKey())) {
configuration.put(consumerProperties.getKey(), consumerProperties.getValue());
}
}
if (ObjectUtils.isEmpty(configuration.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))) {
configuration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
kafkaBinderConfigurationProperties.getKafkaConnectionString());
}
else {
@SuppressWarnings("unchecked")
List<String> bootStrapServers = (List<String>) configuration
.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
if (bootStrapServers.size() == 1 && bootStrapServers.get(0).equals("localhost:9092")) {
configuration.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
kafkaBinderConfigurationProperties.getKafkaConnectionString());
}
}
}
}
}
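// Merge precedence: values set explicitly on the binder's configuration map always win;
// Boot's KafkaProperties only fill in missing keys, and Boot's implicit localhost:9092
// bootstrap default is swapped for the binder's broker list so it cannot shadow a real
// binder-level setting.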
}

Some files were not shown because too many files have changed in this diff.