Compare commits


9 Commits

Author SHA1 Message Date
Oleg Zhurakousky
ea87792c76 Merge pull request #609 from spring-operator/polish-urls-xml-kafka-0.9-upgrade
URL Cleanup
2019-03-26 14:21:35 +01:00
Spring Operator
c3c8935014 URL Cleanup
This commit updates URLs to prefer the https protocol. Redirects are not followed, to avoid accidentally expanding intentionally shortened URLs (e.g. ones produced by a URL shortener).

# Fixed URLs

## Fixed Success
These URLs were switched to an https URL that responded with a 2xx or 3xx status. While the response was successful, your review is still recommended.

* [ ] http://repo.spring.io/libs-milestone-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-milestone-local ([https](https://repo.spring.io/libs-milestone-local) result 302).
* [ ] http://repo.spring.io/libs-snapshot-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-snapshot-local ([https](https://repo.spring.io/libs-snapshot-local) result 302).
* [ ] http://repo.spring.io/release with 1 occurrence migrated to:
  https://repo.spring.io/release ([https](https://repo.spring.io/release) result 302).

# Ignored
These URLs were intentionally ignored.

* http://maven.apache.org/POM/4.0.0 with 14 occurrences
* http://www.w3.org/2001/XMLSchema-instance with 7 occurrences
2019-03-26 00:24:04 -05:00
Oleg Zhurakousky
92da0fe3e9 Merge pull request #584 from spring-operator/polish-urls-apache-license-kafka-0.9-upgrade
URL Cleanup
2019-03-25 14:55:34 +01:00
Spring Operator
6a60c76dd9 URL Cleanup
This commit updates URLs to prefer the https protocol. Redirects are not followed, to avoid accidentally expanding intentionally shortened URLs (e.g. ones produced by a URL shortener).

# Fixed URLs

## Fixed Success
These URLs were switched to an https URL with a 2xx status. While the status was successful, your review is still recommended.

* [ ] http://www.apache.org/licenses/ with 1 occurrence migrated to:
  https://www.apache.org/licenses/ ([https](https://www.apache.org/licenses/) result 200).
* [ ] http://www.apache.org/licenses/LICENSE-2.0 with 26 occurrences migrated to:
  https://www.apache.org/licenses/LICENSE-2.0 ([https](https://www.apache.org/licenses/LICENSE-2.0) result 200).
2019-03-21 13:24:01 -05:00
Spring Operator
b3c26cf93f URL Cleanup
This commit updates URLs to prefer the https protocol. Redirects are not followed, to avoid accidentally expanding intentionally shortened URLs (e.g. ones produced by a URL shortener).

# Fixed URLs

## Fixed Success
These URLs were switched to an https URL that responded with a 2xx or 3xx status. While the response was successful, your review is still recommended.

* http://maven.apache.org/xsd/maven-4.0.0.xsd with 7 occurrences migrated to:
  https://maven.apache.org/xsd/maven-4.0.0.xsd ([https](https://maven.apache.org/xsd/maven-4.0.0.xsd) result 200).
* http://www.apache.org/licenses/LICENSE-2.0 with 2 occurrences migrated to:
  https://www.apache.org/licenses/LICENSE-2.0 ([https](https://www.apache.org/licenses/LICENSE-2.0) result 200).
* http://projects.spring.io/spring-cloud with 2 occurrences migrated to:
  https://projects.spring.io/spring-cloud ([https](https://projects.spring.io/spring-cloud) result 301).
* http://www.spring.io with 2 occurrences migrated to:
  https://www.spring.io ([https](https://www.spring.io) result 301).
* http://repo.spring.io/libs-milestone-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-milestone-local ([https](https://repo.spring.io/libs-milestone-local) result 302).
* http://repo.spring.io/libs-release-local with 1 occurrence migrated to:
  https://repo.spring.io/libs-release-local ([https](https://repo.spring.io/libs-release-local) result 302).
* http://repo.spring.io/libs-snapshot-local with 2 occurrences migrated to:
  https://repo.spring.io/libs-snapshot-local ([https](https://repo.spring.io/libs-snapshot-local) result 302).
* http://repo.spring.io/release with 1 occurrence migrated to:
  https://repo.spring.io/release ([https](https://repo.spring.io/release) result 302).

# Ignored
These URLs were intentionally ignored.

* http://maven.apache.org/POM/4.0.0 with 14 occurrences
* http://www.w3.org/2001/XMLSchema-instance with 7 occurrences
2019-03-20 09:48:06 -04:00
Soby Chacko
3b0bf53896 cleanup 2016-07-08 18:07:55 -04:00
Soby Chacko
d9670e040b Kafka 0.9 upgrade changes
Incorporating refactorings that occurred in SCS into both the 0.8 and 0.9 binders

cleanup
2016-07-08 18:04:00 -04:00
Ilayaperumal Gopinathan
fd28764e39 Initial refactoring to move classes across 2016-06-28 23:01:46 +05:30
Soby Chacko
079592363d Initial project structure changes for the 0.9 upgrade 2016-06-22 18:02:57 -04:00
87 changed files with 3628 additions and 6097 deletions
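
The "result NNN" codes in the URL Cleanup commit messages above come from probing each https variant without following redirects. As a minimal illustration of that kind of probe — a hedged sketch in Java, not the bot's actual implementation; the class and method names here are made up:

```java
import java.net.HttpURLConnection;
import java.net.URL;

public class HttpsProbe {

    // Reports the raw status code of the https variant of an http URL.
    // Redirects are deliberately not followed, so an intentionally
    // shortened URL is never expanded behind the caller's back.
    static int probeHttpsVariant(String httpUrl) throws Exception {
        URL httpsUrl = new URL(httpUrl.replaceFirst("^http://", "https://"));
        HttpURLConnection conn = (HttpURLConnection) httpsUrl.openConnection();
        conn.setInstanceFollowRedirects(false); // report 301/302 as-is
        conn.setRequestMethod("HEAD");
        try {
            return conn.getResponseCode(); // e.g. 200, 301, 302 as listed above
        }
        finally {
            conn.disconnect();
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(probeHttpsVariant("http://repo.spring.io/release"));
    }
}
```

A 2xx means the https URL serves content directly; a 3xx (as with the repo.spring.io entries) means it answers with a redirect that still warrants a human look.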


@@ -21,7 +21,7 @@
<repository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -29,7 +29,7 @@
<repository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -37,7 +37,7 @@
<repository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/release</url>
<url>https://repo.spring.io/release</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -47,7 +47,7 @@
<pluginRepository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -55,7 +55,7 @@
<pluginRepository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>


@@ -1,6 +1,6 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
@@ -192,7 +192,7 @@
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

mvnw (vendored)

@@ -8,7 +8,7 @@
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an

mvnw.cmd (vendored)

@@ -8,7 +8,7 @@
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an

pom.xml

@@ -1,64 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.M1</version>
<version>1.1.0.BUILD-SNAPSHOT</version>
<packaging>pom</packaging>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-parent</artifactId>
<version>1.1.0.M1</version>
<artifactId>spring-cloud-build</artifactId>
<version>1.1.1.RELEASE</version>
<relativePath />
</parent>
<properties>
<java.version>1.7</java.version>
<kafka.version>0.9.0.1</kafka.version>
<spring-kafka.version>1.0.3.RELEASE</spring-kafka.version>
<spring-integration-kafka.version>2.0.1.RELEASE</spring-integration-kafka.version>
<spring-boot.version>1.4.0.BUILD-SNAPSHOT</spring-boot.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-dependencies</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<modules>
<module>spring-cloud-stream-binder-kafka-common</module>
<module>spring-cloud-stream-binder-kafka</module>
<module>spring-cloud-starter-stream-kafka</module>
<module>spring-cloud-stream-binder-kafka-0.8</module>
<module>spring-cloud-starter-stream-kafka-0.8</module>
<module>spring-cloud-stream-binder-kafka-test-support</module>
<module>spring-cloud-stream-binder-kafka-docs</module>
</modules>
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<redirectTestOutputToFile>true</redirectTestOutputToFile>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.7</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>2.17</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>6.17</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<configuration>
<quiet>true</quiet>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
<profiles>
<profile>
<id>spring</id>
@@ -66,7 +39,7 @@
<repository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -77,7 +50,7 @@
<repository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -85,7 +58,7 @@
<repository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/release</url>
<url>https://repo.spring.io/release</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -95,7 +68,7 @@
<pluginRepository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>http://repo.spring.io/libs-snapshot-local</url>
<url>https://repo.spring.io/libs-snapshot-local</url>
<snapshots>
<enabled>true</enabled>
</snapshots>
@@ -106,7 +79,7 @@
<pluginRepository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>http://repo.spring.io/libs-milestone-local</url>
<url>https://repo.spring.io/libs-milestone-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
@@ -114,7 +87,7 @@
<pluginRepository>
<id>spring-releases</id>
<name>Spring Releases</name>
<url>http://repo.spring.io/libs-release-local</url>
<url>https://repo.spring.io/libs-release-local</url>
<snapshots>
<enabled>false</enabled>
</snapshots>


@@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<artifactId>spring-cloud-starter-stream-kafka-0.8</artifactId>
<description>Spring Cloud Starter Stream Kafka for 0.8</description>
<url>https://projects.spring.io/spring-cloud</url>
<organization>
<name>Pivotal Software, Inc.</name>
<url>https://www.spring.io</url>
</organization>
<properties>
<main.basedir>${basedir}/../..</main.basedir>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-0.8</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>


@@ -0,0 +1 @@
provides: spring-cloud-starter-stream-kafka-0.8


@@ -1,17 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.M1</version>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
<description>Spring Cloud Starter Stream Kafka</description>
<url>http://projects.spring.io/spring-cloud</url>
<url>https://projects.spring.io/spring-cloud</url>
<organization>
<name>Pivotal Software, Inc.</name>
<url>http://www.spring.io</url>
<url>https://www.spring.io</url>
</organization>
<properties>
<main.basedir>${basedir}/../..</main.basedir>
@@ -20,7 +20,7 @@
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
<version>1.1.0.M1</version>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>


@@ -0,0 +1,136 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka-0.8</artifactId>
<packaging>jar</packaging>
<name>spring-cloud-stream-binder-kafka-0.8</name>
<description>Kafka binder implementation</description>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<properties>
<kafka.version>0.8.2.2</kafka.version>
<curator.version>2.6.0</curator.version>
<spring-integration-kafka.version>1.3.1.BUILD-SNAPSHOT</spring-integration-kafka.version>
<rxjava-math.version>1.0.0</rxjava-math.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
<scope>test</scope>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
<artifactId>spring-integration-kafka</artifactId>
<version>${spring-integration-kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro-compiler</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava-math</artifactId>
<version>${rxjava-math.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>${kafka.version}</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>


@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,25 +16,28 @@
package org.springframework.cloud.stream.binder.kafka;
import java.util.HashMap;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import kafka.cluster.Broker;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils$;
import org.I0Itec.zkclient.ZkClient;
import scala.collection.JavaConversions;
import scala.collection.Seq;
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.integration.kafka.core.BrokerAddress;
import org.springframework.integration.kafka.core.Partition;
/**
* Health indicator for Kafka.
*
* @author Ilayaperumal Gopinathan
* @author Marius Bogoevici
*/
public class KafkaBinderHealthIndicator implements HealthIndicator {
@@ -46,39 +49,47 @@ public class KafkaBinderHealthIndicator implements HealthIndicator {
KafkaBinderConfigurationProperties configurationProperties) {
this.binder = binder;
this.configurationProperties = configurationProperties;
}
@Override
public Health health() {
Map<String, String> properties = new HashMap<>();
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties
.getKafkaConnectionString());
properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
KafkaConsumer metadataConsumer = new KafkaConsumer(properties);
ZkClient zkClient = null;
try {
zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(),
configurationProperties.getZkConnectionTimeout(), ZKStringSerializer$.MODULE$);
Set<String> brokersInClusterSet = new HashSet<>();
Seq<Broker> allBrokersInCluster = ZkUtils$.MODULE$.getAllBrokersInCluster(zkClient);
Collection<Broker> brokersInCluster = JavaConversions.asJavaCollection(allBrokersInCluster);
for (Broker broker : brokersInCluster) {
brokersInClusterSet.add(broker.connectionString());
}
Set<String> downMessages = new HashSet<>();
for (String topic : this.binder.getTopicsInUse().keySet()) {
List<PartitionInfo> partitionInfos = metadataConsumer.partitionsFor(topic);
for (PartitionInfo partitionInfo : partitionInfos) {
if (this.binder.getTopicsInUse().get(topic).contains(partitionInfo) && partitionInfo.leader()
.id() == -1) {
downMessages.add(partitionInfo.toString());
for (Map.Entry<String, Collection<Partition>> entry : binder.getTopicsInUse().entrySet()) {
for (Partition partition : entry.getValue()) {
BrokerAddress address = binder.getConnectionFactory().getLeader(partition);
if (!brokersInClusterSet.contains(address.toString())) {
downMessages.add(address.toString());
}
}
}
if (downMessages.isEmpty()) {
return Health.up().build();
}
return Health.down().withDetail("Following partitions in use have no leaders: ", downMessages.toString())
.build();
return Health.down().withDetail("Following brokers are down: ", downMessages.toString()).build();
}
catch (Exception e) {
return Health.down(e).build();
}
finally {
metadataConsumer.close();
if (zkClient != null) {
try {
zkClient.close();
}
catch (Exception e) {
// ignore
}
}
}
}
}
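
For orientation, the reworked health() above walks every in-use partition, resolves its leader through the connection factory, and reports DOWN if that broker address is missing from the broker set read out of ZooKeeper. A hedged sketch of how a caller might consume the indicator — the two constructor arguments are Spring beans in practice (the KafkaBinderConfiguration diff further down wires them the same way); passing them as parameters just keeps the sketch self-contained:

```java
import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.Status;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;

class BinderHealthCheck {

    // binder and props come from the application context in a real app.
    static boolean brokersUp(KafkaMessageChannelBinder binder,
            KafkaBinderConfigurationProperties props) {
        Health health = new KafkaBinderHealthIndicator(binder, props).health();
        if (Status.DOWN.equals(health.getStatus())) {
            // the "Following brokers are down: " detail lists the unreachable addresses
            System.err.println(health.getDetails());
            return false;
        }
        return true;
    }
}
```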


@@ -0,0 +1,713 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.utils.Utils;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.context.Lifecycle;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.kafka.core.ConnectionFactory;
import org.springframework.integration.kafka.core.DefaultConnectionFactory;
import org.springframework.integration.kafka.core.KafkaMessage;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.core.ZookeeperConfiguration;
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
import org.springframework.integration.kafka.listener.AcknowledgingMessageListener;
import org.springframework.integration.kafka.listener.Acknowledgment;
import org.springframework.integration.kafka.listener.ErrorHandler;
import org.springframework.integration.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.integration.kafka.listener.KafkaNativeOffsetManager;
import org.springframework.integration.kafka.listener.MessageListener;
import org.springframework.integration.kafka.listener.OffsetManager;
import org.springframework.integration.kafka.support.KafkaHeaders;
import org.springframework.integration.kafka.support.KafkaProducerContext;
import org.springframework.integration.kafka.support.ProducerConfiguration;
import org.springframework.integration.kafka.support.ProducerFactoryBean;
import org.springframework.integration.kafka.support.ProducerListener;
import org.springframework.integration.kafka.support.ProducerMetadata;
import org.springframework.integration.kafka.support.ZookeeperConnect;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.MessagingException;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import kafka.admin.AdminUtils;
import kafka.api.OffsetRequest;
import kafka.api.TopicMetadata;
import kafka.common.ErrorMapping;
import kafka.serializer.DefaultDecoder;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import scala.collection.Seq;
/**
* A {@link Binder} that uses Kafka as the underlying middleware.
* @author Eric Bottard
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
* @author David Turanski
* @author Gary Russell
* @author Mark Fisher
* @author Soby Chacko
*/
public class KafkaMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
ExtendedProducerProperties<KafkaProducerProperties>>
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties>,
DisposableBean {
private static final ByteArraySerializer BYTE_ARRAY_SERIALIZER = new ByteArraySerializer();
private static final ThreadFactory DAEMON_THREAD_FACTORY;
static {
CustomizableThreadFactory threadFactory = new CustomizableThreadFactory("kafka-binder-");
threadFactory.setDaemon(true);
DAEMON_THREAD_FACTORY = threadFactory;
}
private final KafkaBinderConfigurationProperties configurationProperties;
private RetryOperations metadataRetryOperations;
private final Map<String, Collection<Partition>> topicsInUse = new HashMap<>();
// -------- Default values for properties -------
private ConnectionFactory connectionFactory;
private ProducerListener producerListener;
private volatile Producer<byte[], byte[]> dlqProducer;
private KafkaExtendedBindingProperties extendedBindingProperties = new KafkaExtendedBindingProperties();
public KafkaMessageChannelBinder(KafkaBinderConfigurationProperties configurationProperties) {
super(false, headersToMap(configurationProperties));
this.configurationProperties = configurationProperties;
}
private static String[] headersToMap(KafkaBinderConfigurationProperties configurationProperties) {
String[] headersToMap;
if (ObjectUtils.isEmpty(configurationProperties.getHeaders())) {
headersToMap = BinderHeaders.STANDARD_HEADERS;
}
else {
String[] combinedHeadersToMap = Arrays.copyOfRange(BinderHeaders.STANDARD_HEADERS, 0,
BinderHeaders.STANDARD_HEADERS.length + configurationProperties.getHeaders().length);
System.arraycopy(configurationProperties.getHeaders(), 0, combinedHeadersToMap,
BinderHeaders.STANDARD_HEADERS.length,
configurationProperties.getHeaders().length);
headersToMap = combinedHeadersToMap;
}
return headersToMap;
}
ConnectionFactory getConnectionFactory() {
return this.connectionFactory;
}
public void setProducerListener(ProducerListener producerListener) {
this.producerListener = producerListener;
}
/**
* Retry configuration for operations such as validating topic creation
* @param metadataRetryOperations the retry configuration
*/
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
this.metadataRetryOperations = metadataRetryOperations;
}
public void setExtendedBindingProperties(KafkaExtendedBindingProperties extendedBindingProperties) {
this.extendedBindingProperties = extendedBindingProperties;
}
@Override
public void onInit() throws Exception {
ZookeeperConfiguration configuration = new ZookeeperConfiguration(
new ZookeeperConnect(this.configurationProperties.getZkConnectionString()));
configuration.setBufferSize(this.configurationProperties.getSocketBufferSize());
configuration.setMaxWait(this.configurationProperties.getMaxWait());
DefaultConnectionFactory defaultConnectionFactory = new DefaultConnectionFactory(configuration);
defaultConnectionFactory.afterPropertiesSet();
this.connectionFactory = defaultConnectionFactory;
if (this.metadataRetryOperations == null) {
RetryTemplate retryTemplate = new RetryTemplate();
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
simpleRetryPolicy.setMaxAttempts(10);
retryTemplate.setRetryPolicy(simpleRetryPolicy);
ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
backOffPolicy.setInitialInterval(100);
backOffPolicy.setMultiplier(2);
backOffPolicy.setMaxInterval(1000);
retryTemplate.setBackOffPolicy(backOffPolicy);
this.metadataRetryOperations = retryTemplate;
}
}
@Override
public void destroy() throws Exception {
if (this.dlqProducer != null) {
this.dlqProducer.close();
this.dlqProducer = null;
}
}
/**
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
*/
static void validateTopicName(String topicName) {
try {
byte[] utf8 = topicName.getBytes("UTF-8");
for (byte b : utf8) {
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|| (b == '-') || (b == '_'))) {
throw new IllegalArgumentException(
"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
}
}
}
catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Can't happen
}
}
@Override
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
return this.extendedBindingProperties.getExtendedConsumerProperties(channelName);
}
@Override
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
return this.extendedBindingProperties.getExtendedProducerProperties(channelName);
}
Map<String, Collection<Partition>> getTopicsInUse() {
return this.topicsInUse;
}
@Override
protected Object createConsumerDestinationIfNecessary(String name, String group,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
validateTopicName(name);
if (properties.getInstanceCount() == 0) {
throw new IllegalArgumentException("Instance count cannot be zero");
}
Collection<Partition> allPartitions = ensureTopicCreated(name,
properties.getInstanceCount() * properties.getConcurrency());
Collection<Partition> listenedPartitions;
if (properties.getInstanceCount() == 1) {
listenedPartitions = allPartitions;
}
else {
listenedPartitions = new ArrayList<>();
for (Partition partition : allPartitions) {
// divide partitions across modules
if ((partition.getId() % properties.getInstanceCount()) == properties.getInstanceIndex()) {
listenedPartitions.add(partition);
}
}
}
this.topicsInUse.put(name, listenedPartitions);
return listenedPartitions;
}
@Override
@SuppressWarnings("unchecked")
protected AbstractEndpoint createConsumerEndpoint(String name, String group, Object queue,
MessageChannel inputChannel, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
Collection<Partition> listenedPartitions = (Collection<Partition>) queue;
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
int concurrency = Math.min(properties.getConcurrency(), listenedPartitions.size());
final ExecutorService dispatcherTaskExecutor =
Executors.newFixedThreadPool(concurrency, DAEMON_THREAD_FACTORY);
final KafkaMessageListenerContainer messageListenerContainer = new KafkaMessageListenerContainer(
this.connectionFactory, listenedPartitions.toArray(new Partition[listenedPartitions.size()])) {
@Override
public void stop(Runnable callback) {
super.stop(callback);
if (getOffsetManager() instanceof DisposableBean) {
try {
((DisposableBean) getOffsetManager()).destroy();
}
catch (Exception e) {
KafkaMessageChannelBinder.this.logger.error("Error while closing the offset manager", e);
}
}
dispatcherTaskExecutor.shutdown();
}
};
if (this.logger.isDebugEnabled()) {
this.logger.debug(
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
}
boolean anonymous = !StringUtils.hasText(group);
Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
"DLQ support is not available for anonymous subscriptions");
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
long referencePoint = properties.getExtension().getStartOffset() != null
? properties.getExtension().getStartOffset().getReferencePoint()
: (anonymous ? OffsetRequest.LatestTime() : OffsetRequest.EarliestTime());
OffsetManager offsetManager = createOffsetManager(consumerGroup, referencePoint);
if (properties.getExtension().isResetOffsets()) {
offsetManager.resetOffsets(listenedPartitions);
}
messageListenerContainer.setOffsetManager(offsetManager);
messageListenerContainer.setQueueSize(this.configurationProperties.getQueueSize());
messageListenerContainer.setMaxFetch(this.configurationProperties.getFetchSize());
boolean autoCommitOnError = properties.getExtension().getAutoCommitOnError() != null
? properties.getExtension().getAutoCommitOnError()
: properties.getExtension().isAutoCommitOffset() && properties.getExtension().isEnableDlq();
messageListenerContainer.setAutoCommitOnError(autoCommitOnError);
messageListenerContainer.setRecoveryInterval(properties.getExtension().getRecoveryInterval());
messageListenerContainer.setConcurrency(concurrency);
messageListenerContainer.setDispatcherTaskExecutor(dispatcherTaskExecutor);
final KafkaMessageDrivenChannelAdapter kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter(
messageListenerContainer);
kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
kafkaMessageDrivenChannelAdapter.setKeyDecoder(new DefaultDecoder(null));
kafkaMessageDrivenChannelAdapter.setPayloadDecoder(new DefaultDecoder(null));
kafkaMessageDrivenChannelAdapter.setOutputChannel(inputChannel);
kafkaMessageDrivenChannelAdapter.setAutoCommitOffset(properties.getExtension().isAutoCommitOffset());
kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
// we need to wrap the adapter listener into a retrying listener so that the retry
// logic is applied before the ErrorHandler is executed
final RetryTemplate retryTemplate = buildRetryTemplateIfRetryEnabled(properties);
if (retryTemplate != null) {
if (properties.getExtension().isAutoCommitOffset()) {
final MessageListener originalMessageListener = (MessageListener) messageListenerContainer
.getMessageListener();
messageListenerContainer.setMessageListener(new MessageListener() {
@Override
public void onMessage(final KafkaMessage message) {
try {
retryTemplate.execute(new RetryCallback<Object, Throwable>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message);
return null;
}
});
}
catch (Throwable throwable) {
if (throwable instanceof RuntimeException) {
throw (RuntimeException) throwable;
}
else {
throw new RuntimeException(throwable);
}
}
}
});
}
else {
messageListenerContainer.setMessageListener(new AcknowledgingMessageListener() {
final AcknowledgingMessageListener originalMessageListener =
(AcknowledgingMessageListener) messageListenerContainer
.getMessageListener();
@Override
public void onMessage(final KafkaMessage message, final Acknowledgment acknowledgment) {
retryTemplate.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message, acknowledgment);
return null;
}
});
}
});
}
}
if (properties.getExtension().isEnableDlq()) {
final String dlqTopic = "error." + name + "." + consumerGroup;
initDlqProducer();
messageListenerContainer.setErrorHandler(new ErrorHandler() {
@Override
public void handle(Exception thrownException, final KafkaMessage message) {
final byte[] key = message.getMessage().key() != null ? Utils.toArray(message.getMessage().key())
: null;
final byte[] payload = message.getMessage().payload() != null
? Utils.toArray(message.getMessage().payload()) : null;
KafkaMessageChannelBinder.this.dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload),
new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
StringBuffer messageLog = new StringBuffer();
messageLog.append(" a message with key='"
+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
messageLog.append(" and payload='"
+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
messageLog.append(" received from " + message.getMetadata().getPartition());
if (exception != null) {
KafkaMessageChannelBinder.this.logger.error(
"Error sending to DLQ" + messageLog.toString(), exception);
}
else {
if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
KafkaMessageChannelBinder.this.logger.debug(
"Sent to DLQ " + messageLog.toString());
}
}
}
});
}
});
}
kafkaMessageDrivenChannelAdapter.start();
return kafkaMessageDrivenChannelAdapter;
}
@Override
protected MessageHandler createProducerMessageHandler(final String name,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>(name, byte[].class, byte[].class,
BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
producerMetadata.setSync(producerProperties.getExtension().isSync());
KafkaProducerProperties.CompressionType compressionType = producerProperties.getExtension().getCompressionType();
producerMetadata.setCompressionType(fromKafkaProducerPropertiesCompressionType(compressionType));
producerMetadata.setBatchBytes(producerProperties.getExtension().getBufferSize());
Properties additional = new Properties();
additional.put(ProducerConfig.ACKS_CONFIG, String.valueOf(this.configurationProperties.getRequiredAcks()));
additional.put(ProducerConfig.LINGER_MS_CONFIG,
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
ProducerFactoryBean<byte[], byte[]> producerFB = new ProducerFactoryBean<>(producerMetadata,
this.configurationProperties.getKafkaConnectionString(), additional);
final ProducerConfiguration<byte[], byte[]> producerConfiguration = new ProducerConfiguration<>(
producerMetadata, producerFB.getObject());
producerConfiguration.setProducerListener(this.producerListener);
KafkaProducerContext kafkaProducerContext = new KafkaProducerContext();
kafkaProducerContext.setProducerConfigurations(
Collections.<String, ProducerConfiguration<?, ?>>singletonMap(name, producerConfiguration));
return new ProducerConfigurationMessageHandler(producerConfiguration, name);
}
ProducerMetadata.CompressionType fromKafkaProducerPropertiesCompressionType(
KafkaProducerProperties.CompressionType compressionType) {
switch (compressionType) {
case snappy : return ProducerMetadata.CompressionType.snappy;
case gzip : return ProducerMetadata.CompressionType.gzip;
default: return ProducerMetadata.CompressionType.none;
}
}
@Override
protected void createProducerDestinationIfNecessary(String name,
ExtendedProducerProperties<KafkaProducerProperties> properties) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
validateTopicName(name);
Collection<Partition> partitions = ensureTopicCreated(name, properties.getPartitionCount());
if (properties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + name + " is "
+ properties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
this.topicsInUse.put(name, partitions);
}
/**
* Creates a Kafka topic if needed, or try to increase its partition count to the
* desired number.
*/
private Collection<Partition> ensureTopicCreated(final String topicName, final int partitionCount) {
final ZkClient zkClient = new ZkClient(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
try {
final Properties topicConfig = new Properties();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkClient);
if (topicMetadata.errorCode() == ErrorMapping.NoError()) {
// only consider minPartitionCount for resizing if autoAddPartitions is
// true
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
? Math.max(this.configurationProperties.getMinPartitionCount(),
partitionCount) : partitionCount;
if (topicMetadata.partitionsMetadata().size() < effectivePartitionCount) {
if (this.configurationProperties.isAutoAddPartitions()) {
AdminUtils.addPartitions(zkClient, topicName, effectivePartitionCount, null, false,
new Properties());
}
else {
int topicSize = topicMetadata.partitionsMetadata().size();
throw new BinderException("The number of expected partitions was: " + partitionCount + ", but "
+ topicSize + (topicSize > 1 ? " have " : " has ") + "been found instead."
+ "Consider either increasing the partition count of the topic or enabling " +
"`autoAddPartitions`");
}
}
}
else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
if (this.configurationProperties.isAutoCreateTopics()) {
Seq<Object> brokerList = ZkUtils.getSortedBrokerList(zkClient);
// always consider minPartitionCount for topic creation
int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
partitionCount);
final scala.collection.Map<Object, Seq<Object>> replicaAssignment = AdminUtils
.assignReplicasToBrokers(brokerList, effectivePartitionCount,
this.configurationProperties.getReplicationFactor(), -1, -1);
this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) throws RuntimeException {
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topicName,
replicaAssignment, topicConfig, true);
return null;
}
});
}
else {
throw new BinderException("Topic " + topicName + " does not exist");
}
}
else {
throw new BinderException("Error fetching Kafka topic metadata: ",
ErrorMapping.exceptionFor(topicMetadata.errorCode()));
}
try {
Collection<Partition> partitions = this.metadataRetryOperations
.execute(new RetryCallback<Collection<Partition>, Exception>() {
@Override
public Collection<Partition> doWithRetry(RetryContext context) throws Exception {
KafkaMessageChannelBinder.this.connectionFactory.refreshMetadata(
Collections.singleton(topicName));
Collection<Partition> partitions =
KafkaMessageChannelBinder.this.connectionFactory.getPartitions(topicName);
// do a sanity check on the partition set
if (partitions.size() < partitionCount) {
throw new IllegalStateException("The number of expected partitions was: "
+ partitionCount + ", but " + partitions.size()
+ (partitions.size() > 1 ? " have " : " has ") + "been found instead");
}
KafkaMessageChannelBinder.this.connectionFactory.getLeaders(partitions);
return partitions;
}
});
return partitions;
}
catch (Exception e) {
this.logger.error("Cannot initialize Binder", e);
throw new BinderException("Cannot initialize binder:", e);
}
}
finally {
zkClient.close();
}
}
private synchronized void initDlqProducer() {
try {
if (this.dlqProducer == null) {
synchronized (this) {
if (this.dlqProducer == null) {
// we can use the producer defaults as we do not need to tune
// performance
ProducerMetadata<byte[], byte[]> producerMetadata = new ProducerMetadata<>("dlqKafkaProducer",
byte[].class, byte[].class, BYTE_ARRAY_SERIALIZER, BYTE_ARRAY_SERIALIZER);
producerMetadata.setSync(false);
producerMetadata.setCompressionType(ProducerMetadata.CompressionType.none);
producerMetadata.setBatchBytes(16384);
Properties additionalProps = new Properties();
additionalProps.put(ProducerConfig.ACKS_CONFIG,
String.valueOf(this.configurationProperties.getRequiredAcks()));
additionalProps.put(ProducerConfig.LINGER_MS_CONFIG, String.valueOf(0));
ProducerFactoryBean<byte[], byte[]> producerFactoryBean = new ProducerFactoryBean<>(
producerMetadata, this.configurationProperties.getKafkaConnectionString(),
additionalProps);
this.dlqProducer = producerFactoryBean.getObject();
}
}
}
}
catch (Exception e) {
throw new RuntimeException("Cannot initialize DLQ producer:", e);
}
}
private OffsetManager createOffsetManager(String group, long referencePoint) {
try {
KafkaNativeOffsetManager kafkaOffsetManager = new KafkaNativeOffsetManager(this.connectionFactory,
new ZookeeperConnect(this.configurationProperties.getZkConnectionString()),
Collections.<Partition, Long>emptyMap());
kafkaOffsetManager.setConsumerId(group);
kafkaOffsetManager.setReferenceTimestamp(referencePoint);
kafkaOffsetManager.afterPropertiesSet();
WindowingOffsetManager windowingOffsetManager = new WindowingOffsetManager(kafkaOffsetManager);
windowingOffsetManager.setTimespan(this.configurationProperties.getOffsetUpdateTimeWindow());
windowingOffsetManager.setCount(this.configurationProperties.getOffsetUpdateCount());
windowingOffsetManager.setShutdownTimeout(this.configurationProperties.getOffsetUpdateShutdownTimeout());
windowingOffsetManager.afterPropertiesSet();
return windowingOffsetManager;
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
private String toDisplayString(String original, int maxCharacters) {
if (original.length() <= maxCharacters) {
return original;
}
return original.substring(0, maxCharacters) + "...";
}
@Override
public void doManualAck(LinkedList<MessageHeaders> messageHeadersList) {
Iterator<MessageHeaders> iterator = messageHeadersList.iterator();
while (iterator.hasNext()) {
MessageHeaders headers = iterator.next();
Acknowledgment acknowledgment = (Acknowledgment) headers.get(KafkaHeaders.ACKNOWLEDGMENT);
Assert.notNull(acknowledgment,
"Acknowledgement shouldn't be null when acknowledging kafka message " + "manually.");
acknowledgment.acknowledge();
}
}
public enum StartOffset {
earliest(OffsetRequest.EarliestTime()), latest(OffsetRequest.LatestTime());
private final long referencePoint;
StartOffset(long referencePoint) {
this.referencePoint = referencePoint;
}
public long getReferencePoint() {
return this.referencePoint;
}
}
private final static class ProducerConfigurationMessageHandler implements MessageHandler, Lifecycle {
private ProducerConfiguration<byte[], byte[]> delegate;
private String targetTopic;
private boolean running = true;
private ProducerConfigurationMessageHandler(
ProducerConfiguration<byte[], byte[]> delegate, String targetTopic) {
Assert.notNull(delegate, "Delegate cannot be null");
Assert.hasText(targetTopic, "Target topic cannot be null");
this.delegate = delegate;
this.targetTopic = targetTopic;
}
@Override
public void start() {
}
@Override
public void stop() {
this.delegate.stop();
this.running = false;
}
@Override
public boolean isRunning() {
return this.running;
}
@Override
public void handleMessage(Message<?> message) throws MessagingException {
this.delegate.send(this.targetTopic,
message.getHeaders().get(BinderHeaders.PARTITION_HEADER, Integer.class), null,
(byte[]) message.getPayload());
}
}
}
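
One detail worth calling out from createConsumerDestinationIfNecessary above: with more than one instance, each binder instance listens only to the partitions whose id is congruent to its instanceIndex modulo instanceCount. A standalone sketch of that assignment rule, with plain ints standing in for the Partition objects used above:

```java
import java.util.ArrayList;
import java.util.List;

class PartitionAssignment {

    // Mirrors the modulo rule above: instance i of n takes the partitions p
    // with p % n == i, so every partition has exactly one listener.
    static List<Integer> assigned(int partitionCount, int instanceCount, int instanceIndex) {
        List<Integer> mine = new ArrayList<>();
        for (int p = 0; p < partitionCount; p++) {
            if (p % instanceCount == instanceIndex) {
                mine.add(p);
            }
        }
        return mine;
    }

    public static void main(String[] args) {
        // 6 partitions over 2 instances: instance 0 gets [0, 2, 4], instance 1 gets [1, 3, 5]
        System.out.println(assigned(6, 2, 0));
        System.out.println(assigned(6, 2, 1));
    }
}
```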


@@ -0,0 +1,262 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action0;
import rx.functions.Action1;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.observables.GroupedObservable;
import rx.observables.MathObservable;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.listener.OffsetManager;
import org.springframework.util.Assert;
/**
* An {@link OffsetManager} that aggregates writes over a time or count window, using an underlying delegate to
* do the actual operations. Its purpose is to reduce the performance impact of writing operations
* wherever this is desirable.
*
* Either a time window or a number of writes can be specified, but not both.
*
* @author Marius Bogoevici
*/
public class WindowingOffsetManager implements OffsetManager, InitializingBean, DisposableBean {
private final CreatePartitionAndOffsetFunction createPartitionAndOffsetFunction = new CreatePartitionAndOffsetFunction();
private final GetOffsetFunction getOffsetFunction = new GetOffsetFunction();
private final ComputeMaximumOffsetByPartitionFunction findHighestOffsetInPartitionGroup = new ComputeMaximumOffsetByPartitionFunction();
private final GetPartitionFunction getPartition = new GetPartitionFunction();
private final FindHighestOffsetsByPartitionFunction findHighestOffsetsByPartition = new FindHighestOffsetsByPartitionFunction();
private final DelegateUpdateOffsetAction delegateUpdateOffsetAction = new DelegateUpdateOffsetAction();
private final NotifyObservableClosedAction notifyObservableClosed = new NotifyObservableClosedAction();
private final OffsetManager delegate;
private long timespan = 10 * 1000;
private int count;
private Subject<PartitionAndOffset, PartitionAndOffset> offsets;
private Subscription subscription;
private int shutdownTimeout = 2000;
private CountDownLatch shutdownLatch;
public WindowingOffsetManager(OffsetManager offsetManager) {
this.delegate = offsetManager;
}
/**
* The timespan for aggregating write operations, before invoking the underlying {@link OffsetManager}.
*
* @param timespan duration in milliseconds
*/
public void setTimespan(long timespan) {
Assert.isTrue(timespan >= 0, "Timespan must be a positive value");
this.timespan = timespan;
}
/**
* How many writes should be aggregated, before invoking the underlying {@link OffsetManager}. Setting this value
* to 1 effectively disables windowing.
*
* @param count number of writes
*/
public void setCount(int count) {
Assert.isTrue(count >= 0, "Count must be a positive value");
this.count = count;
}
/**
* The timeout that {@link #close()} and {@link #destroy()} operations will wait for receiving a confirmation that the
* underlying writes have been processed.
*
* @param shutdownTimeout duration in milliseconds
*/
public void setShutdownTimeout(int shutdownTimeout) {
this.shutdownTimeout = shutdownTimeout;
}
@Override
public void afterPropertiesSet() throws Exception {
Assert.isTrue(timespan > 0 ^ count > 0, "Only one of the timespan or count must be set");
// create the stream if windowing is set, and count is higher than 1
if (timespan > 0 || count > 1) {
offsets = new SerializedSubject<>(PublishSubject.<PartitionAndOffset>create());
// window by either count or time
Observable<Observable<PartitionAndOffset>> window =
timespan > 0 ? offsets.window(timespan, TimeUnit.MILLISECONDS) : offsets.window(count);
Observable<PartitionAndOffset> maximumOffsetsByWindow = window
.flatMap(findHighestOffsetsByPartition)
.doOnCompleted(notifyObservableClosed);
subscription = maximumOffsetsByWindow.subscribe(delegateUpdateOffsetAction);
}
else {
offsets = null;
}
}
@Override
public void destroy() throws Exception {
this.flush();
this.close();
if (delegate instanceof DisposableBean) {
((DisposableBean) delegate).destroy();
}
}
@Override
public void updateOffset(Partition partition, long offset) {
if (offsets != null) {
offsets.onNext(new PartitionAndOffset(partition, offset));
}
else {
delegate.updateOffset(partition, offset);
}
}
@Override
public long getOffset(Partition partition) {
return delegate.getOffset(partition);
}
@Override
public void deleteOffset(Partition partition) {
delegate.deleteOffset(partition);
}
@Override
public void resetOffsets(Collection<Partition> partition) {
delegate.resetOffsets(partition);
}
@Override
public void close() throws IOException {
if (offsets != null) {
shutdownLatch = new CountDownLatch(1);
offsets.onCompleted();
try {
shutdownLatch.await(shutdownTimeout, TimeUnit.MILLISECONDS);
}
catch (InterruptedException e) {
// ignore
}
subscription.unsubscribe();
}
delegate.close();
}
@Override
public void flush() throws IOException {
delegate.flush();
}
private final class PartitionAndOffset {
private final Partition partition;
private final Long offset;
private PartitionAndOffset(Partition partition, Long offset) {
this.partition = partition;
this.offset = offset;
}
public Partition getPartition() {
return partition;
}
public Long getOffset() {
return offset;
}
}
private class DelegateUpdateOffsetAction implements Action1<PartitionAndOffset> {
@Override
public void call(PartitionAndOffset partitionAndOffset) {
delegate.updateOffset(partitionAndOffset.getPartition(), partitionAndOffset.getOffset());
}
}
private class NotifyObservableClosedAction implements Action0 {
@Override
public void call() {
if (shutdownLatch != null) {
shutdownLatch.countDown();
}
}
}
private class CreatePartitionAndOffsetFunction implements Func2<Partition, Long, PartitionAndOffset> {
@Override
public PartitionAndOffset call(Partition partition, Long offset) {
return new PartitionAndOffset(partition, offset);
}
}
private class GetOffsetFunction implements Func1<PartitionAndOffset, Long> {
@Override
public Long call(PartitionAndOffset partitionAndOffset) {
return partitionAndOffset.getOffset();
}
}
private class ComputeMaximumOffsetByPartitionFunction implements Func1<GroupedObservable<Partition, PartitionAndOffset>, Observable<PartitionAndOffset>> {
@Override
public Observable<PartitionAndOffset> call(GroupedObservable<Partition, PartitionAndOffset> group) {
return Observable.zip(Observable.just(group.getKey()),
MathObservable.max(group.map(getOffsetFunction)),
createPartitionAndOffsetFunction);
}
}
private class GetPartitionFunction implements Func1<PartitionAndOffset, Partition> {
@Override
public Partition call(PartitionAndOffset partitionAndOffset) {
return partitionAndOffset.getPartition();
}
}
private class FindHighestOffsetsByPartitionFunction implements Func1<Observable<PartitionAndOffset>, Observable<PartitionAndOffset>> {
@Override
public Observable<PartitionAndOffset> call(Observable<PartitionAndOffset> windowBuffer) {
return windowBuffer.groupBy(getPartition).flatMap(findHighestOffsetInPartitionGroup);
}
}
}
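The pipeline configured at the top of this class batches offset updates into windows (by time when timespan > 0, otherwise by count), keeps only the highest offset seen per partition within each window, and forwards that maximum to the delegate. A minimal stand-alone sketch of the same RxJava 1.x idea, assuming rxjava and rxjava-math on the classpath; the demo class name and the long[] {partition, offset} encoding are made up for illustration:

import java.util.concurrent.TimeUnit;

import rx.observables.MathObservable;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;

public class WindowedMaxOffsetDemo {

	public static void main(String[] args) throws InterruptedException {
		// each long[] is {partition, offset}; the SerializedSubject makes onNext() thread-safe
		SerializedSubject<long[], long[]> offsets =
				new SerializedSubject<>(PublishSubject.<long[]>create());
		offsets.window(100, TimeUnit.MILLISECONDS)
				.flatMap(window -> window
						.groupBy(pair -> pair[0])
						.flatMap(group -> MathObservable.max(group.map(pair -> pair[1]))
								.map(max -> new long[] { group.getKey(), max })))
				.subscribe(pair -> System.out.println(
						"commit partition " + pair[0] + " -> offset " + pair[1]));
		offsets.onNext(new long[] { 0, 5 });
		offsets.onNext(new long[] { 0, 7 }); // supersedes offset 5 within the same window
		offsets.onNext(new long[] { 1, 3 });
		Thread.sleep(300); // let the 100 ms window close and the maxima be emitted
		offsets.onCompleted();
	}
}

Each window emits at most one committed offset per partition, which is why updateOffset() above can be called far more often than the delegate ever is.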

View File

@@ -0,0 +1,79 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka.config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfiguration;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.kafka.support.LoggingProducerListener;
import org.springframework.integration.kafka.support.ProducerListener;
/**
* @author David Turanski
* @author Marius Bogoevici
* @author Soby Chacko
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class })
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {
@Autowired
private Codec codec;
@Autowired
private KafkaBinderConfigurationProperties configurationProperties;
@Autowired
private KafkaExtendedBindingProperties kafkaExtendedBindingProperties;
@Autowired
private ProducerListener producerListener;
@Bean
KafkaMessageChannelBinder kafkaMessageChannelBinder() {
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(configurationProperties);
kafkaMessageChannelBinder.setCodec(codec);
kafkaMessageChannelBinder.setProducerListener(producerListener);
kafkaMessageChannelBinder.setExtendedBindingProperties(kafkaExtendedBindingProperties);
return kafkaMessageChannelBinder;
}
@Bean
@ConditionalOnMissingBean(ProducerListener.class)
ProducerListener producerListener() {
return new LoggingProducerListener();
}
@Bean
KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, configurationProperties);
}
}
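Because producerListener() above is guarded by @ConditionalOnMissingBean(ProducerListener.class), an application can swap in its own listener simply by declaring one. A hedged sketch of such an override; the configuration class name is hypothetical:

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.integration.kafka.support.LoggingProducerListener;
import org.springframework.integration.kafka.support.ProducerListener;

@Configuration
public class CustomProducerListenerConfiguration {

	// The presence of this bean suppresses the binder's default producerListener()
	// bean; substitute any ProducerListener implementation here for custom
	// success/error handling.
	@Bean
	ProducerListener producerListener() {
		return new LoggingProducerListener();
	}
}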

View File

@@ -0,0 +1,20 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
*/
package org.springframework.cloud.stream.binder.kafka;

View File

@@ -0,0 +1,2 @@
kafka:\
org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfiguration
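The two lines above form the binder registration (a META-INF/spring.binders entry): the key is the binder name, the value its configuration class. An application then selects the registered binder per binding. A hypothetical application.properties sketch with a made-up destination name; the broker and ZooKeeper keys follow the spring.cloud.stream.kafka.binder prefix declared on KafkaBinderConfigurationProperties:

spring.cloud.stream.bindings.output.destination=words
spring.cloud.stream.bindings.output.binder=kafka
spring.cloud.stream.kafka.binder.brokers=localhost:9092
spring.cloud.stream.kafka.binder.zkNodes=localhost:2181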

View File

@@ -0,0 +1,820 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.springframework.beans.DirectFieldAccessor;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.PartitionCapableBinderTests;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.TestUtils;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.test.junit.kafka.KafkaTestSupport;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.kafka.core.Partition;
import org.springframework.integration.kafka.core.TopicNotFoundException;
import org.springframework.integration.kafka.support.KafkaHeaders;
import org.springframework.integration.kafka.support.ProducerConfiguration;
import org.springframework.integration.kafka.support.ProducerMetadata;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessagingException;
import org.springframework.messaging.support.GenericMessage;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
/**
* Integration tests for the {@link KafkaMessageChannelBinder}.
* @author Eric Bottard
* @author Marius Bogoevici
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
public class KafkaBinderTests extends
PartitionCapableBinderTests<KafkaTestBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
@ClassRule
public static KafkaTestSupport kafkaTestSupport = new KafkaTestSupport();
private KafkaTestBinder binder;
@Override
protected void binderBindUnbindLatency() throws InterruptedException {
Thread.sleep(500);
}
@Override
protected KafkaTestBinder getBinder() {
if (binder == null) {
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binder = new KafkaTestBinder(binderConfiguration);
}
return binder;
}
private KafkaBinderConfigurationProperties createConfigurationProperties() {
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
binderConfiguration.setBrokers(kafkaTestSupport.getBrokerAddress());
binderConfiguration.setZkNodes(kafkaTestSupport.getZkConnectString());
return binderConfiguration;
}
@Override
protected ExtendedConsumerProperties<KafkaConsumerProperties> createConsumerProperties() {
return new ExtendedConsumerProperties<>(new KafkaConsumerProperties());
}
@Override
protected ExtendedProducerProperties<KafkaProducerProperties> createProducerProperties() {
return new ExtendedProducerProperties<>(new KafkaProducerProperties());
}
@Before
public void init() {
String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
if (multiplier != null) {
timeoutMultiplier = Double.parseDouble(multiplier);
}
}
@Override
protected boolean usesExplicitRouting() {
return false;
}
@Override
protected String getClassUnderTestName() {
return CLASS_UNDER_TEST_NAME;
}
@Override
public Spy spyOn(final String name) {
throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
}
@Test
public void testDlqAndRetry() {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
QueueChannel dlqChannel = new QueueChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(3);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
dlqConsumerProperties.setMaxAttempts(1);
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
Message<?> receivedMessage = receive(dlqChannel, 3);
assertThat(receivedMessage).isNotNull();
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
dlqConsumerBinding.unbind();
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testDefaultAutoCommitOnErrorWithoutDlq() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(1);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
assertThat(handler.getLatch().await((int) (timeoutMultiplier * 1000), TimeUnit.MILLISECONDS)).isTrue();
// first attempt fails
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
Message<?> receivedMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
assertThat(receivedMessage).isNotNull();
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
consumerBinding.unbind();
// on the second attempt the message is redelivered
QueueChannel successfulInputChannel = new QueueChannel();
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
successfulInputChannel, consumerProperties);
String testMessage2Payload = "test." + UUID.randomUUID().toString();
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
moduleOutputChannel.send(testMessage2);
Message<?> firstReceived = receive(successfulInputChannel);
assertThat(firstReceived.getPayload()).isEqualTo(testMessagePayload);
Message<?> secondReceived = receive(successfulInputChannel);
assertThat(secondReceived.getPayload()).isEqualTo(testMessage2Payload);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testDefaultAutoCommitOnErrorWithDlq() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(3);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
"testGroup", moduleInputChannel, consumerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> dlqConsumerProperties = createConsumerProperties();
dlqConsumerProperties.setMaxAttempts(1);
QueueChannel dlqChannel = new QueueChannel();
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
Message<?> dlqMessage = receive(dlqChannel, 3);
assertThat(dlqMessage).isNotNull();
assertThat(dlqMessage.getPayload()).isEqualTo(testMessagePayload);
// first attempt fails
assertThat(handler.getReceivedMessages().entrySet()).hasSize(1);
Message<?> handledMessage = handler.getReceivedMessages().entrySet().iterator().next().getValue();
assertThat(handledMessage).isNotNull();
assertThat(handledMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
dlqConsumerBinding.unbind();
consumerBinding.unbind();
// on the second attempt the message is not redelivered because the DLQ is set
QueueChannel successfulInputChannel = new QueueChannel();
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
successfulInputChannel, consumerProperties);
String testMessage2Payload = "test." + UUID.randomUUID().toString();
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
moduleOutputChannel.send(testMessage2);
Message<?> receivedMessage = receive(successfulInputChannel);
assertThat(receivedMessage.getPayload()).isEqualTo(testMessage2Payload);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test(expected = IllegalArgumentException.class)
public void testValidateKafkaTopicName() {
KafkaMessageChannelBinder.validateTopicName("foo:bar");
}
@Test
public void testCompression() throws Exception {
final ProducerMetadata.CompressionType[] codecs = new ProducerMetadata.CompressionType[] {
ProducerMetadata.CompressionType.none, ProducerMetadata.CompressionType.gzip,
ProducerMetadata.CompressionType.snappy };
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaTestBinder binder = getBinder();
for (ProducerMetadata.CompressionType codec : codecs) {
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.getExtension().setCompressionType(fromProducerMetadataCompressionType(codec));
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
createConsumerProperties());
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
producerBinding.unbind();
consumerBinding.unbind();
}
}
@Test
public void testCustomPartitionCountOverridesDefaultIfLarger() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(10);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(10);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
public void testCustomPartitionCountDoesNotOverridePartitioningIfSmaller() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(6);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(6);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
public void testCustomPartitionCountOverridesPartitioningIfLarger() throws Exception {
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(4);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat((byte[]) inbound.getPayload()).containsExactly(testPayload);
Collection<Partition> partitions = binder.getCoreBinder().getConnectionFactory()
.getPartitions("foo" + uniqueBindingId + ".0");
assertThat(partitions).hasSize(5);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
@SuppressWarnings("unchecked")
public void testDefaultConsumerStartsAtEarliest() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload1);
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
}
@Test
@SuppressWarnings("unchecked")
public void testEarliest() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
binder.bindConsumer(testTopicName, "startOffsets", input1, properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
}
@Test
@SuppressWarnings("unchecked")
public void testReset() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setResetOffsets(true);
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties2 = createConsumerProperties();
properties2.getExtension().setResetOffsets(true);
properties2.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, properties2);
Message<byte[]> receivedMessage4 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage4).isNotNull();
assertThat(new String(receivedMessage4.getPayload())).isEqualTo(testPayload1);
Message<byte[]> receivedMessage5 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage5).isNotNull();
assertThat(new String(receivedMessage5.getPayload())).isEqualTo(testPayload2);
Message<byte[]> receivedMessage6 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage6).isNotNull();
assertThat(new String(receivedMessage6.getPayload())).isEqualTo(testPayload3);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
@SuppressWarnings("unchecked")
public void testResume() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> firstConsumerProperties = createConsumerProperties();
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
firstConsumerProperties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
Message<byte[]> receivedMessage3 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage3).isNotNull();
assertThat(new String(receivedMessage3.getPayload())).isEqualTo(testPayload3);
consumerBinding.unbind();
producerBinding.unbind();
}
@Test
public void testSyncProducerMetadata() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
String testTopicName = UUID.randomUUID().toString();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.getExtension().setSync(true);
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output, properties);
DirectFieldAccessor accessor = new DirectFieldAccessor(extractEndpoint(producerBinding));
MessageHandler handler = (MessageHandler) accessor.getPropertyValue("handler");
DirectFieldAccessor accessor1 = new DirectFieldAccessor(handler);
ProducerConfiguration producerConfiguration = (ProducerConfiguration) accessor1
.getPropertyValue("producerConfiguration");
assertThat(producerConfiguration.getProducerMetadata().isSync())
.withFailMessage("Kafka Sync Producer should have been enabled.").isTrue();
producerBinding.unbind();
}
@Test
public void testAutoCreateTopicsDisabledFailsIfTopicMissing() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
String testTopicName = "nonexisting" + System.currentTimeMillis();
try {
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(BinderException.class);
assertThat(e).hasMessageContaining("Topic " + testTopicName + " does not exist");
}
try {
binder.getConnectionFactory().getPartitions(testTopicName);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(TopicNotFoundException.class);
}
}
@Test
public void testAutoCreateTopicsDisabledSucceedsIfTopicExisting() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 5, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<MessageChannel> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
}
@Test
public void testAutoAddPartitionsDisabledFailsIfTopicUnderpartitioned() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
try {
binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
fail();
}
catch (Exception e) {
assertThat(e).isInstanceOf(BinderException.class);
assertThat(e)
.hasMessageContaining("The number of expected partitions was: 3, but 1 has been found instead");
}
}
@Test
public void testAutoAddPartitionsDisabledSucceedsIfTopicPartitionedCorrectly() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
Partition[] listenedPartitions = TestUtils.getPropertyValue(binding,
"endpoint.val$messageListenerContainer.partitions", Partition[].class);
assertThat(listenedPartitions).hasSize(2);
assertThat(listenedPartitions).contains(new Partition(testTopicName, 2), new Partition(testTopicName, 5));
Collection<Partition> partitions = binder.getConnectionFactory().getPartitions(testTopicName);
assertThat(partitions).hasSize(6);
binding.unbind();
}
@Test
public void testAutoCreateTopicsEnabledSucceeds() throws Exception {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoCreateTopics(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
String testTopicName = "nonexisting" + System.currentTimeMillis();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
}
@Test
public void testPartitionCountNotReduced() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 6, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setAutoAddPartitions(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
kafkaTestSupport.getZkClient());
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
}
@Test
public void testPartitionCountIncreasedIfAutoAddPartitionsSet() throws Exception {
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(kafkaTestSupport.getZkClient(), testTopicName, 1, 1, new Properties());
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
configurationProperties.setMinPartitionCount(6);
configurationProperties.setAutoAddPartitions(true);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
binding.unbind();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(testTopicName,
kafkaTestSupport.getZkClient());
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
}
KafkaProducerProperties.CompressionType fromProducerMetadataCompressionType(
ProducerMetadata.CompressionType compressionType) {
switch (compressionType) {
case snappy: return KafkaProducerProperties.CompressionType.snappy;
case gzip: return KafkaProducerProperties.CompressionType.gzip;
default: return KafkaProducerProperties.CompressionType.none;
}
}
private static final class FailingInvocationCountingMessageHandler implements MessageHandler {
private int invocationCount;
private final LinkedHashMap<Long, Message<?>> receivedMessages = new LinkedHashMap<>();
private final CountDownLatch latch;
private FailingInvocationCountingMessageHandler(int latchSize) {
latch = new CountDownLatch(latchSize);
}
private FailingInvocationCountingMessageHandler() {
this(1);
}
@Override
public void handleMessage(Message<?> message) throws MessagingException {
invocationCount++;
Long offset = message.getHeaders().get(KafkaHeaders.OFFSET, Long.class);
// using the offset as the key ensures that we don't store duplicate
// messages on retry
if (!receivedMessages.containsKey(offset)) {
receivedMessages.put(offset, message);
latch.countDown();
}
throw new RuntimeException();
}
public LinkedHashMap<Long, Message<?>> getReceivedMessages() {
return receivedMessages;
}
public int getInvocationCount() {
return invocationCount;
}
public CountDownLatch getLatch() {
return latch;
}
}
}

View File

@@ -0,0 +1,91 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.List;
import org.springframework.cloud.stream.binder.AbstractTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.test.junit.kafka.TestKafkaCluster;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.codec.kryo.KryoRegistrar;
import org.springframework.integration.codec.kryo.PojoCodec;
import org.springframework.integration.kafka.support.LoggingProducerListener;
import org.springframework.integration.kafka.support.ProducerListener;
import org.springframework.integration.tuple.TupleKryoRegistrar;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;
/**
* Test support class for {@link KafkaMessageChannelBinder}. Creates a binder that uses a
* test {@link TestKafkaCluster kafka cluster}.
* @author Eric Bottard
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Soby Chacko
*/
public class KafkaTestBinder extends
AbstractTestBinder<KafkaMessageChannelBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
public KafkaTestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
try {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration);
binder.setCodec(getCodec());
ProducerListener producerListener = new LoggingProducerListener();
binder.setProducerListener(producerListener);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
this.setBinder(binder);
}
catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public void cleanup() {
// do nothing - the rule will take care of that
}
private static Codec getCodec() {
return new PojoCodec(new TupleRegistrar());
}
private static class TupleRegistrar implements KryoRegistrar {
private final TupleKryoRegistrar delegate = new TupleKryoRegistrar();
@Override
public void registerTypes(Kryo kryo) {
this.delegate.registerTypes(kryo);
}
@Override
public List<Registration> getRegistrations() {
return this.delegate.getRegistrations();
}
}
}

View File

@@ -0,0 +1,38 @@
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import org.springframework.cloud.stream.binder.PartitionKeyExtractorStrategy;
import org.springframework.cloud.stream.binder.PartitionSelectorStrategy;
import org.springframework.messaging.Message;
/**
* @author Marius Bogoevici
*/
public class RawKafkaPartitionTestSupport implements PartitionKeyExtractorStrategy, PartitionSelectorStrategy {
@Override
public int selectPartition(Object key, int divisor) {
return ((byte[]) key)[0] % divisor;
}
@Override
public Object extractKey(Message<?> message) {
return message.getPayload();
}
}
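The selector above routes a raw (headerless) payload by taking its first byte modulo the partition count, which is what the partitioned raw-mode tests below rely on to spread payloads across consumers. A hypothetical spot-check of that arithmetic:

public class PartitionSelectionCheck {

	public static void main(String[] args) {
		RawKafkaPartitionTestSupport support = new RawKafkaPartitionTestSupport();
		// in raw mode the extracted key is the payload itself; first byte modulo divisor
		System.out.println(support.selectPartition(new byte[] { 2 }, 6)); // 2 % 6 == 2
		System.out.println(support.selectPartition(new byte[] { 7 }, 6)); // 7 % 6 == 1
	}
}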

View File

@@ -0,0 +1,264 @@
/*
* Copyright 2015-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Arrays;
import org.junit.Ignore;
import org.junit.Test;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.support.GenericMessage;
/**
* @author Marius Bogoevici
* @author David Turanski
* @author Gary Russell
* @author Mark Fisher
*/
public class RawModeKafkaBinderTests extends KafkaBinderTests {
@Test
@Override
public void testPartitionedModuleJava() throws Exception {
KafkaTestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setHeaderMode(HeaderMode.raw);
properties.setPartitionKeyExtractorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionCount(6);
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, properties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(0);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0J");
Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1J");
Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2J");
Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);
output.send(new GenericMessage<>(new byte[] { (byte) 0 }));
output.send(new GenericMessage<>(new byte[] { (byte) 1 }));
output.send(new GenericMessage<>(new byte[] { (byte) 2 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
@Test
@Override
public void testPartitionedModuleSpEL() throws Exception {
KafkaTestBinder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
properties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload[0]"));
properties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
properties.setPartitionCount(6);
properties.setHeaderMode(HeaderMode.raw);
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, properties);
try {
AbstractEndpoint endpoint = (AbstractEndpoint) extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint)).contains("part.0-' + headers['partition']");
}
catch (UnsupportedOperationException ignored) {
}
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceIndex(0);
consumerProperties.setInstanceCount(3);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0S");
Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1S");
Binding<MessageChannel> input1Binding = binder.bindConsumer("part.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2S");
Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);
Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] { 2 })
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
output.send(message2);
output.send(new GenericMessage<>(new byte[] { 1 }));
output.send(new GenericMessage<>(new byte[] { 0 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0], ((byte[]) receive1.getPayload())[0],
((byte[]) receive2.getPayload())[0])).containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
@Test
@Override
public void testSendAndReceive() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
consumerProperties);
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
// Let the consumer actually bind to the producer before sending a msg
binderBindUnbindLatency();
moduleOutputChannel.send(message);
Message<?> inbound = receive(moduleInputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
producerBinding.unbind();
consumerBinding.unbind();
}
// Ignored, since raw mode does not support headers
@Test
@Override
@Ignore
public void testSendAndReceiveNoOriginalContentType() throws Exception {
}
@Test
public void testSendAndReceiveWithExplicitConsumerGroup() {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
// Test pub/sub by emulating how StreamPlugin handles taps
QueueChannel module1InputChannel = new QueueChannel();
QueueChannel module2InputChannel = new QueueChannel();
QueueChannel module3InputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.0", "test", module1InputChannel,
consumerProperties);
// A new module is using the tap as an input channel
String fooTapName = "baz.0";
Binding<MessageChannel> input2Binding = binder.bindConsumer(fooTapName, "tap1", module2InputChannel,
consumerProperties);
// Another new module is using tap as an input channel
String barTapName = "baz.0";
Binding<MessageChannel> input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel,
consumerProperties);
Message<?> message = MessageBuilder.withPayload("foo".getBytes()).build();
boolean success = false;
boolean retried = false;
while (!success) {
moduleOutputChannel.send(message);
Message<?> inbound = receive(module1InputChannel);
assertThat(inbound).isNotNull();
assertThat(new String((byte[]) inbound.getPayload())).isEqualTo("foo");
Message<?> tapped1 = receive(module2InputChannel);
Message<?> tapped2 = receive(module3InputChannel);
if (tapped1 == null || tapped2 == null) {
// listener may not have started
assertThat(retried).withFailMessage("Failed to receive tap after retry").isFalse();
retried = true;
continue;
}
success = true;
assertThat(new String((byte[]) tapped1.getPayload())).isEqualTo("foo");
assertThat(new String((byte[]) tapped2.getPayload())).isEqualTo("foo");
}
// unbind one tap stream
input3Binding.unbind();
Message<?> message2 = MessageBuilder.withPayload("bar".getBytes()).build();
moduleOutputChannel.send(message2);
// other tap still receives messages
Message<?> tapped = receive(module2InputChannel);
assertThat(tapped).isNotNull();
// removed tap does not
assertThat(receive(module3InputChannel)).isNull();
// re-subscribed tap does receive the message
input3Binding = binder.bindConsumer(barTapName, "tap2", module3InputChannel, createConsumerProperties());
assertThat(receive(module3InputChannel)).isNotNull();
// clean up
input1Binding.unbind();
input2Binding.unbind();
input3Binding.unbind();
producerBinding.unbind();
assertThat(extractEndpoint(input1Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input2Binding).isRunning()).isFalse();
assertThat(extractEndpoint(input3Binding).isRunning()).isFalse();
assertThat(extractEndpoint(producerBinding).isRunning()).isFalse();
}
}

View File

@@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
<packaging>jar</packaging>
<name>spring-cloud-stream-binder-kafka-common</name>
<description>Kafka binder common classes</description>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-autoconfigure</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-codec</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,9 +16,6 @@
package org.springframework.cloud.stream.binder.kafka.config;
import java.util.HashMap;
import java.util.Map;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.util.StringUtils;
@@ -31,13 +28,11 @@ import org.springframework.util.StringUtils;
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
public class KafkaBinderConfigurationProperties {
private String[] zkNodes = new String[] {"localhost"};
private Map<String, String> configuration = new HashMap<>();
private String[] zkNodes = new String[] { "localhost" };
private String defaultZkPort = "2181";
private String[] brokers = new String[] {"localhost"};
private String[] brokers = new String[] { "localhost" };
private String defaultBrokerPort = "9092";
@@ -77,6 +72,12 @@ public class KafkaBinderConfigurationProperties {
private int queueSize = 8192;
private String consumerGroup;
public String getConsumerGroup() {
return consumerGroup;
}
public String getZkConnectionString() {
return toConnectionString(this.zkNodes, this.defaultZkPort);
}
@@ -86,7 +87,7 @@ public class KafkaBinderConfigurationProperties {
}
public String[] getHeaders() {
return this.headers;
return headers;
}
public int getOffsetUpdateTimeWindow() {
@@ -102,7 +103,7 @@ public class KafkaBinderConfigurationProperties {
}
public String[] getZkNodes() {
return this.zkNodes;
return zkNodes;
}
public void setZkNodes(String... zkNodes) {
@@ -114,7 +115,7 @@ public class KafkaBinderConfigurationProperties {
}
public String[] getBrokers() {
return this.brokers;
return brokers;
}
public void setBrokers(String... brokers) {
@@ -176,7 +177,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getMaxWait() {
return this.maxWait;
return maxWait;
}
public void setMaxWait(int maxWait) {
@@ -184,7 +185,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getRequiredAcks() {
return this.requiredAcks;
return requiredAcks;
}
public void setRequiredAcks(int requiredAcks) {
@@ -192,7 +193,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getReplicationFactor() {
return this.replicationFactor;
return replicationFactor;
}
public void setReplicationFactor(int replicationFactor) {
@@ -200,7 +201,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getFetchSize() {
return this.fetchSize;
return fetchSize;
}
public void setFetchSize(int fetchSize) {
@@ -208,7 +209,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getMinPartitionCount() {
return this.minPartitionCount;
return minPartitionCount;
}
public void setMinPartitionCount(int minPartitionCount) {
@@ -216,7 +217,7 @@ public class KafkaBinderConfigurationProperties {
}
public int getQueueSize() {
return this.queueSize;
return queueSize;
}
public void setQueueSize(int queueSize) {
@@ -224,7 +225,7 @@ public class KafkaBinderConfigurationProperties {
}
public boolean isAutoCreateTopics() {
return this.autoCreateTopics;
return autoCreateTopics;
}
public void setAutoCreateTopics(boolean autoCreateTopics) {
@@ -232,7 +233,7 @@ public class KafkaBinderConfigurationProperties {
}
public boolean isAutoAddPartitions() {
return this.autoAddPartitions;
return autoAddPartitions;
}
public void setAutoAddPartitions(boolean autoAddPartitions) {
@@ -240,18 +241,15 @@ public class KafkaBinderConfigurationProperties {
}
public int getSocketBufferSize() {
return this.socketBufferSize;
return socketBufferSize;
}
public void setSocketBufferSize(int socketBufferSize) {
this.socketBufferSize = socketBufferSize;
}
public Map<String, String> getConfiguration() {
return configuration;
public void setConsumerGroup(String consumerGroup) {
this.consumerGroup = consumerGroup;
}
public void setConfiguration(Map<String, String> configuration) {
this.configuration = configuration;
}
}

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
package org.springframework.cloud.stream.binder.kafka.config;
/**
* @author Marius Bogoevici

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,20 +14,13 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.HashMap;
import java.util.Map;
package org.springframework.cloud.stream.binder.kafka.config;
/**
* @author Marius Bogoevici
*
* <p>Thanks to Laszlo Szabo for providing the initial patch for generic property support.</p>
*/
public class KafkaConsumerProperties {
private boolean autoRebalanceEnabled = true;
private boolean autoCommitOffset = true;
private Boolean autoCommitOnError;
@@ -40,10 +33,8 @@ public class KafkaConsumerProperties {
private int recoveryInterval = 5000;
private Map<String, String> configuration = new HashMap<>();
public boolean isAutoCommitOffset() {
return this.autoCommitOffset;
return autoCommitOffset;
}
public void setAutoCommitOffset(boolean autoCommitOffset) {
@@ -51,7 +42,7 @@ public class KafkaConsumerProperties {
}
public boolean isResetOffsets() {
return this.resetOffsets;
return resetOffsets;
}
public void setResetOffsets(boolean resetOffsets) {
@@ -59,7 +50,7 @@ public class KafkaConsumerProperties {
}
public StartOffset getStartOffset() {
return this.startOffset;
return startOffset;
}
public void setStartOffset(StartOffset startOffset) {
@@ -67,7 +58,7 @@ public class KafkaConsumerProperties {
}
public boolean isEnableDlq() {
return this.enableDlq;
return enableDlq;
}
public void setEnableDlq(boolean enableDlq) {
@@ -75,7 +66,7 @@ public class KafkaConsumerProperties {
}
public Boolean getAutoCommitOnError() {
return this.autoCommitOnError;
return autoCommitOnError;
}
public void setAutoCommitOnError(Boolean autoCommitOnError) {
@@ -83,21 +74,13 @@ public class KafkaConsumerProperties {
}
public int getRecoveryInterval() {
return this.recoveryInterval;
return recoveryInterval;
}
public void setRecoveryInterval(int recoveryInterval) {
this.recoveryInterval = recoveryInterval;
}
public boolean isAutoRebalanceEnabled() {
return this.autoRebalanceEnabled;
}
public void setAutoRebalanceEnabled(boolean autoRebalanceEnabled) {
this.autoRebalanceEnabled = autoRebalanceEnabled;
}
public enum StartOffset {
earliest(-2L), latest(-1L);
@@ -108,15 +91,7 @@ public class KafkaConsumerProperties {
}
public long getReferencePoint() {
return this.referencePoint;
return referencePoint;
}
}
public Map<String, String> getConfiguration() {
return this.configuration;
}
public void setConfiguration(Map<String, String> configuration) {
this.configuration = configuration;
}
}

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,7 +14,7 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
package org.springframework.cloud.stream.binder.kafka.config;
import java.util.HashMap;
import java.util.Map;
@@ -26,13 +26,12 @@ import org.springframework.cloud.stream.binder.ExtendedBindingProperties;
* @author Marius Bogoevici
*/
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties
implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {
public class KafkaExtendedBindingProperties implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {
private Map<String, KafkaBindingProperties> bindings = new HashMap<>();
public Map<String, KafkaBindingProperties> getBindings() {
return this.bindings;
return bindings;
}
public void setBindings(Map<String, KafkaBindingProperties> bindings) {
@@ -41,8 +40,8 @@ public class KafkaExtendedBindingProperties
@Override
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getConsumer() != null) {
return this.bindings.get(channelName).getConsumer();
if (bindings.containsKey(channelName) && bindings.get(channelName).getConsumer() != null) {
return bindings.get(channelName).getConsumer();
}
else {
return new KafkaConsumerProperties();
@@ -51,8 +50,8 @@ public class KafkaExtendedBindingProperties
@Override
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getProducer() != null) {
return this.bindings.get(channelName).getProducer();
if (bindings.containsKey(channelName) && bindings.get(channelName).getProducer() != null) {
return bindings.get(channelName).getProducer();
}
else {
return new KafkaProducerProperties();

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -14,10 +14,7 @@
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.util.HashMap;
import java.util.Map;
package org.springframework.cloud.stream.binder.kafka.config;
import javax.validation.constraints.NotNull;
@@ -28,16 +25,14 @@ public class KafkaProducerProperties {
private int bufferSize = 16384;
private CompressionType compressionType = CompressionType.none;
private CompressionType compressionType = CompressionType.none;
private boolean sync;
private int batchTimeout;
private Map<String, String> configuration = new HashMap<>();
public int getBufferSize() {
return this.bufferSize;
return bufferSize;
}
public void setBufferSize(int bufferSize) {
@@ -46,7 +41,7 @@ public class KafkaProducerProperties {
@NotNull
public CompressionType getCompressionType() {
return this.compressionType;
return compressionType;
}
public void setCompressionType(CompressionType compressionType) {
@@ -54,7 +49,7 @@ public class KafkaProducerProperties {
}
public boolean isSync() {
return this.sync;
return sync;
}
public void setSync(boolean sync) {
@@ -62,21 +57,13 @@ public class KafkaProducerProperties {
}
public int getBatchTimeout() {
return this.batchTimeout;
return batchTimeout;
}
public void setBatchTimeout(int batchTimeout) {
this.batchTimeout = batchTimeout;
}
public Map<String, String> getConfiguration() {
return this.configuration;
}
public void setConfiguration(Map<String, String> configuration) {
this.configuration = configuration;
}
public enum CompressionType {
none,
gzip,

View File

@@ -0,0 +1,20 @@
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package contains an implementation of the {@link org.springframework.cloud.stream.binder.Binder} for Kafka.
*/
package org.springframework.cloud.stream.binder.kafka;

View File

@@ -1,337 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.M1</version>
</parent>
<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
<name>spring-cloud-stream-binder-kafka-docs</name>
<description>Spring Cloud Stream Kafka Binder Docs</description>
<properties>
<main.basedir>${basedir}/..</main.basedir>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
<version>1.1.0.M1</version>
</dependency>
</dependencies>
<profiles>
<profile>
<id>full</id>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>xml-maven-plugin</artifactId>
<version>1.0</version>
<executions>
<execution>
<goals>
<goal>transform</goal>
</goals>
</execution>
</executions>
<configuration>
<transformationSets>
<transformationSet>
<dir>${project.build.directory}/external-resources</dir>
<stylesheet>src/main/xslt/dependencyVersions.xsl</stylesheet>
<fileMappers>
<fileMapper implementation="org.codehaus.plexus.components.io.filemappers.FileExtensionMapper">
<targetExtension>.adoc</targetExtension>
</fileMapper>
</fileMappers>
<outputDir>${project.build.directory}/generated-resources</outputDir>
</transformationSet>
</transformationSets>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
<phase>prepare-package</phase>
<configuration>
<includeDependencySources>true</includeDependencySources>
<dependencySourceIncludes>
<dependencySourceInclude>${project.groupId}:*</dependencySourceInclude>
</dependencySourceIncludes>
<attach>false</attach>
<quiet>true</quiet>
<stylesheetfile>${basedir}/src/main/javadoc/spring-javadoc.css</stylesheetfile>
<links>
<link>http://docs.spring.io/spring-framework/docs/${spring.version}/javadoc-api/</link>
<link>http://docs.spring.io/spring-shell/docs/current/api/</link>
</links>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.asciidoctor</groupId>
<artifactId>asciidoctor-maven-plugin</artifactId>
<version>1.5.0</version>
<executions>
<execution>
<id>generate-docbook</id>
<phase>generate-resources</phase>
<goals>
<goal>process-asciidoc</goal>
</goals>
<configuration>
<sourceDocumentName>index.adoc</sourceDocumentName>
<backend>docbook5</backend>
<doctype>book</doctype>
<attributes>
<docinfo>true</docinfo>
<spring-cloud-stream-binder-kafka-version>${project.version}</spring-cloud-stream-binder-kafka-version>
<spring-cloud-stream-binder-kafka-docs-version>${project.version}</spring-cloud-stream-binder-kafka-docs-version>
<github-tag>${github-tag}</github-tag>
</attributes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>com.agilejava.docbkx</groupId>
<artifactId>docbkx-maven-plugin</artifactId>
<version>2.0.15</version>
<configuration>
<sourceDirectory>${basedir}/target/generated-docs</sourceDirectory>
<includes>index.xml</includes>
<xincludeSupported>true</xincludeSupported>
<chunkedOutput>false</chunkedOutput>
<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
<useExtensions>1</useExtensions>
<highlightSource>1</highlightSource>
<highlightXslthlConfig>${basedir}/src/main/docbook/xsl/xslthl-config.xml</highlightXslthlConfig>
</configuration>
<dependencies>
<dependency>
<groupId>net.sf.xslthl</groupId>
<artifactId>xslthl</artifactId>
<version>2.1.0</version>
</dependency>
<dependency>
<groupId>net.sf.docbook</groupId>
<artifactId>docbook-xml</artifactId>
<version>5.0-all</version>
<classifier>resources</classifier>
<type>zip</type>
<scope>runtime</scope>
</dependency>
</dependencies>
<executions>
<execution>
<id>html-single</id>
<goals>
<goal>generate-html</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<htmlCustomization>${basedir}/src/main/docbook/xsl/html-singlepage.xsl</htmlCustomization>
<targetDirectory>${basedir}/target/docbook/htmlsingle</targetDirectory>
<postProcess>
<copy todir="${basedir}/target/contents/reference/htmlsingle">
<fileset dir="${basedir}/target/docbook/htmlsingle">
<include name="**/*.html" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/htmlsingle">
<fileset dir="${basedir}/src/main/docbook">
<include name="**/*.css" />
<include name="**/*.png" />
<include name="**/*.gif" />
<include name="**/*.jpg" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/htmlsingle">
<fileset dir="${basedir}/src/main/asciidoc">
<include name="images/*.css" />
<include name="images/*.png" />
<include name="images/*.gif" />
<include name="images/*.jpg" />
</fileset>
</copy>
</postProcess>
</configuration>
</execution>
<execution>
<id>html</id>
<goals>
<goal>generate-html</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<htmlCustomization>${basedir}/src/main/docbook/xsl/html-multipage.xsl</htmlCustomization>
<targetDirectory>${basedir}/target/docbook/html</targetDirectory>
<chunkedOutput>true</chunkedOutput>
<postProcess>
<copy todir="${basedir}/target/contents/reference/html">
<fileset dir="${basedir}/target/docbook/html">
<include name="**/*.html" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/html">
<fileset dir="${basedir}/src/main/docbook">
<include name="**/*.css" />
<include name="**/*.png" />
<include name="**/*.gif" />
<include name="**/*.jpg" />
</fileset>
</copy>
<copy todir="${basedir}/target/contents/reference/html">
<fileset dir="${basedir}/src/main/asciidoc">
<include name="images/*.css" />
<include name="images/*.png" />
<include name="images/*.gif" />
<include name="images/*.jpg" />
</fileset>
</copy>
</postProcess>
</configuration>
</execution>
<execution>
<id>pdf</id>
<goals>
<goal>generate-pdf</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
<targetDirectory>${basedir}/target/docbook/pdf</targetDirectory>
<postProcess>
<copy todir="${basedir}/target/contents/reference">
<fileset dir="${basedir}/target/docbook">
<include name="**/*.pdf" />
</fileset>
</copy>
<move file="${basedir}/target/contents/reference/pdf/index.pdf" tofile="${basedir}/target/contents/reference/pdf/spring-cloud-stream-reference.pdf" />
</postProcess>
</configuration>
</execution>
<execution>
<id>epub</id>
<goals>
<goal>generate-epub3</goal>
</goals>
<phase>generate-resources</phase>
<configuration>
<epubCustomization>${basedir}/src/main/docbook/xsl/epub.xsl</epubCustomization>
<targetDirectory>${basedir}/target/docbook/epub</targetDirectory>
<postProcess>
<copy todir="${basedir}/target/contents/reference/epub">
<fileset dir="${basedir}/target/docbook">
<include name="**/*.epub" />
</fileset>
</copy>
<move file="${basedir}/target/contents/reference/epub/index.epub" tofile="${basedir}/target/contents/reference/epub/spring-cloud-stream-reference.epub" />
</postProcess>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<dependencies>
<dependency>
<groupId>ant-contrib</groupId>
<artifactId>ant-contrib</artifactId>
<version>1.0b3</version>
<exclusions>
<exclusion>
<groupId>ant</groupId>
<artifactId>ant</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant-nodeps</artifactId>
<version>1.8.1</version>
</dependency>
<dependency>
<groupId>org.tigris.antelope</groupId>
<artifactId>antelopetasks</artifactId>
<version>3.2.10</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>package-and-attach-docs-zip</id>
<phase>package</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<zip destfile="${project.build.directory}/${project.artifactId}-${project.version}.zip">
<zipfileset src="${project.build.directory}/${project.artifactId}-${project.version}-javadoc.jar" prefix="api" />
<fileset dir="${project.build.directory}/contents" />
</zip>
</target>
</configuration>
</execution>
<execution>
<id>setup-maven-properties</id>
<phase>validate</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<exportAntProperties>true</exportAntProperties>
<target>
<taskdef resource="net/sf/antcontrib/antcontrib.properties" />
<taskdef name="stringutil" classname="ise.antelope.tasks.StringUtilTask" />
<var name="version-type" value="${project.version}" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp=".*\.(.*)" replace="\1" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp="(M)\d+" replace="MILESTONE" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp="(RC)\d+" replace="MILESTONE" />
<propertyregex property="version-type" override="true" input="${version-type}" regexp="BUILD-(.*)" replace="SNAPSHOT" />
<stringutil string="${version-type}" property="spring-boot-repo">
<lowercase />
</stringutil>
<var name="github-tag" value="v${project.version}" />
<propertyregex property="github-tag" override="true" input="${github-tag}" regexp=".*SNAPSHOT" replace="master" />
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>attach-zip</id>
<goals>
<goal>attach-artifact</goal>
</goals>
<configuration>
<artifacts>
<artifact>
<file>${project.build.directory}/${project.artifactId}-${project.version}.zip</file>
<type>zip;zip.type=docs;zip.deployed=false</type>
</artifact>
</artifacts>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

View File

@@ -1,2 +0,0 @@
*.html
*.css

View File

@@ -1,20 +0,0 @@
require 'asciidoctor'
require 'erb'
guard 'shell' do
watch(/.*\.adoc$/) {|m|
Asciidoctor.render_file('index.adoc', \
:in_place => true, \
:safe => Asciidoctor::SafeMode::UNSAFE, \
:attributes=> { \
'source-highlighter' => 'prettify', \
'icons' => 'font', \
'linkcss'=> 'true', \
'copycss' => 'true', \
'doctype' => 'book'})
}
end
guard 'livereload' do
watch(%r{^.+\.(css|js|html)$})
end

View File

@@ -1,5 +0,0 @@
[[appendix]]
= Appendices

View File

@@ -1,78 +0,0 @@
[[building]]
== Building
:jdkversion: 1.7
=== Basic Compile and Test
To build the source you will need to install JDK {jdkversion}.
The build uses the Maven wrapper so you don't have to install a specific
version of Maven. To enable the tests, you should have a Kafka server (0.9 or above) running
before building. See below for more information on running the servers.
The main build command is
----
$ ./mvnw clean install
----
You can also add '-DskipTests' if you like, to avoid running the tests.
NOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command
in place of `./mvnw` in the examples below. If you do that you also
might need to add `-P spring` if your local Maven settings do not
contain repository declarations for spring pre-release artifacts.
NOTE: Be aware that you might need to increase the amount of memory
available to Maven by setting a `MAVEN_OPTS` environment variable with
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
the `.mvn` configuration, so if you find you have to do it to make a
build succeed, please raise a ticket to get the settings added to
source control.
The projects that require middleware generally include a
`docker-compose.yml`, so consider using
http://compose.docker.io/[Docker Compose] to run the middleware servers
in Docker containers.
=== Documentation
There is a "full" profile that will generate documentation.
=== Working with the code
If you don't have an IDE preference we would recommend that you use
http://www.springsource.com/developer/sts[Spring Tools Suite] or
http://eclipse.org[Eclipse] when working with the code. We use the
http://eclipse.org/m2e/[m2eclipse] eclipse plugin for Maven support. Other IDEs and tools
should also work without issue.
==== Importing into eclipse with m2eclipse
We recommend the http://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
marketplace".
Unfortunately m2e does not yet support Maven 3.3, so once the projects
are imported into Eclipse you will also need to tell m2eclipse to use
the `.settings.xml` file for the projects. If you do not do this you
may see many different errors related to the POMs in the
projects. Open your Eclipse preferences, expand the Maven
preferences, and select User Settings. In the User Settings field
click Browse and navigate to the Spring Cloud project you imported
selecting the `.settings.xml` file in that project. Click Apply and
then OK to save the preference changes.
NOTE: Alternatively you can copy the repository settings from https://github.com/spring-cloud/spring-cloud-build/blob/master/.settings.xml[`.settings.xml`] into your own `~/.m2/settings.xml`.
==== Importing into eclipse without m2eclipse
If you prefer not to use m2eclipse you can generate eclipse project metadata using the
following command:
[indent=0]
----
$ ./mvnw eclipse:eclipse
----
The generated eclipse projects can be imported by selecting `import existing projects`
from the `file` menu.

View File

@@ -1,42 +0,0 @@
[[contributing]]
== Contributing
Spring Cloud is released under the non-restrictive Apache 2.0 license,
and follows a very standard Github development process, using Github
tracker for issues and merging pull requests into master. If you want
to contribute even something trivial please do not hesitate, but
follow the guidelines below.
=== Sign the Contributor License Agreement
Before we accept a non-trivial patch or pull request we will need you to sign the
https://support.springsource.com/spring_committer_signup[contributor's agreement].
Signing the contributor's agreement does not grant anyone commit rights to the main
repository, but it does mean that we can accept your contributions, and you will get an
author credit if we do. Active contributors might be asked to join the core team, and
given the ability to merge pull requests.
=== Code Conventions and Housekeeping
None of these is essential for a pull request, but they will all help. They can also be
added after the original pull request but before a merge.
* Use the Spring Framework code format conventions. If you use Eclipse
you can import formatter settings using the
`eclipse-code-formatter.xml` file from the
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
Cloud Build] project. If using IntelliJ, you can use the
http://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
Plugin] to import the same file.
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
`@author` tag identifying you, and preferably at least a paragraph on what the class is
for.
* Add the ASF license header comment to all new `.java` files (copy from existing files
in the project).
* Add yourself as an `@author` to the .java files that you modify substantially (more
than cosmetic changes).
* Add some Javadocs and, if you change the namespace, some XSD doc elements.
* A few unit tests would help a lot as well -- someone has to do it.
* If no-one else is using your branch, please rebase it against the current master (or
other target branch in the main project).
* When writing a commit message please follow http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
message (where XXXX is the issue number).

Binary file not shown.


View File

@@ -1,14 +0,0 @@
<productname>Spring Cloud Stream Kafka Binder</productname>
<releaseinfo>{spring-cloud-stream-binder-kafka-version}</releaseinfo>
<copyright>
<year>2013-2016</year>
<holder>Pivotal Software, Inc.</holder>
</copyright>
<legalnotice>
<para>
Copies of this document may be made for your own use and for distribution to
others, provided that you do not charge any fee for such copies and further
provided that each copy contains this Copyright Notice, whether distributed in
print or electronically.
</para>
</legalnotice>

View File

@@ -1,33 +0,0 @@
[[spring-cloud-stream-binder-kafka-reference]]
= Spring Cloud Stream Kafka Binder Reference Guide
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein
:doctype: book
:toc:
:toclevels: 4
:source-highlighter: prettify
:numbered:
:icons: font
:hide-uri-scheme:
:spring-cloud-stream-binder-kafka-repo: snapshot
:github-tag: master
:spring-cloud-stream-binder-kafka-docs-version: current
:spring-cloud-stream-binder-kafka-docs: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
:spring-cloud-stream-binder-kafka-docs-current: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
:github-raw: http://raw.github.com/{github-repo}/{github-tag}
:github-code: http://github.com/{github-repo}/tree/{github-tag}
:github-wiki: http://github.com/{github-repo}/wiki
:github-master-code: http://github.com/{github-repo}/tree/master
:sc-ext: java
// ======================================================================================
= Reference Guide
include::overview.adoc[]
= Appendices
[appendix]
include::building.adoc[]
include::contributing.adoc[]
// ======================================================================================

View File

@@ -1,229 +0,0 @@
[partintro]
--
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
It contains information about its design, usage, and configuration options, as well as on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
--
== Usage
To use the Apache Kafka binder, add it to your Spring Cloud Stream application using the following Maven coordinates:
[source,xml]
----
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
----
Alternatively, you can use the Spring Cloud Stream Kafka Starter:
[source,xml]
----
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
</dependency>
----
== Apache Kafka Binder Overview
A simplified diagram of how the Apache Kafka binder operates can be seen below.
.Kafka Binder
image::kafka-binder.png[width=300,scaledwidth="50%"]
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
The consumer group maps directly to the same Apache Kafka concept.
Partitioning also maps directly to Apache Kafka partitions.
== Configuration Options
This section contains the configuration options used by the Apache Kafka binder.
For common configuration options and properties pertaining to binders, refer to the https://github.com/spring-cloud/spring-cloud-stream/blob/master/spring-cloud-stream-docs/src/main/asciidoc/spring-cloud-stream-overview.adoc#configuration-options[core docs].
=== Kafka Binder Properties
spring.cloud.stream.kafka.binder.brokers::
A list of brokers to which the Kafka binder will connect.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultBrokerPort::
`brokers` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the broker list.
+
Default: `9092`.
spring.cloud.stream.kafka.binder.zkNodes::
A list of ZooKeeper nodes to which the Kafka binder can connect.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultZkPort::
`zkNodes` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the node list.
+
Default: `2181`.
spring.cloud.stream.kafka.binder.configuration::
Key/value map of client properties (for both producers and consumers) passed to all clients created by the binder.
Because these properties are used by both producers and consumers, usage should be restricted to common properties, especially security settings.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.headers::
The list of custom headers that will be transported by the binder.
+
Default: empty.
spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
The frequency, in milliseconds, with which offsets are saved.
Ignored if `0`.
+
Default: `10000`.
spring.cloud.stream.kafka.binder.offsetUpdateCount::
The frequency, in number of updates, with which consumed offsets are persisted.
Ignored if `0`.
Mutually exclusive with `offsetUpdateTimeWindow`.
+
Default: `0`.
spring.cloud.stream.kafka.binder.requiredAcks::
The number of required acks on the broker.
+
Default: `1`.
spring.cloud.stream.kafka.binder.minPartitionCount::
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
The global minimum number of partitions that the binder will configure on topics on which it produces/consumes data.
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount` * `concurrency` settings of the producer (if either is larger).
+
Default: `1`.
spring.cloud.stream.kafka.binder.replicationFactor::
The replication factor of auto-created topics if `autoCreateTopics` is active.
+
Default: `1`.
spring.cloud.stream.kafka.binder.autoCreateTopics::
If set to `true`, the binder will create new topics automatically.
If set to `false`, the binder will rely on the topics being already configured.
In the latter case, if the topics do not exist, the binder will fail to start.
Of note, this setting is independent of the `auto.topic.create.enable` setting of the broker and it does not influence it: if the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
+
Default: `true`.
spring.cloud.stream.kafka.binder.autoAddPartitions::
If set to `true`, the binder will add new partitions if required.
If set to `false`, the binder will rely on the partition count of the topic being already configured.
If the partition count of the target topic is smaller than the expected value, the binder will fail to start.
+
Default: `false`.
spring.cloud.stream.kafka.binder.socketBufferSize::
Size (in bytes) of the socket buffer to be used by the Kafka consumers.
+
Default: `2097152`.
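As an illustrative sketch (host names and values below are placeholders, not recommendations), several of the binder properties described above can be combined in `application.properties`:
[source]
----
spring.cloud.stream.kafka.binder.brokers=kafka1,kafka2:9093
spring.cloud.stream.kafka.binder.zkNodes=zookeeper1
spring.cloud.stream.kafka.binder.defaultZkPort=2181
spring.cloud.stream.kafka.binder.minPartitionCount=2
spring.cloud.stream.kafka.binder.autoAddPartitions=true
----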
=== Kafka Consumer Properties
The following properties are available for Kafka consumers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
autoCommitOffset::
Whether to autocommit offsets when a message has been processed.
If set to `false`, an `Acknowledgment` header will be available in the message headers for late acknowledgment (see the sketch after this list).
+
Default: `true`.
autoCommitOnError::
Effective only if `autoCommitOffset` is set to `true`.
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages; this allows a stream to automatically replay from the last successfully processed message in case of persistent failures.
If set to `true`, it will always auto-commit (if auto-commit is enabled).
If not set (default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ, and not committing them otherwise.
+
Default: not set.
recoveryInterval::
The interval between connection recovery attempts, in milliseconds.
+
Default: `5000`.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by `startOffset`.
+
Default: `false`.
startOffset::
The starting offset for new groups, or when `resetOffsets` is `true`.
Allowed values: `earliest`, `latest`.
+
Default: null (equivalent to `earliest`).
enableDlq::
When set to `true`, it enables DLQ behavior for the consumer.
Messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
+
Default: `false`.
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
+
Default: Empty map.
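The following is a minimal sketch of late acknowledgment with `autoCommitOffset=false`. The header name `kafka_acknowledgment` and the package of the `Acknowledgment` type vary across binder versions, so treat both as assumptions to verify against the version in use:
[source,java]
----
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.messaging.Sink;
import org.springframework.integration.annotation.ServiceActivator;
// assumed package for this binder generation; adjust for your version
import org.springframework.integration.kafka.listener.Acknowledgment;
import org.springframework.messaging.Message;

@EnableBinding(Sink.class)
public class ManualAckConsumer {

    @ServiceActivator(inputChannel = Sink.INPUT)
    public void handle(Message<byte[]> message) {
        // ... process the payload ...
        // "kafka_acknowledgment" is an assumed header name for this sketch
        Acknowledgment ack = (Acknowledgment) message.getHeaders().get("kafka_acknowledgment");
        if (ack != null) {
            ack.acknowledge(); // commit the offset only after successful processing
        }
    }
}
----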
=== Kafka Producer Properties
The following properties are available for Kafka producers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
bufferSize::
Upper limit, in bytes, of how much data the Kafka producer will attempt to batch before sending.
+
Default: `16384`.
sync::
Whether the producer is synchronous.
+
Default: `false`.
batchTimeout::
How long the producer will wait before sending in order to allow more messages to accumulate in the same batch.
(Normally the producer does not wait at all, and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
configuration::
Map with a key/value pair containing generic Kafka producer properties.
+
Default: Empty map.
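For example (the channel name `output` and all values are illustrative only), per-binding producer settings are applied with the prefix above, and generic Kafka producer properties go under the `configuration` key:
[source]
----
spring.cloud.stream.kafka.bindings.output.producer.sync=true
spring.cloud.stream.kafka.bindings.output.producer.batchTimeout=100
spring.cloud.stream.kafka.bindings.output.producer.bufferSize=32768
spring.cloud.stream.kafka.bindings.output.producer.configuration.max.request.size=1048576
----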
[NOTE]
====
The Kafka binder will use the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with `minPartitionCount`; the maximum of the two is the value used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value will be used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), then the binder will fail to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions will be added.
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` and `partitionCount`), the existing partition count will be used.
For example, with `minPartitionCount=5` and `partitionCount=3`, topics are provisioned with 5 partitions, while an existing topic that already has 8 partitions is left at 8.
====
=== Usage examples
In this section, we illustrate the use of the above properties for specific scenarios.
==== Example: security configuration
Apache Kafka 0.9 supports secure connections between client and brokers.
To take advantage of this feature, follow the guidelines in the http://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 http://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.
For example, for setting `security.protocol` to `SASL_SSL`, set:
[source]
----
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
----
All the other security properties can be set in a similar manner.
When using Kerberos, follow the instructions in the http://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.
At the time of this release, the JAAS and (optionally) krb5 file locations must be set for Spring Cloud Stream applications by using system properties.
Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos.
[source]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
--spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
--spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
--spring.cloud.stream.bindings.input.destination=stream.ticktock \
--spring.cloud.stream.kafka.binder.clientConfiguration.security.protocol=SASL_PLAINTEXT
----
[NOTE]
====
Exercise caution when using `autoCreateTopics` and `autoAddPartitions` with Kerberos.
Applications typically use principals that do not have administrative rights in Kafka and ZooKeeper, so relying on Spring Cloud Stream to create or modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively using Kafka tooling.
====

View File

@@ -1,35 +0,0 @@
/*
code highlight CSS resembling the Eclipse IDE default color scheme
@author Costin Leau
*/
.hl-keyword {
color: #7F0055;
font-weight: bold;
}
.hl-comment {
color: #3F5F5F;
font-style: italic;
}
.hl-multiline-comment {
color: #3F5FBF;
font-style: italic;
}
.hl-tag {
color: #3F7F7F;
}
.hl-attribute {
color: #7F007F;
}
.hl-value {
color: #2A00FF;
}
.hl-string {
color: #2A00FF;
}

View File

@@ -1,9 +0,0 @@
@IMPORT url("manual.css");
body.firstpage {
background: url("../images/background.png") no-repeat center top;
}
div.part h1 {
border-top: none;
}

View File

@@ -1,6 +0,0 @@
@IMPORT url("manual.css");
body {
background: url("../images/background.png") no-repeat center top;
}

View File

@@ -1,344 +0,0 @@
@IMPORT url("highlight.css");
html {
padding: 0pt;
margin: 0pt;
}
body {
color: #333333;
margin: 15px 30px;
font-family: Helvetica, Arial, Freesans, Clean, Sans-serif;
line-height: 1.6;
-webkit-font-smoothing: antialiased;
}
code {
font-size: 16px;
font-family: Consolas, "Liberation Mono", Courier, monospace;
}
:not(a)>code {
color: #6D180B;
}
:not(pre)>code {
background-color: #F2F2F2;
border: 1px solid #CCCCCC;
border-radius: 4px;
padding: 1px 3px 0;
text-shadow: none;
white-space: nowrap;
}
body>*:first-child {
margin-top: 0 !important;
}
div {
margin: 0pt;
}
hr {
border: 1px solid #CCCCCC;
background: #CCCCCC;
}
h1,h2,h3,h4,h5,h6 {
color: #000000;
cursor: text;
font-weight: bold;
margin: 30px 0 10px;
padding: 0;
}
h1,h2,h3 {
margin: 40px 0 10px;
}
h1 {
margin: 70px 0 30px;
padding-top: 20px;
}
div.part h1 {
border-top: 1px dotted #CCCCCC;
}
h1,h1 code {
font-size: 32px;
}
h2,h2 code {
font-size: 24px;
}
h3,h3 code {
font-size: 20px;
}
h4,h1 code,h5,h5 code,h6,h6 code {
font-size: 18px;
}
div.book,div.chapter,div.appendix,div.part,div.preface {
min-width: 300px;
max-width: 1200px;
margin: 0 auto;
}
p.releaseinfo {
font-weight: bold;
margin-bottom: 40px;
margin-top: 40px;
}
div.authorgroup {
line-height: 1;
}
p.copyright {
line-height: 1;
margin-bottom: -5px;
}
.legalnotice p {
font-style: italic;
font-size: 14px;
line-height: 1;
}
div.titlepage+p,div.titlepage+p {
margin-top: 0;
}
pre {
line-height: 1.0;
color: black;
}
a {
color: #4183C4;
text-decoration: none;
}
p {
margin: 15px 0;
text-align: left;
}
ul,ol {
padding-left: 30px;
}
li p {
margin: 0;
}
div.table {
margin: 1em;
padding: 0.5em;
text-align: center;
}
div.table table,div.informaltable table {
display: table;
width: 100%;
}
div.table td {
padding-left: 7px;
padding-right: 7px;
}
.sidebar {
line-height: 1.4;
padding: 0 20px;
background-color: #F8F8F8;
border: 1px solid #CCCCCC;
border-radius: 3px 3px 3px 3px;
}
.sidebar p.title {
color: #6D180B;
}
pre.programlisting,pre.screen {
font-size: 15px;
padding: 6px 10px;
background-color: #F8F8F8;
border: 1px solid #CCCCCC;
border-radius: 3px 3px 3px 3px;
clear: both;
overflow: auto;
line-height: 1.4;
font-family: Consolas, "Liberation Mono", Courier, monospace;
}
table {
border-collapse: collapse;
border-spacing: 0;
border: 1px solid #DDDDDD !important;
border-radius: 4px !important;
border-collapse: separate !important;
line-height: 1.6;
}
table thead {
background: #F5F5F5;
}
table tr {
border: none;
border-bottom: none;
}
table th {
font-weight: bold;
}
table th,table td {
border: none !important;
padding: 6px 13px;
}
table tr:nth-child(2n) {
background-color: #F8F8F8;
}
td p {
margin: 0 0 15px 0;
}
div.table-contents td p {
margin: 0;
}
div.important *,div.note *,div.tip *,div.warning *,div.navheader *,div.navfooter *,div.calloutlist *
{
border: none !important;
background: none !important;
margin: 0;
}
div.important p,div.note p,div.tip p,div.warning p {
color: #6F6F6F;
line-height: 1.6;
}
div.important code,div.note code,div.tip code,div.warning code {
background-color: #F2F2F2 !important;
border: 1px solid #CCCCCC !important;
border-radius: 4px !important;
padding: 1px 3px 0 !important;
text-shadow: none !important;
white-space: nowrap !important;
}
.note th,.tip th,.warning th {
display: none;
}
.note tr:first-child td,.tip tr:first-child td,.warning tr:first-child td
{
border-right: 1px solid #CCCCCC !important;
padding-top: 10px;
}
div.calloutlist p,div.calloutlist td {
padding: 0;
margin: 0;
}
div.calloutlist>table>tbody>tr>td:first-child {
padding-left: 10px;
width: 30px !important;
}
div.important,div.note,div.tip,div.warning {
margin-left: 0px !important;
margin-right: 20px !important;
margin-top: 20px;
margin-bottom: 20px;
padding-top: 10px;
padding-bottom: 10px;
}
div.toc {
line-height: 1.2;
}
dl,dt {
margin-top: 1px;
margin-bottom: 0;
}
div.toc>dl>dt {
font-size: 32px;
font-weight: bold;
margin: 30px 0 10px 0;
display: block;
}
div.toc>dl>dd>dl>dt {
font-size: 24px;
font-weight: bold;
margin: 20px 0 10px 0;
display: block;
}
div.toc>dl>dd>dl>dd>dl>dt {
font-weight: bold;
font-size: 20px;
margin: 10px 0 0 0;
}
tbody.footnotes * {
border: none !important;
}
div.footnote p {
margin: 0;
line-height: 1;
}
div.footnote p sup {
margin-right: 6px;
vertical-align: middle;
}
div.navheader {
border-bottom: 1px solid #CCCCCC;
}
div.navfooter {
border-top: 1px solid #CCCCCC;
}
.title {
margin-left: -1em;
padding-left: 1em;
}
.title>a {
position: absolute;
visibility: hidden;
display: block;
font-size: 0.85em;
margin-top: 0.05em;
margin-left: -1em;
vertical-align: text-top;
color: black;
}
.title>a:before {
content: "\00A7";
}
.title:hover>a,.title>a:hover,.title:hover>a:hover {
visibility: visible;
}
.title:focus>a,.title>a:focus,.title:focus>a:focus {
outline: 0;
}

Binary file not shown.


Binary file not shown.


Binary file not shown.


Binary file not shown.


Binary file not shown.


Binary file not shown.


Binary file not shown.


View File

@@ -1,45 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:d="http://docbook.org/ns/docbook"
exclude-result-prefixes="xslthl d"
version='1.0'>
<!-- Extensions -->
<xsl:param name="use.extensions">1</xsl:param>
<xsl:param name="tablecolumns.extension">0</xsl:param>
<xsl:param name="callout.extensions">1</xsl:param>
<!-- Graphics -->
<xsl:param name="admon.graphics" select="1"/>
<xsl:param name="admon.graphics.path">images/</xsl:param>
<xsl:param name="admon.graphics.extension">.png</xsl:param>
<!-- Table of Contents -->
<xsl:param name="generate.toc">book toc,title</xsl:param>
<xsl:param name="toc.section.depth">3</xsl:param>
<!-- Hide revhistory -->
<xsl:template match="d:revhistory" mode="titlepage.mode"/>
</xsl:stylesheet>

View File

@@ -1,31 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:d="http://docbook.org/ns/docbook"
exclude-result-prefixes="xslthl d"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="common.xsl"/>
</xsl:stylesheet>

View File

@@ -1,73 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="html.xsl"/>
<xsl:param name="html.stylesheet">css/manual-multipage.css</xsl:param>
<xsl:param name="chunk.section.depth">'5'</xsl:param>
<xsl:param name="use.id.as.filename">'1'</xsl:param>
<!-- Replace chunk-element-content from chunk-common to add firstpage class to body -->
<xsl:template name="chunk-element-content">
<xsl:param name="prev"/>
<xsl:param name="next"/>
<xsl:param name="nav.context"/>
<xsl:param name="content">
<xsl:apply-imports/>
</xsl:param>
<xsl:call-template name="user.preroot"/>
<html>
<xsl:call-template name="html.head">
<xsl:with-param name="prev" select="$prev"/>
<xsl:with-param name="next" select="$next"/>
</xsl:call-template>
<body>
<xsl:if test="count($prev) = 0">
<xsl:attribute name="class">firstpage</xsl:attribute>
</xsl:if>
<xsl:call-template name="body.attributes"/>
<xsl:call-template name="user.header.navigation"/>
<xsl:call-template name="header.navigation">
<xsl:with-param name="prev" select="$prev"/>
<xsl:with-param name="next" select="$next"/>
<xsl:with-param name="nav.context" select="$nav.context"/>
</xsl:call-template>
<xsl:call-template name="user.header.content"/>
<xsl:copy-of select="$content"/>
<xsl:call-template name="user.footer.content"/>
<xsl:call-template name="footer.navigation">
<xsl:with-param name="prev" select="$prev"/>
<xsl:with-param name="next" select="$next"/>
<xsl:with-param name="nav.context" select="$nav.context"/>
</xsl:call-template>
<xsl:call-template name="user.footer.navigation"/>
</body>
</html>
<xsl:value-of select="$chunk.append"/>
</xsl:template>
</xsl:stylesheet>

View File

@@ -1,30 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="html.xsl"/>
<xsl:param name="html.stylesheet">css/manual-singlepage.css</xsl:param>
</xsl:stylesheet>

View File

@@ -1,141 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:d="http://docbook.org/ns/docbook"
exclude-result-prefixes="xslthl"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
<xsl:import href="common.xsl"/>
<!-- Only use scaling in FO -->
<xsl:param name="ignore.image.scaling">1</xsl:param>
<!-- Use code syntax highlighting -->
<xsl:param name="highlight.source">1</xsl:param>
<!-- Activate Graphics -->
<xsl:param name="callout.graphics" select="1" />
<xsl:param name="callout.defaultcolumn">120</xsl:param>
<xsl:param name="callout.graphics.path">images/callouts/</xsl:param>
<xsl:param name="callout.graphics.extension">.png</xsl:param>
<xsl:param name="table.borders.with.css" select="1"/>
<xsl:param name="html.stylesheet.type">text/css</xsl:param>
<xsl:param name="admonition.title.properties">text-align: left</xsl:param>
<!-- Leave image paths as relative when navigating XInclude -->
<xsl:param name="keep.relative.image.uris" select="1"/>
<!-- Label Chapters and Sections (numbering) -->
<xsl:param name="chapter.autolabel" select="1"/>
<xsl:param name="section.autolabel" select="1"/>
<xsl:param name="section.autolabel.max.depth" select="2"/>
<xsl:param name="section.label.includes.component.label" select="1"/>
<xsl:param name="table.footnote.number.format" select="'1'"/>
<!-- Remove "Chapter" from the Chapter titles... -->
<xsl:param name="local.l10n.xml" select="document('')"/>
<l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
<l:l10n language="en">
<l:context name="title-numbered">
<l:template name="chapter" text="%n.&#160;%t"/>
<l:template name="section" text="%n&#160;%t"/>
</l:context>
</l:l10n>
</l:i18n>
<!-- Syntax Highlighting -->
<xsl:template match='xslthl:keyword' mode="xslthl">
<span class="hl-keyword"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:comment' mode="xslthl">
<span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:oneline-comment' mode="xslthl">
<span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:multiline-comment' mode="xslthl">
<span class="hl-multiline-comment"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:tag' mode="xslthl">
<span class="hl-tag"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:attribute' mode="xslthl">
<span class="hl-attribute"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:value' mode="xslthl">
<span class="hl-value"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<xsl:template match='xslthl:string' mode="xslthl">
<span class="hl-string"><xsl:apply-templates mode="xslthl"/></span>
</xsl:template>
<!-- Custom Title Page -->
<xsl:template match="d:author" mode="titlepage.mode">
<xsl:if test="name(preceding-sibling::*[1]) = 'author'">
<xsl:text>, </xsl:text>
</xsl:if>
<span class="{name(.)}">
<xsl:call-template name="person.name"/>
<xsl:apply-templates mode="titlepage.mode" select="./contrib"/>
</span>
</xsl:template>
<xsl:template match="d:authorgroup" mode="titlepage.mode">
<div class="{name(.)}">
<h2>Authors</h2>
<xsl:apply-templates mode="titlepage.mode"/>
</div>
</xsl:template>
<!-- Title Links -->
<xsl:template name="anchor">
<xsl:param name="node" select="."/>
<xsl:param name="conditional" select="1"/>
<xsl:variable name="id">
<xsl:call-template name="object.id">
<xsl:with-param name="object" select="$node"/>
</xsl:call-template>
</xsl:variable>
<xsl:if test="$conditional = 0 or $node/@id or $node/@xml:id">
<xsl:element name="a">
<xsl:attribute name="name">
<xsl:value-of select="$id"/>
</xsl:attribute>
<xsl:attribute name="href">
<xsl:text>#</xsl:text>
<xsl:value-of select="$id"/>
</xsl:attribute>
</xsl:element>
</xsl:if>
</xsl:template>
</xsl:stylesheet>
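Two customizations above are easiest to read from their output. The anchor template emits a self-referencing link in front of any title whose element carries an id, and the xslthl templates wrap highlighted tokens in styled spans. A sketch with invented ids and content:
<a name="getting-started" href="#getting-started"></a>
<h2>1&#160;Getting Started</h2>
<span class="hl-keyword">public</span> <span class="hl-string">"a string"</span> <span class="hl-comment">// a comment</span>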


@@ -1,582 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:d="http://docbook.org/ns/docbook"
xmlns:fo="http://www.w3.org/1999/XSL/Format"
xmlns:xslthl="http://xslthl.sf.net"
xmlns:xlink='http://www.w3.org/1999/xlink'
xmlns:exsl="http://exslt.org/common"
exclude-result-prefixes="exsl xslthl d xlink"
version='1.0'>
<xsl:import href="urn:docbkx:stylesheet"/>
<xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
<xsl:import href="common.xsl"/>
<!-- Extensions -->
<xsl:param name="fop1.extensions" select="1"/>
<xsl:param name="paper.type" select="'A4'"/>
<xsl:param name="page.margin.top" select="'1cm'"/>
<xsl:param name="region.before.extent" select="'1cm'"/>
<xsl:param name="body.margin.top" select="'1.5cm'"/>
<xsl:param name="body.margin.bottom" select="'1.5cm'"/>
<xsl:param name="region.after.extent" select="'1cm'"/>
<xsl:param name="page.margin.bottom" select="'1cm'"/>
<xsl:param name="title.margin.left" select="'0cm'"/>
<!-- allow break across pages -->
<xsl:attribute-set name="formal.object.properties">
<xsl:attribute name="keep-together.within-column">auto</xsl:attribute>
</xsl:attribute-set>
<!-- use color links and sensible rendering -->
<xsl:attribute-set name="xref.properties">
<xsl:attribute name="text-decoration">underline</xsl:attribute>
<xsl:attribute name="color">#204060</xsl:attribute>
</xsl:attribute-set>
<xsl:param name="ulink.show" select="0"></xsl:param>
<xsl:param name="ulink.footnotes" select="0"></xsl:param>
<!-- TITLE PAGE -->
<xsl:template name="book.titlepage.recto">
<fo:block>
<fo:table table-layout="fixed" width="175mm">
<fo:table-column column-width="175mm"/>
<fo:table-body>
<fo:table-row>
<fo:table-cell text-align="center">
<fo:block>
<fo:external-graphic src="images/logo.png" width="240px"
height="auto" content-width="scale-to-fit"
content-height="scale-to-fit"
content-type="content-type:image/png" text-align="center"
/>
</fo:block>
<fo:block font-family="Helvetica" font-size="20pt" font-weight="bold" padding="10mm">
<xsl:value-of select="d:info/d:title"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="14pt" padding-before="2mm">
<xsl:value-of select="d:info/d:subtitle"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="14pt" padding="2mm">
<xsl:value-of select="d:info/d:releaseinfo"/>
</fo:block>
</fo:table-cell>
</fo:table-row>
<fo:table-row>
<fo:table-cell text-align="center">
<fo:block font-family="Helvetica" font-size="14pt" padding="5mm">
<xsl:value-of select="d:info/d:pubdate"/>
</fo:block>
</fo:table-cell>
</fo:table-row>
<fo:table-row>
<fo:table-cell text-align="center">
<fo:block font-family="Helvetica" font-size="10pt" padding="10mm">
<xsl:for-each select="d:info/d:authorgroup/d:author">
<xsl:if test="position() > 1">
<xsl:text>, </xsl:text>
</xsl:if>
<xsl:value-of select="."/>
</xsl:for-each>
</fo:block>
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm">
<xsl:value-of select="d:info/d:pubdate"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm" padding-before="25em">
<xsl:text>Copyright &#xA9; </xsl:text><xsl:value-of select="d:info/d:copyright"/>
</fo:block>
<fo:block font-family="Helvetica" font-size="8pt" padding="1mm">
<xsl:value-of select="d:info/d:legalnotice"/>
</fo:block>
</fo:table-cell>
</fo:table-row>
</fo:table-body>
</fo:table>
</fo:block>
</xsl:template>
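The recto template above fills the PDF cover entirely from the book's DocBook info element. A minimal sketch of the fields it reads (all values invented):
<info xmlns="http://docbook.org/ns/docbook">
  <title>Reference Guide</title>
  <subtitle>An Example Subtitle</subtitle>
  <releaseinfo>1.0.0</releaseinfo>
  <pubdate>2016-01-01</pubdate>
  <authorgroup>
    <author><personname>Jane Doe</personname></author>
  </authorgroup>
  <copyright><year>2016</year><holder>Example Holder</holder></copyright>
  <legalnotice><para>An example legal notice.</para></legalnotice>
</info>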
<!-- Prevent blank pages in output -->
<xsl:template name="book.titlepage.before.verso">
</xsl:template>
<xsl:template name="book.titlepage.verso">
</xsl:template>
<xsl:template name="book.titlepage.separator">
</xsl:template>
<!-- HEADER -->
<!-- More space in the center header for long text -->
<xsl:attribute-set name="header.content.properties">
<xsl:attribute name="font-family">
<xsl:value-of select="$body.font.family"/>
</xsl:attribute>
<xsl:attribute name="margin-left">-5em</xsl:attribute>
<xsl:attribute name="margin-right">-5em</xsl:attribute>
<xsl:attribute name="font-size">8pt</xsl:attribute>
</xsl:attribute-set>
<xsl:template name="header.content">
<xsl:param name="pageclass" select="''"/>
<xsl:param name="sequence" select="''"/>
<xsl:param name="position" select="''"/>
<xsl:param name="gentext-key" select="''"/>
<xsl:variable name="Version">
<xsl:choose>
<xsl:when test="//d:title">
<xsl:value-of select="//d:title"/><xsl:text> </xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>please define title in your docbook file!</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<xsl:when test="$sequence='blank'">
<xsl:choose>
<xsl:when test="$position='center'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$pageclass='titlepage'">
</xsl:when>
<xsl:when test="$position='center'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- FOOTER-->
<xsl:attribute-set name="footer.content.properties">
<xsl:attribute name="font-family">
<xsl:value-of select="$body.font.family"/>
</xsl:attribute>
<xsl:attribute name="font-size">8pt</xsl:attribute>
</xsl:attribute-set>
<xsl:template name="footer.content">
<xsl:param name="pageclass" select="''"/>
<xsl:param name="sequence" select="''"/>
<xsl:param name="position" select="''"/>
<xsl:param name="gentext-key" select="''"/>
<xsl:variable name="Version">
<xsl:choose>
<xsl:when test="//d:releaseinfo">
<xsl:value-of select="//d:releaseinfo"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:variable name="Title">
<xsl:choose>
<xsl:when test="//d:productname">
<xsl:value-of select="//d:productname"/><xsl:text> </xsl:text>
</xsl:when>
<xsl:otherwise>
<xsl:text>please define title in your docbook file!</xsl:text>
</xsl:otherwise>
</xsl:choose>
</xsl:variable>
<xsl:choose>
<xsl:when test="$sequence='blank'">
<xsl:choose>
<xsl:when test="$double.sided != 0 and $position = 'left'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$double.sided = 0 and $position = 'center'">
</xsl:when>
<xsl:otherwise>
<fo:page-number/>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<xsl:when test="$pageclass='titlepage'">
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='left'">
<fo:page-number/>
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='right'">
<fo:page-number/>
</xsl:when>
<xsl:when test="$double.sided = 0 and $position='right'">
<fo:page-number/>
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='left'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='right'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$double.sided = 0 and $position='left'">
<xsl:value-of select="$Version"/>
</xsl:when>
<xsl:when test="$position='center'">
<xsl:value-of select="$Title"/>
</xsl:when>
<xsl:otherwise>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
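Because double.sided is set to 0 further down, the branches above collapse to a fixed single-sided layout; each page footer renders, left to right, as:
releaseinfo (left)    |    productname (center)    |    page number (right)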
<xsl:template match="processing-instruction('hard-pagebreak')">
<fo:block break-before='page'/>
</xsl:template>
<!-- PAPER & PAGE SIZE -->
<!-- Paper type, no headers on blank pages, no double sided printing -->
<xsl:param name="double.sided">0</xsl:param>
<xsl:param name="headers.on.blank.pages">0</xsl:param>
<xsl:param name="footers.on.blank.pages">0</xsl:param>
<!-- FONTS & STYLES -->
<xsl:param name="hyphenate">false</xsl:param>
<!-- Default Font size -->
<xsl:param name="body.font.family">Helvetica</xsl:param>
<xsl:param name="body.font.master">10</xsl:param>
<xsl:param name="body.font.small">8</xsl:param>
<xsl:param name="title.font.family">Helvetica</xsl:param>
<!-- Line height in body text -->
<xsl:param name="line-height">1.4</xsl:param>
<!-- Chapter title size -->
<xsl:attribute-set name="chapter.titlepage.recto.style">
<xsl:attribute name="text-align">left</xsl:attribute>
<xsl:attribute name="font-weight">bold</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.8"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
</xsl:attribute-set>
<!-- The stock XSL FO templates hardcode the chapter title font-size;
override the template so only the attribute-set above applies... -->
<xsl:template match="d:title" mode="chapter.titlepage.recto.auto.mode">
<fo:block xmlns:fo="http://www.w3.org/1999/XSL/Format"
xsl:use-attribute-sets="chapter.titlepage.recto.style">
<xsl:call-template name="component.title">
<xsl:with-param name="node" select="ancestor-or-self::d:chapter[1]"/>
</xsl:call-template>
</fo:block>
</xsl:template>
<!-- Sections 1, 2 and 3 titles have a small bump factor and padding -->
<xsl:attribute-set name="section.title.level1.properties">
<xsl:attribute name="space-before.optimum">0.6em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.6em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.6em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.5"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="section.title.level2.properties">
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.25"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="section.title.level3.properties">
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 1.0"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="section.title.level4.properties">
<xsl:attribute name="space-before.optimum">0.3em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.3em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.3em</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master * 0.9"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<!-- TABLES -->
<!-- Some padding inside tables -->
<xsl:attribute-set name="table.cell.padding">
<xsl:attribute name="padding-left">4pt</xsl:attribute>
<xsl:attribute name="padding-right">4pt</xsl:attribute>
<xsl:attribute name="padding-top">4pt</xsl:attribute>
<xsl:attribute name="padding-bottom">4pt</xsl:attribute>
</xsl:attribute-set>
<!-- Only hairlines as frame and cell borders in tables -->
<xsl:param name="table.frame.border.thickness">0.1pt</xsl:param>
<xsl:param name="table.cell.border.thickness">0.1pt</xsl:param>
<!-- LABELS -->
<!-- Label Chapters and Sections (numbering) -->
<xsl:param name="chapter.autolabel" select="1"/>
<xsl:param name="section.autolabel" select="1"/>
<xsl:param name="section.autolabel.max.depth" select="1"/>
<xsl:param name="section.label.includes.component.label" select="1"/>
<xsl:param name="table.footnote.number.format" select="'1'"/>
<!-- PROGRAMLISTINGS -->
<!-- Verbatim text formatting (programlistings) -->
<xsl:attribute-set name="monospace.verbatim.properties">
<xsl:attribute name="font-size">7pt</xsl:attribute>
<xsl:attribute name="wrap-option">wrap</xsl:attribute>
<xsl:attribute name="keep-together.within-column">1</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="verbatim.properties">
<xsl:attribute name="space-before.minimum">1em</xsl:attribute>
<xsl:attribute name="space-before.optimum">1em</xsl:attribute>
<xsl:attribute name="space-before.maximum">1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
<xsl:attribute name="border-color">#444444</xsl:attribute>
<xsl:attribute name="border-style">solid</xsl:attribute>
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
<xsl:attribute name="padding-top">0.5em</xsl:attribute>
<xsl:attribute name="padding-left">0.5em</xsl:attribute>
<xsl:attribute name="padding-right">0.5em</xsl:attribute>
<xsl:attribute name="padding-bottom">0.5em</xsl:attribute>
<xsl:attribute name="margin-left">0.5em</xsl:attribute>
<xsl:attribute name="margin-right">0.5em</xsl:attribute>
</xsl:attribute-set>
<!-- Shade (background) programlistings -->
<xsl:param name="shade.verbatim">1</xsl:param>
<xsl:attribute-set name="shade.verbatim.style">
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="list.block.spacing">
<xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="example.properties">
<xsl:attribute name="space-before.minimum">0.5em</xsl:attribute>
<xsl:attribute name="space-before.optimum">0.5em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.5em</xsl:attribute>
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="sidebar.properties">
<xsl:attribute name="border-color">#444444</xsl:attribute>
<xsl:attribute name="border-style">solid</xsl:attribute>
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
</xsl:attribute-set>
<!-- TITLE INFORMATION FOR FIGURES, EXAMPLES ETC. -->
<xsl:attribute-set name="formal.title.properties" use-attribute-sets="normal.para.spacing">
<xsl:attribute name="font-weight">normal</xsl:attribute>
<xsl:attribute name="font-style">italic</xsl:attribute>
<xsl:attribute name="font-size">
<xsl:value-of select="$body.font.master"/>
<xsl:text>pt</xsl:text>
</xsl:attribute>
<xsl:attribute name="hyphenate">false</xsl:attribute>
<xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>
<!-- CALLOUTS -->
<!-- don't use images for callouts -->
<xsl:param name="callout.graphics">0</xsl:param>
<xsl:param name="callout.unicode">1</xsl:param>
<!-- Place callout marks at this column in annotated areas -->
<xsl:param name="callout.defaultcolumn">90</xsl:param>
<!-- MISC -->
<!-- Placement of titles -->
<xsl:param name="formal.title.placement">
figure after
example after
equation before
table before
procedure before
</xsl:param>
<!-- Format Variable Lists as Blocks (prevents horizontal overflow) -->
<xsl:param name="variablelist.as.blocks">1</xsl:param>
<xsl:param name="body.start.indent">0pt</xsl:param>
<!-- Remove "Chapter" from the Chapter titles... -->
<xsl:param name="local.l10n.xml" select="document('')"/>
<l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
<l:l10n language="en">
<l:context name="title-numbered">
<l:template name="chapter" text="%n.&#160;%t"/>
<l:template name="section" text="%n&#160;%t"/>
</l:context>
<l:context name="title">
<l:template name="example" text="Example&#160;%n&#160;%t"/>
</l:context>
</l:l10n>
</l:i18n>
<!-- admon -->
<xsl:param name="admon.graphics" select="0"/>
<xsl:attribute-set name="nongraphical.admonition.properties">
<xsl:attribute name="margin-left">0.1em</xsl:attribute>
<xsl:attribute name="margin-right">2em</xsl:attribute>
<xsl:attribute name="border-left-width">.75pt</xsl:attribute>
<xsl:attribute name="border-left-style">solid</xsl:attribute>
<xsl:attribute name="border-left-color">#5c5c4f</xsl:attribute>
<xsl:attribute name="padding-left">0.5em</xsl:attribute>
<xsl:attribute name="space-before.optimum">1.5em</xsl:attribute>
<xsl:attribute name="space-before.minimum">1.5em</xsl:attribute>
<xsl:attribute name="space-before.maximum">1.5em</xsl:attribute>
<xsl:attribute name="space-after.optimum">1.5em</xsl:attribute>
<xsl:attribute name="space-after.minimum">1.5em</xsl:attribute>
<xsl:attribute name="space-after.maximum">1.5em</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="admonition.title.properties">
<xsl:attribute name="font-size">10pt</xsl:attribute>
<xsl:attribute name="font-weight">bold</xsl:attribute>
<xsl:attribute name="hyphenate">false</xsl:attribute>
<xsl:attribute name="keep-with-next.within-column">always</xsl:attribute>
<xsl:attribute name="margin-left">0</xsl:attribute>
</xsl:attribute-set>
<xsl:attribute-set name="admonition.properties">
<xsl:attribute name="space-before.optimum">0em</xsl:attribute>
<xsl:attribute name="space-before.minimum">0em</xsl:attribute>
<xsl:attribute name="space-before.maximum">0em</xsl:attribute>
</xsl:attribute-set>
<!-- Asciidoc -->
<xsl:template match="processing-instruction('asciidoc-br')">
<fo:block/>
</xsl:template>
<xsl:template match="processing-instruction('asciidoc-hr')">
<fo:block space-after="1em">
<fo:leader leader-pattern="rule" rule-thickness="0.5pt" rule-style="solid" leader-length.minimum="100%"/>
</fo:block>
</xsl:template>
<xsl:template match="processing-instruction('asciidoc-pagebreak')">
<fo:block break-after='page'/>
</xsl:template>
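These three processing instructions are what Asciidoctor's DocBook converter emits for hard line breaks, thematic breaks, and page breaks; without templates for them they would be silently ignored in the FO output. As a hedged mapping (assuming stock Asciidoctor output):
a trailing +  (hard line break)   ->  <?asciidoc-br?>
'''           (thematic break)    ->  <?asciidoc-hr?>
<<<           (page break)        ->  <?asciidoc-pagebreak?>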
<!-- SYNTAX HIGHLIGHT -->
<xsl:template match='xslthl:keyword' mode="xslthl">
<fo:inline font-weight="bold" color="#7F0055"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:string' mode="xslthl">
<fo:inline font-weight="bold" font-style="italic" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:comment' mode="xslthl">
<fo:inline font-style="italic" color="#3F5FBF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:tag' mode="xslthl">
<fo:inline font-weight="bold" color="#3F7F7F"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:attribute' mode="xslthl">
<fo:inline font-weight="bold" color="#7F007F"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
<xsl:template match='xslthl:value' mode="xslthl">
<fo:inline font-weight="bold" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>
</xsl:stylesheet>


@@ -1,23 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<xslthl-config>
<highlighter id="java" file="./xslthl/java-hl.xml" />
<highlighter id="groovy" file="./xslthl/java-hl.xml" />
<highlighter id="html" file="./xslthl/html-hl.xml" />
<highlighter id="ini" file="./xslthl/ini-hl.xml" />
<highlighter id="php" file="./xslthl/php-hl.xml" />
<highlighter id="c" file="./xslthl/c-hl.xml" />
<highlighter id="cpp" file="./xslthl/cpp-hl.xml" />
<highlighter id="csharp" file="./xslthl/csharp-hl.xml" />
<highlighter id="python" file="./xslthl/python-hl.xml" />
<highlighter id="ruby" file="./xslthl/ruby-hl.xml" />
<highlighter id="perl" file="./xslthl/perl-hl.xml" />
<highlighter id="javascript" file="./xslthl/javascript-hl.xml" />
<highlighter id="bash" file="./xslthl/bourne-hl.xml" />
<highlighter id="css" file="./xslthl/css-hl.xml" />
<highlighter id="sql" file="./xslthl/sql2003-hl.xml" />
<highlighter id="asciidoc" file="./xslthl/asciidoc-hl.xml" />
<highlighter id="properties" file="./xslthl/properties-hl.xml" />
<highlighter id="json" file="./xslthl/json-hl.xml" />
<highlighter id="yaml" file="./xslthl/yaml-hl.xml" />
<namespace prefix="xslthl" uri="http://xslthl.sf.net" />
</xslthl-config>
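This file maps the language attribute of a listing to a highlighter definition; the xslthl library locates it at transform time through the xslthl.config system property. A minimal usage sketch (content invented):
<programlisting language="java">public class Hello { }</programlisting>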


@@ -1,41 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for AsciiDoc files
-->
<highlighters>
<highlighter type="multiline-comment">
<start>////</start>
<end>////</end>
</highlighter>
<highlighter type="oneline-comment">
<start>//</start>
<solitary/>
</highlighter>
<highlighter type="regex">
<pattern>^(={1,6} .+)$</pattern>
<style>heading</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(\.[^\.\s].+)$</pattern>
<style>title</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(:!?\w.*?:)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(-|\*{1,5}|\d*\.{1,5})(?= .+$)</pattern>
<style>bullet</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(\[.+\])$</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>
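An illustrative AsciiDoc fragment (invented) with one line per rule above -- block comment, line comment, heading, block title, attribute entry, bullet, and block attribute list:
////
a block comment
////
// a line comment
== A Section Heading
.A block title
:toc-title: Contents
* a bullet item
[source,java]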


@@ -1,95 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Syntax highlighting definition for SH
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2010 Mathieu Malaterre
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;</start>
<quote>'</quote>
<quote>"</quote>
<flag>-</flag>
<noWhiteSpace />
<looseTerminator />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<!-- reserved words -->
<keyword>if</keyword>
<keyword>then</keyword>
<keyword>else</keyword>
<keyword>elif</keyword>
<keyword>fi</keyword>
<keyword>case</keyword>
<keyword>esac</keyword>
<keyword>for</keyword>
<keyword>while</keyword>
<keyword>until</keyword>
<keyword>do</keyword>
<keyword>done</keyword>
<!-- built-ins -->
<keyword>exec</keyword>
<keyword>shift</keyword>
<keyword>exit</keyword>
<keyword>times</keyword>
<keyword>break</keyword>
<keyword>export</keyword>
<keyword>trap</keyword>
<keyword>continue</keyword>
<keyword>readonly</keyword>
<keyword>wait</keyword>
<keyword>eval</keyword>
<keyword>return</keyword>
<!-- other commands -->
<keyword>cd</keyword>
<keyword>echo</keyword>
<keyword>hash</keyword>
<keyword>pwd</keyword>
<keyword>read</keyword>
<keyword>set</keyword>
<keyword>test</keyword>
<keyword>type</keyword>
<keyword>ulimit</keyword>
<keyword>umask</keyword>
<keyword>unset</keyword>
</highlighter>
</highlighters>
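An illustrative shell fragment (invented) touching the main rules above -- a comment, keywords, a double-quoted string, and a heredoc opened by << with the - flag and a quoted terminator:
# a one-line comment
if test -f "config.txt"; then
  cat <<-'EOF'
	heredoc body (leading tabs are stripped because of the - flag)
	EOF
  exit 0
fi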


@@ -1,117 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for C
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start><![CDATA[/// ]]></start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="oneline-comment">
<!-- use the oneline-comment highlighter to detect directives -->
<start>#</start>
<lineBreakEscape>\</lineBreakEscape>
<style>directive</style>
<solitary />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>f</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>auto</keyword>
<keyword>_Bool</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>char</keyword>
<keyword>_Complex</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>enum</keyword>
<keyword>extern</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>_Imaginary</keyword>
<keyword>inline</keyword>
<keyword>int</keyword>
<keyword>long</keyword>
<keyword>register</keyword>
<keyword>restrict</keyword>
<keyword>return</keyword>
<keyword>short</keyword>
<keyword>signed</keyword>
<keyword>sizeof</keyword>
<keyword>static</keyword>
<keyword>struct</keyword>
<keyword>switch</keyword>
<keyword>typedef</keyword>
<keyword>union</keyword>
<keyword>unsigned</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
</highlighter>
</highlighters>
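An illustrative C fragment (invented) exercising the less obvious rules above -- both doc-comment styles, a directive continued across an escaped line break, and suffixed hex and float literals:
/** a doc comment */
/// also styled as a doc comment
#define BUFFER_SIZE \
        1024                  /* the directive spans the escaped line break */
unsigned long mask = 0xFFUL;  /* hexnumber with case-insensitive ul suffix */
float ratio = 1.5e3f;         /* number with exponent and f suffix */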


@@ -1,151 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for C++
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start><![CDATA[/// ]]></start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="oneline-comment">
<!-- use the oneline-comment highlighter to detect directives -->
<start>#</start>
<lineBreakEscape>\</lineBreakEscape>
<style>directive</style>
<solitary/>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>f</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<!-- C keywords -->
<keyword>auto</keyword>
<keyword>_Bool</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>char</keyword>
<keyword>_Complex</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>enum</keyword>
<keyword>extern</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>_Imaginary</keyword>
<keyword>inline</keyword>
<keyword>int</keyword>
<keyword>long</keyword>
<keyword>register</keyword>
<keyword>restrict</keyword>
<keyword>return</keyword>
<keyword>short</keyword>
<keyword>signed</keyword>
<keyword>sizeof</keyword>
<keyword>static</keyword>
<keyword>struct</keyword>
<keyword>switch</keyword>
<keyword>typedef</keyword>
<keyword>union</keyword>
<keyword>unsigned</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
<!-- C++ keywords -->
<keyword>asm</keyword>
<keyword>dynamic_cast</keyword>
<keyword>namespace</keyword>
<keyword>reinterpret_cast</keyword>
<keyword>try</keyword>
<keyword>bool</keyword>
<keyword>explicit</keyword>
<keyword>new</keyword>
<keyword>static_cast</keyword>
<keyword>typeid</keyword>
<keyword>catch</keyword>
<keyword>false</keyword>
<keyword>operator</keyword>
<keyword>template</keyword>
<keyword>typename</keyword>
<keyword>class</keyword>
<keyword>friend</keyword>
<keyword>private</keyword>
<keyword>this</keyword>
<keyword>using</keyword>
<keyword>const_cast</keyword>
<keyword>inline</keyword>
<keyword>public</keyword>
<keyword>throw</keyword>
<keyword>virtual</keyword>
<keyword>delete</keyword>
<keyword>mutable</keyword>
<keyword>protected</keyword>
<keyword>true</keyword>
<keyword>wchar_t</keyword>
</highlighter>
</highlighters>


@@ -1,194 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for C#
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start>///</start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="annotation">
<!-- annotations are called (custom) "attributes" in .NET -->
<start>[</start>
<end>]</end>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="oneline-comment">
<!-- C# supports a couple of directives -->
<start>#</start>
<lineBreakEscape>\</lineBreakEscape>
<style>directive</style>
<solitary/>
</highlighter>
<highlighter type="string">
<!-- strings starting with an "@" can span multiple lines -->
<string>@"</string>
<endString>"</endString>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>ul</suffix>
<suffix>lu</suffix>
<suffix>u</suffix>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>m</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>abstract</keyword>
<keyword>as</keyword>
<keyword>base</keyword>
<keyword>bool</keyword>
<keyword>break</keyword>
<keyword>byte</keyword>
<keyword>case</keyword>
<keyword>catch</keyword>
<keyword>char</keyword>
<keyword>checked</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>decimal</keyword>
<keyword>default</keyword>
<keyword>delegate</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>enum</keyword>
<keyword>event</keyword>
<keyword>explicit</keyword>
<keyword>extern</keyword>
<keyword>false</keyword>
<keyword>finally</keyword>
<keyword>fixed</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>foreach</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>implicit</keyword>
<keyword>in</keyword>
<keyword>int</keyword>
<keyword>interface</keyword>
<keyword>internal</keyword>
<keyword>is</keyword>
<keyword>lock</keyword>
<keyword>long</keyword>
<keyword>namespace</keyword>
<keyword>new</keyword>
<keyword>null</keyword>
<keyword>object</keyword>
<keyword>operator</keyword>
<keyword>out</keyword>
<keyword>override</keyword>
<keyword>params</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>public</keyword>
<keyword>readonly</keyword>
<keyword>ref</keyword>
<keyword>return</keyword>
<keyword>sbyte</keyword>
<keyword>sealed</keyword>
<keyword>short</keyword>
<keyword>sizeof</keyword>
<keyword>stackalloc</keyword>
<keyword>static</keyword>
<keyword>string</keyword>
<keyword>struct</keyword>
<keyword>switch</keyword>
<keyword>this</keyword>
<keyword>throw</keyword>
<keyword>true</keyword>
<keyword>try</keyword>
<keyword>typeof</keyword>
<keyword>uint</keyword>
<keyword>ulong</keyword>
<keyword>unchecked</keyword>
<keyword>unsafe</keyword>
<keyword>ushort</keyword>
<keyword>using</keyword>
<keyword>virtual</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
</highlighter>
<highlighter type="keywords">
<!-- special words, not really keywords -->
<keyword>add</keyword>
<keyword>alias</keyword>
<keyword>from</keyword>
<keyword>get</keyword>
<keyword>global</keyword>
<keyword>group</keyword>
<keyword>into</keyword>
<keyword>join</keyword>
<keyword>orderby</keyword>
<keyword>partial</keyword>
<keyword>remove</keyword>
<keyword>select</keyword>
<keyword>set</keyword>
<keyword>value</keyword>
<keyword>where</keyword>
<keyword>yield</keyword>
</highlighter>
</highlighters>
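An illustrative C# fragment (invented) for the .NET-specific rules above -- an attribute caught by the annotation rule, a verbatim @-string spanning lines, and a preprocessor directive:
[Serializable]
class Demo
{
#region Fields
    string text = @"spans
multiple lines";  // the @-string rule allows the value to span new lines
#endregion
}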


@@ -1,176 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for CSS files
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2011-2012 Martin Hujer, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Martin Hujer <mhujer at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
Reference: http://www.w3.org/TR/CSS21/propidx.html
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
<spanNewLines/>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines/>
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
</highlighter>
<highlighter type="word">
<word>@charset</word>
<word>@import</word>
<word>@media</word>
<word>@page</word>
<style>directive</style>
</highlighter>
<highlighter type="keywords">
<partChars>-</partChars>
<keyword>azimuth</keyword>
<keyword>background-attachment</keyword>
<keyword>background-color</keyword>
<keyword>background-image</keyword>
<keyword>background-position</keyword>
<keyword>background-repeat</keyword>
<keyword>background</keyword>
<keyword>border-collapse</keyword>
<keyword>border-color</keyword>
<keyword>border-spacing</keyword>
<keyword>border-style</keyword>
<keyword>border-top</keyword>
<keyword>border-right</keyword>
<keyword>border-bottom</keyword>
<keyword>border-left</keyword>
<keyword>border-top-color</keyword>
<keyword>border-right-color</keyword>
<keyword>border-bottom-color</keyword>
<keyword>border-left-color</keyword>
<keyword>border-top-style</keyword>
<keyword>border-right-style</keyword>
<keyword>border-bottom-style</keyword>
<keyword>border-left-style</keyword>
<keyword>border-top-width</keyword>
<keyword>border-right-width</keyword>
<keyword>border-bottom-width</keyword>
<keyword>border-left-width</keyword>
<keyword>border-width</keyword>
<keyword>border</keyword>
<keyword>bottom</keyword>
<keyword>caption-side</keyword>
<keyword>clear</keyword>
<keyword>clip</keyword>
<keyword>color</keyword>
<keyword>content</keyword>
<keyword>counter-increment</keyword>
<keyword>counter-reset</keyword>
<keyword>cue-after</keyword>
<keyword>cue-before</keyword>
<keyword>cue</keyword>
<keyword>cursor</keyword>
<keyword>direction</keyword>
<keyword>display</keyword>
<keyword>elevation</keyword>
<keyword>empty-cells</keyword>
<keyword>float</keyword>
<keyword>font-family</keyword>
<keyword>font-size</keyword>
<keyword>font-style</keyword>
<keyword>font-variant</keyword>
<keyword>font-weight</keyword>
<keyword>font</keyword>
<keyword>height</keyword>
<keyword>left</keyword>
<keyword>letter-spacing</keyword>
<keyword>line-height</keyword>
<keyword>list-style-image</keyword>
<keyword>list-style-position</keyword>
<keyword>list-style-type</keyword>
<keyword>list-style</keyword>
<keyword>margin-right</keyword>
<keyword>margin-left</keyword>
<keyword>margin-top</keyword>
<keyword>margin-bottom</keyword>
<keyword>margin</keyword>
<keyword>max-height</keyword>
<keyword>max-width</keyword>
<keyword>min-height</keyword>
<keyword>min-width</keyword>
<keyword>orphans</keyword>
<keyword>outline-color</keyword>
<keyword>outline-style</keyword>
<keyword>outline-width</keyword>
<keyword>outline</keyword>
<keyword>overflow</keyword>
<keyword>padding-top</keyword>
<keyword>padding-right</keyword>
<keyword>padding-bottom</keyword>
<keyword>padding-left</keyword>
<keyword>padding</keyword>
<keyword>page-break-after</keyword>
<keyword>page-break-before</keyword>
<keyword>page-break-inside</keyword>
<keyword>pause-after</keyword>
<keyword>pause-before</keyword>
<keyword>pause</keyword>
<keyword>pitch-range</keyword>
<keyword>pitch</keyword>
<keyword>play-during</keyword>
<keyword>position</keyword>
<keyword>quotes</keyword>
<keyword>richness</keyword>
<keyword>right</keyword>
<keyword>speak-header</keyword>
<keyword>speak-numeral</keyword>
<keyword>speak-punctuation</keyword>
<keyword>speak</keyword>
<keyword>speech-rate</keyword>
<keyword>stress</keyword>
<keyword>table-layout</keyword>
<keyword>text-align</keyword>
<keyword>text-decoration</keyword>
<keyword>text-indent</keyword>
<keyword>text-transform</keyword>
<keyword>top</keyword>
<keyword>unicode-bidi</keyword>
<keyword>vertical-align</keyword>
<keyword>visibility</keyword>
<keyword>voice-family</keyword>
<keyword>volume</keyword>
<keyword>white-space</keyword>
<keyword>widows</keyword>
<keyword>width</keyword>
<keyword>word-spacing</keyword>
<keyword>z-index</keyword>
</highlighter>
</highlighters>


@@ -1,122 +0,0 @@
<?xml version='1.0'?>
<!--
Bachelor's thesis: Syntax highlighting in XSLT
Michal Molhanec 2005
myxml-hl.xml - configuration of an XML highlighter that highlights
HTML elements and XSL elements separately
This file has been customized for the Asciidoctor project (http://asciidoctor.org).
-->
<highlighters>
<highlighter type="xml">
<elementSet>
<style>htmltag</style>
<element>a</element>
<element>abbr</element>
<element>address</element>
<element>area</element>
<element>article</element>
<element>aside</element>
<element>audio</element>
<element>b</element>
<element>base</element>
<element>bdi</element>
<element>blockquote</element>
<element>body</element>
<element>br</element>
<element>button</element>
<element>caption</element>
<element>canvas</element>
<element>cite</element>
<element>code</element>
<element>command</element>
<element>col</element>
<element>colgroup</element>
<element>dd</element>
<element>del</element>
<element>dialog</element>
<element>div</element>
<element>dl</element>
<element>dt</element>
<element>em</element>
<element>embed</element>
<element>fieldset</element>
<element>figcaption</element>
<element>figure</element>
<element>font</element>
<element>form</element>
<element>footer</element>
<element>h1</element>
<element>h2</element>
<element>h3</element>
<element>h4</element>
<element>h5</element>
<element>h6</element>
<element>head</element>
<element>header</element>
<element>hr</element>
<element>html</element>
<element>i</element>
<element>iframe</element>
<element>img</element>
<element>input</element>
<element>ins</element>
<element>kbd</element>
<element>label</element>
<element>legend</element>
<element>li</element>
<element>link</element>
<element>map</element>
<element>mark</element>
<element>menu</element>
<element>meta</element>
<element>nav</element>
<element>noscript</element>
<element>object</element>
<element>ol</element>
<element>optgroup</element>
<element>option</element>
<element>p</element>
<element>param</element>
<element>pre</element>
<element>q</element>
<element>samp</element>
<element>script</element>
<element>section</element>
<element>select</element>
<element>small</element>
<element>source</element>
<element>span</element>
<element>strong</element>
<element>style</element>
<element>sub</element>
<element>summary</element>
<element>sup</element>
<element>table</element>
<element>tbody</element>
<element>td</element>
<element>textarea</element>
<element>tfoot</element>
<element>th</element>
<element>thead</element>
<element>time</element>
<element>title</element>
<element>tr</element>
<element>track</element>
<element>u</element>
<element>ul</element>
<element>var</element>
<element>video</element>
<element>wbr</element>
<element>xmp</element>
<ignoreCase/>
</elementSet>
<elementPrefix>
<style>namespace</style>
<prefix>xsl:</prefix>
</elementPrefix>
</highlighter>
</highlighters>
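The element set above gets the htmltag style, while anything carrying the xsl: prefix gets the namespace style, so mixed XSLT/HTML listings stay readable. An invented fragment showing both:
<xsl:template match="chapter">  <!-- xsl: prefix, namespace style -->
  <div class="chapter"/>        <!-- known HTML element, htmltag style -->
</xsl:template>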


@@ -1,45 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for ini files
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">;</highlighter>
<highlighter type="regex">
<!-- ini sections -->
<pattern>^(\[.+\]\s*)$</pattern>
<style>keyword</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<!-- the keys in an ini section -->
<pattern>^(.+)(?==)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>
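An illustrative ini fragment (invented): the bracketed line matches the section regex (keyword style), the names before each = match the key regex (attribute style), and ; opens a comment:
; a comment
[server]
host=localhost
port=8080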


@@ -1,117 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Java
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="annotation">
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>abstract</keyword>
<keyword>boolean</keyword>
<keyword>break</keyword>
<keyword>byte</keyword>
<keyword>case</keyword>
<keyword>catch</keyword>
<keyword>char</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>do</keyword>
<keyword>double</keyword>
<keyword>else</keyword>
<keyword>extends</keyword>
<keyword>final</keyword>
<keyword>finally</keyword>
<keyword>float</keyword>
<keyword>for</keyword>
<keyword>goto</keyword>
<keyword>if</keyword>
<keyword>implements</keyword>
<keyword>import</keyword>
<keyword>instanceof</keyword>
<keyword>int</keyword>
<keyword>interface</keyword>
<keyword>long</keyword>
<keyword>native</keyword>
<keyword>new</keyword>
<keyword>package</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>public</keyword>
<keyword>return</keyword>
<keyword>short</keyword>
<keyword>static</keyword>
<keyword>strictfp</keyword>
<keyword>super</keyword>
<keyword>switch</keyword>
<keyword>synchronized</keyword>
<keyword>this</keyword>
<keyword>throw</keyword>
<keyword>throws</keyword>
<keyword>transient</keyword>
<keyword>try</keyword>
<keyword>void</keyword>
<keyword>volatile</keyword>
<keyword>while</keyword>
</highlighter>
</highlighters>


@@ -1,147 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for JavaScript
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>catch</keyword>
<keyword>continue</keyword>
<keyword>default</keyword>
<keyword>delete</keyword>
<keyword>do</keyword>
<keyword>else</keyword>
<keyword>finally</keyword>
<keyword>for</keyword>
<keyword>function</keyword>
<keyword>if</keyword>
<keyword>in</keyword>
<keyword>instanceof</keyword>
<keyword>new</keyword>
<keyword>return</keyword>
<keyword>switch</keyword>
<keyword>this</keyword>
<keyword>throw</keyword>
<keyword>try</keyword>
<keyword>typeof</keyword>
<keyword>var</keyword>
<keyword>void</keyword>
<keyword>while</keyword>
<keyword>with</keyword>
<!-- future keywords -->
<keyword>abstract</keyword>
<keyword>boolean</keyword>
<keyword>byte</keyword>
<keyword>char</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>debugger</keyword>
<keyword>double</keyword>
<keyword>enum</keyword>
<keyword>export</keyword>
<keyword>extends</keyword>
<keyword>final</keyword>
<keyword>float</keyword>
<keyword>goto</keyword>
<keyword>implements</keyword>
<keyword>import</keyword>
<keyword>int</keyword>
<keyword>interface</keyword>
<keyword>long</keyword>
<keyword>native</keyword>
<keyword>package</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>public</keyword>
<keyword>short</keyword>
<keyword>static</keyword>
<keyword>super</keyword>
<keyword>synchronized</keyword>
<keyword>throws</keyword>
<keyword>transient</keyword>
<keyword>volatile</keyword>
</highlighter>
<highlighter type="keywords">
<keyword>prototype</keyword>
<!-- Global Objects -->
<keyword>Array</keyword>
<keyword>Boolean</keyword>
<keyword>Date</keyword>
<keyword>Error</keyword>
<keyword>EvalError</keyword>
<keyword>Function</keyword>
<keyword>Math</keyword>
<keyword>Number</keyword>
<keyword>Object</keyword>
<keyword>RangeError</keyword>
<keyword>ReferenceError</keyword>
<keyword>RegExp</keyword>
<keyword>String</keyword>
<keyword>SyntaxError</keyword>
<keyword>TypeError</keyword>
<keyword>URIError</keyword>
<!-- Global functions -->
<keyword>decodeURI</keyword>
<keyword>decodeURIComponent</keyword>
<keyword>encodeURI</keyword>
<keyword>encodeURIComponent</keyword>
<keyword>eval</keyword>
<keyword>isFinite</keyword>
<keyword>isNaN</keyword>
<keyword>parseFloat</keyword>
<keyword>parseInt</keyword>
<!-- Global properties -->
<keyword>Infinity</keyword>
<keyword>NaN</keyword>
<keyword>undefined</keyword>
</highlighter>
</highlighters>

@@ -1,37 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="annotation">
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>true</keyword>
<keyword>false</keyword>
</highlighter>
<highlighter type="word">
<word>{</word>
<word>}</word>
<word>,</word>
<word>[</word>
<word>]</word>
<style>keyword</style>
</highlighter>
</highlighters>

@@ -1,120 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Perl
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;</start>
<quote>'</quote>
<quote>"</quote>
<noWhiteSpace/>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines/>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>if</keyword>
<keyword>unless</keyword>
<keyword>while</keyword>
<keyword>until</keyword>
<keyword>foreach</keyword>
<keyword>else</keyword>
<keyword>elsif</keyword>
<keyword>for</keyword>
<keyword>when</keyword>
<keyword>default</keyword>
<keyword>given</keyword>
<!-- Keywords related to the control flow of your perl program -->
<keyword>caller</keyword>
<keyword>continue</keyword>
<keyword>die</keyword>
<keyword>do</keyword>
<keyword>dump</keyword>
<keyword>eval</keyword>
<keyword>exit</keyword>
<keyword>goto</keyword>
<keyword>last</keyword>
<keyword>next</keyword>
<keyword>redo</keyword>
<keyword>return</keyword>
<keyword>sub</keyword>
<keyword>wantarray</keyword>
<!-- Keywords related to scoping -->
<keyword>caller</keyword>
<keyword>import</keyword>
<keyword>local</keyword>
<keyword>my</keyword>
<keyword>package</keyword>
<keyword>use</keyword>
<!-- Keywords related to perl modules -->
<keyword>do</keyword>
<keyword>import</keyword>
<keyword>no</keyword>
<keyword>package</keyword>
<keyword>require</keyword>
<keyword>use</keyword>
<!-- Keywords related to classes and object-orientedness -->
<keyword>bless</keyword>
<keyword>dbmclose</keyword>
<keyword>dbmopen</keyword>
<keyword>package</keyword>
<keyword>ref</keyword>
<keyword>tie</keyword>
<keyword>tied</keyword>
<keyword>untie</keyword>
<keyword>use</keyword>
<!-- operators -->
<keyword>and</keyword>
<keyword>or</keyword>
<keyword>not</keyword>
<keyword>eq</keyword>
<keyword>ne</keyword>
<keyword>lt</keyword>
<keyword>gt</keyword>
<keyword>le</keyword>
<keyword>ge</keyword>
<keyword>cmp</keyword>
</highlighter>
</highlighters>

@@ -1,154 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for PHP
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="multiline-comment">
<start>/**</start>
<end>*/</end>
<style>doccomment</style>
</highlighter>
<highlighter type="oneline-comment">
<start><![CDATA[/// ]]></start>
<style>doccomment</style>
</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="oneline-comment">//</highlighter>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
<spanNewLines />
</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;&lt;</start>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>and</keyword>
<keyword>or</keyword>
<keyword>xor</keyword>
<keyword>__FILE__</keyword>
<keyword>exception</keyword>
<keyword>__LINE__</keyword>
<keyword>array</keyword>
<keyword>as</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>class</keyword>
<keyword>const</keyword>
<keyword>continue</keyword>
<keyword>declare</keyword>
<keyword>default</keyword>
<keyword>die</keyword>
<keyword>do</keyword>
<keyword>echo</keyword>
<keyword>else</keyword>
<keyword>elseif</keyword>
<keyword>empty</keyword>
<keyword>enddeclare</keyword>
<keyword>endfor</keyword>
<keyword>endforeach</keyword>
<keyword>endif</keyword>
<keyword>endswitch</keyword>
<keyword>endwhile</keyword>
<keyword>eval</keyword>
<keyword>exit</keyword>
<keyword>extends</keyword>
<keyword>for</keyword>
<keyword>foreach</keyword>
<keyword>function</keyword>
<keyword>global</keyword>
<keyword>if</keyword>
<keyword>include</keyword>
<keyword>include_once</keyword>
<keyword>isset</keyword>
<keyword>list</keyword>
<keyword>new</keyword>
<keyword>print</keyword>
<keyword>require</keyword>
<keyword>require_once</keyword>
<keyword>return</keyword>
<keyword>static</keyword>
<keyword>switch</keyword>
<keyword>unset</keyword>
<keyword>use</keyword>
<keyword>var</keyword>
<keyword>while</keyword>
<keyword>__FUNCTION__</keyword>
<keyword>__CLASS__</keyword>
<keyword>__METHOD__</keyword>
<keyword>final</keyword>
<keyword>php_user_filter</keyword>
<keyword>interface</keyword>
<keyword>implements</keyword>
<keyword>extends</keyword>
<keyword>public</keyword>
<keyword>private</keyword>
<keyword>protected</keyword>
<keyword>abstract</keyword>
<keyword>clone</keyword>
<keyword>try</keyword>
<keyword>catch</keyword>
<keyword>throw</keyword>
<keyword>cfunction</keyword>
<keyword>old_function</keyword>
<keyword>true</keyword>
<keyword>false</keyword>
<!-- PHP 5.3 -->
<keyword>namespace</keyword>
<keyword>__NAMESPACE__</keyword>
<keyword>goto</keyword>
<keyword>__DIR__</keyword>
<ignoreCase />
</highlighter>
<highlighter type="word">
<!-- highlight the php open and close tags as directives -->
<word>?&gt;</word>
<word>&lt;?php</word>
<word>&lt;?=</word>
<style>directive</style>
</highlighter>
</highlighters>

@@ -1,38 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for properties files
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="regex">
<pattern>^(.+?)(?==|:)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>
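
The regex highlighter above styles everything before the first '=' or ':' on each line — the property key — as an attribute. A minimal sketch of the same pattern and flag under java.util.regex (assuming the standard Java regex semantics that xslthl's regex highlighter builds on; the sample properties are illustrative):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PropertyKeyHighlightDemo {
    public static void main(String[] args) {
        // Same pattern and MULTILINE flag as the <regex> highlighter above.
        Pattern keyPattern = Pattern.compile("^(.+?)(?==|:)", Pattern.MULTILINE);
        String props = "server.port=8080\nlogging.level: DEBUG";
        Matcher m = keyPattern.matcher(props);
        while (m.find()) {
            // Prints "server.port" and "logging.level" -- the spans that
            // would be rendered with the 'attribute' style.
            System.out.println("attribute span: " + m.group(1));
        }
    }
}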

@@ -1,100 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Python
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="annotation">
<!-- these are actually called decorators -->
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"""</string>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>'''</string>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>and</keyword>
<keyword>del</keyword>
<keyword>from</keyword>
<keyword>not</keyword>
<keyword>while</keyword>
<keyword>as</keyword>
<keyword>elif</keyword>
<keyword>global</keyword>
<keyword>or</keyword>
<keyword>with</keyword>
<keyword>assert</keyword>
<keyword>else</keyword>
<keyword>if</keyword>
<keyword>pass</keyword>
<keyword>yield</keyword>
<keyword>break</keyword>
<keyword>except</keyword>
<keyword>import</keyword>
<keyword>print</keyword>
<keyword>class</keyword>
<keyword>exec</keyword>
<keyword>in</keyword>
<keyword>raise</keyword>
<keyword>continue</keyword>
<keyword>finally</keyword>
<keyword>is</keyword>
<keyword>return</keyword>
<keyword>def</keyword>
<keyword>for</keyword>
<keyword>lambda</keyword>
<keyword>try</keyword>
</highlighter>
</highlighters>

@@ -1,109 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for Ruby
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;</start>
<noWhiteSpace/>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%Q{</string>
<endString>}</endString>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%/</string>
<endString>/</endString>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%q{</string>
<endString>}</endString>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>alias</keyword>
<keyword>and</keyword>
<keyword>BEGIN</keyword>
<keyword>begin</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>class</keyword>
<keyword>def</keyword>
<keyword>defined</keyword>
<keyword>do</keyword>
<keyword>else</keyword>
<keyword>elsif</keyword>
<keyword>END</keyword>
<keyword>end</keyword>
<keyword>ensure</keyword>
<keyword>false</keyword>
<keyword>for</keyword>
<keyword>if</keyword>
<keyword>in</keyword>
<keyword>module</keyword>
<keyword>next</keyword>
<keyword>nil</keyword>
<keyword>not</keyword>
<keyword>or</keyword>
<keyword>redo</keyword>
<keyword>rescue</keyword>
<keyword>retry</keyword>
<keyword>return</keyword>
<keyword>self</keyword>
<keyword>super</keyword>
<keyword>then</keyword>
<keyword>true</keyword>
<keyword>undef</keyword>
<keyword>unless</keyword>
<keyword>until</keyword>
<keyword>when</keyword>
<keyword>while</keyword>
<keyword>yield</keyword>
</highlighter>
</highlighters>

@@ -1,565 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Syntax highlighting definition for SQL:1999
xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2012 Michiel Hendriks, Martin Hujer, k42b3
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
-->
<highlighters>
<highlighter type="oneline-comment">--</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="string">
<string>'</string>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>U'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>B'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>N'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>X'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<ignoreCase />
<!-- reserved -->
<keyword>A</keyword>
<keyword>ABS</keyword>
<keyword>ABSOLUTE</keyword>
<keyword>ACTION</keyword>
<keyword>ADA</keyword>
<keyword>ADMIN</keyword>
<keyword>AFTER</keyword>
<keyword>ALWAYS</keyword>
<keyword>ASC</keyword>
<keyword>ASSERTION</keyword>
<keyword>ASSIGNMENT</keyword>
<keyword>ATTRIBUTE</keyword>
<keyword>ATTRIBUTES</keyword>
<keyword>AVG</keyword>
<keyword>BEFORE</keyword>
<keyword>BERNOULLI</keyword>
<keyword>BREADTH</keyword>
<keyword>C</keyword>
<keyword>CARDINALITY</keyword>
<keyword>CASCADE</keyword>
<keyword>CATALOG_NAME</keyword>
<keyword>CATALOG</keyword>
<keyword>CEIL</keyword>
<keyword>CEILING</keyword>
<keyword>CHAIN</keyword>
<keyword>CHAR_LENGTH</keyword>
<keyword>CHARACTER_LENGTH</keyword>
<keyword>CHARACTER_SET_CATALOG</keyword>
<keyword>CHARACTER_SET_NAME</keyword>
<keyword>CHARACTER_SET_SCHEMA</keyword>
<keyword>CHARACTERISTICS</keyword>
<keyword>CHARACTERS</keyword>
<keyword>CHECKED</keyword>
<keyword>CLASS_ORIGIN</keyword>
<keyword>COALESCE</keyword>
<keyword>COBOL</keyword>
<keyword>CODE_UNITS</keyword>
<keyword>COLLATION_CATALOG</keyword>
<keyword>COLLATION_NAME</keyword>
<keyword>COLLATION_SCHEMA</keyword>
<keyword>COLLATION</keyword>
<keyword>COLLECT</keyword>
<keyword>COLUMN_NAME</keyword>
<keyword>COMMAND_FUNCTION_CODE</keyword>
<keyword>COMMAND_FUNCTION</keyword>
<keyword>COMMITTED</keyword>
<keyword>CONDITION_NUMBER</keyword>
<keyword>CONDITION</keyword>
<keyword>CONNECTION_NAME</keyword>
<keyword>CONSTRAINT_CATALOG</keyword>
<keyword>CONSTRAINT_NAME</keyword>
<keyword>CONSTRAINT_SCHEMA</keyword>
<keyword>CONSTRAINTS</keyword>
<keyword>CONSTRUCTORS</keyword>
<keyword>CONTAINS</keyword>
<keyword>CONVERT</keyword>
<keyword>CORR</keyword>
<keyword>COUNT</keyword>
<keyword>COVAR_POP</keyword>
<keyword>COVAR_SAMP</keyword>
<keyword>CUME_DIST</keyword>
<keyword>CURRENT_COLLATION</keyword>
<keyword>CURSOR_NAME</keyword>
<keyword>DATA</keyword>
<keyword>DATETIME_INTERVAL_CODE</keyword>
<keyword>DATETIME_INTERVAL_PRECISION</keyword>
<keyword>DEFAULTS</keyword>
<keyword>DEFERRABLE</keyword>
<keyword>DEFERRED</keyword>
<keyword>DEFINED</keyword>
<keyword>DEFINER</keyword>
<keyword>DEGREE</keyword>
<keyword>DENSE_RANK</keyword>
<keyword>DEPTH</keyword>
<keyword>DERIVED</keyword>
<keyword>DESC</keyword>
<keyword>DESCRIPTOR</keyword>
<keyword>DIAGNOSTICS</keyword>
<keyword>DISPATCH</keyword>
<keyword>DOMAIN</keyword>
<keyword>DYNAMIC_FUNCTION_CODE</keyword>
<keyword>DYNAMIC_FUNCTION</keyword>
<keyword>EQUALS</keyword>
<keyword>EVERY</keyword>
<keyword>EXCEPTION</keyword>
<keyword>EXCLUDE</keyword>
<keyword>EXCLUDING</keyword>
<keyword>EXP</keyword>
<keyword>EXTRACT</keyword>
<keyword>FINAL</keyword>
<keyword>FIRST</keyword>
<keyword>FLOOR</keyword>
<keyword>FOLLOWING</keyword>
<keyword>FORTRAN</keyword>
<keyword>FOUND</keyword>
<keyword>FUSION</keyword>
<keyword>G</keyword>
<keyword>GENERAL</keyword>
<keyword>GO</keyword>
<keyword>GOTO</keyword>
<keyword>GRANTED</keyword>
<keyword>HIERARCHY</keyword>
<keyword>IMPLEMENTATION</keyword>
<keyword>INCLUDING</keyword>
<keyword>INCREMENT</keyword>
<keyword>INITIALLY</keyword>
<keyword>INSTANCE</keyword>
<keyword>INSTANTIABLE</keyword>
<keyword>INTERSECTION</keyword>
<keyword>INVOKER</keyword>
<keyword>ISOLATION</keyword>
<keyword>K</keyword>
<keyword>KEY_MEMBER</keyword>
<keyword>KEY_TYPE</keyword>
<keyword>KEY</keyword>
<keyword>LAST</keyword>
<keyword>LENGTH</keyword>
<keyword>LEVEL</keyword>
<keyword>LN</keyword>
<keyword>LOCATOR</keyword>
<keyword>LOWER</keyword>
<keyword>M</keyword>
<keyword>MAP</keyword>
<keyword>MATCHED</keyword>
<keyword>MAX</keyword>
<keyword>MAXVALUE</keyword>
<keyword>MESSAGE_LENGTH</keyword>
<keyword>MESSAGE_OCTET_LENGTH</keyword>
<keyword>MESSAGE_TEXT</keyword>
<keyword>MIN</keyword>
<keyword>MINVALUE</keyword>
<keyword>MOD</keyword>
<keyword>MORE</keyword>
<keyword>MUMPS</keyword>
<keyword>NAME</keyword>
<keyword>NAMES</keyword>
<keyword>NESTING</keyword>
<keyword>NEXT</keyword>
<keyword>NORMALIZE</keyword>
<keyword>NORMALIZED</keyword>
<keyword>NULLABLE</keyword>
<keyword>NULLIF</keyword>
<keyword>NULLS</keyword>
<keyword>NUMBER</keyword>
<keyword>OBJECT</keyword>
<keyword>OCTET_LENGTH</keyword>
<keyword>OCTETS</keyword>
<keyword>OPTION</keyword>
<keyword>OPTIONS</keyword>
<keyword>ORDERING</keyword>
<keyword>ORDINALITY</keyword>
<keyword>OTHERS</keyword>
<keyword>OVERLAY</keyword>
<keyword>OVERRIDING</keyword>
<keyword>PAD</keyword>
<keyword>PARAMETER_MODE</keyword>
<keyword>PARAMETER_NAME</keyword>
<keyword>PARAMETER_ORDINAL_POSITION</keyword>
<keyword>PARAMETER_SPECIFIC_CATALOG</keyword>
<keyword>PARAMETER_SPECIFIC_NAME</keyword>
<keyword>PARAMETER_SPECIFIC_SCHEMA</keyword>
<keyword>PARTIAL</keyword>
<keyword>PASCAL</keyword>
<keyword>PATH</keyword>
<keyword>PERCENT_RANK</keyword>
<keyword>PERCENTILE_CONT</keyword>
<keyword>PERCENTILE_DISC</keyword>
<keyword>PLACING</keyword>
<keyword>PLI</keyword>
<keyword>POSITION</keyword>
<keyword>POWER</keyword>
<keyword>PRECEDING</keyword>
<keyword>PRESERVE</keyword>
<keyword>PRIOR</keyword>
<keyword>PRIVILEGES</keyword>
<keyword>PUBLIC</keyword>
<keyword>RANK</keyword>
<keyword>READ</keyword>
<keyword>RELATIVE</keyword>
<keyword>REPEATABLE</keyword>
<keyword>RESTART</keyword>
<keyword>RETURNED_CARDINALITY</keyword>
<keyword>RETURNED_LENGTH</keyword>
<keyword>RETURNED_OCTET_LENGTH</keyword>
<keyword>RETURNED_SQLSTATE</keyword>
<keyword>ROLE</keyword>
<keyword>ROUTINE_CATALOG</keyword>
<keyword>ROUTINE_NAME</keyword>
<keyword>ROUTINE_SCHEMA</keyword>
<keyword>ROUTINE</keyword>
<keyword>ROW_COUNT</keyword>
<keyword>ROW_NUMBER</keyword>
<keyword>SCALE</keyword>
<keyword>SCHEMA_NAME</keyword>
<keyword>SCHEMA</keyword>
<keyword>SCOPE_CATALOG</keyword>
<keyword>SCOPE_NAME</keyword>
<keyword>SCOPE_SCHEMA</keyword>
<keyword>SECTION</keyword>
<keyword>SECURITY</keyword>
<keyword>SELF</keyword>
<keyword>SEQUENCE</keyword>
<keyword>SERIALIZABLE</keyword>
<keyword>SERVER_NAME</keyword>
<keyword>SESSION</keyword>
<keyword>SETS</keyword>
<keyword>SIMPLE</keyword>
<keyword>SIZE</keyword>
<keyword>SOURCE</keyword>
<keyword>SPACE</keyword>
<keyword>SPECIFIC_NAME</keyword>
<keyword>SQRT</keyword>
<keyword>STATE</keyword>
<keyword>STATEMENT</keyword>
<keyword>STDDEV_POP</keyword>
<keyword>STDDEV_SAMP</keyword>
<keyword>STRUCTURE</keyword>
<keyword>STYLE</keyword>
<keyword>SUBCLASS_ORIGIN</keyword>
<keyword>SUBSTRING</keyword>
<keyword>SUM</keyword>
<keyword>TABLE_NAME</keyword>
<keyword>TABLESAMPLE</keyword>
<keyword>TEMPORARY</keyword>
<keyword>TIES</keyword>
<keyword>TOP_LEVEL_COUNT</keyword>
<keyword>TRANSACTION_ACTIVE</keyword>
<keyword>TRANSACTION</keyword>
<keyword>TRANSACTIONS_COMMITTED</keyword>
<keyword>TRANSACTIONS_ROLLED_BACK</keyword>
<keyword>TRANSFORM</keyword>
<keyword>TRANSFORMS</keyword>
<keyword>TRANSLATE</keyword>
<keyword>TRIGGER_CATALOG</keyword>
<keyword>TRIGGER_NAME</keyword>
<keyword>TRIGGER_SCHEMA</keyword>
<keyword>TRIM</keyword>
<keyword>TYPE</keyword>
<keyword>UNBOUNDED</keyword>
<keyword>UNCOMMITTED</keyword>
<keyword>UNDER</keyword>
<keyword>UNNAMED</keyword>
<keyword>USAGE</keyword>
<keyword>USER_DEFINED_TYPE_CATALOG</keyword>
<keyword>USER_DEFINED_TYPE_CODE</keyword>
<keyword>USER_DEFINED_TYPE_NAME</keyword>
<keyword>USER_DEFINED_TYPE_SCHEMA</keyword>
<keyword>VIEW</keyword>
<keyword>WORK</keyword>
<keyword>WRITE</keyword>
<keyword>ZONE</keyword>
<!-- non reserved -->
<keyword>ADD</keyword>
<keyword>ALL</keyword>
<keyword>ALLOCATE</keyword>
<keyword>ALTER</keyword>
<keyword>AND</keyword>
<keyword>ANY</keyword>
<keyword>ARE</keyword>
<keyword>ARRAY</keyword>
<keyword>AS</keyword>
<keyword>ASENSITIVE</keyword>
<keyword>ASYMMETRIC</keyword>
<keyword>AT</keyword>
<keyword>ATOMIC</keyword>
<keyword>AUTHORIZATION</keyword>
<keyword>BEGIN</keyword>
<keyword>BETWEEN</keyword>
<keyword>BIGINT</keyword>
<keyword>BINARY</keyword>
<keyword>BLOB</keyword>
<keyword>BOOLEAN</keyword>
<keyword>BOTH</keyword>
<keyword>BY</keyword>
<keyword>CALL</keyword>
<keyword>CALLED</keyword>
<keyword>CASCADED</keyword>
<keyword>CASE</keyword>
<keyword>CAST</keyword>
<keyword>CHAR</keyword>
<keyword>CHARACTER</keyword>
<keyword>CHECK</keyword>
<keyword>CLOB</keyword>
<keyword>CLOSE</keyword>
<keyword>COLLATE</keyword>
<keyword>COLUMN</keyword>
<keyword>COMMIT</keyword>
<keyword>CONNECT</keyword>
<keyword>CONSTRAINT</keyword>
<keyword>CONTINUE</keyword>
<keyword>CORRESPONDING</keyword>
<keyword>CREATE</keyword>
<keyword>CROSS</keyword>
<keyword>CUBE</keyword>
<keyword>CURRENT_DATE</keyword>
<keyword>CURRENT_DEFAULT_TRANSFORM_GROUP</keyword>
<keyword>CURRENT_PATH</keyword>
<keyword>CURRENT_ROLE</keyword>
<keyword>CURRENT_TIME</keyword>
<keyword>CURRENT_TIMESTAMP</keyword>
<keyword>CURRENT_TRANSFORM_GROUP_FOR_TYPE</keyword>
<keyword>CURRENT_USER</keyword>
<keyword>CURRENT</keyword>
<keyword>CURSOR</keyword>
<keyword>CYCLE</keyword>
<keyword>DATE</keyword>
<keyword>DAY</keyword>
<keyword>DEALLOCATE</keyword>
<keyword>DEC</keyword>
<keyword>DECIMAL</keyword>
<keyword>DECLARE</keyword>
<keyword>DEFAULT</keyword>
<keyword>DELETE</keyword>
<keyword>DEREF</keyword>
<keyword>DESCRIBE</keyword>
<keyword>DETERMINISTIC</keyword>
<keyword>DISCONNECT</keyword>
<keyword>DISTINCT</keyword>
<keyword>DOUBLE</keyword>
<keyword>DROP</keyword>
<keyword>DYNAMIC</keyword>
<keyword>EACH</keyword>
<keyword>ELEMENT</keyword>
<keyword>ELSE</keyword>
<keyword>END</keyword>
<keyword>END-EXEC</keyword>
<keyword>ESCAPE</keyword>
<keyword>EXCEPT</keyword>
<keyword>EXEC</keyword>
<keyword>EXECUTE</keyword>
<keyword>EXISTS</keyword>
<keyword>EXTERNAL</keyword>
<keyword>FALSE</keyword>
<keyword>FETCH</keyword>
<keyword>FILTER</keyword>
<keyword>FLOAT</keyword>
<keyword>FOR</keyword>
<keyword>FOREIGN</keyword>
<keyword>FREE</keyword>
<keyword>FROM</keyword>
<keyword>FULL</keyword>
<keyword>FUNCTION</keyword>
<keyword>GET</keyword>
<keyword>GLOBAL</keyword>
<keyword>GRANT</keyword>
<keyword>GROUP</keyword>
<keyword>GROUPING</keyword>
<keyword>HAVING</keyword>
<keyword>HOLD</keyword>
<keyword>HOUR</keyword>
<keyword>IDENTITY</keyword>
<keyword>IMMEDIATE</keyword>
<keyword>IN</keyword>
<keyword>INDICATOR</keyword>
<keyword>INNER</keyword>
<keyword>INOUT</keyword>
<keyword>INPUT</keyword>
<keyword>INSENSITIVE</keyword>
<keyword>INSERT</keyword>
<keyword>INT</keyword>
<keyword>INTEGER</keyword>
<keyword>INTERSECT</keyword>
<keyword>INTERVAL</keyword>
<keyword>INTO</keyword>
<keyword>IS</keyword>
<keyword>ISOLATION</keyword>
<keyword>JOIN</keyword>
<keyword>LANGUAGE</keyword>
<keyword>LARGE</keyword>
<keyword>LATERAL</keyword>
<keyword>LEADING</keyword>
<keyword>LEFT</keyword>
<keyword>LIKE</keyword>
<keyword>LOCAL</keyword>
<keyword>LOCALTIME</keyword>
<keyword>LOCALTIMESTAMP</keyword>
<keyword>MATCH</keyword>
<keyword>MEMBER</keyword>
<keyword>MERGE</keyword>
<keyword>METHOD</keyword>
<keyword>MINUTE</keyword>
<keyword>MODIFIES</keyword>
<keyword>MODULE</keyword>
<keyword>MONTH</keyword>
<keyword>MULTISET</keyword>
<keyword>NATIONAL</keyword>
<keyword>NATURAL</keyword>
<keyword>NCHAR</keyword>
<keyword>NCLOB</keyword>
<keyword>NEW</keyword>
<keyword>NO</keyword>
<keyword>NONE</keyword>
<keyword>NOT</keyword>
<keyword>NULL</keyword>
<keyword>NUMERIC</keyword>
<keyword>OF</keyword>
<keyword>OLD</keyword>
<keyword>ON</keyword>
<keyword>ONLY</keyword>
<keyword>OPEN</keyword>
<keyword>OR</keyword>
<keyword>ORDER</keyword>
<keyword>OUT</keyword>
<keyword>OUTER</keyword>
<keyword>OUTPUT</keyword>
<keyword>OVER</keyword>
<keyword>OVERLAPS</keyword>
<keyword>PARAMETER</keyword>
<keyword>PARTITION</keyword>
<keyword>PRECISION</keyword>
<keyword>PREPARE</keyword>
<keyword>PRIMARY</keyword>
<keyword>PROCEDURE</keyword>
<keyword>RANGE</keyword>
<keyword>READS</keyword>
<keyword>REAL</keyword>
<keyword>RECURSIVE</keyword>
<keyword>REF</keyword>
<keyword>REFERENCES</keyword>
<keyword>REFERENCING</keyword>
<keyword>REGR_AVGX</keyword>
<keyword>REGR_AVGY</keyword>
<keyword>REGR_COUNT</keyword>
<keyword>REGR_INTERCEPT</keyword>
<keyword>REGR_R2</keyword>
<keyword>REGR_SLOPE</keyword>
<keyword>REGR_SXX</keyword>
<keyword>REGR_SXY</keyword>
<keyword>REGR_SYY</keyword>
<keyword>RELEASE</keyword>
<keyword>RESULT</keyword>
<keyword>RETURN</keyword>
<keyword>RETURNS</keyword>
<keyword>REVOKE</keyword>
<keyword>RIGHT</keyword>
<keyword>ROLLBACK</keyword>
<keyword>ROLLUP</keyword>
<keyword>ROW</keyword>
<keyword>ROWS</keyword>
<keyword>SAVEPOINT</keyword>
<keyword>SCROLL</keyword>
<keyword>SEARCH</keyword>
<keyword>SECOND</keyword>
<keyword>SELECT</keyword>
<keyword>SENSITIVE</keyword>
<keyword>SESSION_USER</keyword>
<keyword>SET</keyword>
<keyword>SIMILAR</keyword>
<keyword>SMALLINT</keyword>
<keyword>SOME</keyword>
<keyword>SPECIFIC</keyword>
<keyword>SPECIFICTYPE</keyword>
<keyword>SQL</keyword>
<keyword>SQLEXCEPTION</keyword>
<keyword>SQLSTATE</keyword>
<keyword>SQLWARNING</keyword>
<keyword>START</keyword>
<keyword>STATIC</keyword>
<keyword>SUBMULTISET</keyword>
<keyword>SYMMETRIC</keyword>
<keyword>SYSTEM_USER</keyword>
<keyword>SYSTEM</keyword>
<keyword>TABLE</keyword>
<keyword>THEN</keyword>
<keyword>TIME</keyword>
<keyword>TIMESTAMP</keyword>
<keyword>TIMEZONE_HOUR</keyword>
<keyword>TIMEZONE_MINUTE</keyword>
<keyword>TO</keyword>
<keyword>TRAILING</keyword>
<keyword>TRANSLATION</keyword>
<keyword>TREAT</keyword>
<keyword>TRIGGER</keyword>
<keyword>TRUE</keyword>
<keyword>UESCAPE</keyword>
<keyword>UNION</keyword>
<keyword>UNIQUE</keyword>
<keyword>UNKNOWN</keyword>
<keyword>UNNEST</keyword>
<keyword>UPDATE</keyword>
<keyword>UPPER</keyword>
<keyword>USER</keyword>
<keyword>USING</keyword>
<keyword>VALUE</keyword>
<keyword>VALUES</keyword>
<keyword>VAR_POP</keyword>
<keyword>VAR_SAMP</keyword>
<keyword>VARCHAR</keyword>
<keyword>VARYING</keyword>
<keyword>WHEN</keyword>
<keyword>WHENEVER</keyword>
<keyword>WHERE</keyword>
<keyword>WIDTH_BUCKET</keyword>
<keyword>WINDOW</keyword>
<keyword>WITH</keyword>
<keyword>WITHIN</keyword>
<keyword>WITHOUT</keyword>
<keyword>YEAR</keyword>
</highlighter>
</highlighters>

@@ -1,47 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="annotation">
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>true</keyword>
<keyword>false</keyword>
</highlighter>
<highlighter type="word">
<word>{</word>
<word>}</word>
<word>,</word>
<word>[</word>
<word>]</word>
<style>keyword</style>
</highlighter>
<highlighter type="regex">
<pattern>^(---)$</pattern>
<style>comment</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(.+?)(?==|:)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>

@@ -1,599 +0,0 @@
/* Javadoc style sheet */
/*
Overall document style
*/
@import url('resources/fonts/dejavu.css');
body {
background-color:#ffffff;
color:#353833;
font-family:'DejaVu Sans', Arial, Helvetica, sans-serif;
font-size:14px;
margin:0;
}
a:link, a:visited {
text-decoration:none;
color:#4A6782;
}
a:hover, a:focus {
text-decoration:none;
color:#bb7a2a;
}
a:active {
text-decoration:none;
color:#4A6782;
}
a[name] {
color:#353833;
}
a[name]:hover {
text-decoration:none;
color:#353833;
}
pre {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
}
h1 {
font-size:20px;
}
h2 {
font-size:18px;
}
h3 {
font-size:16px;
font-style:italic;
}
h4 {
font-size:13px;
}
h5 {
font-size:12px;
}
h6 {
font-size:11px;
}
ul {
list-style-type:disc;
}
code, tt {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
padding-top:4px;
margin-top:8px;
line-height:1.4em;
}
dt code {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
padding-top:4px;
}
table tr td dt code {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
vertical-align:top;
padding-top:4px;
}
sup {
font-size:8px;
}
/*
Document title and Copyright styles
*/
.clear {
clear:both;
height:0px;
overflow:hidden;
}
.aboutLanguage {
float:right;
padding:0px 21px;
font-size:11px;
z-index:200;
margin-top:-9px;
}
.legalCopy {
margin-left:.5em;
}
.bar a, .bar a:link, .bar a:visited, .bar a:active {
color:#FFFFFF;
text-decoration:none;
}
.bar a:hover, .bar a:focus {
color:#bb7a2a;
}
.tab {
background-color:#0066FF;
color:#ffffff;
padding:8px;
width:5em;
font-weight:bold;
}
/*
Navigation bar styles
*/
.bar {
background-color:#4D7A97;
color:#FFFFFF;
padding:.8em .5em .4em .8em;
height:auto;/*height:1.8em;*/
font-size:11px;
margin:0;
}
.topNav {
background-color:#4D7A97;
color:#FFFFFF;
float:left;
padding:0;
width:100%;
clear:right;
height:2.8em;
padding-top:10px;
overflow:hidden;
font-size:12px;
}
.bottomNav {
margin-top:10px;
background-color:#4D7A97;
color:#FFFFFF;
float:left;
padding:0;
width:100%;
clear:right;
height:2.8em;
padding-top:10px;
overflow:hidden;
font-size:12px;
}
.subNav {
background-color:#dee3e9;
float:left;
width:100%;
overflow:hidden;
font-size:12px;
}
.subNav div {
clear:left;
float:left;
padding:0 0 5px 6px;
text-transform:uppercase;
}
ul.navList, ul.subNavList {
float:left;
margin:0 25px 0 0;
padding:0;
}
ul.navList li{
list-style:none;
float:left;
padding: 5px 6px;
text-transform:uppercase;
}
ul.subNavList li{
list-style:none;
float:left;
}
.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited {
color:#FFFFFF;
text-decoration:none;
text-transform:uppercase;
}
.topNav a:hover, .bottomNav a:hover {
text-decoration:none;
color:#bb7a2a;
text-transform:uppercase;
}
.navBarCell1Rev {
background-color:#F8981D;
color:#253441;
margin: auto 5px;
}
.skipNav {
position:absolute;
top:auto;
left:-9999px;
overflow:hidden;
}
/*
Page header and footer styles
*/
.header, .footer {
clear:both;
margin:0 20px;
padding:5px 0 0 0;
}
.indexHeader {
margin:10px;
position:relative;
}
.indexHeader span{
margin-right:15px;
}
.indexHeader h1 {
font-size:13px;
}
.title {
color:#2c4557;
margin:10px 0;
}
.subTitle {
margin:5px 0 0 0;
}
.header ul {
margin:0 0 15px 0;
padding:0;
}
.footer ul {
margin:20px 0 5px 0;
}
.header ul li, .footer ul li {
list-style:none;
font-size:13px;
}
/*
Heading styles
*/
div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 {
background-color:#dee3e9;
border:1px solid #d0d9e0;
margin:0 0 6px -8px;
padding:7px 5px;
}
ul.blockList ul.blockList ul.blockList li.blockList h3 {
background-color:#dee3e9;
border:1px solid #d0d9e0;
margin:0 0 6px -8px;
padding:7px 5px;
}
ul.blockList ul.blockList li.blockList h3 {
padding:0;
margin:15px 0;
}
ul.blockList li.blockList h2 {
padding:0px 0 20px 0;
}
/*
Page layout container styles
*/
.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer {
clear:both;
padding:10px 20px;
position:relative;
}
.indexContainer {
margin:10px;
position:relative;
font-size:12px;
}
.indexContainer h2 {
font-size:13px;
padding:0 0 3px 0;
}
.indexContainer ul {
margin:0;
padding:0;
}
.indexContainer ul li {
list-style:none;
padding-top:2px;
}
.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt {
font-size:12px;
font-weight:bold;
margin:10px 0 0 0;
color:#4E4E4E;
}
.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd {
margin:5px 0 10px 0px;
font-size:14px;
font-family:'DejaVu Sans Mono',monospace;
}
.serializedFormContainer dl.nameValue dt {
margin-left:1px;
font-size:1.1em;
display:inline;
font-weight:bold;
}
.serializedFormContainer dl.nameValue dd {
margin:0 0 0 1px;
font-size:1.1em;
display:inline;
}
/*
List styles
*/
ul.horizontal li {
display:inline;
font-size:0.9em;
}
ul.inheritance {
margin:0;
padding:0;
}
ul.inheritance li {
display:inline;
list-style:none;
}
ul.inheritance li ul.inheritance {
margin-left:15px;
padding-left:15px;
padding-top:1px;
}
ul.blockList, ul.blockListLast {
margin:10px 0 10px 0;
padding:0;
}
ul.blockList li.blockList, ul.blockListLast li.blockList {
list-style:none;
margin-bottom:15px;
line-height:1.4;
}
ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast li.blockList {
padding:0px 20px 5px 10px;
border:1px solid #ededed;
background-color:#f8f8f8;
}
ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList {
padding:0 0 5px 8px;
background-color:#ffffff;
border:none;
}
ul.blockList ul.blockList ul.blockList ul.blockList li.blockList {
margin-left:0;
padding-left:0;
padding-bottom:15px;
border:none;
}
ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast {
list-style:none;
border-bottom:none;
padding-bottom:0;
}
table tr td dl, table tr td dl dt, table tr td dl dd {
margin-top:0;
margin-bottom:1px;
}
/*
Table styles
*/
.overviewSummary, .memberSummary, .typeSummary, .useSummary, .constantsSummary, .deprecatedSummary {
width:100%;
border-left:1px solid #EEE;
border-right:1px solid #EEE;
border-bottom:1px solid #EEE;
}
.overviewSummary, .memberSummary {
padding:0px;
}
.overviewSummary caption, .memberSummary caption, .typeSummary caption,
.useSummary caption, .constantsSummary caption, .deprecatedSummary caption {
position:relative;
text-align:left;
background-repeat:no-repeat;
color:#253441;
font-weight:bold;
clear:none;
overflow:hidden;
padding:0px;
padding-top:10px;
padding-left:1px;
margin:0px;
white-space:pre;
}
.overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link,
.useSummary caption a:link, .constantsSummary caption a:link, .deprecatedSummary caption a:link,
.overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover,
.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover,
.overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active,
.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active,
.overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited,
.useSummary caption a:visited, .constantsSummary caption a:visited, .deprecatedSummary caption a:visited {
color:#FFFFFF;
}
.overviewSummary caption span, .memberSummary caption span, .typeSummary caption span,
.useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
padding-bottom:7px;
display:inline-block;
float:left;
background-color:#F8981D;
border: none;
height:16px;
}
.memberSummary caption span.activeTableTab span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
margin-right:3px;
display:inline-block;
float:left;
background-color:#F8981D;
height:16px;
}
.memberSummary caption span.tableTab span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
margin-right:3px;
display:inline-block;
float:left;
background-color:#4D7A97;
height:16px;
}
.memberSummary caption span.tableTab, .memberSummary caption span.activeTableTab {
padding-top:0px;
padding-left:0px;
padding-right:0px;
background-image:none;
float:none;
display:inline;
}
.overviewSummary .tabEnd, .memberSummary .tabEnd, .typeSummary .tabEnd,
.useSummary .tabEnd, .constantsSummary .tabEnd, .deprecatedSummary .tabEnd {
display:none;
width:5px;
position:relative;
float:left;
background-color:#F8981D;
}
.memberSummary .activeTableTab .tabEnd {
display:none;
width:5px;
margin-right:3px;
position:relative;
float:left;
background-color:#F8981D;
}
.memberSummary .tableTab .tabEnd {
display:none;
width:5px;
margin-right:3px;
position:relative;
background-color:#4D7A97;
float:left;
}
.overviewSummary td, .memberSummary td, .typeSummary td,
.useSummary td, .constantsSummary td, .deprecatedSummary td {
text-align:left;
padding:0px 0px 12px 10px;
width:100%;
}
th.colOne, th.colFirst, th.colLast, .useSummary th, .constantsSummary th,
td.colOne, td.colFirst, td.colLast, .useSummary td, .constantsSummary td{
vertical-align:top;
padding-right:0px;
padding-top:8px;
padding-bottom:3px;
}
th.colFirst, th.colLast, th.colOne, .constantsSummary th {
background:#dee3e9;
text-align:left;
padding:8px 3px 3px 7px;
}
td.colFirst, th.colFirst {
white-space:nowrap;
font-size:13px;
}
td.colLast, th.colLast {
font-size:13px;
}
td.colOne, th.colOne {
font-size:13px;
}
.overviewSummary td.colFirst, .overviewSummary th.colFirst,
.overviewSummary td.colOne, .overviewSummary th.colOne,
.memberSummary td.colFirst, .memberSummary th.colFirst,
.memberSummary td.colOne, .memberSummary th.colOne,
.typeSummary td.colFirst{
width:25%;
vertical-align:top;
}
td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover {
font-weight:bold;
}
.tableSubHeadingColor {
background-color:#EEEEFF;
}
.altColor {
background-color:#FFFFFF;
}
.rowColor {
background-color:#EEEEEF;
}
/*
Content styles
*/
.description pre {
margin-top:0;
}
.deprecatedContent {
margin:0;
padding:10px 0;
}
.docSummary {
padding:0;
}
ul.blockList ul.blockList ul.blockList li.blockList h3 {
font-style:normal;
}
div.block {
font-size:14px;
font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif;
}
td.colLast div {
padding-top:0px;
}
td.colLast a {
padding-bottom:3px;
}
/*
Formatting effect styles
*/
.sourceLineNo {
color:green;
padding:0 30px 0 0;
}
h1.hidden {
visibility:hidden;
overflow:hidden;
font-size:10px;
}
.block {
display:block;
margin:3px 10px 2px 0px;
color:#474747;
}
.deprecatedLabel, .descfrmTypeLabel, .memberNameLabel, .memberNameLink,
.overrideSpecifyLabel, .packageHierarchyLabel, .paramLabel, .returnLabel,
.seeLabel, .simpleTagLabel, .throwsLabel, .typeNameLabel, .typeNameLink {
font-weight:bold;
}
.deprecationComment, .emphasizedPhrase, .interfaceName {
font-style:italic;
}
div.block div.block span.deprecationComment, div.block div.block span.emphasizedPhrase,
div.block div.block span.interfaceName {
font-style:normal;
}
div.contentContainer ul.blockList li.blockList h2{
padding-bottom:0px;
}
/*
Spring
*/
pre.code {
background-color: #F8F8F8;
border: 1px solid #CCCCCC;
border-radius: 3px 3px 3px 3px;
overflow: auto;
padding: 10px;
margin: 4px 20px 2px 0px;
}
pre.code code, pre.code code * {
font-size: 1em;
}
pre.code code, pre.code code * {
padding: 0 !important;
margin: 0 !important;
}

@@ -1,28 +0,0 @@
<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:mvn="http://maven.apache.org/POM/4.0.0"
version="1.0">
<xsl:output method="text" encoding="UTF-8" indent="no"/>
<xsl:template match="/">
<xsl:text>|===&#xa;</xsl:text>
<xsl:text>| Group ID | Artifact ID | Version&#xa;</xsl:text>
<xsl:for-each select="//mvn:dependency">
<xsl:sort select="mvn:groupId"/>
<xsl:sort select="mvn:artifactId"/>
<xsl:text>&#xa;</xsl:text>
<xsl:text>| `</xsl:text>
<xsl:copy-of select="mvn:groupId"/>
<xsl:text>`&#xa;</xsl:text>
<xsl:text>| `</xsl:text>
<xsl:copy-of select="mvn:artifactId"/>
<xsl:text>`&#xa;</xsl:text>
<xsl:text>| </xsl:text>
<xsl:copy-of select="mvn:version"/>
<xsl:text>&#xa;</xsl:text>
</xsl:for-each>
<xsl:text>|===</xsl:text>
</xsl:template>
</xsl:stylesheet>
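
This stylesheet walks every mvn:dependency in a Maven POM, sorts by groupId then artifactId, and emits an AsciiDoc |=== table with one group/artifact/version row per dependency. A minimal sketch of running it with the JDK's built-in XSLT 1.0 support (the file names are illustrative, not taken from the repository):

import java.io.File;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

public class PomToAdocTable {
    public static void main(String[] args) throws Exception {
        // Compile the stylesheet shown above (saved here as a hypothetical pom-to-table.xsl).
        Transformer t = TransformerFactory.newInstance()
                .newTransformer(new StreamSource(new File("pom-to-table.xsl")));
        // Writes an AsciiDoc |=== table of groupId/artifactId/version rows.
        t.transform(new StreamSource(new File("pom.xml")),
                new StreamResult(new File("dependencies.adoc")));
    }
}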

@@ -1,14 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.M1</version>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
<description>Kafka related test classes</description>
<properties>
<kafka.version>0.8.2.1</kafka.version>
<curator.version>2.6.0</curator.version>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
@@ -16,9 +19,44 @@
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<version>${spring-kafka.version}</version>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-test-support-internal</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-logging</artifactId>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>${kafka.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<classifier>test</classifier>
<version>${kafka.version}</version>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-test</artifactId>
<version>${curator.version}</version>
<optional>true</optional>
</dependency>
<!-- rabbit -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-amqp</artifactId>
<optional>true</optional>
</dependency>
</dependencies>
</project>

@@ -0,0 +1,108 @@
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.test.junit.kafka;
import java.io.File;
import java.net.InetSocketAddress;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ZooKeeperServer;
import kafka.utils.TestUtils$;
import kafka.utils.Utils$;
/**
* A port of kafka.zk.EmbeddedZookeeper, compatible with the ZooKeeper 3.4 API.
*
* @author Marius Bogoevici
*/
public class EmbeddedZookeeper {
private String connectString;
private File snapshotDir = TestUtils$.MODULE$.tempDir();
private File logDir = TestUtils$.MODULE$.tempDir();
private int tickTime = 500;
private final ZooKeeperServer zookeeper;
private int port;
private final NIOServerCnxnFactory factory;
public EmbeddedZookeeper(String connectString) throws Exception {
this.connectString = connectString;
port = Integer.parseInt(connectString.split(":")[1]);
zookeeper = new ZooKeeperServer(snapshotDir, logDir, tickTime);
factory = new NIOServerCnxnFactory();
factory.configure(new InetSocketAddress("127.0.0.1", port), 100);
factory.startup(zookeeper);
}
public String getConnectString() {
return connectString;
}
public File getSnapshotDir() {
return snapshotDir;
}
public File getLogDir() {
return logDir;
}
public int getTickTime() {
return tickTime;
}
public ZooKeeperServer getZookeeper() {
return zookeeper;
}
public int getPort() {
return port;
}
public void shutdown() {
try {
zookeeper.shutdown();
}
catch (Exception e) {
// ignore exception
}
try {
factory.shutdown();
}
catch (Exception e) {
// ignore exception
}
try {
Utils$.MODULE$.rm(logDir);
}
catch (Exception e) {
// ignore exception
}
try {
Utils$.MODULE$.rm(snapshotDir);
}
catch (Exception e) {
// ignore exception
}
}
}
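
A minimal usage sketch, assuming the caller sits in (or imports from) the same package; the literal port is illustrative, whereas KafkaTestSupport below picks a free one via SocketUtils:

public class EmbeddedZookeeperDemo {
    public static void main(String[] args) throws Exception {
        // Connect string format is "host:port"; the constructor parses out the port.
        EmbeddedZookeeper zookeeper = new EmbeddedZookeeper("127.0.0.1:2181");
        try {
            System.out.println("ZooKeeper listening at " + zookeeper.getConnectString());
        }
        finally {
            // Stops the server and removes the temporary snapshot/log directories.
            zookeeper.shutdown();
        }
    }
}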

@@ -0,0 +1,203 @@
/*
* Copyright 2014-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.test.junit.kafka;
import java.util.Properties;
import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.exception.ZkInterruptedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Rule;
import org.springframework.cloud.stream.test.junit.AbstractExternalResourceTestSupport;
import org.springframework.util.SocketUtils;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.SystemTime$;
import kafka.utils.TestUtils;
import kafka.utils.Utils;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
/**
* JUnit {@link Rule} that starts an embedded Kafka server (with an associated Zookeeper).
*
* @author Ilayaperumal Gopinathan
* @author Marius Bogoevici
* @since 1.1
*/
public class KafkaTestSupport extends AbstractExternalResourceTestSupport<String> {
private static final Log log = LogFactory.getLog(KafkaTestSupport.class);
private static final String SCS_KAFKA_TEST_EMBEDDED = "SCS_KAFKA_TEST_EMBEDDED";
public static final boolean defaultEmbedded;
private static final String DEFAULT_ZOOKEEPER_CONNECT = "localhost:2181";
private static final String DEFAULT_KAFKA_CONNECT = "localhost:9092";
private ZkClient zkClient;
private EmbeddedZookeeper zookeeper;
private KafkaServer kafkaServer;
public final boolean embedded;
private final Properties brokerConfig = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
// caches previous failures to reach the external server - preventing repeated retries
private static boolean hasFailedAlready;
static {
// check if either the environment or Java property is set to use embedded tests
// unless the property is explicitly set to false, default to embedded
defaultEmbedded = !("false".equals(System.getenv(SCS_KAFKA_TEST_EMBEDDED))
|| "false".equals(System.getProperty(SCS_KAFKA_TEST_EMBEDDED)));
}
public KafkaTestSupport() {
this(defaultEmbedded);
}
public KafkaTestSupport(boolean embedded) {
super("KAFKA");
this.embedded = embedded;
log.info(String.format("Testing with %s Kafka broker", embedded ? "embedded" : "external"));
}
public KafkaServer getKafkaServer() {
return kafkaServer;
}
public String getZkConnectString() {
if (embedded) {
return zookeeper.getConnectString();
}
else {
return DEFAULT_ZOOKEEPER_CONNECT;
}
}
public ZkClient getZkClient() {
return this.zkClient;
}
public String getBrokerAddress() {
if (embedded) {
return kafkaServer.config().hostName() + ":" + kafkaServer.config().port();
}
else {
return DEFAULT_KAFKA_CONNECT;
}
}
@Override
protected void obtainResource() throws Exception {
if (!hasFailedAlready) {
if (embedded) {
try {
log.debug("Starting Zookeeper");
zookeeper = new EmbeddedZookeeper("127.0.0.1:" + SocketUtils.findAvailableTcpPort());
log.debug("Started Zookeeper at " + zookeeper.getConnectString());
try {
int zkConnectionTimeout = 10000;
int zkSessionTimeout = 10000;
zkClient = new ZkClient(getZkConnectString(), zkSessionTimeout, zkConnectionTimeout,
ZKStringSerializer$.MODULE$);
}
catch (Exception e) {
zookeeper.shutdown();
throw e;
}
try {
log.debug("Creating Kafka server");
Properties brokerConfigProperties = brokerConfig;
brokerConfig.put("zookeeper.connect", zookeeper.getConnectString());
brokerConfig.put("auto.create.topics.enable", "false");
brokerConfig.put("delete.topic.enable", "true");
kafkaServer = TestUtils.createServer(new KafkaConfig(brokerConfigProperties),
SystemTime$.MODULE$);
log.debug("Created Kafka server at " + kafkaServer.config().hostName() + ":"
+ kafkaServer.config().port());
}
catch (Exception e) {
zookeeper.shutdown();
zkClient.close();
throw e;
}
}
catch (Exception e) {
hasFailedAlready = true;
throw e;
}
}
else {
this.zkClient = new ZkClient(DEFAULT_ZOOKEEPER_CONNECT, 10000, 10000, ZKStringSerializer$.MODULE$);
if (ZkUtils.getAllBrokersInCluster(zkClient).size() == 0) {
hasFailedAlready = true;
throw new RuntimeException("Kafka server not available");
}
}
}
else {
throw new RuntimeException("Kafka server not available");
}
}
@Override
protected void cleanupResource() throws Exception {
if (embedded) {
try {
kafkaServer.shutdown();
}
catch (Exception e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
try {
Utils.rm(kafkaServer.config().logDirs());
}
catch (Exception e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
}
try {
zkClient.close();
}
catch (ZkInterruptedException e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
if (embedded) {
try {
zookeeper.shutdown();
}
catch (Exception e) {
// ignore errors on shutdown
log.error(e.getMessage(), e);
}
}
}
}
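// A minimal usage sketch (the test class below is hypothetical, not part of
// this changeset), assuming the support class is applied as a standard JUnit
// rule: the broker is started (or verified) before each test and torn down after.
import static org.junit.Assert.assertTrue;

import org.junit.Rule;
import org.junit.Test;

public class ExampleKafkaBinderTest {

    @Rule
    public KafkaTestSupport kafkaTestSupport = new KafkaTestSupport();

    @Test
    public void brokerAddressIsAvailable() {
        // resolves to the embedded broker's host:port, or localhost:9092 when external
        assertTrue(kafkaTestSupport.getBrokerAddress().contains(":"));
    }
}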

View File

@@ -0,0 +1,76 @@
/*
* Copyright 2014-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.test.junit.kafka;
import java.io.IOException;
import java.util.Properties;
import org.apache.curator.test.TestingServer;
import org.springframework.util.SocketUtils;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServerStartable;
import kafka.utils.TestUtils;
/**
* An embedded Kafka + ZooKeeper pair for use in tests.
*
* @author Eric Bottard
*/
public class TestKafkaCluster {
private KafkaServerStartable kafkaServer;
private TestingServer zkServer;
public TestKafkaCluster() {
try {
zkServer = new TestingServer(SocketUtils.findAvailableTcpPort());
}
catch (Exception e) {
throw new IllegalStateException(e);
}
KafkaConfig config = getKafkaConfig(zkServer.getConnectString());
kafkaServer = new KafkaServerStartable(config);
kafkaServer.startup();
}
private static KafkaConfig getKafkaConfig(final String zkConnectString) {
scala.collection.Iterator<Properties> propsI = TestUtils
.createBrokerConfigs(1, false).iterator();
assert propsI.hasNext();
Properties props = propsI.next();
assert props.containsKey("zookeeper.connect");
props.put("zookeeper.connect", zkConnectString);
return new KafkaConfig(props);
}
public String getKafkaBrokerString() {
return String.format("localhost:%d", kafkaServer.serverConfig().port());
}
public void stop() throws IOException {
kafkaServer.shutdown();
zkServer.stop();
}
public String getZkConnectString() {
return zkServer.getConnectString();
}
}
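// A hedged usage sketch (the example class below is illustrative, not part of
// this changeset): the constructor starts both servers immediately, so stop()
// must be called to free the ports.
import java.io.IOException;

public class TestKafkaClusterExample {

    public static void main(String[] args) throws IOException {
        TestKafkaCluster cluster = new TestKafkaCluster();
        try {
            // e.g. "localhost:54321" (the port is whatever the broker bound to)
            System.out.println("brokers=" + cluster.getKafkaBrokerString());
            System.out.println("zookeeper=" + cluster.getZkConnectString());
        }
        finally {
            cluster.stop();
        }
    }
}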

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
@@ -10,10 +10,22 @@
<parent>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
<version>1.1.0.M1</version>
<version>1.1.0.BUILD-SNAPSHOT</version>
</parent>
<properties>
<kafka.version>0.9.0.1</kafka.version>
<spring-kafka.version>1.0.0.RELEASE</spring-kafka.version>
<spring-integration-kafka.version>2.0.0.BUILD-SNAPSHOT</spring-integration-kafka.version>
<rxjava-math.version>1.0.0</rxjava-math.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
@@ -41,7 +53,7 @@
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-stream-binder-kafka-test-support</artifactId>
<scope>test</scope>
<version>1.1.0.M1</version>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.integration</groupId>
@@ -73,6 +85,15 @@
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava</artifactId>
</dependency>
<dependency>
<groupId>io.reactivex</groupId>
<artifactId>rxjava-math</artifactId>
<version>${rxjava-math.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.11</artifactId>

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,19 +16,22 @@
package org.springframework.cloud.stream.binder.kafka;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
import kafka.common.ErrorMapping;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.Callback;
@@ -37,14 +40,13 @@ import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.security.JaasUtils;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.utils.Utils;
import scala.collection.Seq;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.cloud.stream.binder.AbstractMessageChannelBinder;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
@@ -52,35 +54,53 @@ import org.springframework.cloud.stream.binder.BinderHeaders;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.ExtendedPropertiesBinder;
import org.springframework.cloud.stream.binder.PartitionHandler;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.context.Lifecycle;
import org.springframework.expression.common.LiteralExpression;
import org.springframework.expression.spel.standard.SpelExpressionParser;
import org.springframework.integration.core.MessageProducer;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.handler.AbstractMessageHandler;
import org.springframework.integration.kafka.inbound.KafkaMessageDrivenChannelAdapter;
import org.springframework.integration.kafka.outbound.KafkaProducerMessageHandler;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.AcknowledgingMessageListener;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ErrorHandler;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.listener.config.ContainerProperties;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.TopicPartitionInitialOffset;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.MessagingException;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
import kafka.common.ErrorMapping;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import scala.collection.Seq;
/**
* A {@link Binder} that uses Kafka as the underlying middleware.
* @author Eric Bottard
@@ -93,17 +113,31 @@ import org.springframework.util.StringUtils;
*/
public class KafkaMessageChannelBinder extends
AbstractMessageChannelBinder<ExtendedConsumerProperties<KafkaConsumerProperties>,
ExtendedProducerProperties<KafkaProducerProperties>, Collection<PartitionInfo>, String>
ExtendedProducerProperties<KafkaProducerProperties>>
implements ExtendedPropertiesBinder<MessageChannel, KafkaConsumerProperties, KafkaProducerProperties>,
DisposableBean {
private static final ByteArraySerializer BYTE_ARRAY_SERIALIZER = new ByteArraySerializer();
private static final ThreadFactory DAEMON_THREAD_FACTORY;
static {
CustomizableThreadFactory threadFactory = new CustomizableThreadFactory("kafka-binder-");
threadFactory.setDaemon(true);
DAEMON_THREAD_FACTORY = threadFactory;
}
private final KafkaBinderConfigurationProperties configurationProperties;
private RetryOperations metadataRetryOperations;
private final Map<String, Collection<PartitionInfo>> topicsInUse = new HashMap<>();
private ProducerListener<byte[], byte[]> producerListener;
// -------- Default values for properties -------
private ConsumerFactory<byte[], byte[]> consumerFactory;
private ProducerListener producerListener;
private volatile Producer<byte[], byte[]> dlqProducer;
@@ -144,8 +178,18 @@ public class KafkaMessageChannelBinder extends
@Override
public void onInit() throws Exception {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
props.put(ConsumerConfig.GROUP_ID_CONFIG, configurationProperties.getConsumerGroup());
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
"earliest");
Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
if (this.metadataRetryOperations == null) {
consumerFactory = new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
if (metadataRetryOperations == null) {
RetryTemplate retryTemplate = new RetryTemplate();
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
@@ -157,7 +201,7 @@ public class KafkaMessageChannelBinder extends
backOffPolicy.setMultiplier(2);
backOffPolicy.setMaxInterval(1000);
retryTemplate.setBackOffPolicy(backOffPolicy);
this.metadataRetryOperations = retryTemplate;
metadataRetryOperations = retryTemplate;
}
}
@@ -169,12 +213,27 @@ public class KafkaMessageChannelBinder extends
}
}
public void setProducerListener(ProducerListener<byte[], byte[]> producerListener) {
public void setProducerListener(ProducerListener producerListener) {
this.producerListener = producerListener;
}
Map<String, Collection<PartitionInfo>> getTopicsInUse() {
return this.topicsInUse;
/**
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
*/
static void validateTopicName(String topicName) {
try {
byte[] utf8 = topicName.getBytes("UTF-8");
for (byte b : utf8) {
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|| (b == '-') || (b == '_'))) {
throw new IllegalArgumentException(
"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
}
}
}
catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Can't happen
}
}
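// Illustrative expectations for the rule above (the accepted name is an
// example; the rejected one matches the test case later in this changeset):
//
//   validateTopicName("orders.2016_v1-a"); // accepted: alphanumerics, '.', '_', '-'
//   validateTopicName("foo:bar");          // throws IllegalArgumentException (':' is not allowed)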
@Override
@@ -187,80 +246,14 @@ public class KafkaMessageChannelBinder extends
return this.extendedBindingProperties.getExtendedProducerProperties(channelName);
}
@Override
protected MessageHandler createProducerMessageHandler(final String destination,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
KafkaTopicUtils.validateTopicName(destination);
Collection<PartitionInfo> partitions = ensureTopicCreated(destination, producerProperties.getPartitionCount());
if (producerProperties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + destination + " is "
+ producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
this.topicsInUse.put(destination, partitions);
DefaultKafkaProducerFactory<byte[], byte[]> producerFB = getProducerFactory(producerProperties);
KafkaTemplate<byte[], byte[]> kafkaTemplate = new KafkaTemplate<>(producerFB);
if (this.producerListener != null) {
kafkaTemplate.setProducerListener(this.producerListener);
}
return new ProducerConfigurationMessageHandler(kafkaTemplate, destination, producerProperties, producerFB);
Map<String, Collection<PartitionInfo>> getTopicsInUse() {
return this.topicsInUse;
}
@Override
protected String createProducerDestinationIfNecessary(String name,
ExtendedProducerProperties<KafkaProducerProperties> properties) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
KafkaTopicUtils.validateTopicName(name);
Collection<PartitionInfo> partitions = ensureTopicCreated(name, properties.getPartitionCount());
if (properties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + name + " is "
+ properties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
this.topicsInUse.put(name, partitions);
return name;
}
private DefaultKafkaProducerFactory<byte[], byte[]> getProducerFactory(
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) {
Map<String, Object> props = new HashMap<>();
if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
props.putAll(configurationProperties.getConfiguration());
}
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
props.put(ProducerConfig.RETRIES_CONFIG, 0);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.ACKS_CONFIG, String.valueOf(this.configurationProperties.getRequiredAcks()));
props.put(ProducerConfig.LINGER_MS_CONFIG,
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,
producerProperties.getExtension().getCompressionType().toString());
if (!ObjectUtils.isEmpty(producerProperties.getExtension().getConfiguration())) {
props.putAll(producerProperties.getExtension().getConfiguration());
}
return new DefaultKafkaProducerFactory<>(props);
}
@Override
protected Collection<PartitionInfo> createConsumerDestinationIfNecessary(String name, String group,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
KafkaTopicUtils.validateTopicName(name);
protected Object createConsumerDestinationIfNecessary(String name, String group,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
validateTopicName(name);
if (properties.getInstanceCount() == 0) {
throw new IllegalArgumentException("Instance count cannot be zero");
}
@@ -285,51 +278,37 @@ public class KafkaMessageChannelBinder extends
return listenedPartitions;
}
@Override
@SuppressWarnings("unchecked")
protected MessageProducer createConsumerEndpoint(String name, String group, Collection<PartitionInfo> destination,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
boolean anonymous = !StringUtils.hasText(group);
Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
"DLQ support is not available for anonymous subscriptions");
String consumerGroup = anonymous ? "anonymous." + UUID.randomUUID().toString() : group;
protected AbstractEndpoint createConsumerEndpoint(String name, String group, Object queue,
MessageChannel inputChannel, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
Map<String, Object> props = getConsumerConfig(anonymous, consumerGroup);
Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
Collection<PartitionInfo> listenedPartitions = (Collection<PartitionInfo>) queue;
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
if (!ObjectUtils.isEmpty(properties.getExtension().getConfiguration())) {
props.putAll(properties.getExtension().getConfiguration());
final TopicPartitionInitialOffset[] topicPartitionInitialOffsets =
new TopicPartitionInitialOffset[listenedPartitions.size()];
int i = 0;
for (PartitionInfo partition : listenedPartitions) {
topicPartitionInitialOffsets[i++] = new TopicPartitionInitialOffset(partition.topic(),
partition.partition());
}
ConsumerFactory<byte[], byte[]> consumerFactory = new DefaultKafkaConsumerFactory<>(props, keyDecoder,
valueDecoder);
Collection<PartitionInfo> listenedPartitions = destination;
Assert.isTrue(!CollectionUtils.isEmpty(listenedPartitions), "A list of partitions must be provided");
final TopicPartitionInitialOffset[] topicPartitionInitialOffsets = getTopicPartitionInitialOffsets(
listenedPartitions);
final ContainerProperties containerProperties =
anonymous || properties.getExtension().isAutoRebalanceEnabled() ? new ContainerProperties(name)
: new ContainerProperties(topicPartitionInitialOffsets);
int concurrency = Math.min(properties.getConcurrency(), listenedPartitions.size());
final ConcurrentMessageListenerContainer<byte[], byte[]> messageListenerContainer =
new ConcurrentMessageListenerContainer(
consumerFactory, containerProperties) {
@Override
public void stop(Runnable callback) {
super.stop(callback);
}
};
final ExecutorService dispatcherTaskExecutor =
Executors.newFixedThreadPool(concurrency, DAEMON_THREAD_FACTORY);
final ContainerProperties containerProperties = new ContainerProperties(topicPartitionInitialOffsets);
//final ContainerProperties containerProperties = new ContainerProperties(name);
final ConcurrentMessageListenerContainer<byte[], byte[]> messageListenerContainer = new ConcurrentMessageListenerContainer<>(
consumerFactory, containerProperties);
messageListenerContainer.setConcurrency(concurrency);
messageListenerContainer.getContainerProperties().setAckOnError(isAutoCommitOnError(properties));
if (this.logger.isDebugEnabled()) {
this.logger.debug(
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
if (logger.isDebugEnabled()) {
logger.debug("Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
}
if (this.logger.isDebugEnabled()) {
@@ -337,85 +316,160 @@ public class KafkaMessageChannelBinder extends
"Listened partitions: " + StringUtils.collectionToCommaDelimitedString(listenedPartitions));
}
final KafkaMessageDrivenChannelAdapter<byte[], byte[]> kafkaMessageDrivenChannelAdapter =
new KafkaMessageDrivenChannelAdapter<>(
messageListenerContainer);
final KafkaMessageDrivenChannelAdapter<byte[], byte[]> kafkaMessageDrivenChannelAdapter = new KafkaMessageDrivenChannelAdapter<>(
messageListenerContainer);
kafkaMessageDrivenChannelAdapter.setBeanFactory(this.getBeanFactory());
final RetryTemplate retryTemplate = buildRetryTemplate(properties);
kafkaMessageDrivenChannelAdapter.setRetryTemplate(retryTemplate);
kafkaMessageDrivenChannelAdapter.setOutputChannel(inputChannel);
kafkaMessageDrivenChannelAdapter.afterPropertiesSet();
// we need to wrap the adapter listener into a retrying listener so that the retry
// logic is applied before the ErrorHandler is executed
final RetryTemplate retryTemplate = buildRetryTemplateIfRetryEnabled(properties);
if (retryTemplate != null) {
if (properties.getExtension().isAutoCommitOffset()) {
final MessageListener originalMessageListener = (MessageListener) messageListenerContainer
.getContainerProperties().getMessageListener();
messageListenerContainer.getContainerProperties().setMessageListener(new MessageListener() {
@Override
public void onMessage(final ConsumerRecord message) {
try {
retryTemplate.execute(new RetryCallback<Object, Throwable>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message);
return null;
}
});
}
catch (Throwable throwable) {
if (throwable instanceof RuntimeException) {
throw (RuntimeException) throwable;
}
else {
throw new RuntimeException(throwable);
}
}
}
});
}
else {
messageListenerContainer.getContainerProperties().setMessageListener(new AcknowledgingMessageListener() {
final AcknowledgingMessageListener originalMessageListener = (AcknowledgingMessageListener) messageListenerContainer
.getContainerProperties().getMessageListener();
@Override
public void onMessage(final ConsumerRecord message, final Acknowledgment acknowledgment) {
retryTemplate.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) {
originalMessageListener.onMessage(message, acknowledgment);
return null;
}
});
}
});
}
}
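// Condensed restatement of the wrapping above (a simplified sketch, not the
// verbatim commit): the container's original listener is swapped for one that
// invokes it inside the RetryTemplate, so all retry attempts are exhausted
// before the ErrorHandler (e.g. the DLQ handler below) ever sees the failure.
//
//   final MessageListener original = (MessageListener) messageListenerContainer
//           .getContainerProperties().getMessageListener();
//   messageListenerContainer.getContainerProperties().setMessageListener(new MessageListener() {
//       @Override
//       public void onMessage(final ConsumerRecord message) {
//           retryTemplate.execute(new RetryCallback<Object, RuntimeException>() {
//               @Override
//               public Object doWithRetry(RetryContext context) {
//                   original.onMessage(message);
//                   return null;
//               }
//           });
//       }
//   });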
if (properties.getExtension().isEnableDlq()) {
final String dlqTopic = "error." + name + "." + group;
initDlqProducer();
messageListenerContainer.getContainerProperties().setErrorHandler(new ErrorHandler() {
@Override
public void handle(Exception thrownException, final ConsumerRecord message) {
final byte[] key = message.key() != null ? Utils.toArray(ByteBuffer.wrap((byte[]) message.key()))
final byte[] key = message.key() != null ? Utils.toArray(ByteBuffer.wrap((byte[])message.key()))
: null;
final byte[] payload = message.value() != null
? Utils.toArray(ByteBuffer.wrap((byte[]) message.value())) : null;
KafkaMessageChannelBinder.this.dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload),
new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
StringBuffer messageLog = new StringBuffer();
messageLog.append(" a message with key='"
+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
messageLog.append(" and payload='"
+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
messageLog.append(" received from " + message.partition());
if (exception != null) {
KafkaMessageChannelBinder.this.logger.error(
"Error sending to DLQ" + messageLog.toString(), exception);
}
else {
if (KafkaMessageChannelBinder.this.logger.isDebugEnabled()) {
KafkaMessageChannelBinder.this.logger.debug(
"Sent to DLQ " + messageLog.toString());
}
}
? Utils.toArray(ByteBuffer.wrap((byte[])message.value())) : null;
dlqProducer.send(new ProducerRecord<>(dlqTopic, key, payload), new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
StringBuffer messageLog = new StringBuffer();
messageLog.append(" a message with key='"
+ toDisplayString(ObjectUtils.nullSafeToString(key), 50) + "'");
messageLog.append(" and payload='"
+ toDisplayString(ObjectUtils.nullSafeToString(payload), 50) + "'");
messageLog.append(" received from " + message.partition());
if (exception != null) {
logger.error("Error sending to DLQ" + messageLog.toString(), exception);
}
else {
if (logger.isDebugEnabled()) {
logger.debug("Sent to DLQ " + messageLog.toString());
}
});
}
}
});
}
});
}
kafkaMessageDrivenChannelAdapter.start();
return kafkaMessageDrivenChannelAdapter;
}
private Map<String, Object> getConsumerConfig(boolean anonymous, String consumerGroup) {
@Override
protected MessageHandler createProducerMessageHandler(final String name,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties) throws Exception {
validateTopicName(name);
Collection<PartitionInfo> partitions = ensureTopicCreated(name, producerProperties.getPartitionCount());
if (producerProperties.getPartitionCount() < partitions.size()) {
if (logger.isInfoEnabled()) {
logger.info("The `partitionCount` of the producer for topic " + name + " is "
+ producerProperties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
topicsInUse.put(name, partitions);
Map<String, Object> props = new HashMap<>();
if (!ObjectUtils.isEmpty(configurationProperties.getConfiguration())) {
props.putAll(configurationProperties.getConfiguration());
}
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.configurationProperties.getKafkaConnectionString());
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
anonymous ? "latest" : "earliest");
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100);
return props;
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
props.put(ProducerConfig.RETRIES_CONFIG, 0);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.ACKS_CONFIG, String.valueOf(configurationProperties.getRequiredAcks()));
props.put(ProducerConfig.LINGER_MS_CONFIG,
String.valueOf(producerProperties.getExtension().getBatchTimeout()));
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, producerProperties.getExtension().getCompressionType().toString());
ProducerFactory<byte[], byte[]> producerFB = new DefaultKafkaProducerFactory<>(props);
// final ProducerConfiguration<byte[], byte[]> producerConfiguration = new ProducerConfiguration<>(
// producerMetadata, producerFB.getObject());
// producerConfiguration.setProducerListener(this.producerListener);
// KafkaProducerContext kafkaProducerContext = new KafkaProducerContext();
// kafkaProducerContext.setProducerConfigurations(
// Collections.<String, ProducerConfiguration<?, ?>>singletonMap(name, producerConfiguration));
// return new ProducerConfigurationMessageHandler(producerConfiguration, name);
//
return new SendingHandler(producerFB, name, producerProperties, partitions.size());
}
private boolean isAutoCommitOnError(ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
return properties.getExtension().getAutoCommitOnError() != null
? properties.getExtension().getAutoCommitOnError()
: properties.getExtension().isAutoCommitOffset() && properties.getExtension().isEnableDlq();
}
private TopicPartitionInitialOffset[] getTopicPartitionInitialOffsets(
Collection<PartitionInfo> listenedPartitions) {
final TopicPartitionInitialOffset[] topicPartitionInitialOffsets =
new TopicPartitionInitialOffset[listenedPartitions.size()];
int i = 0;
for (PartitionInfo partition : listenedPartitions) {
topicPartitionInitialOffsets[i++] = new TopicPartitionInitialOffset(partition.topic(),
partition.partition());
@Override
protected void createProducerDestinationIfNecessary(String name,
ExtendedProducerProperties<KafkaProducerProperties> properties) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
return topicPartitionInitialOffsets;
validateTopicName(name);
Collection<PartitionInfo> partitions = ensureTopicCreated(name, properties.getPartitionCount());
if (properties.getPartitionCount() < partitions.size()) {
if (this.logger.isInfoEnabled()) {
this.logger.info("The `partitionCount` of the producer for topic " + name + " is "
+ properties.getPartitionCount() + ", smaller than the actual partition count of "
+ partitions.size() + " of the topic. The larger number will be used instead.");
}
}
this.topicsInUse.put(name, partitions);
}
/**
@@ -424,44 +478,45 @@ public class KafkaMessageChannelBinder extends
*/
private Collection<PartitionInfo> ensureTopicCreated(final String topicName, final int partitionCount) {
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
JaasUtils.isZkSecurityEnabled());
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
try {
final Properties topicConfig = new Properties();
TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topicName, zkUtils);
if (topicMetadata.errorCode() == ErrorMapping.NoError()) {
// only consider minPartitionCount for resizing if autoAddPartitions is
// true
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
: partitionCount;
int effectivePartitionCount = configurationProperties.isAutoAddPartitions()
? Math.max(configurationProperties.getMinPartitionCount(), partitionCount) : partitionCount;
if (topicMetadata.partitionsMetadata().size() < effectivePartitionCount) {
if (this.configurationProperties.isAutoAddPartitions()) {
if (configurationProperties.isAutoAddPartitions()) {
AdminUtils.addPartitions(zkUtils, topicName, effectivePartitionCount, null, false);
}
else {
int topicSize = topicMetadata.partitionsMetadata().size();
throw new BinderException("The number of expected partitions was: " + partitionCount + ", but "
+ topicSize + (topicSize > 1 ? " have " : " has ") + "been found instead. "
+ "Consider either increasing the partition count of the topic or enabling " +
"`autoAddPartitions`");
+ "Consider either increasing the partition count of the topic or enabling `autoAddPartitions`");
}
}
}
else if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
if (this.configurationProperties.isAutoCreateTopics()) {
if (configurationProperties.isAutoCreateTopics()) {
Seq<Object> brokerList = zkUtils.getSortedBrokerList();
// always consider minPartitionCount for topic creation
final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
int effectivePartitionCount = Math.max(configurationProperties.getMinPartitionCount(),
partitionCount);
this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
final scala.collection.Map<Object, Seq<Object>> replicaAssignment = AdminUtils
.assignReplicasToBrokers(brokerList, effectivePartitionCount,
configurationProperties.getReplicationFactor(), -1, -1);
metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {
@Override
public Object doWithRetry(RetryContext context) throws RuntimeException {
AdminUtils.createTopic(zkUtils, topicName, effectivePartitionCount,
configurationProperties.getReplicationFactor(), new Properties());
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topicName,
replicaAssignment, topicConfig, true);
return null;
}
});
@@ -475,16 +530,12 @@ public class KafkaMessageChannelBinder extends
ErrorMapping.exceptionFor(topicMetadata.errorCode()));
}
try {
return this.metadataRetryOperations
Collection<PartitionInfo> partitions = metadataRetryOperations
.execute(new RetryCallback<Collection<PartitionInfo>, Exception>() {
@Override
public Collection<PartitionInfo> doWithRetry(RetryContext context) throws Exception {
Collection<PartitionInfo> partitions =
getProducerFactory(
new ExtendedProducerProperties<>(new KafkaProducerProperties()))
.createProducer().partitionsFor(topicName);
Collection<PartitionInfo> partitions = consumerFactory.createConsumer().partitionsFor(topicName);
// do a sanity check on the partition set
if (partitions.size() < partitionCount) {
throw new IllegalStateException("The number of expected partitions was: "
@@ -494,37 +545,36 @@ public class KafkaMessageChannelBinder extends
return partitions;
}
});
return partitions;
}
catch (Exception e) {
this.logger.error("Cannot initialize Binder", e);
logger.error("Cannot initialize Binder", e);
throw new BinderException("Cannot initialize binder:", e);
}
}
finally {
zkUtils.close();
zkClient.close();
}
}
private synchronized void initDlqProducer() {
try {
if (this.dlqProducer == null) {
if (dlqProducer == null) {
synchronized (this) {
if (this.dlqProducer == null) {
if (dlqProducer == null) {
// we can use the producer defaults as we do not need to tune
// performance
Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
this.configurationProperties.getKafkaConnectionString());
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
props.put(ProducerConfig.RETRIES_CONFIG, 0);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
DefaultKafkaProducerFactory<byte[], byte[]> defaultKafkaProducerFactory =
new DefaultKafkaProducerFactory<>(props);
this.dlqProducer = defaultKafkaProducerFactory.createProducer();
DefaultKafkaProducerFactory<byte[], byte[]> defaultKafkaProducerFactory = new DefaultKafkaProducerFactory<>(props);
dlqProducer = defaultKafkaProducerFactory.createProducer();
}
}
}
@@ -541,49 +591,85 @@ public class KafkaMessageChannelBinder extends
return original.substring(0, maxCharacters) + "...";
}
private final class ProducerConfigurationMessageHandler extends KafkaProducerMessageHandler<byte[], byte[]>
implements Lifecycle {
@Override
public void doManualAck(LinkedList<MessageHeaders> messageHeadersList) {
Iterator<MessageHeaders> iterator = messageHeadersList.iterator();
while (iterator.hasNext()) {
MessageHeaders headers = iterator.next();
Acknowledgment acknowledgment = (Acknowledgment) headers.get(KafkaHeaders.ACKNOWLEDGMENT);
Assert.notNull(acknowledgment,
"Acknowledgment must not be null when acknowledging a Kafka message manually.");
acknowledgment.acknowledge();
}
}
private boolean running = true;
private final class SendingHandler extends AbstractMessageHandler implements Lifecycle {
private final DefaultKafkaProducerFactory<byte[], byte[]> producerFactory;
private final AtomicInteger roundRobinCount = new AtomicInteger();
private ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate, String topic,
private ProducerFactory<byte[], byte[]> delegate;
private String targetTopic;
private final ExtendedProducerProperties<KafkaProducerProperties> producerProperties;
private final PartitionHandler partitionHandler;
private final KafkaTemplate<byte[], byte[]> kafkaTemplate;
private final int numberOfKafkaPartitions;
private SendingHandler(
ProducerFactory<byte[], byte[]> delegate, String targetTopic,
ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
DefaultKafkaProducerFactory<byte[], byte[]> producerFactory) {
super(kafkaTemplate);
setTopicExpression(new LiteralExpression(topic));
setBeanFactory(KafkaMessageChannelBinder.this.getBeanFactory());
if (producerProperties.isPartitioned()) {
SpelExpressionParser parser = new SpelExpressionParser();
setPartitionIdExpression(parser.parseExpression("headers.partition"));
}
if (producerProperties.getExtension().isSync()) {
setSync(true);
}
this.producerFactory = producerFactory;
int numberOfPartitions) {
Assert.notNull(delegate, "Delegate cannot be null");
Assert.hasText(targetTopic, "Target topic cannot be null");
this.delegate = delegate;
this.targetTopic = targetTopic;
this.producerProperties = producerProperties;
ConfigurableListableBeanFactory beanFactory = KafkaMessageChannelBinder.this.getBeanFactory();
this.setBeanFactory(beanFactory);
this.numberOfKafkaPartitions = numberOfPartitions;
this.kafkaTemplate = new KafkaTemplate<>(delegate);
this.partitionHandler = new PartitionHandler(beanFactory, evaluationContext, partitionSelector, producerProperties);
}
@Override
public void handleMessageInternal(Message<?> message) throws MessagingException {
int targetPartition;
if (producerProperties.isPartitioned()) {
targetPartition = this.partitionHandler.determinePartition(message);
}
else {
targetPartition = roundRobin() % numberOfKafkaPartitions;
}
// this.delegate.send(this.targetTopic,
// message.getHeaders().get(BinderHeaders.PARTITION_HEADER, Integer.class), null,
// (byte[]) message.getPayload());
kafkaTemplate.send(this.targetTopic, targetPartition, (byte[]) message.getPayload());
}
private int roundRobin() {
int result = roundRobinCount.incrementAndGet();
if (result == Integer.MAX_VALUE) {
roundRobinCount.set(0);
}
return result;
}
@Override
public void start() {
try {
super.onInit();
}
catch (Exception e) {
this.logger.error("Initialization errors: ", e);
throw new RuntimeException(e);
}
}
@Override
public void stop() {
producerFactory.stop();
this.running = false;
}
@Override
public boolean isRunning() {
return this.running;
return false;
}
}
}
}

View File

@@ -1,48 +0,0 @@
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.stream.binder.kafka;
import java.io.UnsupportedEncodingException;
/**
* @author Soby Chacko
*/
public final class KafkaTopicUtils {
private KafkaTopicUtils() {
}
/**
* Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
*/
public static void validateTopicName(String topicName) {
try {
byte[] utf8 = topicName.getBytes("UTF-8");
for (byte b : utf8) {
if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
|| (b == '-') || (b == '_'))) {
throw new IllegalArgumentException(
"Topic name can only have ASCII alphanumerics, '.', '_' and '-'");
}
}
}
catch (UnsupportedEncodingException e) {
throw new AssertionError(e); // Can't happen
}
}
}

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,8 +21,6 @@ import org.springframework.boot.autoconfigure.PropertyPlaceholderAutoConfigurati
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.kafka.KafkaBinderHealthIndicator;
import org.springframework.cloud.stream.binder.kafka.KafkaExtendedBindingProperties;
import org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder;
import org.springframework.cloud.stream.config.codec.kryo.KryoCodecAutoConfiguration;
import org.springframework.context.annotation.Bean;
@@ -41,8 +39,8 @@ import org.springframework.kafka.support.ProducerListener;
*/
@Configuration
@ConditionalOnMissingBean(Binder.class)
@Import({KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class})
@EnableConfigurationProperties({KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class})
@Import({ KryoCodecAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class })
@EnableConfigurationProperties({ KafkaBinderConfigurationProperties.class, KafkaExtendedBindingProperties.class })
public class KafkaBinderConfiguration {
@Autowired
@@ -59,11 +57,10 @@ public class KafkaBinderConfiguration {
@Bean
KafkaMessageChannelBinder kafkaMessageChannelBinder() {
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(
this.configurationProperties);
kafkaMessageChannelBinder.setCodec(this.codec);
KafkaMessageChannelBinder kafkaMessageChannelBinder = new KafkaMessageChannelBinder(configurationProperties);
kafkaMessageChannelBinder.setCodec(codec);
//kafkaMessageChannelBinder.setProducerListener(producerListener);
kafkaMessageChannelBinder.setExtendedBindingProperties(this.kafkaExtendedBindingProperties);
kafkaMessageChannelBinder.setExtendedBindingProperties(kafkaExtendedBindingProperties);
return kafkaMessageChannelBinder;
}
@@ -73,8 +70,8 @@ public class KafkaBinderConfiguration {
return new LoggingProducerListener();
}
@Bean
KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, this.configurationProperties);
}
// @Bean
// KafkaBinderHealthIndicator healthIndicator(KafkaMessageChannelBinder kafkaMessageChannelBinder) {
// return new KafkaBinderHealthIndicator(kafkaMessageChannelBinder, configurationProperties);
// }
}

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,6 +16,9 @@
package org.springframework.cloud.stream.binder.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -28,44 +31,35 @@ import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.assertj.core.api.Condition;
import org.apache.zookeeper.ZKUtil;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.springframework.beans.DirectFieldAccessor;
import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.PartitionCapableBinderTests;
import org.springframework.cloud.stream.binder.PartitionTestSupport;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.TestUtils;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.config.BindingProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.kafka.outbound.KafkaProducerMessageHandler;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.support.TopicPartitionInitialOffset;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.MessageHandler;
@@ -76,8 +70,14 @@ import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.fail;
import kafka.admin.AdminUtils;
import kafka.api.TopicMetadata;
import kafka.cluster.Partition;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.rule.KafkaEmbedded;
/**
* Integration tests for the {@link KafkaMessageChannelBinder}.
@@ -86,15 +86,16 @@ import static org.junit.Assert.fail;
* @author Mark Fisher
* @author Ilayaperumal Gopinathan
*/
public class KafkaBinderTests
extends
PartitionCapableBinderTests<KafkaTestBinder, ExtendedConsumerProperties<KafkaConsumerProperties>,
ExtendedProducerProperties<KafkaProducerProperties>> {
public class KafkaBinderTests extends
PartitionCapableBinderTests<KafkaTestBinder, ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>> {
private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();
private static long uniqueBindingId = System.currentTimeMillis();
@ClassRule
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);
public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10, "retryTest." + uniqueBindingId + ".0",
"error.retryTest." + uniqueBindingId + ".0.testGroup");
private KafkaTestBinder binder;
@@ -112,7 +113,7 @@ public class KafkaBinderTests
return binder;
}
protected KafkaBinderConfigurationProperties createConfigurationProperties() {
private KafkaBinderConfigurationProperties createConfigurationProperties() {
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
List<String> bAddresses = new ArrayList<>();
@@ -122,9 +123,25 @@ public class KafkaBinderTests
String[] foo = new String[bAddresses.size()];
binderConfiguration.setBrokers(bAddresses.toArray(foo));
binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
binderConfiguration.setConsumerGroup("testGroup");
return binderConfiguration;
}
private KafkaBinderConfigurationProperties createConfigurationProperties1() {
KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
BrokerAddress[] ba = embeddedKafka.getBrokerAddresses();
List<String> baddresses = new ArrayList<>();
for (BrokerAddress badd : ba) {
baddresses.add(badd.toString());
}
String[] foo = new String[baddresses.size()];
binderConfiguration.setBrokers(baddresses.toArray(foo));
binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
binderConfiguration.setConsumerGroup("startOffsets");
return binderConfiguration;
}
@Override
protected ExtendedConsumerProperties<KafkaConsumerProperties> createConsumerProperties() {
return new ExtendedConsumerProperties<>(new KafkaConsumerProperties());
@@ -163,8 +180,8 @@ public class KafkaBinderTests
Map<String, Object> props = new HashMap<>();
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
props.put(ConsumerConfig.GROUP_ID_CONFIG, configurationProperties.getConsumerGroup());
Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();
@@ -173,7 +190,8 @@ public class KafkaBinderTests
}
@Test
public void testDlqAndRetry() throws Exception {
@Ignore
public void testDlqAndRetry() {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
DirectChannel moduleInputChannel = new DirectChannel();
@@ -181,14 +199,12 @@ public class KafkaBinderTests
FailingInvocationCountingMessageHandler handler = new FailingInvocationCountingMessageHandler();
moduleInputChannel.subscribe(handler);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(2);
producerProperties.setPartitionCount(10);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setMaxAttempts(3);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
@@ -200,7 +216,7 @@ public class KafkaBinderTests
Binding<MessageChannel> dlqConsumerBinding = binder.bindConsumer(
"error.retryTest." + uniqueBindingId + ".0.testGroup", null, dlqChannel, dlqConsumerProperties);
binderBindUnbindLatency();
String testMessagePayload = "test." + UUID.randomUUID().toString();
Message<String> testMessage = MessageBuilder.withPayload(testMessagePayload).build();
moduleOutputChannel.send(testMessage);
@@ -209,7 +225,6 @@ public class KafkaBinderTests
assertThat(receivedMessage).isNotNull();
assertThat(receivedMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
binderBindUnbindLatency();
dlqConsumerBinding.unbind();
consumerBinding.unbind();
producerBinding.unbind();
@@ -228,8 +243,7 @@ public class KafkaBinderTests
consumerProperties.setMaxAttempts(1);
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
long uniqueBindingId = System.currentTimeMillis();
//long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0",
@@ -252,7 +266,6 @@ public class KafkaBinderTests
QueueChannel successfulInputChannel = new QueueChannel();
consumerBinding = binder.bindConsumer("retryTest." + uniqueBindingId + ".0", "testGroup",
successfulInputChannel, consumerProperties);
binderBindUnbindLatency();
String testMessage2Payload = "test." + UUID.randomUUID().toString();
Message<String> testMessage2 = MessageBuilder.withPayload(testMessage2Payload).build();
moduleOutputChannel.send(testMessage2);
@@ -266,6 +279,7 @@ public class KafkaBinderTests
}
@Test
@Ignore
public void testDefaultAutoCommitOnErrorWithDlq() throws Exception {
KafkaTestBinder binder = getBinder();
DirectChannel moduleOutputChannel = new DirectChannel();
@@ -279,7 +293,6 @@ public class KafkaBinderTests
consumerProperties.setBackOffInitialInterval(100);
consumerProperties.setBackOffMaxInterval(150);
consumerProperties.getExtension().setEnableDlq(true);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("retryTest." + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
@@ -305,7 +318,7 @@ public class KafkaBinderTests
assertThat(handledMessage).isNotNull();
assertThat(handledMessage.getPayload()).isEqualTo(testMessagePayload);
assertThat(handler.getInvocationCount()).isEqualTo(consumerProperties.getMaxAttempts());
binderBindUnbindLatency();
dlqConsumerBinding.unbind();
consumerBinding.unbind();
@@ -320,21 +333,20 @@ public class KafkaBinderTests
Message<?> receivedMessage = receive(successfulInputChannel);
assertThat(receivedMessage.getPayload()).isEqualTo(testMessage2Payload);
binderBindUnbindLatency();
consumerBinding.unbind();
producerBinding.unbind();
}
@Test(expected = IllegalArgumentException.class)
public void testValidateKafkaTopicName() {
KafkaTopicUtils.validateTopicName("foo:bar");
KafkaMessageChannelBinder.validateTopicName("foo:bar");
}
@Test
public void testCompression() throws Exception {
final KafkaProducerProperties.CompressionType[] codecs = new KafkaProducerProperties.CompressionType[] {
KafkaProducerProperties.CompressionType.none, KafkaProducerProperties.CompressionType.gzip,
KafkaProducerProperties.CompressionType.snappy};
KafkaProducerProperties.CompressionType.snappy };
byte[] testPayload = new byte[2048];
Arrays.fill(testPayload, (byte) 65);
KafkaTestBinder binder = getBinder();
@@ -342,14 +354,11 @@ public class KafkaBinderTests
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.getExtension().setCompressionType(
KafkaProducerProperties.CompressionType.valueOf(codec.toString()));
producerProperties.getExtension().setCompressionType(KafkaProducerProperties.CompressionType.valueOf(codec.toString()));
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
consumerProperties);
createConsumerProperties());
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a message
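Editor's note: each `CompressionType` in the loop above ultimately maps to the standard `compression.type` Kafka producer property. A minimal sketch of the resulting client configuration (broker address and serializers are placeholders, not taken from this diff):

```java
import java.util.Properties;

// Sketch of the producer config each codec iteration amounts to.
final class CompressionConfigSketch {

    static Properties producerConfig(String compressionType) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        props.put("compression.type", compressionType);   // none, gzip or snappy
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        return props;
    }

    public static void main(String[] args) {
        for (String codec : new String[] { "none", "gzip", "snappy" }) {
            System.out.println(producerConfig(codec));
        }
    }
}
```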
@@ -371,13 +380,11 @@ public class KafkaBinderTests
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(10);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(10);
DirectChannel moduleOutputChannel = createBindableChannel("output",
createProducerBindingProperties(producerProperties));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
@@ -410,19 +417,17 @@ public class KafkaBinderTests
KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
binderConfiguration.setMinPartitionCount(6);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
DirectChannel moduleOutputChannel = createBindableChannel("output",
createProducerBindingProperties(producerProperties));
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
moduleOutputChannel, producerProperties);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo" + uniqueBindingId + ".0", null,
moduleInputChannel, consumerProperties);
Thread.sleep(1000);
Message<?> message = org.springframework.integration.support.MessageBuilder.withPayload(testPayload)
.build();
// Let the consumer actually bind to the producer before sending a message
@@ -448,12 +453,11 @@ public class KafkaBinderTests
binderConfiguration.setMinPartitionCount(4);
KafkaTestBinder binder = new KafkaTestBinder(binderConfiguration);
DirectChannel moduleOutputChannel = new DirectChannel();
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionCount(5);
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
DirectChannel moduleOutputChannel = createBindableChannel("output",
createProducerBindingProperties(producerProperties));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
long uniqueBindingId = System.currentTimeMillis();
Binding<MessageChannel> producerBinding = binder.bindProducer("foo" + uniqueBindingId + ".0",
@@ -478,7 +482,7 @@ public class KafkaBinderTests
@Test
@SuppressWarnings("unchecked")
public void testDefaultConsumerStartsAtEarliest() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties1());
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
@@ -487,14 +491,10 @@ public class KafkaBinderTests
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
consumerProperties);
binder.bindConsumer(testTopicName, "startOffsets", input1, createConsumerProperties());
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload1);
@@ -503,53 +503,33 @@ public class KafkaBinderTests
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
producerBinding.unbind();
consumerBinding.unbind();
}
@Test
@SuppressWarnings("unchecked")
public void testEarliest() throws Exception {
Binding<MessageChannel> producerBinding = null;
Binding<MessageChannel> consumerBinding = null;
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
try {
KafkaTestBinder binder = getBinder();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
producerBinding = binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setAutoRebalanceEnabled(false);
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
Thread.sleep(2000);
producerBinding.unbind();
consumerBinding.unbind();
}
finally {
if (consumerBinding != null) {
consumerBinding.unbind();
}
if (producerBinding != null) {
producerBinding.unbind();
}
}
String testTopicName = UUID.randomUUID().toString();
binder.bindProducer(testTopicName, output, createProducerProperties());
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
binder.bindConsumer(testTopicName, "startOffsets", input1, properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
}
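Editor's note: `testDefaultConsumerStartsAtEarliest` and `testEarliest` both hinge on the consumer replaying a message that was published before the consumer bound. At the Kafka client level that is the `auto.offset.reset=earliest` behaviour for a group with no committed offsets; a minimal sketch of such a configuration (broker address is a placeholder):

```java
import java.util.Properties;

// Sketch of what StartOffset.earliest amounts to for a brand-new group:
// with no committed offsets, auto.offset.reset=earliest replays the topic
// from the beginning, so testPayload1 is still received.
final class EarliestConsumerConfigSketch {

    static Properties consumerConfig() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        props.put("group.id", "startOffsets");            // group used by the tests
        props.put("auto.offset.reset", "earliest");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        return props;
    }

    public static void main(String[] args) {
        System.out.println(consumerConfig());
    }
}
```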
@Test
@Ignore("Needs further discussion")
@Ignore
@SuppressWarnings("unchecked")
public void testReset() throws Exception {
KafkaTestBinder binder = getBinder();
@@ -560,7 +540,7 @@ public class KafkaBinderTests
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo1-" + UUID.randomUUID().toString();
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties = createConsumerProperties();
properties.getExtension().setResetOffsets(true);
@@ -569,15 +549,14 @@ public class KafkaBinderTests
properties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload1);
String testPayload2 = "foo2-" + UUID.randomUUID().toString();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo3-" + UUID.randomUUID().toString();
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> properties2 = createConsumerProperties();
@@ -598,62 +577,49 @@ public class KafkaBinderTests
}
@Test
@Ignore
@SuppressWarnings("unchecked")
public void testResume() throws Exception {
Binding<MessageChannel> producerBinding = null;
Binding<MessageChannel> consumerBinding = null;
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
try {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
QueueChannel input1 = new QueueChannel();
String testTopicName = UUID.randomUUID().toString();
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo1-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> firstConsumerProperties = createConsumerProperties();
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
firstConsumerProperties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo2-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testTopicName = UUID.randomUUID().toString();
producerBinding = binder.bindProducer(testTopicName, output,
createProducerProperties());
String testPayload1 = "foo1-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> firstConsumerProperties = createConsumerProperties();
firstConsumerProperties.getExtension().setAutoRebalanceEnabled(false);
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1,
firstConsumerProperties);
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
String testPayload2 = "foo2-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload2);
consumerBinding.unbind();
String testPayload3 = "foo3-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
String testPayload3 = "foo3-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, consumerProperties);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Message<byte[]> receivedMessage3 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage3).isNotNull();
assertThat(new String(receivedMessage3.getPayload())).isEqualTo(testPayload3);
Thread.sleep(2000);
}
finally {
if (consumerBinding != null) {
consumerBinding.unbind();
}
if (producerBinding != null) {
producerBinding.unbind();
}
}
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerBinding = binder.bindConsumer(testTopicName, "startOffsets", input1, consumerProperties);
Message<byte[]> receivedMessage3 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage3).isNotNull();
assertThat(new String(receivedMessage3.getPayload())).isEqualTo(testPayload3);
consumerBinding.unbind();
producerBinding.unbind();
}
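Editor's note: `testReset` and `testResume` differ only in how the start position is chosen when the consumer rebinds. A minimal decision sketch, assuming the binder resumes from the committed offset unless `resetOffsets` forces it back to the configured start offset (the method and its parameters are illustrative, not binder API):

```java
// Sketch of the start-position decision behind testReset/testResume.
final class OffsetResolutionSketch {

    static long resolveStart(Long committed, boolean resetOffsets, long earliest, long latest,
            boolean startEarliest) {
        if (committed != null && !resetOffsets) {
            return committed;                     // testResume: pick up where we left off
        }
        return startEarliest ? earliest : latest; // testReset or first start
    }

    public static void main(String[] args) {
        System.out.println(resolveStart(42L, false, 0, 100, true));   // 42: resume
        System.out.println(resolveStart(42L, true, 0, 100, true));    // 0: replay from earliest
        System.out.println(resolveStart(null, false, 0, 100, false)); // 100: nothing committed yet
    }
}
```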
@Test
@Ignore
public void testSyncProducerMetadata() throws Exception {
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(createConfigurationProperties());
GenericApplicationContext context = new GenericApplicationContext();
@@ -666,9 +632,12 @@ public class KafkaBinderTests
properties.getExtension().setSync(true);
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, output, properties);
DirectFieldAccessor accessor = new DirectFieldAccessor(extractEndpoint(producerBinding));
KafkaProducerMessageHandler wrappedInstance = (KafkaProducerMessageHandler) accessor.getWrappedInstance();
assertThat(new DirectFieldAccessor(wrappedInstance).getPropertyValue("sync"))
.withFailMessage("Kafka Sync Producer should have been enabled.")
.isEqualTo(Boolean.TRUE);
MessageHandler handler = (MessageHandler) accessor.getPropertyValue("handler");
DirectFieldAccessor accessor1 = new DirectFieldAccessor(handler);
// ProducerConfiguration producerConfiguration = (ProducerConfiguration) accessor1
// .getPropertyValue("producerConfiguration");
// assertThat(producerConfiguration.getProducerMetadata().isSync())
// .withFailMessage("Kafka Sync Producer should have been enabled.");
producerBinding.unbind();
}
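Editor's note: the `sync` flag probed via `DirectFieldAccessor` above controls whether the producer-side handler blocks on the send result. A minimal sketch of that difference, with a `CompletableFuture` standing in for the producer's send future:

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;

// Sketch: a synchronous producer waits for the broker acknowledgement,
// an asynchronous one returns immediately and reports errors via a listener.
final class SyncSendSketch {

    static void send(Future<?> sendResult, boolean sync) throws Exception {
        if (sync) {
            sendResult.get(); // block until acknowledged (or failed)
        }
        // async: fall through; failures surface via a ProducerListener instead
    }

    public static void main(String[] args) throws Exception {
        send(CompletableFuture.completedFuture(null), true);
        System.out.println("sync send acknowledged");
    }
}
```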
@@ -698,6 +667,17 @@ public class KafkaBinderTests
assertThat(e).isInstanceOf(BinderException.class);
assertThat(e).hasMessageContaining("Topic " + testTopicName + " does not exist");
}
// try {
//this call seems to create the topic
// Collection<PartitionInfo> partitions =
// consumerFactory().createConsumer().partitionsFor(testTopicName);
// System.out.println("foobar" + partitions);
// fail();
// }
// catch (Exception e) {
// //assertThat(e).isInstanceOf(TopicNotFoundException.class);
// }
}
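Editor's note on the commented-out probe above: with the broker default `auto.create.topics.enable=true`, a mere metadata request such as `partitionsFor` can create the topic, which is why the probe was left disabled. Disabling auto-creation broker-side (sketched below with a plain `Properties`; where this override is applied depends on the test harness) would make such a probe safe:

```java
import java.util.Properties;

// Sketch of the broker-side override that would keep partitionsFor()
// from silently creating the topic under test.
final class BrokerAutoCreateSketch {

    static Properties brokerOverrides() {
        Properties props = new Properties();
        props.put("auto.create.topics.enable", "false"); // broker default is true
        return props;
    }

    public static void main(String[] args) {
        System.out.println(brokerOverrides());
    }
}
```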
@Test
@@ -759,53 +739,48 @@ public class KafkaBinderTests
}
@Test
@Ignore
public void testAutoAddPartitionsDisabledSucceedsIfTopicPartitionedCorrectly() throws Exception {
Binding<?> binding = null;
try {
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
ZKStringSerializer$.MODULE$);
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(zkUtils, testTopicName, 6, 1, new Properties());
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
binding = binder.doBindConsumer(testTopicName, "test-x", output, consumerProperties);
String testTopicName = "existing" + System.currentTimeMillis();
AdminUtils.createTopic(zkUtils, testTopicName, 6, 1, new Properties());
configurationProperties.setAutoAddPartitions(false);
KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(configurationProperties);
GenericApplicationContext context = new GenericApplicationContext();
RetryTemplate metadataRetrievalRetryOperations = new RetryTemplate();
metadataRetrievalRetryOperations.setRetryPolicy(new SimpleRetryPolicy());
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(1000);
metadataRetrievalRetryOperations.setBackOffPolicy(backOffPolicy);
binder.setMetadataRetryOperations(metadataRetrievalRetryOperations);
context.refresh();
binder.setApplicationContext(context);
binder.afterPropertiesSet();
DirectChannel output = new DirectChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
// this consumer must consume from partition 2
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(2);
TopicPartitionInitialOffset[] listenedPartitions = TestUtils.getPropertyValue(binding,
"endpoint.messageListenerContainer.containerProperties.topicPartitions",
TopicPartitionInitialOffset[].class);
assertThat(listenedPartitions).hasSize(2);
assertThat(listenedPartitions).contains(new TopicPartitionInitialOffset(testTopicName, 2),
new TopicPartitionInitialOffset(testTopicName, 5));
Collection<PartitionInfo> partitions =
Binding<?> binding = binder.doBindConsumer(testTopicName, "test", output, consumerProperties);
Partition[] listenedPartitions = TestUtils.getPropertyValue(binding,
"endpoint.val$messageListenerContainer.partitions", Partition[].class);
assertThat(listenedPartitions).hasSize(2);
assertThat(listenedPartitions).contains(new Partition(testTopicName, 2, null, null), new Partition(testTopicName, 5, null, null));
Collection<PartitionInfo> partitions =
consumerFactory().createConsumer().partitionsFor(testTopicName);
assertThat(partitions).hasSize(6);
}
finally {
binding.unbind();
}
assertThat(partitions).hasSize(6);
binding.unbind();
}
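Editor's note: with auto-rebalance disabled, the assertion above expects instance 2 of 3 to listen to partitions 2 and 5 of a 6-partition topic, which matches a modulo distribution (partition p goes to instance p % instanceCount). A minimal sketch of that rule (the modulo scheme is inferred from the asserted partitions, not quoted from the binder):

```java
import java.util.ArrayList;
import java.util.List;

// Sketch of the static partition assignment the test asserts.
final class StaticPartitionAssignmentSketch {

    static List<Integer> partitionsFor(int instanceIndex, int instanceCount, int partitionCount) {
        List<Integer> mine = new ArrayList<>();
        for (int partition = 0; partition < partitionCount; partition++) {
            if (partition % instanceCount == instanceIndex) {
                mine.add(partition);
            }
        }
        return mine;
    }

    public static void main(String[] args) {
        System.out.println(partitionsFor(2, 3, 6)); // [2, 5], as asserted above
    }
}
```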
@Test
@@ -898,311 +873,6 @@ public class KafkaBinderTests
assertThat(topicMetadata.partitionsMetadata().size()).isEqualTo(6);
}
@Test
@Override
@SuppressWarnings("unchecked")
public void testSendAndReceiveMultipleTopics() throws Exception {
Binder binder = getBinder();
DirectChannel moduleOutputChannel1 = createBindableChannel("output1",
createProducerBindingProperties(createProducerProperties()));
DirectChannel moduleOutputChannel2 = createBindableChannel("output2",
createProducerBindingProperties(createProducerProperties()));
QueueChannel moduleInputChannel = new QueueChannel();
Binding<MessageChannel> producerBinding1 = binder.bindProducer("foo.x", moduleOutputChannel1,
createProducerProperties());
Binding<MessageChannel> producerBinding2 = binder.bindProducer("foo.y", moduleOutputChannel2,
createProducerProperties());
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Binding<MessageChannel> consumerBinding1 = binder.bindConsumer("foo.x", "test", moduleInputChannel,
consumerProperties);
Binding<MessageChannel> consumerBinding2 = binder.bindConsumer("foo.y", "test", moduleInputChannel,
consumerProperties);
String testPayload1 = "foo" + UUID.randomUUID().toString();
Message<?> message1 = org.springframework.integration.support.MessageBuilder.withPayload(
testPayload1.getBytes()).build();
String testPayload2 = "foo" + UUID.randomUUID().toString();
Message<?> message2 = org.springframework.integration.support.MessageBuilder.withPayload(
testPayload2.getBytes()).build();
// Let the consumer actually bind to the producer before sending a message
binderBindUnbindLatency();
moduleOutputChannel1.send(message1);
moduleOutputChannel2.send(message2);
Message<?>[] messages = new Message[2];
messages[0] = receive(moduleInputChannel);
messages[1] = receive(moduleInputChannel);
assertThat(messages[0]).isNotNull();
assertThat(messages[1]).isNotNull();
assertThat(messages).extracting("payload").containsExactlyInAnyOrder(testPayload1.getBytes(),
testPayload2.getBytes());
producerBinding1.unbind();
producerBinding2.unbind();
consumerBinding1.unbind();
consumerBinding2.unbind();
}
@Test
@Override
@SuppressWarnings("unchecked")
public void testTwoRequiredGroups() throws Exception {
Binder binder = getBinder();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
DirectChannel output = createBindableChannel("output", createProducerBindingProperties(producerProperties));
String testDestination = "testDestination" + UUID.randomUUID().toString().replace("-", "");
producerProperties.setRequiredGroups("test1", "test2");
Binding<MessageChannel> producerBinding = binder.bindProducer(testDestination, output, producerProperties);
String testPayload = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload.getBytes()));
QueueChannel inbound1 = new QueueChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Binding<MessageChannel> consumerBinding1 = binder.bindConsumer(testDestination, "test1", inbound1,
consumerProperties);
QueueChannel inbound2 = new QueueChannel();
Binding<MessageChannel> consumerBinding2 = binder.bindConsumer(testDestination, "test2", inbound2,
consumerProperties);
Message<?> receivedMessage1 = receive(inbound1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String((byte[]) receivedMessage1.getPayload())).isEqualTo(testPayload);
Message<?> receivedMessage2 = receive(inbound2);
assertThat(receivedMessage2).isNotNull();
assertThat(new String((byte[]) receivedMessage2.getPayload())).isEqualTo(testPayload);
consumerBinding1.unbind();
consumerBinding2.unbind();
producerBinding.unbind();
}
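Editor's note: `setRequiredGroups("test1", "test2")` is what lets the payload sent before either consumer binds still reach both groups. A minimal in-memory stand-in for that provisioning guarantee (the registry below is purely illustrative; the real binder provisions topics and offsets, not buffers):

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative stand-in: required groups are provisioned up front, so a
// message published before the consumers bind is retained for each group.
final class RequiredGroupsSketch {

    private final Map<String, List<String>> retained = new LinkedHashMap<>();

    void provision(String destination, String... groups) {
        for (String group : groups) {
            retained.put(destination + "/" + group, new ArrayList<>());
        }
    }

    void publish(String destination, String payload) {
        retained.forEach((key, messages) -> {
            if (key.startsWith(destination + "/")) {
                messages.add(payload); // held until the group's consumer binds
            }
        });
    }

    public static void main(String[] args) {
        RequiredGroupsSketch binder = new RequiredGroupsSketch();
        binder.provision("testDestination", "test1", "test2");
        binder.publish("testDestination", "foo");
        System.out.println(binder.retained); // both groups hold the message
    }
}
```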
@Test
@Override
@SuppressWarnings("unchecked")
public void testPartitionedModuleSpEL() throws Exception {
Binder binder = getBinder();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceIndex(0);
consumerProperties.setInstanceCount(3);
consumerProperties.setPartitioned(true);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0S");
Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1S");
Binding<MessageChannel> input1Binding = binder.bindConsumer("part.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2S");
Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionKeyExpression(spelExpressionParser.parseExpression("payload"));
producerProperties.setPartitionSelectorExpression(spelExpressionParser.parseExpression("hashCode()"));
producerProperties.setPartitionCount(3);
DirectChannel output = createBindableChannel("output", createProducerBindingProperties(producerProperties));
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, producerProperties);
try {
Object endpoint = extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint))
.contains(getExpectedRoutingBaseDestination("part.0", "test") + "-' + headers['partition']");
}
catch (UnsupportedOperationException ignored) {
}
Message<Integer> message2 = org.springframework.integration.support.MessageBuilder.withPayload(2)
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
output.send(message2);
output.send(new GenericMessage<>(1));
output.send(new GenericMessage<>(0));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
Condition<Message<?>> correlationHeadersForPayload2 = new Condition<Message<?>>() {
@Override
public boolean matches(Message<?> value) {
IntegrationMessageHeaderAccessor accessor = new IntegrationMessageHeaderAccessor(value);
return "foo".equals(accessor.getCorrelationId()) && 42 == accessor.getSequenceNumber()
&& 43 == accessor.getSequenceSize();
}
};
if (usesExplicitRouting()) {
assertThat(receive0.getPayload()).isEqualTo(0);
assertThat(receive1.getPayload()).isEqualTo(1);
assertThat(receive2.getPayload()).isEqualTo(2);
assertThat(receive2).has(correlationHeadersForPayload2);
}
else {
List<Message<?>> receivedMessages = Arrays.asList(receive0, receive1, receive2);
assertThat(receivedMessages).extracting("payload").containsExactlyInAnyOrder(0, 1, 2);
Condition<Message<?>> payloadIs2 = new Condition<Message<?>>() {
@Override
public boolean matches(Message<?> value) {
return value.getPayload().equals(2);
}
};
assertThat(receivedMessages).filteredOn(payloadIs2).areExactly(1, correlationHeadersForPayload2);
}
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
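Editor's note: the SpEL settings above (`partitionKeyExpression=payload`, `partitionSelectorExpression=hashCode()`) mean an Integer payload effectively selects its own partition, since `Integer.hashCode()` is the value itself. A minimal sketch, assuming the selector result is reduced modulo the partition count (the modulo step is an assumption about the binder's final mapping):

```java
// Sketch of the partition selection implied by the SpEL configuration.
final class SpelPartitioningSketch {

    static int selectPartition(Object payload, int partitionCount) {
        int selector = payload.hashCode();          // partitionSelectorExpression: hashCode()
        return Math.abs(selector) % partitionCount; // assumed final modulo mapping
    }

    public static void main(String[] args) {
        for (int payload = 0; payload < 3; payload++) {
            // 0 -> 0, 1 -> 1, 2 -> 2, matching the explicit-routing assertions
            System.out.println(payload + " -> partition " + selectPartition(payload, 3));
        }
    }
}
```

The same reasoning applies to `testPartitionedModuleJava` below, where `PartitionTestSupport` plays the role of the two expressions.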
@Test
@Override
@SuppressWarnings("unchecked")
public void testPartitionedModuleJava() throws Exception {
Binder binder = getBinder();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
consumerProperties.setInstanceCount(3);
consumerProperties.setInstanceIndex(0);
consumerProperties.setPartitioned(true);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0J");
Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
consumerProperties.setInstanceIndex(1);
QueueChannel input1 = new QueueChannel();
input1.setBeanName("test.input1J");
Binding<MessageChannel> input1Binding = binder.bindConsumer("partJ.0", "test", input1, consumerProperties);
consumerProperties.setInstanceIndex(2);
QueueChannel input2 = new QueueChannel();
input2.setBeanName("test.input2J");
Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setPartitionKeyExtractorClass(PartitionTestSupport.class);
producerProperties.setPartitionSelectorClass(PartitionTestSupport.class);
producerProperties.setPartitionCount(3);
DirectChannel output = createBindableChannel("output", createProducerBindingProperties(producerProperties));
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, producerProperties);
if (usesExplicitRouting()) {
Object endpoint = extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint)).
contains(getExpectedRoutingBaseDestination("partJ.0", "test") + "-' + headers['partition']");
}
output.send(new GenericMessage<>(2));
output.send(new GenericMessage<>(1));
output.send(new GenericMessage<>(0));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
assertThat(receive1).isNotNull();
Message<?> receive2 = receive(input2);
assertThat(receive2).isNotNull();
if (usesExplicitRouting()) {
assertThat(receive0.getPayload()).isEqualTo(0);
assertThat(receive1.getPayload()).isEqualTo(1);
assertThat(receive2.getPayload()).isEqualTo(2);
}
else {
List<Message<?>> receivedMessages = Arrays.asList(receive0, receive1, receive2);
assertThat(receivedMessages).extracting("payload").containsExactlyInAnyOrder(0, 1, 2);
}
input0Binding.unbind();
input1Binding.unbind();
input2Binding.unbind();
outputBinding.unbind();
}
@Test
@Override
@SuppressWarnings("unchecked")
public void testAnonymousGroup() throws Exception {
Binder binder = getBinder();
BindingProperties producerBindingProperties = createProducerBindingProperties(createProducerProperties());
DirectChannel output = createBindableChannel("output", producerBindingProperties);
Binding<MessageChannel> producerBinding = binder.bindProducer("defaultGroup.0", output,
producerBindingProperties.getProducer());
QueueChannel input1 = new QueueChannel();
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
//consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Binding<MessageChannel> binding1 = binder.bindConsumer("defaultGroup.0", null, input1,
consumerProperties);
QueueChannel input2 = new QueueChannel();
Binding<MessageChannel> binding2 = binder.bindConsumer("defaultGroup.0", null, input2,
consumerProperties);
// Since we don't provide any topic info, give Kafka time to finish binding the consumer
Thread.sleep(1000);
String testPayload1 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload1.getBytes()));
Message<byte[]> receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload1);
Message<byte[]> receivedMessage2 = (Message<byte[]>) receive(input2);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload1);
binding2.unbind();
String testPayload2 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload2.getBytes()));
binding2 = binder.bindConsumer("defaultGroup.0", null, input2, consumerProperties);
// Since we don't provide any topic info, give Kafka time to finish binding the consumer
Thread.sleep(1000);
String testPayload3 = "foo-" + UUID.randomUUID().toString();
output.send(new GenericMessage<>(testPayload3.getBytes()));
receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload2);
receivedMessage1 = (Message<byte[]>) receive(input1);
assertThat(receivedMessage1).isNotNull();
assertThat(new String(receivedMessage1.getPayload())).isEqualTo(testPayload3);
receivedMessage2 = (Message<byte[]>) receive(input2);
assertThat(receivedMessage2).isNotNull();
assertThat(new String(receivedMessage2.getPayload())).isEqualTo(testPayload3);
producerBinding.unbind();
binding1.unbind();
binding2.unbind();
}
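Editor's note: binding with a `null` group, as above, gives each consumer its own generated group, so both `input1` and `input2` receive every message (publish-subscribe) instead of competing within one group. A minimal sketch of that resolution (the `anonymous.` prefix is an assumed naming scheme for illustration):

```java
import java.util.UUID;

// Sketch: a null group becomes a unique, generated group id, making each
// anonymous consumer an independent Kafka consumer group.
final class AnonymousGroupSketch {

    static String resolveGroup(String declaredGroup) {
        return declaredGroup != null ? declaredGroup
                : "anonymous." + UUID.randomUUID(); // assumed naming scheme
    }

    public static void main(String[] args) {
        System.out.println(resolveGroup(null));   // e.g. anonymous.5f4d...
        System.out.println(resolveGroup("test")); // test
    }
}
```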
private static final class FailingInvocationCountingMessageHandler implements MessageHandler {
private int invocationCount;

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,13 +18,13 @@ package org.springframework.cloud.stream.binder.kafka;
import java.util.List;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;
import org.springframework.cloud.stream.binder.AbstractTestBinder;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.test.junit.kafka.TestKafkaCluster;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.integration.codec.Codec;
import org.springframework.integration.codec.kryo.KryoRegistrar;
@@ -33,8 +33,12 @@ import org.springframework.integration.tuple.TupleKryoRegistrar;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.Registration;
/**
* Test support class for {@link KafkaMessageChannelBinder}.
* Test support class for {@link KafkaMessageChannelBinder}. Creates a binder that uses a
* test {@link TestKafkaCluster kafka cluster}.
* @author Eric Bottard
* @author Marius Bogoevici
* @author David Turanski

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,

View File

@@ -5,7 +5,7 @@
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,24 +16,28 @@
package org.springframework.cloud.stream.binder.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Arrays;
import org.junit.Ignore;
import org.junit.Test;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.kafka.config.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.config.KafkaProducerProperties;
import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.integration.endpoint.AbstractEndpoint;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.support.GenericMessage;
import static org.assertj.core.api.Assertions.assertThat;
/**
* @author Marius Bogoevici
* @author David Turanski
@@ -52,7 +56,7 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
properties.setPartitionSelectorClass(RawKafkaPartitionTestSupport.class);
properties.setPartitionCount(6);
DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("partJ.0", output, properties);
@@ -62,7 +66,6 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
consumerProperties.setInstanceIndex(0);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0J");
Binding<MessageChannel> input0Binding = binder.bindConsumer("partJ.0", "test", input0, consumerProperties);
@@ -75,9 +78,9 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
input2.setBeanName("test.input2J");
Binding<MessageChannel> input2Binding = binder.bindConsumer("partJ.0", "test", input2, consumerProperties);
output.send(new GenericMessage<>(new byte[] {(byte) 0}));
output.send(new GenericMessage<>(new byte[] {(byte) 1}));
output.send(new GenericMessage<>(new byte[] {(byte) 2}));
output.send(new GenericMessage<>(new byte[] { (byte) 0 }));
output.send(new GenericMessage<>(new byte[] { (byte) 1 }));
output.send(new GenericMessage<>(new byte[] { (byte) 2 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
@@ -105,17 +108,16 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
properties.setPartitionCount(6);
properties.setHeaderMode(HeaderMode.raw);
DirectChannel output = createBindableChannel("output", createProducerBindingProperties(properties));
DirectChannel output = new DirectChannel();
output.setBeanName("test.output");
Binding<MessageChannel> outputBinding = binder.bindProducer("part.0", output, properties);
try {
Object endpoint = extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint))
.contains(getExpectedRoutingBaseDestination("part.0", "test") + "-' + headers['partition']");
AbstractEndpoint endpoint = (AbstractEndpoint) extractEndpoint(outputBinding);
assertThat(getEndpointRouting(endpoint)).contains("part.0-' + headers['partition']");
}
catch (UnsupportedOperationException ignored) {
}
}
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setConcurrency(2);
@@ -123,7 +125,6 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
consumerProperties.setInstanceCount(3);
consumerProperties.setPartitioned(true);
consumerProperties.setHeaderMode(HeaderMode.raw);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
QueueChannel input0 = new QueueChannel();
input0.setBeanName("test.input0S");
Binding<MessageChannel> input0Binding = binder.bindConsumer("part.0", "test", input0, consumerProperties);
@@ -136,13 +137,13 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
input2.setBeanName("test.input2S");
Binding<MessageChannel> input2Binding = binder.bindConsumer("part.0", "test", input2, consumerProperties);
Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] {2})
Message<byte[]> message2 = MessageBuilder.withPayload(new byte[] { 2 })
.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "foo")
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
output.send(message2);
output.send(new GenericMessage<>(new byte[] {1}));
output.send(new GenericMessage<>(new byte[] {0}));
output.send(new GenericMessage<>(new byte[] { 1 }));
output.send(new GenericMessage<>(new byte[] { 0 }));
Message<?> receive0 = receive(input0);
assertThat(receive0).isNotNull();
Message<?> receive1 = receive(input1);
@@ -165,8 +166,7 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
QueueChannel moduleInputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel,
producerProperties);
Binding<MessageChannel> producerBinding = binder.bindProducer("foo.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> consumerBinding = binder.bindConsumer("foo.0", "test", moduleInputChannel,
@@ -182,6 +182,14 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
consumerBinding.unbind();
}
// Ignored, since raw mode does not support headers
@Test
@Override
@Ignore
public void testSendAndReceiveNoOriginalContentType() throws Exception {
}
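Editor's note: the test above is ignored because raw header mode sends the payload bytes as-is, with nowhere to carry message headers on Kafka 0.9 records. A simplified illustration of the difference (the embedded layout below is not the binder's actual wire format):

```java
import java.nio.charset.StandardCharsets;

// Raw mode: the record value is the bare payload. Embedded mode: a header
// block is prepended (length-prefixed here purely for illustration).
final class RawVsEmbeddedSketch {

    static byte[] rawValue(byte[] payload) {
        return payload; // nothing but the payload travels
    }

    static byte[] embeddedValue(byte[] payload, String headers) {
        byte[] headerBytes = headers.getBytes(StandardCharsets.UTF_8);
        byte[] out = new byte[4 + headerBytes.length + payload.length];
        out[0] = (byte) (headerBytes.length >>> 24);
        out[1] = (byte) (headerBytes.length >>> 16);
        out[2] = (byte) (headerBytes.length >>> 8);
        out[3] = (byte) headerBytes.length;
        System.arraycopy(headerBytes, 0, out, 4, headerBytes.length);
        System.arraycopy(payload, 0, out, 4 + headerBytes.length, payload.length);
        return out;
    }

    public static void main(String[] args) {
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        System.out.println(rawValue(payload).length);                                         // 5
        System.out.println(embeddedValue(payload, "{\"contentType\":\"text/plain\"}").length); // 5 + header block
    }
}
```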
@Test
public void testSendAndReceiveWithExplicitConsumerGroup() {
KafkaTestBinder binder = getBinder();
@@ -192,11 +200,9 @@ public class RawModeKafkaBinderTests extends KafkaBinderTests {
QueueChannel module3InputChannel = new QueueChannel();
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
producerProperties.setHeaderMode(HeaderMode.raw);
Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel,
producerProperties);
Binding<MessageChannel> producerBinding = binder.bindProducer("baz.0", moduleOutputChannel, producerProperties);
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
consumerProperties.setHeaderMode(HeaderMode.raw);
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
Binding<MessageChannel> input1Binding = binder.bindConsumer("baz.0", "test", module1InputChannel,
consumerProperties);
// A new module is using the tap as an input channel