Compare commits
5 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 9471976ddd | |
| | 4c4873b3c1 | |
| | 971bc96962 | |
| | 47a225d02a | |
| | e440378e44 | |
1 .gitignore (vendored)

@@ -23,4 +23,3 @@ _site/
 dump.rdb
 .apt_generated
 artifacts
-.sts4-cache
117 .mvn/wrapper/MavenWrapperDownloader.java (vendored, deleted)

@@ -1,117 +0,0 @@
/*
 * Copyright 2007-present the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;

public class MavenWrapperDownloader {

    private static final String WRAPPER_VERSION = "0.5.6";
    /**
     * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
     */
    private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
        + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";

    /**
     * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
     * use instead of the default one.
     */
    private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
            ".mvn/wrapper/maven-wrapper.properties";

    /**
     * Path where the maven-wrapper.jar will be saved to.
     */
    private static final String MAVEN_WRAPPER_JAR_PATH =
            ".mvn/wrapper/maven-wrapper.jar";

    /**
     * Name of the property which should be used to override the default download url for the wrapper.
     */
    private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";

    public static void main(String args[]) {
        System.out.println("- Downloader started");
        File baseDirectory = new File(args[0]);
        System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());

        // If the maven-wrapper.properties exists, read it and check if it contains a custom
        // wrapperUrl parameter.
        File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
        String url = DEFAULT_DOWNLOAD_URL;
        if(mavenWrapperPropertyFile.exists()) {
            FileInputStream mavenWrapperPropertyFileInputStream = null;
            try {
                mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
                Properties mavenWrapperProperties = new Properties();
                mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
                url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
            } catch (IOException e) {
                System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
            } finally {
                try {
                    if(mavenWrapperPropertyFileInputStream != null) {
                        mavenWrapperPropertyFileInputStream.close();
                    }
                } catch (IOException e) {
                    // Ignore ...
                }
            }
        }
        System.out.println("- Downloading from: " + url);

        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
        if(!outputFile.getParentFile().exists()) {
            if(!outputFile.getParentFile().mkdirs()) {
                System.out.println(
                        "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
            }
        }
        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
        try {
            downloadFileFromURL(url, outputFile);
            System.out.println("Done");
            System.exit(0);
        } catch (Throwable e) {
            System.out.println("- Error downloading");
            e.printStackTrace();
            System.exit(1);
        }
    }

    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
        if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
            String username = System.getenv("MVNW_USERNAME");
            char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
            Authenticator.setDefault(new Authenticator() {
                @Override
                protected PasswordAuthentication getPasswordAuthentication() {
                    return new PasswordAuthentication(username, password);
                }
            });
        }
        URL website = new URL(urlString);
        ReadableByteChannel rbc;
        rbc = Channels.newChannel(website.openStream());
        FileOutputStream fos = new FileOutputStream(destination);
        fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
        fos.close();
        rbc.close();
    }

}
BIN .mvn/wrapper/maven-wrapper.jar (vendored)

3 .mvn/wrapper/maven-wrapper.properties (vendored)

@@ -1,2 +1 @@
-distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
-wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar
+distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.3.3/apache-maven-3.3.3-bin.zip
pom.xml

@@ -21,7 +21,7 @@
 <repository>
 	<id>spring-snapshots</id>
 	<name>Spring Snapshots</name>
-	<url>https://repo.spring.io/libs-snapshot-local</url>
+	<url>http://repo.spring.io/libs-snapshot-local</url>
 	<snapshots>
 		<enabled>true</enabled>
 	</snapshots>
@@ -29,7 +29,7 @@
 <repository>
 	<id>spring-milestones</id>
 	<name>Spring Milestones</name>
-	<url>https://repo.spring.io/libs-milestone-local</url>
+	<url>http://repo.spring.io/libs-milestone-local</url>
 	<snapshots>
 		<enabled>false</enabled>
 	</snapshots>
@@ -37,7 +37,7 @@
 <repository>
 	<id>spring-releases</id>
 	<name>Spring Releases</name>
-	<url>https://repo.spring.io/release</url>
+	<url>http://repo.spring.io/release</url>
 	<snapshots>
 		<enabled>false</enabled>
 	</snapshots>
@@ -47,7 +47,7 @@
 <pluginRepository>
 	<id>spring-snapshots</id>
 	<name>Spring Snapshots</name>
-	<url>https://repo.spring.io/libs-snapshot-local</url>
+	<url>http://repo.spring.io/libs-snapshot-local</url>
 	<snapshots>
 		<enabled>true</enabled>
 	</snapshots>
@@ -55,7 +55,7 @@
 <pluginRepository>
 	<id>spring-milestones</id>
 	<name>Spring Milestones</name>
-	<url>https://repo.spring.io/libs-milestone-local</url>
+	<url>http://repo.spring.io/libs-milestone-local</url>
 	<snapshots>
 		<enabled>false</enabled>
 	</snapshots>
4 LICENSE

@@ -1,6 +1,6 @@
 Apache License
 Version 2.0, January 2004
-https://www.apache.org/licenses/
+http://www.apache.org/licenses/

 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

@@ -192,7 +192,7 @@
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

-    https://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
868 README.adoc

@@ -1,867 +1 @@
////
DO NOT EDIT THIS FILE. IT WAS GENERATED.
Manual changes to this file will be lost when it is generated again.
Edit the files in the src/main/asciidoc/ directory instead.
////

:jdkversion: 1.8
:github-tag: master
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
:github-code: https://github.com/{github-repo}/tree/{github-tag}

image::https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka.svg?style=svg["CircleCI", link="https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
image::https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka/branch/{github-tag}/graph/badge.svg["codecov", link="https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.svg[Gitter, link="https://gitter.im/spring-cloud/spring-cloud-stream-binder-kafka?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"]

// ======================================================================================

//= Overview
[partintro]
--
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
--

== Apache Kafka Binder

=== Usage

To use the Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
----

Alternatively, you can use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-starter-stream-kafka</artifactId>
</dependency>
----

=== Overview

The following image shows a simplified diagram of how the Apache Kafka binder operates:

.Kafka Binder
image::{github-raw}/docs/src/main/asciidoc/images/kafka-binder.png[width=300,scaledwidth="50%"]

The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
The consumer group maps directly to the same Apache Kafka concept.
Partitioning also maps directly to Apache Kafka partitions.

The binder currently uses the Apache Kafka `kafka-clients` version `2.3.1`.
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
For example, with versions earlier than 0.11.x.x, native headers are not supported.
Also, 0.11.x.x does not support the `autoAddPartitions` property.

=== Configuration Options

This section contains the configuration options used by the Apache Kafka binder.

For common configuration options and properties pertaining to the binder, see the <<binding-properties,core documentation>>.

==== Kafka Binder Properties

spring.cloud.stream.kafka.binder.brokers::
A list of brokers to which the Kafka binder connects.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultBrokerPort::
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
This sets the default port when no port is configured in the broker list.
+
Default: `9092`.
spring.cloud.stream.kafka.binder.configuration::
Key/Value map of client properties (both producers and consumers) passed to all clients created by the binder.
Because these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
Unknown Kafka producer or consumer properties provided through this configuration are filtered out and not allowed to propagate.
Properties here supersede any properties set in Boot.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.consumerProperties::
Key/Value map of arbitrary Kafka client consumer properties.
In addition to known Kafka consumer properties, unknown consumer properties are allowed here as well.
Properties here supersede any properties set in Boot and in the `configuration` property above.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.headers::
The list of custom headers that are transported by the binder.
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
+
Default: empty.
spring.cloud.stream.kafka.binder.healthTimeout::
The time to wait to get partition information, in seconds.
Health reports as down if this timer expires.
+
Default: 10.
spring.cloud.stream.kafka.binder.requiredAcks::
The number of required acks on the broker.
See the Kafka documentation for the producer `acks` property.
+
Default: `1`.
spring.cloud.stream.kafka.binder.minPartitionCount::
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
+
Default: `1`.
spring.cloud.stream.kafka.binder.producerProperties::
Key/Value map of arbitrary Kafka client producer properties.
In addition to known Kafka producer properties, unknown producer properties are allowed here as well.
Properties here supersede any properties set in Boot and in the `configuration` property above.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.replicationFactor::
The replication factor of auto-created topics if `autoCreateTopics` is active.
Can be overridden on each binding.
+
Default: `1`.
spring.cloud.stream.kafka.binder.autoCreateTopics::
If set to `true`, the binder creates new topics automatically.
If set to `false`, the binder relies on the topics being already configured.
In the latter case, if the topics do not exist, the binder fails to start.
+
NOTE: This setting is independent of the `auto.create.topics.enable` setting of the broker and does not influence it.
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
+
Default: `true`.
spring.cloud.stream.kafka.binder.autoAddPartitions::
If set to `true`, the binder creates new partitions if required.
If set to `false`, the binder relies on the partition size of the topic being already configured.
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
+
Default: `false`.
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
+
Default: `null` (no transactions).
spring.cloud.stream.kafka.binder.transaction.producer.*::
Global producer properties for producers in a transactional binder.
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
+
Default: See individual producer properties.

spring.cloud.stream.kafka.binder.headerMapperBeanName::
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
Use this, for example, if you wish to customize the trusted packages in a `BinderHeaderMapper` bean that uses JSON deserialization for the headers.
If this custom `BinderHeaderMapper` bean is not made available to the binder through this property, the binder looks for a header mapper bean named `kafkaBinderHeaderMapper` of type `BinderHeaderMapper` before falling back to a default `BinderHeaderMapper` created by the binder.
+
Default: none.
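To illustrate, a minimal `application.properties` sketch combining a few of the binder properties above (the broker host names are placeholders, not defaults):

[source]
----
spring.cloud.stream.kafka.binder.brokers=kafka1,kafka2:9093
spring.cloud.stream.kafka.binder.defaultBrokerPort=9092
spring.cloud.stream.kafka.binder.minPartitionCount=2
spring.cloud.stream.kafka.binder.autoAddPartitions=true
----

Here `kafka1` gets the default port `9092`, while `kafka2` keeps its explicit `9093`.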
[[kafka-consumer-properties]]
==== Kafka Consumer Properties

NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.consumer.<property>=<value>`.

The following properties are available for Kafka consumers only and must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.

admin.configuration::
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.

admin.replicas-assignment::
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.

admin.replication-factor::
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.

autoRebalanceEnabled::
When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
+
Default: `true`.
ackEachRecord::
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
+
Default: `false`.
autoCommitOffset::
Whether to autocommit offsets when a message has been processed.
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` is present in the inbound message.
Applications may use this header for acknowledging messages.
See the examples section for details.
When this property is set to `false`, the Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
Also see `ackEachRecord`.
+
Default: `true`.
autoCommitOnError::
Effective only if `autoCommitOffset` is set to `true`.
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
If set to `true`, it always auto-commits (if auto-commit is enabled).
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
+
Default: not set.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by `startOffset`.
Must be `false` if a `KafkaRebalanceListener` is provided; see <<rebalance-listener>>.
+
Default: `false`.
startOffset::
The starting offset for new groups.
Allowed values: `earliest` and `latest`.
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
Also see `resetOffsets` (earlier in this list).
+
Default: null (equivalent to `earliest`).
enableDlq::
When set to `true`, it enables DLQ behavior for the consumer.
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
The DLQ topic name is configurable through the `dlqName` property.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
See <<kafka-dlq-processing>> for more information.
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
By default, a failed record is sent to the same partition number in the DLQ topic as the original record.
See <<dlq-partition-selection>> for how to change that behavior.
**Not allowed when `destinationIsPattern` is `true`.**
+
Default: `false`.
dlqPartitions::
When `enableDlq` is true, and this property is not set, a dead letter topic with the same number of partitions as the primary topic(s) is created.
Usually, dead-letter records are sent to the same partition in the dead-letter topic as the original record.
This behavior can be changed; see <<dlq-partition-selection>>.
If this property is set to `1` and there is no `DlqPartitionFunction` bean, all dead-letter records will be written to partition `0`.
If this property is greater than `1`, you **MUST** provide a `DlqPartitionFunction` bean (see the sketch after this list).
Note that the actual partition count is affected by the binder's `minPartitionCount` property.
+
Default: `none`.
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
In addition to Kafka consumer properties, other configuration properties can be passed here -- for example, properties needed by the application, such as `spring.cloud.stream.kafka.bindings.input.consumer.configuration.foo=bar`.
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
+
Default: Empty map.
dlqName::
The name of the DLQ topic to receive the error messages.
+
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
dlqProducerProperties::
Using this, DLQ-specific producer properties can be set.
All the properties available through Kafka producer properties can be set through this property.
When native decoding is enabled on the consumer (that is, `useNativeDecoding: true`), the application must provide corresponding key/value serializers for the DLQ.
These must be provided in the form of `dlqProducerProperties.configuration.key.serializer` and `dlqProducerProperties.configuration.value.serializer`.
+
Default: Default Kafka producer properties.
standardHeaders::
Indicates which standard headers are populated by the inbound channel adapter.
Allowed values: `none`, `id`, `timestamp`, or `both`.
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
+
Default: `none`.
converterBeanName::
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
+
Default: `null`.
idleEventInterval::
The interval, in milliseconds, between events indicating that no messages have recently been received.
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
See <<pause-resume>> for a usage example.
+
Default: `30000`.
destinationIsPattern::
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
This can be configured using the `configuration` property above.
+
Default: `false`.
topic.properties::
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.topic.properties.message.format.version=0.9.0.0`
+
Default: none.
topic.replicas-assignment::
A `Map<Integer, List<Integer>>` of replica assignments, with the key being the partition and the value being the assignments.
Used when provisioning new topics.
See the `NewTopic` Javadocs in the `kafka-clients` jar.
+
Default: none.
topic.replication-factor::
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
Ignored if `replicas-assignments` is present.
+
Default: none (the binder-wide default of 1 is used).
pollTimeout::
Timeout used for polling in pollable consumers.
+
Default: 5 seconds.
transactionManager::
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
To achieve exactly-once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
+
Default: none.
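The following is a sketch of the `DlqPartitionFunction` bean mentioned under `dlqPartitions` above; it routes every dead-letter record to partition `0` (the class and bean names are illustrative):

[source, java]
----
@Configuration
public class DlqConfiguration {

    @Bean
    public DlqPartitionFunction partitionFunction() {
        // the consumer group, the failed ConsumerRecord, and the causal exception
        // are available; here we ignore them and always choose partition 0
        return (group, record, throwable) -> 0;
    }

}
----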
==== Consuming Batches

Starting with version 3.0, when `spring.cloud.stream.bindings.<name>.consumer.batch-mode` is set to `true`, all of the records received by polling the Kafka `Consumer` are presented as a `List<?>` to the listener method.
Otherwise, the method is called with one record at a time.
The size of the batch is controlled by the Kafka consumer properties `max.poll.records`, `fetch.min.bytes`, and `fetch.max.wait.ms`; refer to the Kafka documentation for more information.

Bear in mind that batch mode is not supported with `@StreamListener` -- it only works with the newer functional programming model.

IMPORTANT: Retry within the binder is not supported when using batch mode, so `maxAttempts` is overridden to 1.
You can configure a `SeekToCurrentBatchErrorHandler` (using a `ListenerContainerCustomizer`) to achieve functionality similar to retry in the binder.
You can also use a manual `AckMode` and call `Acknowledgment.nack(index, sleep)` to commit the offsets for a partial batch and have the remaining records redelivered.
Refer to the https://docs.spring.io/spring-kafka/docs/2.3.0.BUILD-SNAPSHOT/reference/html/#committing-offsets[Spring for Apache Kafka documentation] for more information about these techniques.
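As a sketch only, a functional-model listener that receives such batches (the binding name `input-in-0` and the `String` payload type are assumptions):

[source, java]
----
@Bean
public Consumer<List<String>> input() {
    // the entire batch returned by the last poll arrives as a single List
    return batch -> batch.forEach(System.out::println);
}
----

together with `spring.cloud.stream.bindings.input-in-0.consumer.batch-mode=true` in the application configuration.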
[[kafka-producer-properties]]
==== Kafka Producer Properties

NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.producer.<property>=<value>`.

The following properties are available for Kafka producers only and must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.

admin.configuration::
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.

admin.replicas-assignment::
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.

admin.replication-factor::
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.

bufferSize::
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
+
Default: `16384`.
sync::
Whether the producer is synchronous.
+
Default: `false`.
sendTimeoutExpression::
A SpEL expression evaluated against the outgoing message used to evaluate the time to wait for an acknowledgment when synchronous publish is enabled -- for example, `headers['mySendTimeout']`.
The value of the timeout is in milliseconds.
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
Now, the expression is evaluated before the payload is converted.
+
Default: `none`.
batchTimeout::
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
messageKeyExpression::
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
Now, the expression is evaluated before the payload is converted.
+
Default: `none`.
headerPatterns::
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
Patterns can begin or end with the wildcard character (asterisk).
Patterns can be negated by prefixing with `!`.
Matching stops after the first match (positive or negative).
For example, `!ask,as*` passes `ash` but not `ask`.
`id` and `timestamp` are never mapped.
+
Default: `*` (all headers except the `id` and `timestamp`).
configuration::
Map with a key/value pair containing generic Kafka producer properties.
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
+
Default: Empty map.
topic.properties::
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.output.producer.topic.properties.message.format.version=0.9.0.0`
+
Default: none.
topic.replicas-assignment::
A `Map<Integer, List<Integer>>` of replica assignments, with the key being the partition and the value being the assignments.
Used when provisioning new topics.
See the `NewTopic` Javadocs in the `kafka-clients` jar.
+
Default: none.
topic.replication-factor::
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
Ignored if `replicas-assignments` is present.
+
Default: none (the binder-wide default of 1 is used).
useTopicHeader::
Set to `true` to override the default binding destination (topic name) with the value of the `KafkaHeaders.TOPIC` message header in the outbound message.
If the header is not present, the default binding destination is used.
+
Default: `false`.
recordMetadataChannel::
The bean name of a `MessageChannel` to which successful send results should be sent; the bean must exist in the application context.
The message sent to the channel is the sent message (after conversion, if any) with an additional header `KafkaHeaders.RECORD_METADATA`.
The header contains a `RecordMetadata` object provided by the Kafka client; it includes the partition and offset where the record was written in the topic.
+
`RecordMetadata meta = sendResultMsg.getHeaders().get(KafkaHeaders.RECORD_METADATA, RecordMetadata.class)`
+
Failed sends go to the producer error channel (if configured); see <<kafka-error-channels>>.
+
Default: null.

NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.

compression::
Set the `compression.type` producer property.
Supported values are `none`, `gzip`, `snappy`, and `lz4`.
If you override the `kafka-clients` jar to 2.1.0 (or later), as discussed in the https://docs.spring.io/spring-kafka/docs/2.2.x/reference/html/deps-for-21x.html[Spring for Apache Kafka documentation], and wish to use `zstd` compression, use `spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.compression.type=zstd`.
+
Default: `none`.
transactionManager::
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
To achieve exactly-once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
+
Default: none.
closeTimeout::
Timeout in number of seconds to wait for when closing the producer.
+
Default: `30`.

==== Usage examples

In this section, we show the use of the preceding properties for specific scenarios.

===== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking

This example illustrates how one may manually acknowledge offsets in a consumer application.

This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
Use the corresponding input channel name for your example.

[source]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class ManuallyAcknowdledgingConsumer {

    public static void main(String[] args) {
        SpringApplication.run(ManuallyAcknowdledgingConsumer.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void process(Message<?> message) {
        Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        if (acknowledgment != null) {
            System.out.println("Acknowledgment provided");
            acknowledgment.acknowledge();
        }
    }
}
----

===== Example: Security Configuration

Apache Kafka 0.9 supports secure connections between clients and brokers.
To take advantage of this feature, follow the guidelines in the https://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 https://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.

For example, to set `security.protocol` to `SASL_SSL`, set the following property:

[source]
----
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
----

All the other security properties can be set in a similar manner.

When using Kerberos, follow the instructions in the https://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.

Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file and using Spring Boot properties.

====== Using JAAS Configuration Files

The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:

[source,bash]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----

====== Using Spring Boot Properties

As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.

The following properties can be used to configure the login context of the Kafka client:

spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. Not necessary to be set in normal cases.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
Map with a key/value pair containing the login module options.
+
Default: Empty map.

The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:

[source,bash]
----
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.autoCreateTopics=false \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
   --spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
   --spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
   --spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
   --spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----

The preceding example represents the equivalent of the following JAAS file:

[source]
----
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/etc/security/keytabs/kafka_client.keytab"
    principal="kafka-client-1@EXAMPLE.COM";
};
----

If the topics required already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent.

NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.

NOTE: Be careful when using `autoCreateTopics` and `autoAddPartitions` with Kerberos.
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
Consequently, relying on Spring Cloud Stream to create/modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.

[[pause-resume]]
===== Example: Pausing and Resuming the Consumer

If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
The frequency at which events are published is controlled by the `idleEventInterval` property.
Since the consumer is not thread-safe, you must call these methods on the calling thread.

The following simple application shows how to pause and resume:

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
        System.out.println(in);
        consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
    }

    @Bean
    public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
        return event -> {
            System.out.println(event);
            if (event.getConsumer().paused().size() > 0) {
                event.getConsumer().resume(event.getConsumer().paused());
            }
        };
    }

}
----

[[kafka-transactional-binder]]
=== Transactional Binder

Enable transactions by setting `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` to a non-empty value, e.g. `tx-`.
When used in a processor application, the consumer starts the transaction; any records sent on the consumer thread participate in the same transaction.
When the listener exits normally, the listener container sends the offset to the transaction and commits it.
A common producer factory is used for all producer bindings configured using `spring.cloud.stream.kafka.binder.transaction.producer.*` properties; individual binding Kafka producer properties are ignored.

IMPORTANT: Normal binder retries (and dead lettering) are not supported with transactions because the retries run in the original transaction, which may be rolled back, and any published records are then rolled back too.
When retries are enabled (the common property `maxAttempts` is greater than zero), the retry properties are used to configure a `DefaultAfterRollbackProcessor` to enable retries at the container level.
Similarly, instead of publishing dead-letter records within the transaction, this functionality is moved to the listener container, again via the `DefaultAfterRollbackProcessor`, which runs after the main transaction has rolled back.

If you wish to use transactions in a source application, or from some arbitrary thread for producer-only transactions (e.g. a `@Scheduled` method), you must get a reference to the transactional producer factory and define a `KafkaTransactionManager` bean using it.

====
[source, java]
----
@Bean
public PlatformTransactionManager transactionManager(BinderFactory binders,
        @Value("${unique.tx.id.per.instance}") String txId) {

    ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
            MessageChannel.class)).getTransactionalProducerFactory();
    KafkaTransactionManager tm = new KafkaTransactionManager<>(pf);
    tm.setTransactionIdPrefix(txId);
    return tm;
}
----
====

Notice that we get a reference to the binder using the `BinderFactory`; use `null` in the first argument when there is only one binder configured.
If more than one binder is configured, use the binder name to get the reference.
Once we have a reference to the binder, we can obtain a reference to the `ProducerFactory` and create a transaction manager.

Then you would use normal Spring transaction support, e.g. `TransactionTemplate` or `@Transactional`, for example:

====
[source, java]
----
public static class Sender {

    @Transactional
    public void doInTransaction(MessageChannel output, List<String> stuffToSend) {
        stuffToSend.forEach(stuff -> output.send(new GenericMessage<>(stuff)));
    }

}
----
====

If you wish to synchronize producer-only transactions with those from some other transaction manager, use a `ChainedTransactionManager`.

IMPORTANT: If you deploy multiple instances of your application, each instance needs a unique `transactionIdPrefix`.

[[kafka-error-channels]]
=== Error Channels

Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
See <<spring-cloud-stream-overview-error-handling>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with properties:

* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`.

There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, Dead-Letter queue>>).
You can consume these exceptions with your own Spring Integration flow.
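A sketch of such a flow, using a service activator (the `myTopic.errors` channel name is an assumption based on the destination-based error channel naming convention; adjust it to your binding):

[source, java]
----
@ServiceActivator(inputChannel = "myTopic.errors")
public void handleSendFailure(ErrorMessage message) {
    KafkaSendFailureException failure = (KafkaSendFailureException) message.getPayload();
    // both the original spring-messaging message and the raw ProducerRecord are available
    System.out.println("Send failed for record: " + failure.getRecord());
}
----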
[[kafka-metrics]]
=== Kafka Metrics

The Kafka binder module exposes the following metrics:

`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
The metrics provided are based on the Micrometer metrics library. The metric contains the consumer group information, the topic, and the actual lag in committed offset from the latest offset on the topic.
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.

[[kafka-tombstones]]
=== Tombstone Records (null record values)

When using compacted topics, a record with a `null` value (also called a tombstone record) represents the deletion of a key.
To receive such messages in a `@StreamListener` method, the parameter must be marked as not required to receive a `null` value argument.

====
[source, java]
----
@StreamListener(Sink.INPUT)
public void in(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) byte[] key,
        @Payload(required = false) Customer customer) {
    // customer is null if a tombstone record
    ...
}
----
====

[[rebalance-listener]]
=== Using a KafkaRebalanceListener

Applications may wish to seek topics/partitions to arbitrary offsets when the partitions are initially assigned, or perform other operations on the consumer.
Starting with version 2.1, if you provide a single `KafkaRebalanceListener` bean in the application context, it will be wired into all Kafka consumer bindings.

====
[source, java]
----
public interface KafkaBindingRebalanceListener {

    /**
     * Invoked by the container before any pending offsets are committed.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     */
    default void onPartitionsRevokedBeforeCommit(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions) {

    }

    /**
     * Invoked by the container after any pending offsets are committed.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     */
    default void onPartitionsRevokedAfterCommit(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions) {

    }

    /**
     * Invoked when partitions are initially assigned or after a rebalance.
     * Applications might only want to perform seek operations on an initial assignment.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     * @param initial true if this is the initial assignment.
     */
    default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions, boolean initial) {

    }

}
----
====
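For illustration, a sketch of a listener bean that rewinds to the earliest offsets on the initial assignment only (the class name is illustrative):

[source, java]
----
@Component
public class SeekToBeginningListener implements KafkaBindingRebalanceListener {

    @Override
    public void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions, boolean initial) {
        if (initial) {
            // seek to the beginning once per application start, not on every rebalance
            consumer.seekToBeginning(partitions);
        }
    }

}
----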
You cannot set the `resetOffsets` consumer property to `true` when you provide a rebalance listener.
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
[[building]]
|
||||
== Building
|
||||
|
||||
:jdkversion: 1.7
|
||||
|
||||
=== Basic Compile and Test
|
||||
|
||||
To build the source you will need to install JDK {jdkversion}.
|
||||
|
||||
The build uses the Maven wrapper so you don't have to install a specific
|
||||
version of Maven. To enable the tests, you should have Kafka server 0.9 or above running
|
||||
before building. See below for more information on running the servers.
|
||||
|
||||
The main build command is
|
||||
|
||||
----
|
||||
$ ./mvnw clean install
|
||||
----
|
||||
|
||||
You can also add '-DskipTests' if you like, to avoid running the tests.
|
||||
|
||||
NOTE: You can also install Maven (>=3.3.3) yourself and run the `mvn` command
|
||||
in place of `./mvnw` in the examples below. If you do that you also
|
||||
might need to add `-P spring` if your local Maven settings do not
|
||||
contain repository declarations for spring pre-release artifacts.
|
||||
|
||||
NOTE: Be aware that you might need to increase the amount of memory
|
||||
available to Maven by setting a `MAVEN_OPTS` environment variable with
|
||||
a value like `-Xmx512m -XX:MaxPermSize=128m`. We try to cover this in
|
||||
the `.mvn` configuration, so if you find you have to do it to make a
|
||||
build succeed, please raise a ticket to get the settings added to
|
||||
source control.
|
||||
|
||||
|
||||
The projects that require middleware generally include a
|
||||
`docker-compose.yml`, so consider using
|
||||
https://compose.docker.io/[Docker Compose] to run the middeware servers
|
||||
in Docker containers.
|
||||
|
||||
=== Documentation
|
||||
|
||||
There is a "full" profile that will generate documentation.
|
||||
|
||||
=== Working with the code
|
||||
If you don't have an IDE preference we would recommend that you use
|
||||
https://www.springsource.com/developer/sts[Spring Tools Suite] or
|
||||
https://eclipse.org[Eclipse] when working with the code. We use the
|
||||
https://eclipse.org/m2e/[m2eclipe] eclipse plugin for maven support. Other IDEs and tools
|
||||
should also work without issue.
|
||||
|
||||
==== Importing into eclipse with m2eclipse
|
||||
We recommend the https://eclipse.org/m2e/[m2eclipe] eclipse plugin when working with
|
||||
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
|
||||
marketplace".
|
||||
|
||||
Unfortunately m2e does not yet support Maven 3.3, so once the projects
|
||||
are imported into Eclipse you will also need to tell m2eclipse to use
|
||||
the `.settings.xml` file for the projects. If you do not do this you
|
||||
may see many different errors related to the POMs in the
|
||||
projects. Open your Eclipse preferences, expand the Maven
|
||||
preferences, and select User Settings. In the User Settings field
|
||||
click Browse and navigate to the Spring Cloud project you imported
|
||||
selecting the `.settings.xml` file in that project. Click Apply and
|
||||
then OK to save the preference changes.
|
||||
|
||||
NOTE: Alternatively you can copy the repository settings from https://github.com/spring-cloud/spring-cloud-build/blob/master/.settings.xml[`.settings.xml`] into your own `~/.m2/settings.xml`.
|
||||
|
||||
==== Importing into eclipse without m2eclipse
|
||||
If you prefer not to use m2eclipse you can generate eclipse project metadata using the
|
||||
following command:
|
||||
|
||||
[indent=0]
|
||||
----
|
||||
$ ./mvnw eclipse:eclipse
|
||||
----
|
||||
|
||||
The generated eclipse projects can be imported by selecting `import existing projects`
|
||||
from the `file` menu.
|
||||
[[contributing]
|
||||
== Contributing
|
||||
|
||||
Spring Cloud is released under the non-restrictive Apache 2.0 license,
|
||||
and follows a very standard Github development process, using Github
|
||||
tracker for issues and merging pull requests into master. If you want
|
||||
to contribute even something trivial please do not hesitate, but
|
||||
follow the guidelines below.
|
||||
|
||||
=== Sign the Contributor License Agreement
|
||||
Before we accept a non-trivial patch or pull request we will need you to sign the
|
||||
https://support.springsource.com/spring_committer_signup[contributor's agreement].
|
||||
Signing the contributor's agreement does not grant anyone commit rights to the main
|
||||
repository, but it does mean that we can accept your contributions, and you will get an
|
||||
author credit if we do. Active contributors might be asked to join the core team, and
|
||||
given the ability to merge pull requests.
|
||||
|
||||
=== Code Conventions and Housekeeping
|
||||
None of these is essential for a pull request, but they will all help. They can also be
|
||||
added after the original pull request but before a merge.
|
||||
|
||||
* Use the Spring Framework code format conventions. If you use Eclipse
|
||||
you can import formatter settings using the
|
||||
`eclipse-code-formatter.xml` file from the
|
||||
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
|
||||
Cloud Build] project. If using IntelliJ, you can use the
|
||||
https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
|
||||
Plugin] to import the same file.
|
||||
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
|
||||
`@author` tag identifying you, and preferably at least a paragraph on what the class is
|
||||
for.
|
||||
* Add the ASF license header comment to all new `.java` files (copy from existing files
|
||||
in the project).
|
||||
* Add yourself as an `@author` to the .java files that you modify substantially (more
|
||||
than cosmetic changes).
|
||||
* Add some Javadocs and, if you change the namespace, some XSD doc elements.
|
||||
* A few unit tests would help a lot as well -- someone has to do it.
|
||||
* If no-one else is using your branch, please rebase it against the current master (or
|
||||
other target branch in the main project).
|
||||
* When writing a commit message, please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions].
|
||||
If you are fixing an existing issue, please add `Fixes gh-XXXX` at the end of the commit
|
||||
message (where XXXX is the issue number).
|
||||
|
||||
// ======================================================================================
|
||||
Spring Cloud Stream Binder for Apache Kafka
|
||||
|
||||
65
docs/pom.xml
@@ -1,65 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
|
||||
<parent>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
|
||||
<version>3.1.0-M2</version>
|
||||
</parent>
|
||||
<packaging>jar</packaging>
|
||||
<name>spring-cloud-stream-binder-kafka-docs</name>
|
||||
<description>Spring Cloud Stream Kafka Binder Docs</description>
|
||||
<properties>
|
||||
<docs.main>spring-cloud-stream-binder-kafka</docs.main>
|
||||
<main.basedir>${basedir}/..</main.basedir>
|
||||
<maven.plugin.plugin.version>3.4</maven.plugin.plugin.version>
|
||||
<configprops.inclusionPattern>.*stream.*</configprops.inclusionPattern>
|
||||
<upload-docs-zip.phase>deploy</upload-docs-zip.phase>
|
||||
</properties>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>${project.groupId}</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<build>
|
||||
<sourceDirectory>src/main/asciidoc</sourceDirectory>
|
||||
</build>
|
||||
<profiles>
|
||||
<profile>
|
||||
<id>docs</id>
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>pl.project13.maven</groupId>
|
||||
<artifactId>git-commit-id-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-dependency-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-resources-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.codehaus.mojo</groupId>
|
||||
<artifactId>exec-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.asciidoctor</groupId>
|
||||
<artifactId>asciidoctor-maven-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-antrun-plugin</artifactId>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<artifactId>maven-deploy-plugin</artifactId>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</profile>
|
||||
</profiles>
|
||||
</project>
|
||||
@@ -1,22 +0,0 @@
|
||||
:jdkversion: 1.8
|
||||
:github-tag: master
|
||||
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
|
||||
|
||||
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
|
||||
:github-code: https://github.com/{github-repo}/tree/{github-tag}
|
||||
|
||||
image::https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka.svg?style=svg["CircleCI", link="https://circleci.com/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka/branch/{github-tag}/graph/badge.svg["codecov", link="https://codecov.io/gh/spring-cloud/spring-cloud-stream-binder-kafka"]
|
||||
image::https://badges.gitter.im/spring-cloud/spring-cloud-stream-binder-kafka.svg[Gitter, link="https://gitter.im/spring-cloud/spring-cloud-stream-binder-kafka?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"]
|
||||
|
||||
// ======================================================================================
|
||||
|
||||
//= Overview
|
||||
include::overview.adoc[]
|
||||
|
||||
= Appendices
|
||||
[appendix]
|
||||
include::building.adoc[]
|
||||
include::contributing.adoc[]
|
||||
|
||||
// ======================================================================================
|
||||
@@ -1,63 +0,0 @@
|
||||
|===
|
||||
|Name | Default | Description
|
||||
|
||||
|spring.cloud.stream.binders | | Additional per-binder properties (see {@link BinderProperties}) if more than one binder of the same type is used (i.e., connecting to multiple instances of RabbitMQ). Here you can specify multiple binder configurations, each with different environment settings. For example: spring.cloud.stream.binders.rabbit1.environment. . . , spring.cloud.stream.binders.rabbit2.environment. . .
|
||||
|spring.cloud.stream.binding-retry-interval | 30 | Retry interval (in seconds) used to schedule binding attempts. Default: 30 sec.
|
||||
|spring.cloud.stream.bindings | | Additional binding properties (see {@link BinderProperties}) per binding name (e.g., `input`). For example, this sets the content-type for the 'input' binding of a Sink application: `spring.cloud.stream.bindings.input.contentType=text/plain`
|
||||
|spring.cloud.stream.default-binder | | The name of the binder to use by all bindings in the event multiple binders are available (e.g., 'rabbit').
|
||||
|spring.cloud.stream.dynamic-destination-cache-size | 10 | The maximum size of Least Recently Used (LRU) cache of dynamic destinations. Once this size is reached, new destinations will trigger the removal of old destinations. Default: 10
|
||||
|spring.cloud.stream.dynamic-destinations | [] | A list of destinations that can be bound dynamically. If set, only listed destinations can be bound.
|
||||
|spring.cloud.stream.function.batch-mode | false |
|
||||
|spring.cloud.stream.function.bindings | |
|
||||
|spring.cloud.stream.function.definition | | Definition of functions to bind. If several functions need to be composed into one, use pipes (e.g., 'fooFunc\|barFunc')
|
||||
|spring.cloud.stream.instance-count | 1 | The number of deployed instances of an application. Default: 1. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-count" where 'foo' is the name of the binding.
|
||||
|spring.cloud.stream.instance-index | 0 | The instance id of the application: a number from 0 to instanceCount-1. Used for partitioning and with Kafka. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-index" where 'foo' is the name of the binding.
|
||||
|spring.cloud.stream.instance-index-list | | A list of instance id's from 0 to instanceCount-1. Used for partitioning and with Kafka. NOTE: Could also be managed per individual binding "spring.cloud.stream.bindings.foo.consumer.instance-index-list" where 'foo' is the name of the binding. This setting will override the one set in 'spring.cloud.stream.instance-index'
|
||||
|spring.cloud.stream.integration.message-handler-not-propagated-headers | | Message header names that will NOT be copied from the inbound message.
|
||||
|spring.cloud.stream.kafka.binder.authorization-exception-retry-interval | | Time between retries after AuthorizationException is caught in the ListenerContainer; default is null, which disables retries. For more info see: {@link org.springframework.kafka.listener.ConsumerProperties#setAuthorizationExceptionRetryInterval(java.time.Duration)}
|
||||
|spring.cloud.stream.kafka.binder.auto-add-partitions | false |
|
||||
|spring.cloud.stream.kafka.binder.auto-create-topics | true |
|
||||
|spring.cloud.stream.kafka.binder.brokers | [localhost] |
|
||||
|spring.cloud.stream.kafka.binder.configuration | | Arbitrary kafka properties that apply to both producers and consumers.
|
||||
|spring.cloud.stream.kafka.binder.consumer-properties | | Arbitrary kafka consumer properties.
|
||||
|spring.cloud.stream.kafka.binder.header-mapper-bean-name | | The bean name of a custom header mapper to use instead of a {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
|
||||
|spring.cloud.stream.kafka.binder.headers | [] |
|
||||
|spring.cloud.stream.kafka.binder.health-timeout | 60 | Time to wait to get partition information in seconds; default 60.
|
||||
|spring.cloud.stream.kafka.binder.jaas | |
|
||||
|spring.cloud.stream.kafka.binder.min-partition-count | 1 |
|
||||
|spring.cloud.stream.kafka.binder.producer-properties | | Arbitrary kafka producer properties.
|
||||
|spring.cloud.stream.kafka.binder.replication-factor | 1 |
|
||||
|spring.cloud.stream.kafka.binder.required-acks | 1 |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.batch-timeout | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.buffer-size | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.compression-type | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.configuration | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.error-channel-enabled | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.header-mode | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.header-patterns | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.message-key-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-count | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-key-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-key-extractor-name | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-selector-expression | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.partition-selector-name | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.required-groups | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.sync | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.topic | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.producer.use-native-encoding | |
|
||||
|spring.cloud.stream.kafka.binder.transaction.transaction-id-prefix | |
|
||||
|spring.cloud.stream.kafka.bindings | |
|
||||
|spring.cloud.stream.metrics.export-properties | | List of properties that are going to be appended to each message. This gets populated by onApplicationEvent, once the context refreshes, to avoid the overhead of doing it on a per-message basis.
|
||||
|spring.cloud.stream.metrics.key | | The name of the metric being emitted. Should be a unique value per application. Defaults to: ${spring.application.name:${vcap.application.name:${spring.config.name:application}}}.
|
||||
|spring.cloud.stream.metrics.meter-filter | | Pattern to control the 'meters' one wants to capture. By default all 'meters' will be captured. For example, 'spring.integration.*' will only capture metric information for meters whose name starts with 'spring.integration'.
|
||||
|spring.cloud.stream.metrics.properties | | Application properties that should be added to the metrics payload For example: `spring.application**`.
|
||||
|spring.cloud.stream.metrics.schedule-interval | 60s | Interval expressed as Duration for scheduling metrics snapshots publishing. Defaults to 60 seconds
|
||||
|spring.cloud.stream.override-cloud-connectors | false | This property is only applicable when the cloud profile is active and Spring Cloud Connectors are provided with the application. If the property is false (the default), the binder detects a suitable bound service (for example, a RabbitMQ service bound in Cloud Foundry for the RabbitMQ binder) and uses it for creating connections (usually through Spring Cloud Connectors). When set to true, this property instructs binders to completely ignore the bound services and rely on Spring Boot properties (for example, relying on the spring.rabbitmq.* properties provided in the environment for the RabbitMQ binder). The typical usage of this property is to be nested in a customized environment when connecting to multiple systems.
|
||||
|spring.cloud.stream.poller.cron | | Cron expression value for the Cron Trigger.
|
||||
|spring.cloud.stream.poller.fixed-delay | 1000 | Fixed delay for default poller.
|
||||
|spring.cloud.stream.poller.initial-delay | 0 | Initial delay for periodic triggers.
|
||||
|spring.cloud.stream.poller.max-messages-per-poll | 1 | Maximum messages per poll for the default poller.
|
||||
|spring.cloud.stream.sendto.destination | none | The name of the header used to determine the name of the output destination
|
||||
|spring.cloud.stream.source | | A colon-delimited string representing the names of the sources based on which source bindings will be created. This is primarily to support cases where a source binding may be required without providing a corresponding Supplier (e.g., where the actual source of data is outside the scope of spring-cloud-stream - HTTP -> Stream).
|
||||
|
||||
|===
|
||||
@@ -1,330 +0,0 @@
|
||||
#!/bin/bash -x
|
||||
|
||||
set -e
|
||||
|
||||
# Set default props like MAVEN_PATH, ROOT_FOLDER etc.
|
||||
function set_default_props() {
|
||||
# The script should be run from the root folder
|
||||
ROOT_FOLDER=`pwd`
|
||||
echo "Current folder is ${ROOT_FOLDER}"
|
||||
|
||||
if [[ ! -e "${ROOT_FOLDER}/.git" ]]; then
|
||||
echo "You're not in the root folder of the project!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Prop that will let commit the changes
|
||||
COMMIT_CHANGES="no"
|
||||
MAVEN_PATH=${MAVEN_PATH:-}
|
||||
echo "Path to Maven is [${MAVEN_PATH}]"
|
||||
REPO_NAME=${PWD##*/}
|
||||
echo "Repo name is [${REPO_NAME}]"
|
||||
SPRING_CLOUD_STATIC_REPO=${SPRING_CLOUD_STATIC_REPO:-git@github.com:spring-cloud/spring-cloud-static.git}
|
||||
echo "Spring Cloud Static repo is [${SPRING_CLOUD_STATIC_REPO}"
|
||||
}
|
||||
|
||||
# Check if gh-pages exists and docs have been built
|
||||
function check_if_anything_to_sync() {
|
||||
git remote set-url --push origin `git config remote.origin.url | sed -e 's/^git:/https:/'`
|
||||
|
||||
if ! (git remote set-branches --add origin gh-pages && git fetch -q); then
|
||||
echo "No gh-pages, so not syncing"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if ! [ -d docs/target/generated-docs ] && ! [ "${BUILD}" == "yes" ]; then
|
||||
echo "No gh-pages sources in docs/target/generated-docs, so not syncing"
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
function retrieve_current_branch() {
|
||||
# Code getting the name of the current branch. For master we want to publish as we did until now
|
||||
# https://stackoverflow.com/questions/1593051/how-to-programmatically-determine-the-current-checked-out-git-branch
|
||||
# If there is a branch already passed will reuse it - otherwise will try to find it
|
||||
CURRENT_BRANCH=${BRANCH}
|
||||
if [[ -z "${CURRENT_BRANCH}" ]] ; then
|
||||
CURRENT_BRANCH=$(git symbolic-ref -q HEAD)
|
||||
CURRENT_BRANCH=${CURRENT_BRANCH##refs/heads/}
|
||||
CURRENT_BRANCH=${CURRENT_BRANCH:-HEAD}
|
||||
fi
|
||||
echo "Current branch is [${CURRENT_BRANCH}]"
|
||||
git checkout ${CURRENT_BRANCH} || echo "Failed to check the branch... continuing with the script"
|
||||
}
|
||||
|
||||
# Switches to the provided value of the release version. We always prefix it with `v`
|
||||
function switch_to_tag() {
|
||||
git checkout v${VERSION}
|
||||
}
|
||||
|
||||
# Build the docs if switch is on
|
||||
function build_docs_if_applicable() {
|
||||
if [[ "${BUILD}" == "yes" ]] ; then
|
||||
./mvnw clean install -P docs -pl docs -DskipTests
|
||||
fi
|
||||
}
|
||||
|
||||
# Get the name of the `docs.main` property
|
||||
# Get allowed branches - assumes that a `docs` module is available under `docs` profile
|
||||
function retrieve_doc_properties() {
|
||||
MAIN_ADOC_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
-Dexec.args='${docs.main}' \
|
||||
--non-recursive \
|
||||
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec)
|
||||
echo "Extracted 'main.adoc' from Maven build [${MAIN_ADOC_VALUE}]"
|
||||
|
||||
|
||||
ALLOW_PROPERTY=${ALLOW_PROPERTY:-"docs.allowed.branches"}
|
||||
ALLOWED_BRANCHES_VALUE=$("${MAVEN_PATH}"mvn -q \
|
||||
-Dexec.executable="echo" \
|
||||
-Dexec.args="\${${ALLOW_PROPERTY}}" \
|
||||
org.codehaus.mojo:exec-maven-plugin:1.3.1:exec \
|
||||
-P docs \
|
||||
-pl docs)
|
||||
echo "Extracted '${ALLOW_PROPERTY}' from Maven build [${ALLOWED_BRANCHES_VALUE}]"
|
||||
}
|
||||
|
||||
# Stash any outstanding changes
|
||||
function stash_changes() {
|
||||
git diff-index --quiet HEAD && dirty=$? || (echo "Failed to check if the current repo is dirty. Assuming that it is." && dirty="1")
|
||||
if [ "$dirty" != "0" ]; then git stash; fi
|
||||
}
|
||||
|
||||
# Switch to gh-pages branch to sync it with current branch
|
||||
function add_docs_from_target() {
|
||||
local DESTINATION_REPO_FOLDER
|
||||
if [[ -z "${DESTINATION}" && -z "${CLONE}" ]] ; then
|
||||
DESTINATION_REPO_FOLDER=${ROOT_FOLDER}
|
||||
elif [[ "${CLONE}" == "yes" ]]; then
|
||||
mkdir -p ${ROOT_FOLDER}/target
|
||||
local clonedStatic=${ROOT_FOLDER}/target/spring-cloud-static
|
||||
if [[ ! -e "${clonedStatic}/.git" ]]; then
|
||||
echo "Cloning Spring Cloud Static to target"
|
||||
git clone ${SPRING_CLOUD_STATIC_REPO} ${clonedStatic} && git checkout gh-pages
|
||||
else
|
||||
echo "Spring Cloud Static already cloned - will pull changes"
|
||||
cd ${clonedStatic} && git checkout gh-pages && git pull origin gh-pages
|
||||
fi
|
||||
DESTINATION_REPO_FOLDER=${clonedStatic}/${REPO_NAME}
|
||||
mkdir -p ${DESTINATION_REPO_FOLDER}
|
||||
else
|
||||
if [[ ! -e "${DESTINATION}/.git" ]]; then
|
||||
echo "[${DESTINATION}] is not a git repository"
|
||||
exit 1
|
||||
fi
|
||||
DESTINATION_REPO_FOLDER=${DESTINATION}/${REPO_NAME}
|
||||
mkdir -p ${DESTINATION_REPO_FOLDER}
|
||||
echo "Destination was provided [${DESTINATION}]"
|
||||
fi
|
||||
cd ${DESTINATION_REPO_FOLDER}
|
||||
git checkout gh-pages
|
||||
git pull origin gh-pages
|
||||
|
||||
# Add git branches
|
||||
###################################################################
|
||||
if [[ -z "${VERSION}" ]] ; then
|
||||
copy_docs_for_current_version
|
||||
else
|
||||
copy_docs_for_provided_version
|
||||
fi
|
||||
commit_changes_if_applicable
|
||||
}
|
||||
|
||||
|
||||
# Copies the docs by using the retrieved properties from Maven build
|
||||
function copy_docs_for_current_version() {
|
||||
if [[ "${CURRENT_BRANCH}" == "master" ]] ; then
|
||||
echo -e "Current branch is master - will copy the current docs only to the root folder"
|
||||
for f in docs/target/generated-docs/*; do
|
||||
file=${f#docs/target/generated-docs/*}
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^$file$; then
|
||||
# Not ignored...
|
||||
cp -rf $f ${ROOT_FOLDER}/
|
||||
git add -A ${ROOT_FOLDER}/$file
|
||||
fi
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
else
|
||||
echo -e "Current branch is [${CURRENT_BRANCH}]"
|
||||
# https://stackoverflow.com/questions/29300806/a-bash-script-to-check-if-a-string-is-present-in-a-comma-separated-list-of-strin
|
||||
if [[ ",${ALLOWED_BRANCHES_VALUE}," = *",${CURRENT_BRANCH},"* ]] ; then
|
||||
mkdir -p ${ROOT_FOLDER}/${CURRENT_BRANCH}
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is allowed! Will copy the current docs to the [${CURRENT_BRANCH}] folder"
|
||||
for f in docs/target/generated-docs/*; do
|
||||
file=${f#docs/target/generated-docs/*}
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^$file$; then
|
||||
# Not ignored...
|
||||
# We want users to access 2.0.0.BUILD-SNAPSHOT/ instead of 1.0.0.RELEASE/spring-cloud.sleuth.html
|
||||
if [[ "${file}" == "${MAIN_ADOC_VALUE}.html" ]] ; then
|
||||
# We don't want to copy the spring-cloud-sleuth.html
|
||||
# we want it to be converted to index.html
|
||||
cp -rf $f ${ROOT_FOLDER}/${CURRENT_BRANCH}/index.html
|
||||
git add -A ${ROOT_FOLDER}/${CURRENT_BRANCH}/index.html
|
||||
else
|
||||
cp -rf $f ${ROOT_FOLDER}/${CURRENT_BRANCH}
|
||||
git add -A ${ROOT_FOLDER}/${CURRENT_BRANCH}/$file
|
||||
fi
|
||||
fi
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
else
|
||||
echo -e "Branch [${CURRENT_BRANCH}] is not on the allow list! Check out the Maven [${ALLOW_PROPERTY}] property in
|
||||
[docs] module available under [docs] profile. Won't commit any changes to gh-pages for this branch."
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Copies the docs by using the explicitly provided version
|
||||
function copy_docs_for_provided_version() {
|
||||
local FOLDER=${DESTINATION_REPO_FOLDER}/${VERSION}
|
||||
mkdir -p ${FOLDER}
|
||||
echo -e "Current tag is [v${VERSION}] Will copy the current docs to the [${FOLDER}] folder"
|
||||
for f in ${ROOT_FOLDER}/docs/target/generated-docs/*; do
|
||||
file=${f#${ROOT_FOLDER}/docs/target/generated-docs/*}
|
||||
copy_docs_for_branch ${file} ${FOLDER}
|
||||
done
|
||||
COMMIT_CHANGES="yes"
|
||||
CURRENT_BRANCH="v${VERSION}"
|
||||
}
|
||||
|
||||
# Copies the docs from target to the provided destination
|
||||
# Params:
|
||||
# $1 - file from target
|
||||
# $2 - destination to which copy the files
|
||||
function copy_docs_for_branch() {
|
||||
local file=$1   # note: the copy below relies on the caller's loop variable $f for the full source path
|
||||
local destination=$2
|
||||
if ! git ls-files -i -o --exclude-standard --directory | grep -q ^${file}$; then
|
||||
# Not ignored...
|
||||
# We want users to access 2.0.0.BUILD-SNAPSHOT/ instead of 1.0.0.RELEASE/spring-cloud.sleuth.html
|
||||
if [[ ("${file}" == "${MAIN_ADOC_VALUE}.html") || ("${file}" == "${REPO_NAME}.html") ]] ; then
|
||||
# We don't want to copy the spring-cloud-sleuth.html
|
||||
# we want it to be converted to index.html
|
||||
cp -rf $f ${destination}/index.html
|
||||
git add -A ${destination}/index.html
|
||||
else
|
||||
cp -rf $f ${destination}
|
||||
git add -A ${destination}/$file
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function commit_changes_if_applicable() {
|
||||
if [[ "${COMMIT_CHANGES}" == "yes" ]] ; then
|
||||
COMMIT_SUCCESSFUL="no"
|
||||
git commit -a -m "Sync docs from ${CURRENT_BRANCH} to gh-pages" && COMMIT_SUCCESSFUL="yes" || echo "Failed to commit changes"
|
||||
|
||||
# Uncomment the following push if you want to auto push to
|
||||
# the gh-pages branch whenever you commit to master locally.
|
||||
# This is a little extreme. Use with care!
|
||||
###################################################################
|
||||
if [[ "${COMMIT_SUCCESSFUL}" == "yes" ]] ; then
|
||||
git push origin gh-pages
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Switch back to the previous branch and exit block
|
||||
function checkout_previous_branch() {
|
||||
# If -version was provided we need to come back to root project
|
||||
cd ${ROOT_FOLDER}
|
||||
git checkout ${CURRENT_BRANCH} || echo "Failed to check the branch... continuing with the script"
|
||||
if [ "$dirty" != "0" ]; then git stash pop; fi
|
||||
exit 0
|
||||
}
|
||||
|
||||
# Assert if properties have been properly passed
|
||||
function assert_properties() {
|
||||
echo "VERSION [${VERSION}], DESTINATION [${DESTINATION}], CLONE [${CLONE}]"
|
||||
if [[ "${VERSION}" != "" && (-z "${DESTINATION}" && -z "${CLONE}") ]] ; then echo "Version was set but destination / clone was not!"; exit 1;fi
|
||||
if [[ ("${DESTINATION}" != "" && "${CLONE}" != "") && -z "${VERSION}" ]] ; then echo "Destination / clone was set but version was not!"; exit 1;fi
|
||||
if [[ "${DESTINATION}" != "" && "${CLONE}" == "yes" ]] ; then echo "Destination and clone was set. Pick one!"; exit 1;fi
|
||||
}
|
||||
|
||||
# Prints the usage
|
||||
function print_usage() {
|
||||
cat <<EOF
|
||||
The idea of this script is to update gh-pages branch with the generated docs. Without any options
|
||||
the script will work in the following manner:
|
||||
|
||||
- if there's no gh-pages / target for docs module then the script ends
|
||||
- for master branch the generated docs are copied to the root of gh-pages branch
|
||||
- for any other branch (if that branch is allowed) a subfolder with branch name is created
|
||||
and docs are copied there
|
||||
- if the version switch is passed (-v) then a tag with (v) prefix will be retrieved and a folder
|
||||
with that version number will be created in the gh-pages branch. WARNING! No allow verification will take place
|
||||
- if the destination switch is passed (-d) then the script will check if the provided dir is a git repo and then will
|
||||
switch to gh-pages of that repo and copy the generated docs to `docs/<project-name>/<version>`
|
||||
|
||||
USAGE:
|
||||
|
||||
You can use the following options:
|
||||
|
||||
-v|--version - the script will apply the whole procedure for a particular library version
|
||||
-d|--destination - the root of destination folder where the docs should be copied. You have to use the full path.
|
||||
E.g. point to spring-cloud-static folder. Can't be used with (-c)
|
||||
-b|--build - will run the standard build process after checking out the branch
|
||||
-c|--clone - will automatically clone the spring-cloud-static repo instead of providing the destination.
|
||||
Obviously can't be used with (-d)
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
# ==========================================
|
||||
# ____ ____ _____ _____ _____ _______
|
||||
# / ____|/ ____| __ \|_ _| __ \__ __|
|
||||
# | (___ | | | |__) | | | | |__) | | |
|
||||
# \___ \| | | _ / | | | ___/ | |
|
||||
# ____) | |____| | \ \ _| |_| | | |
|
||||
# |_____/ \_____|_| \_\_____|_| |_|
|
||||
#
|
||||
# ==========================================
|
||||
|
||||
while [[ $# -gt 0 ]]
|
||||
do
|
||||
key="$1"
|
||||
case ${key} in
|
||||
-v|--version)
|
||||
VERSION="$2"
|
||||
shift # past argument
|
||||
;;
|
||||
-d|--destination)
|
||||
DESTINATION="$2"
|
||||
shift # past argument
|
||||
;;
|
||||
-b|--build)
|
||||
BUILD="yes"
|
||||
;;
|
||||
-c|--clone)
|
||||
CLONE="yes"
|
||||
;;
|
||||
-h|--help)
|
||||
print_usage
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
echo "Invalid option: [$1]"
|
||||
print_usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
shift # past argument or value
|
||||
done
|
||||
|
||||
assert_properties
|
||||
set_default_props
|
||||
check_if_anything_to_sync
|
||||
if [[ -z "${VERSION}" ]] ; then
|
||||
retrieve_current_branch
|
||||
else
|
||||
switch_to_tag
|
||||
fi
|
||||
build_docs_if_applicable
|
||||
retrieve_doc_properties
|
||||
stash_changes
|
||||
add_docs_from_target
|
||||
checkout_previous_branch
|
||||
|
Before Width: | Height: | Size: 119 KiB |
|
Before Width: | Height: | Size: 233 KiB |
@@ -1,721 +0,0 @@
|
||||
[partintro]
|
||||
--
|
||||
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
|
||||
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
|
||||
In addition, this guide explains the Kafka Streams binding capabilities of Spring Cloud Stream.
|
||||
--
|
||||
|
||||
== Apache Kafka Binder
|
||||
|
||||
=== Usage
|
||||
|
||||
To use Apache Kafka binder, you need to add `spring-cloud-stream-binder-kafka` as a dependency to your Spring Cloud Stream application, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-stream-binder-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
Alternatively, you can also use the Spring Cloud Stream Kafka Starter, as shown in the following example for Maven:
|
||||
|
||||
[source,xml]
|
||||
----
|
||||
<dependency>
|
||||
<groupId>org.springframework.cloud</groupId>
|
||||
<artifactId>spring-cloud-starter-stream-kafka</artifactId>
|
||||
</dependency>
|
||||
----
|
||||
|
||||
=== Overview
|
||||
|
||||
The following image shows a simplified diagram of how the Apache Kafka binder operates:
|
||||
|
||||
.Kafka Binder
|
||||
image::{github-raw}/docs/src/main/asciidoc/images/kafka-binder.png[width=300,scaledwidth="50%"]
|
||||
|
||||
The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
|
||||
The consumer group maps directly to the same Apache Kafka concept.
|
||||
Partitioning maps directly to Apache Kafka partitions as well.
|
||||
|
||||
The binder currently uses the Apache Kafka `kafka-clients` version `2.3.1`.
|
||||
This client can communicate with older brokers (see the Kafka documentation), but certain features may not be available.
|
||||
For example, with versions earlier than 0.11.x.x, native headers are not supported.
|
||||
Also, 0.11.x.x does not support the `autoAddPartitions` property.
|
||||
|
||||
=== Configuration Options
|
||||
|
||||
This section contains the configuration options used by the Apache Kafka binder.
|
||||
|
||||
For common configuration options and properties pertaining to binder, see the <<binding-properties,core documentation>>.
|
||||
|
||||
==== Kafka Binder Properties
|
||||
|
||||
spring.cloud.stream.kafka.binder.brokers::
|
||||
A list of brokers to which the Kafka binder connects.
|
||||
+
|
||||
Default: `localhost`.
|
||||
spring.cloud.stream.kafka.binder.defaultBrokerPort::
|
||||
`brokers` allows hosts specified with or without port information (for example, `host1,host2:port2`).
|
||||
This sets the default port when no port is configured in the broker list.
|
||||
+
|
||||
Default: `9092`.
|
||||
spring.cloud.stream.kafka.binder.configuration::
|
||||
Key/Value map of client properties (both producer and consumer) passed to all clients created by the binder.
|
||||
Because these properties are used by both producers and consumers, usage should be restricted to common properties -- for example, security settings.
|
||||
Unknown Kafka producer or consumer properties provided through this configuration are filtered out and not allowed to propagate.
|
||||
Properties here supersede any properties set in boot.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.consumerProperties::
|
||||
Key/Value map of arbitrary Kafka client consumer properties.
|
||||
In addition to supporting known Kafka consumer properties, unknown consumer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.headers::
|
||||
The list of custom headers that are transported by the binder.
|
||||
Only required when communicating with older applications (<= 1.3.x) with a `kafka-clients` version < 0.11.0.0. Newer versions support headers natively.
|
||||
+
|
||||
Default: empty.
|
||||
spring.cloud.stream.kafka.binder.healthTimeout::
|
||||
The time to wait to get partition information, in seconds.
|
||||
Health reports as down if this timer expires.
|
||||
+
|
||||
Default: 10.
|
||||
spring.cloud.stream.kafka.binder.requiredAcks::
|
||||
The number of required acks on the broker.
|
||||
See the Kafka documentation for the producer `acks` property.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.minPartitionCount::
|
||||
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
|
||||
The global minimum number of partitions that the binder configures on topics on which it produces or consumes data.
|
||||
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount * concurrency` settings of the producer (if either is larger).
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.producerProperties::
|
||||
Key/Value map of arbitrary Kafka client producer properties.
|
||||
In addition to supporting known Kafka producer properties, unknown producer properties are allowed here as well.
|
||||
Properties here supersede any properties set in boot and in the `configuration` property above.
|
||||
+
|
||||
Default: Empty map.
|
||||
spring.cloud.stream.kafka.binder.replicationFactor::
|
||||
The replication factor of auto-created topics if `autoCreateTopics` is active.
|
||||
Can be overridden on each binding.
|
||||
+
|
||||
Default: `1`.
|
||||
spring.cloud.stream.kafka.binder.autoCreateTopics::
|
||||
If set to `true`, the binder creates new topics automatically.
|
||||
If set to `false`, the binder relies on the topics being already configured.
|
||||
In the latter case, if the topics do not exist, the binder fails to start.
|
||||
+
|
||||
NOTE: This setting is independent of the `auto.create.topics.enable` setting of the broker and does not influence it.
|
||||
If the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
|
||||
+
|
||||
Default: `true`.
|
||||
spring.cloud.stream.kafka.binder.autoAddPartitions::
|
||||
If set to `true`, the binder creates new partitions if required.
|
||||
If set to `false`, the binder relies on the partition size of the topic being already configured.
|
||||
If the partition count of the target topic is smaller than the expected value, the binder fails to start.
|
||||
+
|
||||
Default: `false`.
|
||||
spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix::
|
||||
Enables transactions in the binder. See `transaction.id` in the Kafka documentation and https://docs.spring.io/spring-kafka/reference/html/_reference.html#transactions[Transactions] in the `spring-kafka` documentation.
|
||||
When transactions are enabled, individual `producer` properties are ignored and all producers use the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties.
|
||||
+
|
||||
Default `null` (no transactions)
|
||||
spring.cloud.stream.kafka.binder.transaction.producer.*::
|
||||
Global producer properties for producers in a transactional binder.
|
||||
See `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` and <<kafka-producer-properties>> and the general producer properties supported by all binders.
|
||||
+
|
||||
Default: See individual producer properties.
|
||||
|
||||
spring.cloud.stream.kafka.binder.headerMapperBeanName::
|
||||
The bean name of a `KafkaHeaderMapper` used for mapping `spring-messaging` headers to and from Kafka headers.
|
||||
Use this, for example, if you wish to customize the trusted packages in a `BinderHeaderMapper` bean that uses JSON deserialization for the headers.
|
||||
If this custom `BinderHeaderMapper` bean is not made available to the binder using this property, then the binder will look for a header mapper bean with the name `kafkaBinderHeaderMapper` that is of type `BinderHeaderMapper` before falling back to a default `BinderHeaderMapper` created by the binder.
|
||||
+
|
||||
Default: none.
|
||||
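For illustration, the following is a minimal sketch of registering a customized header mapper under the default bean name that the binder looks for; the trusted package shown is a placeholder:

[source,java]
----
@Bean("kafkaBinderHeaderMapper")
public BinderHeaderMapper kafkaBinderHeaderMapper() {
    BinderHeaderMapper mapper = new BinderHeaderMapper();
    // Trust an additional package when JSON headers are deserialized
    // ("com.example.events" is a placeholder package name).
    mapper.addTrustedPackages("com.example.events");
    return mapper;
}
----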
|
||||
[[kafka-consumer-properties]]
|
||||
==== Kafka Consumer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.consumer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka consumers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
autoRebalanceEnabled::
|
||||
When `true`, topic partitions are automatically rebalanced between the members of a consumer group.
|
||||
When `false`, each consumer is assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
|
||||
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
|
||||
The value of the `spring.cloud.stream.instanceCount` property must typically be greater than 1 in this case.
|
||||
+
|
||||
Default: `true`.
|
||||
ackEachRecord::
|
||||
When `autoCommitOffset` is `true`, this setting dictates whether to commit the offset after each record is processed.
|
||||
By default, offsets are committed after all records in the batch of records returned by `consumer.poll()` have been processed.
|
||||
The number of records returned by a poll can be controlled with the `max.poll.records` Kafka property, which is set through the consumer `configuration` property.
|
||||
Setting this to `true` may cause a degradation in performance, but doing so reduces the likelihood of redelivered records when a failure occurs.
|
||||
Also, see the binder `requiredAcks` property, which also affects the performance of committing offsets.
|
||||
+
|
||||
Default: `false`.
|
||||
autoCommitOffset::
|
||||
Whether to autocommit offsets when a message has been processed.
|
||||
If set to `false`, a header with the key `kafka_acknowledgment` of the type `org.springframework.kafka.support.Acknowledgment` header is present in the inbound message.
|
||||
Applications may use this header for acknowledging messages.
|
||||
See the examples section for details.
|
||||
When this property is set to `false`, the Kafka binder sets the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL` and the application is responsible for acknowledging records.
|
||||
Also see `ackEachRecord`.
|
||||
+
|
||||
Default: `true`.
|
||||
autoCommitOnError::
|
||||
Effective only if `autoCommitOffset` is set to `true`.
|
||||
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only for successful messages. It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
|
||||
If set to `true`, it always auto-commits (if auto-commit is enabled).
|
||||
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
|
||||
+
|
||||
Default: not set.
|
||||
resetOffsets::
|
||||
Whether to reset offsets on the consumer to the value provided by startOffset.
|
||||
Must be false if a `KafkaRebalanceListener` is provided; see <<rebalance-listener>>.
|
||||
+
|
||||
Default: `false`.
|
||||
startOffset::
|
||||
The starting offset for new groups.
|
||||
Allowed values: `earliest` and `latest`.
|
||||
If the consumer group is set explicitly for the consumer 'binding' (through `spring.cloud.stream.bindings.<channelName>.group`), 'startOffset' is set to `earliest`. Otherwise, it is set to `latest` for the `anonymous` consumer group.
|
||||
Also see `resetOffsets` (earlier in this list).
|
||||
+
|
||||
Default: null (equivalent to `earliest`).
|
||||
enableDlq::
|
||||
When set to true, it enables DLQ behavior for the consumer.
|
||||
By default, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`.
|
||||
The DLQ topic name can be configured by setting the `dlqName` property.
|
||||
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
|
||||
See <<kafka-dlq-processing>> for more information.
|
||||
Starting with version 2.0, messages sent to the DLQ topic are enhanced with the following headers: `x-original-topic`, `x-exception-message`, and `x-exception-stacktrace` as `byte[]`.
|
||||
By default, a failed record is sent to the same partition number in the DLQ topic as the original record.
|
||||
See <<dlq-partition-selection>> for how to change that behavior.
|
||||
**Not allowed when `destinationIsPattern` is `true`.**
|
||||
+
|
||||
Default: `false`.
|
||||
dlqPartitions::
|
||||
When `enableDlq` is true, and this property is not set, a dead letter topic with the same number of partitions as the primary topic(s) is created.
|
||||
Usually, dead-letter records are sent to the same partition in the dead-letter topic as the original record.
|
||||
This behavior can be changed; see <<dlq-partition-selection>>.
|
||||
If this property is set to `1` and there is no `DlqPartitionFunction` bean, all dead-letter records will be written to partition `0`.
|
||||
If this property is greater than `1`, you **MUST** provide a `DlqPartitionFunction` bean (see the sketch after this list).
|
||||
Note that the actual partition count is affected by the binder's `minPartitionCount` property.
|
||||
+
|
||||
Default: `none`
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka consumer properties.
|
||||
In addition to having Kafka consumer properties, other configuration properties can be passed here.
|
||||
For example, you can pass properties needed by the application, such as `spring.cloud.stream.kafka.bindings.input.consumer.configuration.foo=bar`.
|
||||
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
|
||||
+
|
||||
Default: Empty map.
|
||||
dlqName::
|
||||
The name of the DLQ topic to receive the error messages.
|
||||
+
|
||||
Default: null (If not specified, messages that result in errors are forwarded to a topic named `error.<destination>.<group>`).
|
||||
dlqProducerProperties::
|
||||
Using this, DLQ-specific producer properties can be set.
|
||||
All the properties available through kafka producer properties can be set through this property.
|
||||
When native decoding is enabled on the consumer (i.e., `useNativeDecoding: true`), the application must provide corresponding key/value serializers for the DLQ.
|
||||
This must be provided in the form of `dlqProducerProperties.configuration.key.serializer` and `dlqProducerProperties.configuration.value.serializer`.
|
||||
+
|
||||
Default: Default Kafka producer properties.
|
||||
standardHeaders::
|
||||
Indicates which standard headers are populated by the inbound channel adapter.
|
||||
Allowed values: `none`, `id`, `timestamp`, or `both`.
|
||||
Useful if using native deserialization and the first component to receive a message needs an `id` (such as an aggregator that is configured to use a JDBC message store).
|
||||
+
|
||||
Default: `none`
|
||||
converterBeanName::
|
||||
The name of a bean that implements `RecordMessageConverter`. Used in the inbound channel adapter to replace the default `MessagingMessageConverter`.
|
||||
+
|
||||
Default: `null`
|
||||
idleEventInterval::
|
||||
The interval, in milliseconds, between events indicating that no messages have recently been received.
|
||||
Use an `ApplicationListener<ListenerContainerIdleEvent>` to receive these events.
|
||||
See <<pause-resume>> for a usage example.
|
||||
+
|
||||
Default: `30000`
|
||||
destinationIsPattern::
|
||||
When true, the destination is treated as a regular expression `Pattern` used to match topic names by the broker.
|
||||
When true, topics are not provisioned, and `enableDlq` is not allowed, because the binder does not know the topic names during the provisioning phase.
|
||||
Note, the time taken to detect new topics that match the pattern is controlled by the consumer property `metadata.max.age.ms`, which (at the time of writing) defaults to 300,000ms (5 minutes).
|
||||
This can be configured using the `configuration` property above.
|
||||
+
|
||||
Default: `false`
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.input.consumer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
|
||||
Default: none.
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
pollTimeout::
|
||||
Timeout used for polling in pollable consumers.
|
||||
+
|
||||
Default: 5 seconds.
|
||||
transactionManager::
|
||||
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
|
||||
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
|
||||
To achieve exactly once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
|
||||
+
|
||||
Default: none.
|
||||
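As referenced from the `dlqPartitions` property earlier in this list, the following is a minimal sketch of a `DlqPartitionFunction` bean; it assumes the functional contract of (consumer group, failed record, throwable) and simply routes every dead-letter record to partition `0`:

[source,java]
----
@Bean
public DlqPartitionFunction partitionFunction() {
    // Send all dead-letter records to partition 0, regardless of the
    // consumer group, the failed record, or the causing exception.
    return (group, record, throwable) -> 0;
}
----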
|
||||
==== Consuming Batches
|
||||
|
||||
Starting with version 3.0, when `spring.cloud.stream.bindings.<name>.consumer.batch-mode` is set to `true`, all of the records received by polling the Kafka `Consumer` will be presented as a `List<?>` to the listener method.
|
||||
Otherwise, the method will be called with one record at a time.
|
||||
The size of the batch is controlled by the Kafka consumer properties `max.poll.records`, `fetch.min.bytes`, and `fetch.max.wait.ms`; refer to the Kafka documentation for more information.
|
||||
|
||||
Bear in mind that batch mode is not supported with `@StreamListener` - it only works with the newer functional programming model.
|
||||
|
||||
IMPORTANT: Retry within the binder is not supported when using batch mode, so `maxAttempts` will be overridden to 1.
|
||||
You can configure a `SeekToCurrentBatchErrorHandler` (using a `ListenerContainerCustomizer`) to achieve similar functionality to retry in the binder.
|
||||
You can also use a manual `AckMode` and call `Acknowledgment.nack(index, sleep)` to commit the offsets for a partial batch and have the remaining records redelivered.
|
||||
Refer to the https://docs.spring.io/spring-kafka/docs/2.3.0.BUILD-SNAPSHOT/reference/html/#committing-offsets[Spring for Apache Kafka documentation] for more information about these techniques.
|
||||
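As a minimal sketch of a batch listener under the functional model mentioned above (the binding name `batchInput` and the `String` payload type are assumptions for illustration):

[source,java]
----
@Bean
public Consumer<List<String>> batchInput() {
    return records -> {
        // The entire batch returned by the poll is delivered as one list.
        records.forEach(System.out::println);
    };
}
----

This assumes `spring.cloud.stream.bindings.batchInput-in-0.consumer.batch-mode=true` is set for the corresponding binding.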
|
||||
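Similarly, a hedged sketch of attaching a `SeekToCurrentBatchErrorHandler` through a `ListenerContainerCustomizer` bean, as suggested above (the container generic type shown is an assumption):

[source,java]
----
@Bean
public ListenerContainerCustomizer<AbstractMessageListenerContainer<byte[], byte[]>> customizer() {
    // Attach a batch error handler to every listener container the binder creates.
    return (container, destinationName, group) ->
            container.setBatchErrorHandler(new SeekToCurrentBatchErrorHandler());
}
----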
[[kafka-producer-properties]]
|
||||
==== Kafka Producer Properties
|
||||
|
||||
NOTE: To avoid repetition, Spring Cloud Stream supports setting values for all channels, in the format of `spring.cloud.stream.kafka.default.producer.<property>=<value>`.
|
||||
|
||||
|
||||
The following properties are available for Kafka producers only and
|
||||
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.
|
||||
|
||||
admin.configuration::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.properties`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replicas-assignment::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replicas-assignment`, and support for it will be removed in a future version.
|
||||
|
||||
admin.replication-factor::
|
||||
Since version 2.1.1, this property is deprecated in favor of `topic.replication-factor`, and support for it will be removed in a future version.
|
||||
|
||||
bufferSize::
|
||||
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
|
||||
+
|
||||
Default: `16384`.
|
||||
sync::
|
||||
Whether the producer is synchronous.
|
||||
+
|
||||
Default: `false`.
|
||||
sendTimeoutExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to evaluate the time to wait for ack when synchronous publish is enabled -- for example, `headers['mySendTimeout']`.
|
||||
The value of the timeout is in milliseconds.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
batchTimeout::
|
||||
How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
|
||||
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
|
||||
+
|
||||
Default: `0`.
|
||||
messageKeyExpression::
|
||||
A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message -- for example, `headers['myKey']`.
|
||||
With versions before 3.0, the payload could not be used unless native encoding was being used because, by the time this expression was evaluated, the payload was already in the form of a `byte[]`.
|
||||
Now, the expression is evaluated before the payload is converted.
|
||||
+
|
||||
Default: `none`.
|
||||
headerPatterns::
|
||||
A comma-delimited list of simple patterns to match Spring messaging headers to be mapped to the Kafka `Headers` in the `ProducerRecord`.
|
||||
Patterns can begin or end with the wildcard character (asterisk).
|
||||
Patterns can be negated by prefixing with `!`.
|
||||
Matching stops after the first match (positive or negative).
|
||||
For example `!ask,as*` will pass `ash` but not `ask`.
|
||||
`id` and `timestamp` are never mapped.
|
||||
+
|
||||
Default: `*` (all headers - except the `id` and `timestamp`)
|
||||
configuration::
|
||||
Map with a key/value pair containing generic Kafka producer properties.
|
||||
The `bootstrap.servers` property cannot be set here; use multi-binder support if you need to connect to multiple clusters.
|
||||
+
|
||||
Default: Empty map.
|
||||
topic.properties::
|
||||
A `Map` of Kafka topic properties used when provisioning new topics -- for example, `spring.cloud.stream.kafka.bindings.output.producer.topic.properties.message.format.version=0.9.0.0`
|
||||
+
Default: none.
|
||||
topic.replicas-assignment::
|
||||
A Map<Integer, List<Integer>> of replica assignments, with the key being the partition and the value being the assignments.
|
||||
Used when provisioning new topics.
|
||||
See the `NewTopic` Javadocs in the `kafka-clients` jar.
|
||||
+
|
||||
Default: none.
|
||||
topic.replication-factor::
|
||||
The replication factor to use when provisioning topics. Overrides the binder-wide setting.
|
||||
Ignored if `replicas-assignments` is present.
|
||||
+
|
||||
Default: none (the binder-wide default of 1 is used).
|
||||
useTopicHeader::
|
||||
Set to `true` to override the default binding destination (topic name) with the value of the `KafkaHeaders.TOPIC` message header in the outbound message.
|
||||
If the header is not present, the default binding destination is used.
|
||||
+
|
||||
Default: `false`.
|
||||
recordMetadataChannel::
|
||||
The bean name of a `MessageChannel` to which successful send results should be sent; the bean must exist in the application context.
|
||||
The message sent to the channel is the sent message (after conversion, if any) with an additional header `KafkaHeaders.RECORD_METADATA`.
|
||||
The header contains a `RecordMetadata` object provided by the Kafka client; it includes the partition and offset where the record was written in the topic.
|
||||
|
||||
`RecordMetadata meta = sendResultMsg.getHeaders().get(KafkaHeaders.RECORD_METADATA, RecordMetadata.class)`
|
||||
|
||||
Failed sends go to the producer error channel (if configured); see <<kafka-error-channels>>.
|
||||
+
|
||||
Default: `null`.
|
||||
|
||||
NOTE: The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with the `minPartitionCount`, the maximum of the two being the value being used).
|
||||
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
|
||||
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
|
||||
If a topic already exists with a larger number of partitions than the maximum of (`minPartitionCount` or `partitionCount`), the existing partition count is used.
|
||||
|
||||
compression::
|
||||
Set the `compression.type` producer property.
|
||||
Supported values are `none`, `gzip`, `snappy` and `lz4`.
|
||||
If you override the `kafka-clients` jar to 2.1.0 (or later), as discussed in the https://docs.spring.io/spring-kafka/docs/2.2.x/reference/html/deps-for-21x.html[Spring for Apache Kafka documentation], and wish to use `zstd` compression, use `spring.cloud.stream.kafka.bindings.<binding-name>.producer.configuration.compression.type=zstd`.
|
||||
+
|
||||
Default: `none`.
|
||||
transactionManager::
|
||||
Bean name of a `KafkaAwareTransactionManager` used to override the binder's transaction manager for this binding.
|
||||
Usually needed if you want to synchronize another transaction with the Kafka transaction, using the `ChainedKafkaTransactionManager`.
|
||||
To achieve exactly once consumption and production of records, the consumer and producer bindings must all be configured with the same transaction manager.
|
||||
+
|
||||
Default: none.
|
||||
|
||||
closeTimeout::
|
||||
Timeout, in seconds, to wait when closing the producer.
|
||||
+
|
||||
Default: `30`
|
||||
|
||||
==== Usage examples
|
||||
|
||||
In this section, we show the use of the preceding properties for specific scenarios.
|
||||
|
||||
===== Example: Setting `autoCommitOffset` to `false` and Relying on Manual Acking
|
||||
|
||||
This example illustrates how one may manually acknowledge offsets in a consumer application.
|
||||
|
||||
This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` be set to `false`.
|
||||
Use the corresponding input channel name for your example.
|
||||
|
||||
[source]
|
||||
----
|
||||
@SpringBootApplication
|
||||
@EnableBinding(Sink.class)
|
||||
public class ManuallyAcknowledgingConsumer {
|
||||
|
||||
public static void main(String[] args) {
|
||||
    SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
|
||||
}
|
||||
|
||||
@StreamListener(Sink.INPUT)
|
||||
public void process(Message<?> message) {
|
||||
Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
|
||||
if (acknowledgment != null) {
|
||||
System.out.println("Acknowledgment provided");
|
||||
acknowledgment.acknowledge();
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
|
||||
===== Example: Security Configuration
|
||||
|
||||
Apache Kafka 0.9 supports secure connections between client and brokers.
|
||||
To take advantage of this feature, follow the guidelines in the https://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 https://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
|
||||
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.
|
||||
|
||||
For example, to set `security.protocol` to `SASL_SSL`, set the following property:
|
||||
|
||||
[source]
|
||||
----
|
||||
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
|
||||
----
|
||||
|
||||
All the other security properties can be set in a similar manner.
|
||||
|
||||
When using Kerberos, follow the instructions in the https://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.

Spring Cloud Stream supports passing JAAS configuration information to the application by using either a JAAS configuration file or Spring Boot properties.

====== Using JAAS Configuration Files

The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:

[source,bash]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----

====== Using Spring Boot Properties

As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.

The following properties can be used to configure the login context of the Kafka client:

spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. It is not normally necessary to set this.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
A map of key/value pairs containing the login module options.
+
Default: Empty map.

The following example shows how to launch a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:

[source,bash]
----
java --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.autoCreateTopics=false \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
   --spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
   --spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
   --spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
   --spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----

The preceding example represents the equivalent of the following JAAS file:

[source]
----
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/etc/security/keytabs/kafka_client.keytab"
    principal="kafka-client-1@EXAMPLE.COM";
};
----

If the required topics already exist on the broker or will be created by an administrator, autocreation can be turned off and only the client JAAS properties need to be sent.

NOTE: Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.

NOTE: Be careful when using the `autoCreateTopics` and `autoAddPartitions` properties with Kerberos.
Usually, applications may use principals that do not have administrative rights in Kafka and Zookeeper.
Consequently, relying on Spring Cloud Stream to create or modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.

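For example, an administrator might pre-create a partitioned topic with the Kafka tooling of that era; the ZooKeeper address, topic name, and counts below are placeholders:

[source,bash]
----
kafka-topics.sh --create --zookeeper zk.server:2181 \
  --topic stream.ticktock --partitions 12 --replication-factor 3
----
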
[[pause-resume]]
===== Example: Pausing and Resuming the Consumer

If you wish to suspend consumption but not cause a partition rebalance, you can pause and resume the consumer.
This is facilitated by adding the `Consumer` as a parameter to your `@StreamListener`.
To resume, you need an `ApplicationListener` for `ListenerContainerIdleEvent` instances.
The frequency at which events are published is controlled by the `idleEventInterval` property.
Since the consumer is not thread-safe, you must call these methods on the calling thread.

The following simple application shows how to pause and resume:

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void in(String in, @Header(KafkaHeaders.CONSUMER) Consumer<?, ?> consumer) {
        System.out.println(in);
        consumer.pause(Collections.singleton(new TopicPartition("myTopic", 0)));
    }

    @Bean
    public ApplicationListener<ListenerContainerIdleEvent> idleListener() {
        return event -> {
            System.out.println(event);
            if (event.getConsumer().paused().size() > 0) {
                event.getConsumer().resume(event.getConsumer().paused());
            }
        };
    }

}
----

[[kafka-transactional-binder]]
=== Transactional Binder

Enable transactions by setting `spring.cloud.stream.kafka.binder.transaction.transactionIdPrefix` to a non-empty value, e.g. `tx-`.
When used in a processor application, the consumer starts the transaction; any records sent on the consumer thread participate in the same transaction.
When the listener exits normally, the listener container sends the offset to the transaction and commits it.
A common producer factory is used for all producer bindings configured with the `spring.cloud.stream.kafka.binder.transaction.producer.*` properties; individual binding Kafka producer properties are ignored.

IMPORTANT: Normal binder retries (and dead lettering) are not supported with transactions, because the retries run in the original transaction, which may be rolled back, in which case any published records are rolled back too.
When retries are enabled (the common property `maxAttempts` is greater than zero), the retry properties are used to configure a `DefaultAfterRollbackProcessor` to enable retries at the container level.
Similarly, instead of publishing dead-letter records within the transaction, this functionality is moved to the listener container, again via the `DefaultAfterRollbackProcessor`, which runs after the main transaction has rolled back.

If you wish to use transactions in a source application, or from some arbitrary thread for producer-only transactions (e.g. a `@Scheduled` method), you must get a reference to the transactional producer factory and define a `KafkaTransactionManager` bean using it.

====
[source, java]
----
@Bean
public PlatformTransactionManager transactionManager(BinderFactory binders,
        @Value("${unique.tx.id.per.instance}") String txId) {

    ProducerFactory<byte[], byte[]> pf = ((KafkaMessageChannelBinder) binders.getBinder(null,
            MessageChannel.class)).getTransactionalProducerFactory();
    KafkaTransactionManager<byte[], byte[]> tm = new KafkaTransactionManager<>(pf);
    tm.setTransactionIdPrefix(txId);
    return tm;
}
----
====

Notice that we get a reference to the binder by using the `BinderFactory`; use `null` in the first argument when there is only one binder configured.
If more than one binder is configured, use the binder name to get the reference.
Once we have a reference to the binder, we can obtain a reference to the `ProducerFactory` and create a transaction manager.

You can then use normal Spring transaction support, e.g. `TransactionTemplate` or `@Transactional`:

====
[source, java]
----
public static class Sender {

    @Transactional
    public void doInTransaction(MessageChannel output, List<String> stuffToSend) {
        stuffToSend.forEach(stuff -> output.send(new GenericMessage<>(stuff)));
    }

}
----
====

If you wish to synchronize producer-only transactions with those from some other transaction manager, use a `ChainedTransactionManager`.

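The following is a minimal sketch of such an arrangement, assuming a JPA transaction manager is the other participant (the bean name and the injected `JpaTransactionManager` are illustrative assumptions, not part of the binder API):

[source, java]
----
@Bean
public ChainedTransactionManager chainedTransactionManager(JpaTransactionManager jpaTm,
        KafkaTransactionManager<byte[], byte[]> kafkaTm) {

    // transactions start in the given order and commit (or roll back) in reverse order
    return new ChainedTransactionManager(kafkaTm, jpaTm);
}
----
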
IMPORTANT: If you deploy multiple instances of your application, each instance needs a unique `transactionIdPrefix`.

[[kafka-error-channels]]
=== Error Channels

Starting with version 1.3, the binder unconditionally sends exceptions to an error channel for each consumer destination and can also be configured to send async producer send failures to an error channel.
See <<spring-cloud-stream-overview-error-handling>> for more information.

The payload of the `ErrorMessage` for a send failure is a `KafkaSendFailureException` with the following properties:

* `failedMessage`: The Spring Messaging `Message<?>` that failed to be sent.
* `record`: The raw `ProducerRecord` that was created from the `failedMessage`.

There is no automatic handling of producer exceptions (such as sending to a <<kafka-dlq-processing, dead-letter queue>>).
You can consume these exceptions with your own Spring Integration flow.
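For example, a minimal sketch might log failed sends with a `@ServiceActivator` subscribed to the producer's error channel; the channel name below follows the `<destination>.errors` convention and is an assumption for illustration:

[source, java]
----
@ServiceActivator(inputChannel = "stream.ticktock.errors")
public void handleSendFailure(ErrorMessage errorMessage) {
    // the payload carries both the failed Spring message and the raw ProducerRecord
    KafkaSendFailureException failure = (KafkaSendFailureException) errorMessage.getPayload();
    System.out.println("Failed to send: " + failure.getRecord());
}
----
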
[[kafka-metrics]]
=== Kafka Metrics

The Kafka binder module exposes the following metric:

`spring.cloud.stream.binder.kafka.offset`: This metric indicates how many messages have not yet been consumed from a given binder's topic by a given consumer group.
The metrics provided are based on the Micrometer metrics library. The metric contains the consumer group information, the topic, and the actual lag of the committed offset behind the latest offset on the topic.
This metric is particularly useful for providing auto-scaling feedback to a PaaS platform.

[[kafka-tombstones]]
=== Tombstone Records (null record values)

When using compacted topics, a record with a `null` value (also called a tombstone record) represents the deletion of a key.
To receive such messages in a `@StreamListener` method, the parameter must be marked as not required, so that it can receive a `null` payload, as the following example shows:

====
[source, java]
----
@StreamListener(Sink.INPUT)
public void in(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) byte[] key,
               @Payload(required = false) Customer customer) {
    // customer is null if this is a tombstone record
    ...
}
----
====

[[rebalance-listener]]
=== Using a KafkaBindingRebalanceListener

Applications may wish to seek topics/partitions to arbitrary offsets when the partitions are initially assigned, or perform other operations on the consumer.
Starting with version 2.1, if you provide a single `KafkaBindingRebalanceListener` bean in the application context, it is wired into all Kafka consumer bindings:

====
[source, java]
----
public interface KafkaBindingRebalanceListener {

    /**
     * Invoked by the container before any pending offsets are committed.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     */
    default void onPartitionsRevokedBeforeCommit(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions) {

    }

    /**
     * Invoked by the container after any pending offsets are committed.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     */
    default void onPartitionsRevokedAfterCommit(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions) {

    }

    /**
     * Invoked when partitions are initially assigned or after a rebalance.
     * Applications might only want to perform seek operations on an initial assignment.
     * @param bindingName the name of the binding.
     * @param consumer the consumer.
     * @param partitions the partitions.
     * @param initial true if this is the initial assignment.
     */
    default void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
            Collection<TopicPartition> partitions, boolean initial) {

    }

}
----
====

You cannot set the `resetOffsets` consumer property to `true` when you provide a rebalance listener.

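A minimal sketch of such a bean follows, seeking to the beginning of each assigned partition on the initial assignment only:

[source, java]
----
@Bean
public KafkaBindingRebalanceListener rebalanceListener() {
    return new KafkaBindingRebalanceListener() {

        @Override
        public void onPartitionsAssigned(String bindingName, Consumer<?, ?> consumer,
                Collection<TopicPartition> partitions, boolean initial) {
            if (initial) {
                // replay the whole topic once, on the first assignment after startup
                consumer.seekToBeginning(partitions);
            }
        }

    };
}
----
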
@@ -1,103 +0,0 @@
=== Partitioning with the Kafka Binder

Apache Kafka supports topic partitioning natively.

Sometimes it is advantageous to send data to specific partitions -- for example, when you want to strictly order message processing (all messages for a particular customer should go to the same partition).

The following example shows how to configure the producer and consumer side:

[source, java]
----
@SpringBootApplication
@EnableBinding(Source.class)
public class KafkaPartitionProducerApplication {

    private static final Random RANDOM = new Random(System.currentTimeMillis());

    private static final String[] data = new String[] {
            "foo1", "bar1", "qux1",
            "foo2", "bar2", "qux2",
            "foo3", "bar3", "qux3",
            "foo4", "bar4", "qux4",
    };

    public static void main(String[] args) {
        new SpringApplicationBuilder(KafkaPartitionProducerApplication.class)
            .web(false)
            .run(args);
    }

    @InboundChannelAdapter(channel = Source.OUTPUT, poller = @Poller(fixedRate = "5000"))
    public Message<?> generate() {
        String value = data[RANDOM.nextInt(data.length)];
        System.out.println("Sending: " + value);
        return MessageBuilder.withPayload(value)
                .setHeader("partitionKey", value)
                .build();
    }

}
----

.application.yml
[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        output:
          destination: partitioned.topic
          producer:
            partition-key-expression: headers['partitionKey']
            partition-count: 12
----

IMPORTANT: The topic must be provisioned to have enough partitions to achieve the desired concurrency for all consumer groups.
The above configuration supports up to 12 consumer instances (6 if their `concurrency` is 2, 4 if their `concurrency` is 3, and so on).
It is generally best to "`over-provision`" the partitions to allow for future increases in consumers or concurrency.

NOTE: The preceding configuration uses the default partitioning (`key.hashCode() % partitionCount`).
This may or may not provide a suitably balanced algorithm, depending on the key values.
You can override this default by using the `partitionSelectorExpression` or `partitionSelectorClass` properties.

Since partitions are natively handled by Kafka, no special configuration is needed on the consumer side.
Kafka allocates partitions across the instances.

The following Spring Boot application listens to a Kafka stream and prints (to the console) the partition ID to which each message goes:

[source, java]
----
@SpringBootApplication
@EnableBinding(Sink.class)
public class KafkaPartitionConsumerApplication {

    public static void main(String[] args) {
        new SpringApplicationBuilder(KafkaPartitionConsumerApplication.class)
            .web(false)
            .run(args);
    }

    @StreamListener(Sink.INPUT)
    public void listen(@Payload String in, @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
        System.out.println(in + " received from partition " + partition);
    }

}
----

.application.yml
[source, yaml]
----
spring:
  cloud:
    stream:
      bindings:
        input:
          destination: partitioned.topic
          group: myGroup
----

You can add instances as needed.
Kafka rebalances the partition allocations.
If the instance count (or `instance count * concurrency`) exceeds the number of partitions, some consumers are idle.

@@ -1,3 +0,0 @@
include::overview.adoc[leveloffset=+1]
include::dlq.adoc[leveloffset=+1]
include::partitions.adoc[leveloffset=+1]

@@ -1,53 +0,0 @@
:github-tag: master
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
:github-raw: https://raw.githubusercontent.com/{github-repo}/{github-tag}
:github-code: https://github.com/{github-repo}/tree/{github-tag}
:toc: left
:toclevels: 8
:nofooter:
:sectlinks: true


[[spring-cloud-stream-binder-kafka-reference]]
= Spring Cloud Stream Kafka Binder Reference Guide
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek, Gary Russell, Arnaud Jardiné, Soby Chacko
:doctype: book
:toc:
:toclevels: 4
:source-highlighter: prettify
:numbered:
:icons: font
:hide-uri-scheme:
:spring-cloud-stream-binder-kafka-repo: snapshot
:github-tag: master
:spring-cloud-stream-binder-kafka-docs-version: current
:spring-cloud-stream-binder-kafka-docs: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
:spring-cloud-stream-binder-kafka-docs-current: https://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
:github-raw: https://raw.github.com/{github-repo}/{github-tag}
:github-code: https://github.com/{github-repo}/tree/{github-tag}
:github-wiki: https://github.com/{github-repo}/wiki
:github-master-code: https://github.com/{github-repo}/tree/master
:sc-ext: java
// ======================================================================================


*{spring-cloud-stream-version}*


= Reference Guide
include::overview.adoc[]

include::dlq.adoc[]

include::partitions.adoc[]

include::kafka-streams.adoc[]

= Appendices
[appendix]
include::building.adoc[]

include::contributing.adoc[]

// ======================================================================================

@@ -1,37 +0,0 @@
#!/usr/bin/env ruby

base_dir = File.join(File.dirname(__FILE__),'../../..')
src_dir = File.join(base_dir, "/src/main/asciidoc")
require 'asciidoctor'
require 'optparse'

options = {}
file = "#{src_dir}/README.adoc"

OptionParser.new do |o|
  o.on('-o OUTPUT_FILE', 'Output file (default is stdout)') { |file| options[:to_file] = file unless file=='-' }
  o.on('-h', '--help') { puts o; exit }
  o.parse!
end

file = ARGV[0] if ARGV.length>0

# Copied from https://github.com/asciidoctor/asciidoctor-extensions-lab/blob/master/scripts/asciidoc-coalescer.rb
doc = Asciidoctor.load_file file, safe: :unsafe, header_only: true, attributes: options[:attributes]
header_attr_names = (doc.instance_variable_get :@attributes_modified).to_a
header_attr_names.each {|k| doc.attributes[%(#{k}!)] = '' unless doc.attr? k }
attrs = doc.attributes
attrs['allow-uri-read'] = true
puts attrs

out = "// Do not edit this file (e.g. go instead to src/main/asciidoc)\n\n"
doc = Asciidoctor.load_file file, safe: :unsafe, parse: false, attributes: attrs
out << doc.reader.read

unless options[:to_file]
  puts out
else
  File.open(options[:to_file],'w+') do |file|
    file.write(out)
  end
end

189
mvnw
vendored
@@ -19,7 +19,7 @@
# ----------------------------------------------------------------------------

# ----------------------------------------------------------------------------
# Maven Start Up Batch script
# Maven2 Start Up Batch script
#
# Required ENV vars:
# ------------------
@@ -54,16 +54,38 @@ case "`uname`" in
  CYGWIN*) cygwin=true ;;
  MINGW*) mingw=true;;
  Darwin*) darwin=true
    # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home
    # See https://developer.apple.com/library/mac/qa/qa1170/_index.html
    if [ -z "$JAVA_HOME" ]; then
      if [ -x "/usr/libexec/java_home" ]; then
        export JAVA_HOME="`/usr/libexec/java_home`"
      else
        export JAVA_HOME="/Library/Java/Home"
      fi
    fi
    ;;
    #
    # Look for the Apple JDKs first to preserve the existing behaviour, and then look
    # for the new JDKs provided by Oracle.
    #
    if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK ] ; then
      #
      # Apple JDKs
      #
      export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
    fi

    if [ -z "$JAVA_HOME" ] && [ -L /System/Library/Java/JavaVirtualMachines/CurrentJDK ] ; then
      #
      # Apple JDKs
      #
      export JAVA_HOME=/System/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
    fi

    if [ -z "$JAVA_HOME" ] && [ -L "/Library/Java/JavaVirtualMachines/CurrentJDK" ] ; then
      #
      # Oracle JDKs
      #
      export JAVA_HOME=/Library/Java/JavaVirtualMachines/CurrentJDK/Contents/Home
    fi

    if [ -z "$JAVA_HOME" ] && [ -x "/usr/libexec/java_home" ]; then
      #
      # Apple JDKs
      #
      export JAVA_HOME=`/usr/libexec/java_home`
    fi
    ;;
esac

if [ -z "$JAVA_HOME" ] ; then
@@ -108,12 +130,13 @@ if $cygwin ; then
  CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi

# For Mingw, ensure paths are in UNIX format before anything is touched
# For Migwn, ensure paths are in UNIX format before anything is touched
if $mingw ; then
  [ -n "$M2_HOME" ] &&
    M2_HOME="`(cd "$M2_HOME"; pwd)`"
  [ -n "$JAVA_HOME" ] &&
    JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`"
  # TODO classpath?
fi

if [ -z "$JAVA_HOME" ]; then
@@ -161,28 +184,27 @@ fi

CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher

# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
  [ -n "$M2_HOME" ] &&
    M2_HOME=`cygpath --path --windows "$M2_HOME"`
  [ -n "$JAVA_HOME" ] &&
    JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
  [ -n "$CLASSPATH" ] &&
    CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
fi

# traverses directory structure from process work directory to filesystem root
# first directory with .mvn subdirectory is considered project base directory
find_maven_basedir() {

  if [ -z "$1" ]
  then
    echo "Path not specified to find_maven_basedir"
    return 1
  fi

  basedir="$1"
  wdir="$1"
  local basedir=$(pwd)
  local wdir=$(pwd)
  while [ "$wdir" != '/' ] ; do
    if [ -d "$wdir"/.mvn ] ; then
      basedir=$wdir
      break
    fi
    # workaround for JBEAP-8937 (on Solaris 10/Sparc)
    if [ -d "${wdir}" ]; then
      wdir=`cd "$wdir/.."; pwd`
    fi
    # end of workaround
    wdir=$(cd "$wdir/.."; pwd)
  done
  echo "${basedir}"
}
@@ -194,108 +216,9 @@ concat_lines() {
  fi
}

BASE_DIR=`find_maven_basedir "$(pwd)"`
if [ -z "$BASE_DIR" ]; then
  exit 1;
fi

##########################################################################################
# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
# This allows using the maven wrapper in projects that prohibit checking in binary data.
##########################################################################################
if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then
  if [ "$MVNW_VERBOSE" = true ]; then
    echo "Found .mvn/wrapper/maven-wrapper.jar"
  fi
else
  if [ "$MVNW_VERBOSE" = true ]; then
    echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..."
  fi
  if [ -n "$MVNW_REPOURL" ]; then
    jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
  else
    jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
  fi
  while IFS="=" read key value; do
    case "$key" in (wrapperUrl) jarUrl="$value"; break ;;
    esac
  done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties"
  if [ "$MVNW_VERBOSE" = true ]; then
    echo "Downloading from: $jarUrl"
  fi
  wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar"
  if $cygwin; then
    wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"`
  fi

  if command -v wget > /dev/null; then
    if [ "$MVNW_VERBOSE" = true ]; then
      echo "Found wget ... using wget"
    fi
    if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
      wget "$jarUrl" -O "$wrapperJarPath"
    else
      wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath"
    fi
  elif command -v curl > /dev/null; then
    if [ "$MVNW_VERBOSE" = true ]; then
      echo "Found curl ... using curl"
    fi
    if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then
      curl -o "$wrapperJarPath" "$jarUrl" -f
    else
      curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f
    fi

  else
    if [ "$MVNW_VERBOSE" = true ]; then
      echo "Falling back to using Java to download"
    fi
    javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java"
    # For Cygwin, switch paths to Windows format before running javac
    if $cygwin; then
      javaClass=`cygpath --path --windows "$javaClass"`
    fi
    if [ -e "$javaClass" ]; then
      if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
        if [ "$MVNW_VERBOSE" = true ]; then
          echo " - Compiling MavenWrapperDownloader.java ..."
        fi
        # Compiling the Java class
        ("$JAVA_HOME/bin/javac" "$javaClass")
      fi
      if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then
        # Running the downloader
        if [ "$MVNW_VERBOSE" = true ]; then
          echo " - Running MavenWrapperDownloader.java ..."
        fi
        ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR")
      fi
    fi
  fi
fi
##########################################################################################
# End of extension
##########################################################################################

export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}
if [ "$MVNW_VERBOSE" = true ]; then
  echo $MAVEN_PROJECTBASEDIR
fi
export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-$(find_maven_basedir)}
MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS"

# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
  [ -n "$M2_HOME" ] &&
    M2_HOME=`cygpath --path --windows "$M2_HOME"`
  [ -n "$JAVA_HOME" ] &&
    JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
  [ -n "$CLASSPATH" ] &&
    CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
  [ -n "$MAVEN_PROJECTBASEDIR" ] &&
    MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"`
fi

# Provide a "standardized" way to retrieve the CLI args that will
# work with both Windows and non-Windows executions.
MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@"
@@ -303,8 +226,20 @@ export MAVEN_CMD_LINE_ARGS

WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain

echo "Running version check"
VERSION=$( sed '\!<parent!,\!</parent!d' `dirname $0`/pom.xml | grep '<version' | head -1 | sed -e 's/.*<version>//' -e 's!</version>.*$!!' )
echo "The found version is [${VERSION}]"

if echo $VERSION | egrep -q 'M|RC'; then
  echo Activating \"milestone\" profile for version=\"$VERSION\"
  echo $MAVEN_ARGS | grep -q milestone || MAVEN_ARGS="$MAVEN_ARGS -Pmilestone"
else
  echo Deactivating \"milestone\" profile for version=\"$VERSION\"
  echo $MAVEN_ARGS | grep -q milestone && MAVEN_ARGS=$(echo $MAVEN_ARGS | sed -e 's/-Pmilestone//')
fi

exec "$JAVACMD" \
  $MAVEN_OPTS \
  -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \
  "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \
  ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@"
  ${WRAPPER_LAUNCHER} ${MAVEN_ARGS} "$@"

53
mvnw.cmd
vendored
@@ -18,7 +18,7 @@
@REM ----------------------------------------------------------------------------

@REM ----------------------------------------------------------------------------
@REM Maven Start Up Batch script
@REM Maven2 Start Up Batch script
@REM
@REM Required ENV vars:
@REM JAVA_HOME - location of a JDK home dir
@@ -26,7 +26,7 @@
@REM Optional ENV vars
@REM M2_HOME - location of maven2's installed home dir
@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending
@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending
@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven
@REM e.g. to debug Maven itself, use
@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000
@@ -35,9 +35,7 @@

@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on'
@echo off
@REM set title of command window
title %0
@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on'
@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on'
@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO%

@REM set %HOME% to equivalent of $HOME
@@ -82,6 +80,8 @@ goto error

:init

set MAVEN_CMD_LINE_ARGS=%*

@REM Find the project base dir, i.e. the directory that contains the folder ".mvn".
@REM Fallback to current working directory if not found.

@@ -117,48 +117,11 @@ for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do s
:endReadAdditionalConfig

SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe"
set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar"

set WRAPPER_JAR="".\.mvn\wrapper\maven-wrapper.jar""
set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain

set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"

FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO (
    IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B
)

@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central
@REM This allows using the maven wrapper in projects that prohibit checking in binary data.
if exist %WRAPPER_JAR% (
    if "%MVNW_VERBOSE%" == "true" (
        echo Found %WRAPPER_JAR%
    )
) else (
    if not "%MVNW_REPOURL%" == "" (
        SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar"
    )
    if "%MVNW_VERBOSE%" == "true" (
        echo Couldn't find %WRAPPER_JAR%, downloading it ...
        echo Downloading from: %DOWNLOAD_URL%
    )

    powershell -Command "&{"^
        "$webclient = new-object System.Net.WebClient;"^
        "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^
        "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^
        "}"^
        "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^
        "}"
    if "%MVNW_VERBOSE%" == "true" (
        echo Finished downloading %WRAPPER_JAR%
    )
)
@REM End of extension

@REM Provide a "standardized" way to retrieve the CLI args that will
@REM work with both Windows and non-Windows executions.
set MAVEN_CMD_LINE_ARGS=%*

%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CMD_LINE_ARGS%
if ERRORLEVEL 1 goto error
goto end


163
pom.xml
@@ -1,67 +1,65 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
  <version>3.1.0-M2</version>
  <version>2.0.0.M1</version>
  <packaging>pom</packaging>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-build</artifactId>
    <version>3.0.0-M3</version>
    <version>2.0.0.M1</version>
    <relativePath />
  </parent>
  <properties>
    <java.version>1.8</java.version>
    <spring-kafka.version>2.5.3.RELEASE</spring-kafka.version>
    <spring-integration-kafka.version>3.3.0.RELEASE</spring-integration-kafka.version>
    <kafka.version>2.5.0</kafka.version>
    <spring-cloud-schema-registry.version>1.1.0-M2</spring-cloud-schema-registry.version>
    <spring-cloud-stream.version>3.1.0-M2</spring-cloud-stream.version>
    <maven-checkstyle-plugin.failsOnError>true</maven-checkstyle-plugin.failsOnError>
    <maven-checkstyle-plugin.failsOnViolation>true</maven-checkstyle-plugin.failsOnViolation>
    <maven-checkstyle-plugin.includeTestSourceDirectory>true</maven-checkstyle-plugin.includeTestSourceDirectory>
    <java.version>1.7</java.version>
    <kafka.version>0.10.2.0</kafka.version>
    <spring-kafka.version>2.0.0.M2</spring-kafka.version>
    <spring-integration-kafka.version>3.0.0.M1</spring-integration-kafka.version>
    <spring-cloud-stream.version>2.0.0.M1</spring-cloud-stream.version>
  </properties>
  <modules>
    <module>spring-cloud-stream-binder-kafka</module>
    <module>spring-cloud-starter-stream-kafka</module>
    <module>spring-cloud-stream-binder-kafka-docs</module>
    <module>spring-cloud-stream-binder-kafka-0.10.1-test</module>
    <module>spring-cloud-stream-binder-kafka-core</module>
    <module>spring-cloud-stream-binder-kafka-streams</module>
    <module>docs</module>
  </modules>

  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
        <version>${project.version}</version>
      </dependency>
      <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-binder-kafka</artifactId>
        <version>${project.version}</version>
      </dependency>
      <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream</artifactId>
        <version>${spring-cloud-stream.version}</version>
      </dependency>
      <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-stream-codec</artifactId>
        <version>${spring-cloud-stream.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <artifactId>kafka_2.11</artifactId>
        <version>${kafka.version}</version>
        <exclusions>
          <exclusion>
            <groupId>jline</groupId>
            <artifactId>jline</artifactId>
          </exclusion>
          <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
          </exclusion>
          <exclusion>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
          </exclusion>
        </exclusions>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>${kafka.version}</version>
        <classifier>test</classifier>
        <scope>test</scope>
      </dependency>
      <dependency>
        <groupId>org.springframework.kafka</groupId>
        <artifactId>spring-kafka</artifactId>
        <version>${spring-kafka.version}</version>
      </dependency>
      <dependency>
        <groupId>org.springframework.integration</groupId>
@@ -80,67 +78,35 @@
        <scope>test</scope>
        <version>${spring-kafka.version}</version>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams</artifactId>
        <version>${kafka.version}</version>
        <exclusions>
          <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
          </exclusion>
        </exclusions>
      </dependency>
      <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_2.11</artifactId>
        <classifier>test</classifier>
        <scope>test</scope>
        <version>${kafka.version}</version>
        <exclusions>
          <exclusion>
            <groupId>jline</groupId>
            <artifactId>jline</artifactId>
          </exclusion>
          <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
          </exclusion>
          <exclusion>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
          </exclusion>
        </exclusions>
      </dependency>
      <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-schema-registry-client</artifactId>
        <version>${spring-cloud-schema-registry.version}</version>
        <scope>test</scope>
      </dependency>
    </dependencies>
  </dependencyManagement>

  <dependencies>
    <dependency>
      <groupId>org.junit.vintage</groupId>
      <artifactId>junit-vintage-engine</artifactId>
      <scope>test</scope>
    </dependency>
  </dependencies>


  <build>
    <pluginManagement>
      <plugins>
        <plugin>
          <groupId>org.codehaus.mojo</groupId>
          <artifactId>flatten-maven-plugin</artifactId>
        </plugin>
        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-antrun-plugin</artifactId>
          <version>1.7</version>
        </plugin>
        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-checkstyle-plugin</artifactId>
          <version>2.17</version>
          <dependencies>
            <dependency>
              <groupId>com.puppycrawl.tools</groupId>
              <artifactId>checkstyle</artifactId>
              <version>7.1</version>
            </dependency>
          </dependencies>
        </plugin>
        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-javadoc-plugin</artifactId>
@@ -161,6 +127,29 @@
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-checkstyle-plugin</artifactId>
        <dependencies>
          <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-build-tools</artifactId>
            <version>2.0.0.M1</version>
          </dependency>
        </dependencies>
        <executions>
          <execution>
            <id>checkstyle-validation</id>
            <phase>validate</phase>
            <configuration>
              <configLocation>checkstyle.xml</configLocation>
              <encoding>UTF-8</encoding>
              <consoleOutput>true</consoleOutput>
              <failsOnError>true</failsOnError>
              <includeTestSourceDirectory>true</includeTestSourceDirectory>
            </configuration>
            <goals>
              <goal>check</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
@@ -172,7 +161,7 @@
      <repository>
        <id>spring-snapshots</id>
        <name>Spring Snapshots</name>
        <url>https://repo.spring.io/libs-snapshot-local</url>
        <url>http://repo.spring.io/libs-snapshot-local</url>
        <snapshots>
          <enabled>true</enabled>
        </snapshots>
@@ -183,7 +172,7 @@
      <repository>
        <id>spring-milestones</id>
        <name>Spring Milestones</name>
        <url>https://repo.spring.io/libs-milestone-local</url>
        <url>http://repo.spring.io/libs-milestone-local</url>
        <snapshots>
          <enabled>false</enabled>
        </snapshots>
@@ -191,7 +180,7 @@
      <repository>
        <id>spring-releases</id>
        <name>Spring Releases</name>
        <url>https://repo.spring.io/release</url>
        <url>http://repo.spring.io/release</url>
        <snapshots>
          <enabled>false</enabled>
        </snapshots>
@@ -201,7 +190,7 @@
      <pluginRepository>
        <id>spring-snapshots</id>
        <name>Spring Snapshots</name>
        <url>https://repo.spring.io/libs-snapshot-local</url>
        <url>http://repo.spring.io/libs-snapshot-local</url>
        <snapshots>
          <enabled>true</enabled>
        </snapshots>
@@ -212,7 +201,7 @@
      <pluginRepository>
        <id>spring-milestones</id>
        <name>Spring Milestones</name>
        <url>https://repo.spring.io/libs-milestone-local</url>
        <url>http://repo.spring.io/libs-milestone-local</url>
        <snapshots>
          <enabled>false</enabled>
        </snapshots>
@@ -220,7 +209,7 @@
      <pluginRepository>
        <id>spring-releases</id>
        <name>Spring Releases</name>
        <url>https://repo.spring.io/libs-release-local</url>
        <url>http://repo.spring.io/libs-release-local</url>
        <snapshots>
          <enabled>false</enabled>
        </snapshots>
@@ -228,12 +217,4 @@
  </pluginRepositories>
  </profile>
  </profiles>
  <reporting>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-checkstyle-plugin</artifactId>
      </plugin>
    </plugins>
  </reporting>
</project>

@@ -1,17 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
    <version>3.1.0-M2</version>
    <version>2.0.0.M1</version>
  </parent>
  <artifactId>spring-cloud-starter-stream-kafka</artifactId>
  <description>Spring Cloud Starter Stream Kafka</description>
  <url>https://projects.spring.io/spring-cloud</url>
  <url>http://projects.spring.io/spring-cloud</url>
  <organization>
    <name>Pivotal Software, Inc.</name>
    <url>https://www.spring.io</url>
    <url>http://www.spring.io</url>
  </organization>
  <properties>
    <main.basedir>${basedir}/../..</main.basedir>

@@ -0,0 +1 @@
provides: spring-cloud-starter-stream-kafka
120
spring-cloud-stream-binder-kafka-0.10.1-test/pom.xml
Normal file
@@ -0,0 +1,120 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
    <version>2.0.0.M1</version>
  </parent>
  <artifactId>spring-cloud-stream-binder-kafka-0.10.1-test</artifactId>
  <description>Spring Cloud Stream Kafka Binder 0.10.1 Tests</description>
  <url>http://projects.spring.io/spring-cloud</url>
  <organization>
    <name>Pivotal Software, Inc.</name>
    <url>http://www.spring.io</url>
  </organization>
  <properties>
    <main.basedir>${basedir}/../..</main.basedir>
    <kafka.version>0.10.1.1</kafka.version>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka</artifactId>
      <version>${project.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.11</artifactId>
      <scope>test</scope>
      <exclusions>
        <exclusion>
          <groupId>org.slf4j</groupId>
          <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.kafka</groupId>
      <artifactId>spring-kafka-test</artifactId>
      <version>1.1.6.RELEASE</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.integration</groupId>
      <artifactId>spring-integration-kafka</artifactId>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-kafka</artifactId>
      <version>${project.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-binder-test</artifactId>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.springframework.cloud</groupId>
      <artifactId>spring-cloud-stream-schema</artifactId>
      <version>${spring-cloud-stream.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>
      <artifactId>kafka-avro-serializer</artifactId>
      <version>3.1.2</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>
      <artifactId>kafka-schema-registry</artifactId>
      <version>3.1.2</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <repositories>
    <repository>
      <id>confluent</id>
      <url>http://packages.confluent.io/maven/</url>
    </repository>
  </repositories>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <version>3.0.2</version>
        <executions>
          <execution>
            <goals>
              <goal>test-jar</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>

</project>
@@ -0,0 +1,60 @@
/*
 * Copyright 2015-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.provisioning.KafkaTopicProvisioner;
import org.springframework.context.support.GenericApplicationContext;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;

/**
 * Test support class for {@link KafkaMessageChannelBinder}.
 * @author Eric Bottard
 * @author Marius Bogoevici
 * @author David Turanski
 * @author Gary Russell
 * @author Soby Chacko
 */
public class Kafka10TestBinder extends AbstractKafkaTestBinder {

    public Kafka10TestBinder(KafkaBinderConfigurationProperties binderConfiguration) {
        try {
            AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();
            KafkaTopicProvisioner provisioningProvider =
                    new KafkaTopicProvisioner(binderConfiguration, adminUtilsOperation);
            provisioningProvider.afterPropertiesSet();

            KafkaMessageChannelBinder binder = new KafkaMessageChannelBinder(binderConfiguration, provisioningProvider);

            binder.setCodec(AbstractKafkaTestBinder.getCodec());
            ProducerListener producerListener = new LoggingProducerListener();
            binder.setProducerListener(producerListener);
            GenericApplicationContext context = new GenericApplicationContext();
            context.refresh();
            binder.setApplicationContext(context);
            binder.afterPropertiesSet();
            this.setBinder(binder);
        }
        catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

}
@@ -0,0 +1,239 @@
/*
 * Copyright 2014-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;

import io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig;
import io.confluent.kafka.schemaregistry.rest.SchemaRegistryRestApplication;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.Deserializer;
import org.assertj.core.api.Assertions;
import org.eclipse.jetty.server.Server;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;

import org.springframework.cloud.stream.binder.Binder;
import org.springframework.cloud.stream.binder.Binding;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.Spy;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.QueueChannel;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.kafka.test.core.BrokerAddress;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.messaging.Message;
import org.springframework.messaging.MessageChannel;
import org.springframework.messaging.SubscribableChannel;
import org.springframework.messaging.support.MessageBuilder;

import static org.junit.Assert.assertTrue;

/**
 * Integration tests for the {@link KafkaMessageChannelBinder}.
 *
 * @author Eric Bottard
 * @author Marius Bogoevici
 * @author Mark Fisher
 * @author Ilayaperumal Gopinathan
 */
public class Kafka_0_10_1_BinderTests extends KafkaBinderTests {

    private final String CLASS_UNDER_TEST_NAME = KafkaMessageChannelBinder.class.getSimpleName();

    @ClassRule
    public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, 10);

    private Kafka10TestBinder binder;

    private Kafka10AdminUtilsOperation adminUtilsOperation = new Kafka10AdminUtilsOperation();

    @Override
    protected void binderBindUnbindLatency() throws InterruptedException {
        Thread.sleep(500);
    }

    @Override
    protected Kafka10TestBinder getBinder() {
        if (binder == null) {
            KafkaBinderConfigurationProperties binderConfiguration = createConfigurationProperties();
            binder = new Kafka10TestBinder(binderConfiguration);
        }
        return binder;
    }

    protected KafkaBinderConfigurationProperties createConfigurationProperties() {
        KafkaBinderConfigurationProperties binderConfiguration = new KafkaBinderConfigurationProperties();
        BrokerAddress[] brokerAddresses = embeddedKafka.getBrokerAddresses();
        List<String> bAddresses = new ArrayList<>();
        for (BrokerAddress bAddress : brokerAddresses) {
            bAddresses.add(bAddress.toString());
        }
        String[] foo = new String[bAddresses.size()];
        binderConfiguration.setBrokers(bAddresses.toArray(foo));
        binderConfiguration.setZkNodes(embeddedKafka.getZookeeperConnectionString());
        return binderConfiguration;
    }

    @Override
    protected int partitionSize(String topic) {
        return consumerFactory().createConsumer().partitionsFor(topic).size();
    }

    @Override
    protected ZkUtils getZkUtils(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
        final ZkClient zkClient = new ZkClient(kafkaBinderConfigurationProperties.getZkConnectionString(),
                kafkaBinderConfigurationProperties.getZkSessionTimeout(), kafkaBinderConfigurationProperties.getZkConnectionTimeout(),
                ZKStringSerializer$.MODULE$);

        return new ZkUtils(zkClient, null, false);
    }

    @Override
    protected void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions, int replicationFactor, Properties topicConfig) {
        adminUtilsOperation.invokeCreateTopic(zkUtils, topic, partitions, replicationFactor, new Properties());
    }

    @Override
    protected int invokePartitionSize(String topic, ZkUtils zkUtils) {
        return adminUtilsOperation.partitionSize(topic, zkUtils);
    }

    @Override
    public String getKafkaOffsetHeaderKey() {
        return KafkaHeaders.OFFSET;
    }

    @Override
    protected Binder getBinder(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties) {
        return new Kafka10TestBinder(kafkaBinderConfigurationProperties);
    }

    @Before
    public void init() {
        String multiplier = System.getenv("KAFKA_TIMEOUT_MULTIPLIER");
        if (multiplier != null) {
            timeoutMultiplier = Double.parseDouble(multiplier);
        }
    }

    @Override
    protected boolean usesExplicitRouting() {
        return false;
    }

    @Override
    protected String getClassUnderTestName() {
        return CLASS_UNDER_TEST_NAME;
    }

    @Override
    public Spy spyOn(final String name) {
        throw new UnsupportedOperationException("'spyOn' is not used by Kafka tests");
    }


    private ConsumerFactory<byte[], byte[]> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, configurationProperties.getKafkaConnectionString());
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "TEST-CONSUMER-GROUP");
        Deserializer<byte[]> valueDecoder = new ByteArrayDeserializer();
        Deserializer<byte[]> keyDecoder = new ByteArrayDeserializer();

        return new DefaultKafkaConsumerFactory<>(props, keyDecoder, valueDecoder);
    }

    @Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testCustomAvroSerialization() throws Exception {
|
||||
KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
|
||||
final ZkClient zkClient = new ZkClient(configurationProperties.getZkConnectionString(),
|
||||
configurationProperties.getZkSessionTimeout(), configurationProperties.getZkConnectionTimeout(),
|
||||
ZKStringSerializer$.MODULE$);
|
||||
final ZkUtils zkUtils = new ZkUtils(zkClient, null, false);
|
||||
Map<String, Object> schemaRegistryProps = new HashMap<>();
|
||||
schemaRegistryProps.put("kafkastore.connection.url", configurationProperties.getZkConnectionString());
|
||||
schemaRegistryProps.put("listeners", "http://0.0.0.0:8082");
|
||||
schemaRegistryProps.put("port", "8082");
|
||||
schemaRegistryProps.put("kafkastore.topic", "_schemas");
|
||||
SchemaRegistryConfig config = new SchemaRegistryConfig(schemaRegistryProps);
|
||||
SchemaRegistryRestApplication app = new SchemaRegistryRestApplication(config);
|
||||
Server server = app.createServer();
|
||||
server.start();
|
||||
long endTime = System.currentTimeMillis() + 5000;
|
||||
while(true) {
|
||||
if (server.isRunning()) {
|
||||
break;
|
||||
}
|
||||
else if (System.currentTimeMillis() > endTime) {
|
||||
Assertions.fail("Kafka Schema Registry Server failed to start");
|
||||
}
|
||||
}
|
||||
User1 firstOutboundFoo = new User1();
|
||||
String userName1 = "foo-name" + UUID.randomUUID().toString();
|
||||
String favColor1 = "foo-color" + UUID.randomUUID().toString();
|
||||
firstOutboundFoo.setName(userName1);
|
||||
firstOutboundFoo.setFavoriteColor(favColor1);
|
||||
Message<?> message = MessageBuilder.withPayload(firstOutboundFoo).build();
|
||||
SubscribableChannel moduleOutputChannel = new DirectChannel();
|
||||
String testTopicName = "existing" + System.currentTimeMillis();
|
||||
invokeCreateTopic(zkUtils, testTopicName, 6, 1, new Properties());
|
||||
configurationProperties.setAutoAddPartitions(true);
|
||||
Binder binder = getBinder(configurationProperties);
|
||||
QueueChannel moduleInputChannel = new QueueChannel();
|
||||
ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
|
||||
producerProperties.getExtension().getConfiguration().put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
|
||||
producerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
|
||||
producerProperties.setUseNativeEncoding(true);
|
||||
Binding<MessageChannel> producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel, producerProperties);
|
||||
ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
|
||||
consumerProperties.getExtension().setAutoRebalanceEnabled(false);
|
||||
consumerProperties.getExtension().getConfiguration().put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
|
||||
consumerProperties.getExtension().getConfiguration().put("schema.registry.url", "http://localhost:8082");
|
||||
Binding<MessageChannel> consumerBinding = binder.bindConsumer(testTopicName, "test", moduleInputChannel, consumerProperties);
|
||||
// Let the consumer actually bind to the producer before sending a msg
|
||||
binderBindUnbindLatency();
|
||||
moduleOutputChannel.send(message);
|
||||
Message<?> inbound = receive(moduleInputChannel);
|
||||
Assertions.assertThat(inbound).isNotNull();
|
||||
assertTrue(message.getPayload() instanceof User1);
|
||||
User1 receivedUser = (User1) message.getPayload();
|
||||
Assertions.assertThat(receivedUser.getName()).isEqualTo(userName1);
|
||||
Assertions.assertThat(receivedUser.getFavoriteColor()).isEqualTo(favColor1);
|
||||
producerBinding.unbind();
|
||||
consumerBinding.unbind();
|
||||
}
|
||||
}
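The `consumerFactory()` helper above builds a plain byte-array consumer against the embedded broker. As a hedged sketch (not part of this compare), the same factory could be used directly to poll raw records; the topic name and poll timeout below are illustrative:

```java
import java.util.Collections;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

// Hypothetical usage of the byte[]/byte[] factory built in consumerFactory();
// "existing-topic" is a placeholder name, not taken from the source.
Consumer<byte[], byte[]> consumer = consumerFactory().createConsumer();
consumer.subscribe(Collections.singletonList("existing-topic"));
ConsumerRecords<byte[], byte[]> records = consumer.poll(1000); // old poll(long) API of the 0.10 client
for (ConsumerRecord<byte[], byte[]> record : records) {
	System.out.printf("offset=%d key-bytes=%d%n", record.offset(),
			record.key() == null ? 0 : record.key().length);
}
consumer.close();
```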
@@ -0,0 +1,85 @@
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka;

import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.reflect.Nullable;
import org.apache.avro.specific.SpecificRecordBase;

import org.springframework.core.io.ClassPathResource;

/**
 * @author Marius Bogoevici
 * @author Ilayaperumal Gopinathan
 */
public class User1 extends SpecificRecordBase {

	@Nullable
	private String name;

	@Nullable
	private String favoriteColor;

	public String getName() {
		return this.name;
	}

	public void setName(String name) {
		this.name = name;
	}

	public String getFavoriteColor() {
		return this.favoriteColor;
	}

	public void setFavoriteColor(String favoriteColor) {
		this.favoriteColor = favoriteColor;
	}

	@Override
	public Schema getSchema() {
		try {
			return new Schema.Parser().parse(new ClassPathResource("schemas/users_v1.schema").getInputStream());
		}
		catch (IOException e) {
			throw new IllegalStateException(e);
		}
	}

	@Override
	public Object get(int i) {
		if (i == 0) {
			// Return the field directly; calling toString() on a @Nullable String is redundant
			// and throws a NullPointerException when the field is unset.
			return getName();
		}
		if (i == 1) {
			return getFavoriteColor();
		}
		return null;
	}

	@Override
	public void put(int i, Object o) {
		if (i == 0) {
			setName((String) o);
		}
		if (i == 1) {
			setFavoriteColor((String) o);
		}
	}
}
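`getSchema()` above parses `schemas/users_v1.schema` from the classpath, but that resource is not included in this compare. A hedged sketch of an equivalent schema built with Avro's `SchemaBuilder`, assuming the two nullable string fields implied by the accessors (record name and namespace are guesses):

```java
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;

// Hypothetical equivalent of schemas/users_v1.schema: a record with two
// optional (nullable, default null) string fields matching User1's getters.
Schema userV1 = SchemaBuilder.record("User1")
		.namespace("org.springframework.cloud.stream.binder.kafka")
		.fields()
		.optionalString("name")
		.optionalString("favoriteColor")
		.endRecord();
```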
@@ -1,5 +0,0 @@
eclipse.preferences.version=1
org.eclipse.jdt.ui.ignorelowercasenames=true
org.eclipse.jdt.ui.importorder=java;javax;com;org;org.springframework;ch.qos;\#;
org.eclipse.jdt.ui.ondemandthreshold=99
org.eclipse.jdt.ui.staticondemandthreshold=99
@@ -1,49 +1,46 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>3.1.0-M2</version>
	</parent>
	<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
	<description>Spring Cloud Stream Kafka Binder Core</description>
	<url>https://projects.spring.io/spring-cloud</url>
	<organization>
		<name>Pivotal Software, Inc.</name>
		<url>https://www.spring.io</url>
	</organization>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>2.0.0.M1</version>
	</parent>
	<artifactId>spring-cloud-stream-binder-kafka-core</artifactId>
	<description>Spring Cloud Stream Kafka Binder Core</description>
	<url>http://projects.spring.io/spring-cloud</url>
	<organization>
		<name>Pivotal Software, Inc.</name>
		<url>http://www.spring.io</url>
	</organization>
	<properties>

	<dependencies>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.integration</groupId>
			<artifactId>spring-integration-kafka</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-configuration-processor</artifactId>
			<optional>true</optional>
		</dependency>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-test</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka-test</artifactId>
			<scope>test</scope>
		</dependency>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-test</artifactId>
			<scope>test</scope>
		</dependency>
	</dependencies>
	</properties>

	<dependencies>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream</artifactId>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-clients</artifactId>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka_2.11</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.integration</groupId>
			<artifactId>spring-integration-kafka</artifactId>
			<version>${spring-integration-kafka.version}</version>
			<exclusions>
				<exclusion>
					<groupId>org.apache.avro</groupId>
					<artifactId>avro-compiler</artifactId>
				</exclusion>
			</exclusions>
		</dependency>
	</dependencies>

</project>

@@ -0,0 +1,75 @@
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.admin;

import java.util.Properties;

import kafka.utils.ZkUtils;

/**
 * API around {@link kafka.admin.AdminUtils} to support
 * various versions of Kafka brokers.
 *
 * Note: implementations that support Kafka brokers other than 0.10 may need to
 * fall back to a reflection-based strategy around {@link kafka.admin.AdminUtils}.
 *
 * @author Soby Chacko
 */
public interface AdminUtilsOperation {

	/**
	 * Invoke {@link kafka.admin.AdminUtils#addPartitions}.
	 *
	 * @param zkUtils Zookeeper utils
	 * @param topic name of the topic
	 * @param numPartitions total number of partitions the topic should have
	 * @param replicaAssignmentStr manual replica assignment, if any
	 * @param checkBrokerAvailable whether to check broker availability first
	 */
	void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
			String replicaAssignmentStr, boolean checkBrokerAvailable);

	/**
	 * Invoke {@link kafka.admin.AdminUtils#fetchTopicMetadataFromZk}.
	 *
	 * @param topic topic name
	 * @param zkUtils Zookeeper utils
	 * @return error code from the topic metadata
	 */
	short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils);

	/**
	 * Find the partition size from the Kafka broker using {@link kafka.admin.AdminUtils}.
	 *
	 * @param topic topic name
	 * @param zkUtils Zookeeper utils
	 * @return partition size
	 */
	int partitionSize(String topic, ZkUtils zkUtils);

	/**
	 * Invoke {@link kafka.admin.AdminUtils#createTopic}.
	 *
	 * @param zkUtils Zookeeper utils
	 * @param topic topic name
	 * @param partitions number of partitions to create
	 * @param replicationFactor replication factor for the topic
	 * @param topicConfig additional topic-level configuration
	 */
	void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
			int replicationFactor, Properties topicConfig);
}
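One plausible way a binder could choose between the two implementations that follow this interface is by broker version; this selection logic and the `brokerVersion` variable are hypothetical, not part of this compare:

```java
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka09AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.admin.Kafka10AdminUtilsOperation;

// Hypothetical: assume the version string comes from binder configuration.
String brokerVersion = "0.10.1.1";
AdminUtilsOperation adminUtilsOperation =
		brokerVersion.startsWith("0.9")
				? new Kafka09AdminUtilsOperation()  // reflective, for 0.9 brokers
				: new Kafka10AdminUtilsOperation(); // direct calls, for 0.10 brokers
```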
@@ -0,0 +1,145 @@
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.admin;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Properties;

import kafka.utils.ZkUtils;

import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;

/**
 * @author Soby Chacko
 */
public class Kafka09AdminUtilsOperation implements AdminUtilsOperation {

	private static Class<?> ADMIN_UTIL_CLASS;

	static {
		try {
			ADMIN_UTIL_CLASS = ClassUtils.forName("kafka.admin.AdminUtils", null);
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("AdminUtils class not found", e);
		}
	}

	public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
			String replicaAssignmentStr, boolean checkBrokerAvailable) {
		try {
			Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
			Method addPartitions = null;
			for (Method m : declaredMethods) {
				if (m.getName().equals("addPartitions")) {
					addPartitions = m;
					break; // stop at the first match, as invokeCreateTopic below does
				}
			}
			if (addPartitions != null) {
				addPartitions.invoke(null, zkUtils, topic, numPartitions,
						replicaAssignmentStr, checkBrokerAvailable);
			}
			else {
				throw new InvocationTargetException(
						new RuntimeException("method not found"));
			}
		}
		catch (InvocationTargetException e) {
			ReflectionUtils.handleInvocationTargetException(e);
		}
		catch (IllegalAccessException e) {
			ReflectionUtils.handleReflectionException(e);
		}
	}

	public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
		try {
			Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
			Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
			Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);
			Method errorCodeMethod = ReflectionUtils.findMethod(topicMetadataClass, "errorCode");
			return (short) errorCodeMethod.invoke(result);
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("AdminUtils class not found", e);
		}
		catch (InvocationTargetException e) {
			ReflectionUtils.handleInvocationTargetException(e);
		}
		catch (IllegalAccessException e) {
			ReflectionUtils.handleReflectionException(e);
		}
		return 0;
	}

	@SuppressWarnings("unchecked")
	public int partitionSize(String topic, ZkUtils zkUtils) {
		try {
			Method fetchTopicMetadataFromZk = ReflectionUtils.findMethod(ADMIN_UTIL_CLASS, "fetchTopicMetadataFromZk", String.class, ZkUtils.class);
			Object result = fetchTopicMetadataFromZk.invoke(null, topic, zkUtils);
			Class<?> topicMetadataClass = ClassUtils.forName("kafka.api.TopicMetadata", null);

			Method partitionsMetadata = ReflectionUtils.findMethod(topicMetadataClass, "partitionsMetadata");
			scala.collection.Seq<kafka.api.PartitionMetadata> partitionSize =
					(scala.collection.Seq<kafka.api.PartitionMetadata>) partitionsMetadata.invoke(result);

			return partitionSize.size();
		}
		catch (ClassNotFoundException e) {
			throw new IllegalStateException("AdminUtils class not found", e);
		}
		catch (InvocationTargetException e) {
			ReflectionUtils.handleInvocationTargetException(e);
		}
		catch (IllegalAccessException e) {
			ReflectionUtils.handleReflectionException(e);
		}
		return 0;
	}

	public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
			int replicationFactor, Properties topicConfig) {
		try {
			Method[] declaredMethods = ADMIN_UTIL_CLASS.getDeclaredMethods();
			Method createTopic = null;
			for (Method m : declaredMethods) {
				if (m.getName().equals("createTopic")) {
					createTopic = m;
					break;
				}
			}
			if (createTopic != null) {
				createTopic.invoke(null, zkUtils, topic, partitions,
						replicationFactor, topicConfig);
			}
			else {
				throw new InvocationTargetException(
						new RuntimeException("method not found"));
			}
		}
		catch (InvocationTargetException e) {
			ReflectionUtils.handleInvocationTargetException(e);
		}
		catch (IllegalAccessException e) {
			ReflectionUtils.handleReflectionException(e);
		}
	}
}
@@ -0,0 +1,54 @@
/*
 * Copyright 2002-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.admin;

import java.util.Properties;

import kafka.admin.AdminUtils;
import kafka.utils.ZkUtils;
import org.apache.kafka.common.requests.MetadataResponse;

/**
 * @author Soby Chacko
 */
public class Kafka10AdminUtilsOperation implements AdminUtilsOperation {

	public void invokeAddPartitions(ZkUtils zkUtils, String topic, int numPartitions,
			String replicaAssignmentStr, boolean checkBrokerAvailable) {
		AdminUtils.addPartitions(zkUtils, topic, numPartitions, replicaAssignmentStr, checkBrokerAvailable, null);
	}

	public short errorCodeFromTopicMetadata(String topic, ZkUtils zkUtils) {
		MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
		return topicMetadata.error().code();
	}

	@SuppressWarnings("unchecked")
	public int partitionSize(String topic, ZkUtils zkUtils) {
		MetadataResponse.TopicMetadata topicMetadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkUtils);
		return topicMetadata.partitionMetadata().size();
	}

	public void invokeCreateTopic(ZkUtils zkUtils, String topic, int partitions,
			int replicationFactor, Properties topicConfig) {
		AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor,
				topicConfig, null);
	}
}
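A short usage sketch tying the 0.10 implementation to a topic; the `ZkClient` construction mirrors the test code earlier in this compare, while the connection string, timeouts, and topic name are illustrative:

```java
import java.util.Properties;

import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;

// Illustrative values; real code would take these from binder configuration.
ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000,
		ZKStringSerializer$.MODULE$);
ZkUtils zkUtils = new ZkUtils(zkClient, null, false);

AdminUtilsOperation ops = new Kafka10AdminUtilsOperation();
ops.invokeCreateTopic(zkUtils, "demo-topic", 6, 1, new Properties());
System.out.println("partitions: " + ops.partitionSize("demo-topic", zkUtils));
```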
@@ -1,11 +1,11 @@
/*
 * Copyright 2016-2018 the original author or authors.
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,10 +18,8 @@ package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.HashMap;
import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;

import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
import org.springframework.util.Assert;

/**
@@ -29,18 +27,17 @@ import org.springframework.util.Assert;
 * for the Kafka or Zookeeper client.
 *
 * @author Marius Bogoevici
 * @author Soby Chacko
 */
public class JaasLoginModuleConfiguration {

	private String loginModule = "com.sun.security.auth.module.Krb5LoginModule";

	private KafkaJaasLoginModuleInitializer.ControlFlag controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag.REQUIRED;
	private AppConfigurationEntry.LoginModuleControlFlag controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;

	private Map<String, String> options = new HashMap<>();
	private Map<String, String> options = new HashMap<>();

	public String getLoginModule() {
		return this.loginModule;
		return loginModule;
	}

	public void setLoginModule(String loginModule) {
@@ -48,22 +45,38 @@ public class JaasLoginModuleConfiguration {
		this.loginModule = loginModule;
	}

	public KafkaJaasLoginModuleInitializer.ControlFlag getControlFlag() {
		return this.controlFlag;
	public String getControlFlag() {
		return controlFlag.toString();
	}

	public AppConfigurationEntry.LoginModuleControlFlag getControlFlagValue() {
		return controlFlag;
	}

	public void setControlFlag(String controlFlag) {
		Assert.notNull(controlFlag, "'controlFlag' cannot be null");
		this.controlFlag = KafkaJaasLoginModuleInitializer.ControlFlag
				.valueOf(controlFlag.toUpperCase());
		// Compare by name: the original compared the LoginModuleControlFlag constants
		// against the String argument, which can never be equal.
		if ("optional".equalsIgnoreCase(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL;
		}
		else if ("required".equalsIgnoreCase(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
		}
		else if ("requisite".equalsIgnoreCase(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUISITE;
		}
		else if ("sufficient".equalsIgnoreCase(controlFlag)) {
			this.controlFlag = AppConfigurationEntry.LoginModuleControlFlag.SUFFICIENT;
		}
		else {
			throw new IllegalArgumentException(controlFlag + " is not a supported control flag");
		}
	}

	public Map<String, String> getOptions() {
		return this.options;
		return options;
	}

	public void setOptions(Map<String, String> options) {
		this.options = options;
	}

}
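A brief sketch of configuring the class above programmatically; the login module is the class's own default, the control flag value is one of the four accepted names, and the keytab options are illustrative `Krb5LoginModule` settings:

```java
JaasLoginModuleConfiguration jaas = new JaasLoginModuleConfiguration();
jaas.setLoginModule("com.sun.security.auth.module.Krb5LoginModule"); // the default
jaas.setControlFlag("required");
jaas.getOptions().put("useKeyTab", "true");           // standard Krb5LoginModule option
jaas.getOptions().put("keyTab", "/etc/kafka.keytab"); // illustrative path
```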

@@ -1,11 +1,11 @@
/*
 * Copyright 2015-2018 the original author or authors.
 * Copyright 2015-2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,131 +16,105 @@

package org.springframework.cloud.stream.binder.kafka.properties;

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.validation.constraints.AssertTrue;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotNull;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.HeaderMode;
import org.springframework.cloud.stream.binder.ProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties.CompressionType;
import org.springframework.expression.Expression;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

/**
 * Configuration properties for the Kafka binder. The properties in this class are
 * prefixed with <b>spring.cloud.stream.kafka.binder</b>.
 *
 * @author David Turanski
 * @author Ilayaperumal Gopinathan
 * @author Marius Bogoevici
 * @author Soby Chacko
 * @author Gary Russell
 * @author Rafal Zukowski
 * @author Aldo Sinanaj
 * @author Lukasz Kaminski
 */
@ConfigurationProperties(prefix = "spring.cloud.stream.kafka.binder")
public class KafkaBinderConfigurationProperties {

	private static final String DEFAULT_KAFKA_CONNECTION_STRING = "localhost:9092";
	private String[] zkNodes = new String[] {"localhost"};

	private final Log logger = LogFactory.getLog(getClass());
	private Map<String, Object> configuration = new HashMap<>();

	private final Transaction transaction = new Transaction();
	private String defaultZkPort = "2181";

	private final KafkaProperties kafkaProperties;

	/**
	 * Arbitrary Kafka properties that apply to both producers and consumers.
	 */
	private Map<String, String> configuration = new HashMap<>();

	/**
	 * Arbitrary Kafka consumer properties.
	 */
	private Map<String, String> consumerProperties = new HashMap<>();

	/**
	 * Arbitrary Kafka producer properties.
	 */
	private Map<String, String> producerProperties = new HashMap<>();

	private String[] brokers = new String[] { "localhost" };
	private String[] brokers = new String[] {"localhost"};

	private String defaultBrokerPort = "9092";

	private String[] headers = new String[] {};

	private int offsetUpdateTimeWindow = 10000;

	private int offsetUpdateCount;

	private int offsetUpdateShutdownTimeout = 2000;

	private int maxWait = 100;

	private boolean autoCreateTopics = true;

	private boolean autoAddPartitions;

	private String requiredAcks = "1";
	private int socketBufferSize = 2097152;

	private short replicationFactor = 1;
	/**
	 * ZK session timeout in milliseconds.
	 */
	private int zkSessionTimeout = 10000;

	/**
	 * ZK connection timeout in milliseconds.
	 */
	private int zkConnectionTimeout = 10000;

	private int requiredAcks = 1;

	private int replicationFactor = 1;

	private int fetchSize = 1024 * 1024;

	private int minPartitionCount = 1;

	/**
	 * Time to wait to get partition information, in seconds; default 60.
	 */
	private int healthTimeout = 60;
	private int queueSize = 8192;

	private JaasLoginModuleConfiguration jaas;

	/**
	 * The bean name of a custom header mapper to use instead of a
	 * {@link org.springframework.kafka.support.DefaultKafkaHeaderMapper}.
	 */
	private String headerMapperBeanName;

	/**
	 * Time between retries after an AuthorizationException is caught in
	 * the ListenerContainer; default is null, which disables retries.
	 * For more info see: {@link org.springframework.kafka.listener.ConsumerProperties#setAuthorizationExceptionRetryInterval(java.time.Duration)}
	 */
	private Duration authorizationExceptionRetryInterval;

	public KafkaBinderConfigurationProperties(KafkaProperties kafkaProperties) {
		Assert.notNull(kafkaProperties, "'kafkaProperties' cannot be null");
		this.kafkaProperties = kafkaProperties;
	}

	public KafkaProperties getKafkaProperties() {
		return this.kafkaProperties;
	}

	public Transaction getTransaction() {
		return this.transaction;
	public String getZkConnectionString() {
		return toConnectionString(this.zkNodes, this.defaultZkPort);
	}

	public String getKafkaConnectionString() {
		return toConnectionString(this.brokers, this.defaultBrokerPort);
	}

	public String getDefaultKafkaConnectionString() {
		return DEFAULT_KAFKA_CONNECTION_STRING;
	}

	public String[] getHeaders() {
		return this.headers;
	}

	public int getOffsetUpdateTimeWindow() {
		return this.offsetUpdateTimeWindow;
	}

	public int getOffsetUpdateCount() {
		return this.offsetUpdateCount;
	}

	public int getOffsetUpdateShutdownTimeout() {
		return this.offsetUpdateShutdownTimeout;
	}

	public String[] getZkNodes() {
		return this.zkNodes;
	}

	public void setZkNodes(String... zkNodes) {
		this.zkNodes = zkNodes;
	}

	public void setDefaultZkPort(String defaultZkPort) {
		this.defaultZkPort = defaultZkPort;
	}

	public String[] getBrokers() {
		return this.brokers;
	}
@@ -157,12 +131,38 @@ public class KafkaBinderConfigurationProperties {
		this.headers = headers;
	}

	public void setOffsetUpdateTimeWindow(int offsetUpdateTimeWindow) {
		this.offsetUpdateTimeWindow = offsetUpdateTimeWindow;
	}

	public void setOffsetUpdateCount(int offsetUpdateCount) {
		this.offsetUpdateCount = offsetUpdateCount;
	}

	public void setOffsetUpdateShutdownTimeout(int offsetUpdateShutdownTimeout) {
		this.offsetUpdateShutdownTimeout = offsetUpdateShutdownTimeout;
	}

	public int getZkSessionTimeout() {
		return this.zkSessionTimeout;
	}

	public void setZkSessionTimeout(int zkSessionTimeout) {
		this.zkSessionTimeout = zkSessionTimeout;
	}

	public int getZkConnectionTimeout() {
		return this.zkConnectionTimeout;
	}

	public void setZkConnectionTimeout(int zkConnectionTimeout) {
		this.zkConnectionTimeout = zkConnectionTimeout;
	}

	/**
	 * Converts an array of host values to a comma-separated String. It will append the
	 * default port value, if not already specified.
	 * @param hosts host string
	 * @param defaultPort port
	 * @return formatted connection string
	 * Converts an array of host values to a comma-separated String.
	 *
	 * It will append the default port value, if not already specified.
	 */
	private String toConnectionString(String[] hosts, String defaultPort) {
		String[] fullyFormattedHosts = new String[hosts.length];
@@ -177,22 +177,38 @@ public class KafkaBinderConfigurationProperties {
		return StringUtils.arrayToCommaDelimitedString(fullyFormattedHosts);
	}

	public String getRequiredAcks() {
	public int getMaxWait() {
		return this.maxWait;
	}

	public void setMaxWait(int maxWait) {
		this.maxWait = maxWait;
	}

	public int getRequiredAcks() {
		return this.requiredAcks;
	}

	public void setRequiredAcks(String requiredAcks) {
	public void setRequiredAcks(int requiredAcks) {
		this.requiredAcks = requiredAcks;
	}

	public short getReplicationFactor() {
	public int getReplicationFactor() {
		return this.replicationFactor;
	}

	public void setReplicationFactor(short replicationFactor) {
	public void setReplicationFactor(int replicationFactor) {
		this.replicationFactor = replicationFactor;
	}

	public int getFetchSize() {
		return this.fetchSize;
	}

	public void setFetchSize(int fetchSize) {
		this.fetchSize = fetchSize;
	}

	public int getMinPartitionCount() {
		return this.minPartitionCount;
	}
@@ -201,12 +217,12 @@ public class KafkaBinderConfigurationProperties {
		this.minPartitionCount = minPartitionCount;
	}

	public int getHealthTimeout() {
		return this.healthTimeout;
	public int getQueueSize() {
		return this.queueSize;
	}

	public void setHealthTimeout(int healthTimeout) {
		this.healthTimeout = healthTimeout;
	public void setQueueSize(int queueSize) {
		this.queueSize = queueSize;
	}

	public boolean isAutoCreateTopics() {
@@ -225,337 +241,28 @@ public class KafkaBinderConfigurationProperties {
		this.autoAddPartitions = autoAddPartitions;
	}

	public Map<String, String> getConfiguration() {
		return this.configuration;
	public int getSocketBufferSize() {
		return this.socketBufferSize;
	}

	public void setConfiguration(Map<String, String> configuration) {
	public void setSocketBufferSize(int socketBufferSize) {
		this.socketBufferSize = socketBufferSize;
	}

	public Map<String, Object> getConfiguration() {
		return configuration;
	}

	public void setConfiguration(Map<String, Object> configuration) {
		this.configuration = configuration;
	}

	public Map<String, String> getConsumerProperties() {
		return this.consumerProperties;
	}

	public void setConsumerProperties(Map<String, String> consumerProperties) {
		Assert.notNull(consumerProperties, "'consumerProperties' cannot be null");
		this.consumerProperties = consumerProperties;
	}

	public Map<String, String> getProducerProperties() {
		return this.producerProperties;
	}

	public void setProducerProperties(Map<String, String> producerProperties) {
		Assert.notNull(producerProperties, "'producerProperties' cannot be null");
		this.producerProperties = producerProperties;
	}

	/**
	 * Merge boot consumer properties, general properties from
	 * {@link #setConfiguration(Map)} that apply to consumers, and properties from
	 * {@link #setConsumerProperties(Map)}, in that order.
	 * @return the merged properties.
	 */
	public Map<String, Object> mergedConsumerConfiguration() {
		Map<String, Object> consumerConfiguration = new HashMap<>();
		consumerConfiguration.putAll(this.kafkaProperties.buildConsumerProperties());
		// Copy configured binder properties that apply to consumers
		for (Map.Entry<String, String> configurationEntry : this.configuration
				.entrySet()) {
			if (ConsumerConfig.configNames().contains(configurationEntry.getKey())) {
				consumerConfiguration.put(configurationEntry.getKey(),
						configurationEntry.getValue());
			}
		}
		consumerConfiguration.putAll(this.consumerProperties);
		filterStreamManagedConfiguration(consumerConfiguration);
		// Override the Spring Boot bootstrap server setting, if left at its default,
		// with the value configured in the binder
		return getConfigurationWithBootstrapServer(consumerConfiguration,
				ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
	}

	/**
	 * Merge boot producer properties, general properties from
	 * {@link #setConfiguration(Map)} that apply to producers, and properties from
	 * {@link #setProducerProperties(Map)}, in that order.
	 * @return the merged properties.
	 */
	public Map<String, Object> mergedProducerConfiguration() {
		Map<String, Object> producerConfiguration = new HashMap<>();
		producerConfiguration.putAll(this.kafkaProperties.buildProducerProperties());
		// Copy configured binder properties that apply to producers
		for (Map.Entry<String, String> configurationEntry : this.configuration
				.entrySet()) {
			if (ProducerConfig.configNames().contains(configurationEntry.getKey())) {
				producerConfiguration.put(configurationEntry.getKey(),
						configurationEntry.getValue());
			}
		}
		producerConfiguration.putAll(this.producerProperties);
		// Override the Spring Boot bootstrap server setting, if left at its default,
		// with the value configured in the binder
		return getConfigurationWithBootstrapServer(producerConfiguration,
				ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
	}

	private void filterStreamManagedConfiguration(Map<String, Object> configuration) {
		if (configuration.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)
				&& configuration.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG).equals(true)) {
			logger.warn(constructIgnoredConfigMessage(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG) +
					ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG + "=true is not supported by the Kafka binder");
			configuration.remove(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
		}
		if (configuration.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
			logger.warn(constructIgnoredConfigMessage(ConsumerConfig.GROUP_ID_CONFIG) +
					"Use spring.cloud.stream.default.group or spring.cloud.stream.binding.<name>.group to specify " +
					"the group instead of " + ConsumerConfig.GROUP_ID_CONFIG);
			configuration.remove(ConsumerConfig.GROUP_ID_CONFIG);
		}
	}

	private String constructIgnoredConfigMessage(String config) {
		return String.format("Ignoring provided value(s) for '%s'. ", config);
	}
	private Map<String, Object> getConfigurationWithBootstrapServer(
			Map<String, Object> configuration, String bootstrapServersConfig) {
		if (ObjectUtils.isEmpty(configuration.get(bootstrapServersConfig))) {
			configuration.put(bootstrapServersConfig, getKafkaConnectionString());
		}
		else {
			// Renamed from the misspelled "boostrapServersConfig", which also shadowed the parameter name
			Object bootstrapServersValue = configuration.get(bootstrapServersConfig);
			if (bootstrapServersValue instanceof List) {
				@SuppressWarnings("unchecked")
				List<String> bootStrapServers = (List<String>) configuration
						.get(bootstrapServersConfig);
				if (bootStrapServers.size() == 1
						&& bootStrapServers.get(0).equals("localhost:9092")) {
					configuration.put(bootstrapServersConfig, getKafkaConnectionString());
				}
			}
		}
		return Collections.unmodifiableMap(configuration);
	}

	public JaasLoginModuleConfiguration getJaas() {
		return this.jaas;
		return jaas;
	}

	public void setJaas(JaasLoginModuleConfiguration jaas) {
		this.jaas = jaas;
	}

	public String getHeaderMapperBeanName() {
		return this.headerMapperBeanName;
	}

	public void setHeaderMapperBeanName(String headerMapperBeanName) {
		this.headerMapperBeanName = headerMapperBeanName;
	}

	public Duration getAuthorizationExceptionRetryInterval() {
		return authorizationExceptionRetryInterval;
	}

	public void setAuthorizationExceptionRetryInterval(Duration authorizationExceptionRetryInterval) {
		this.authorizationExceptionRetryInterval = authorizationExceptionRetryInterval;
	}

	/**
	 * Domain class that models transaction capabilities in Kafka.
	 */
	public static class Transaction {

		private final CombinedProducerProperties producer = new CombinedProducerProperties();

		private String transactionIdPrefix;

		public String getTransactionIdPrefix() {
			return this.transactionIdPrefix;
		}

		public void setTransactionIdPrefix(String transactionIdPrefix) {
			this.transactionIdPrefix = transactionIdPrefix;
		}

		public CombinedProducerProperties getProducer() {
			return this.producer;
		}

	}

	/**
	 * A combination of {@link ProducerProperties} and {@link KafkaProducerProperties} so
	 * that common and kafka-specific properties can be set for the transactional
	 * producer.
	 *
	 * @since 2.1
	 */
	public static class CombinedProducerProperties {

		private final ProducerProperties producerProperties = new ProducerProperties();

		private final KafkaProducerProperties kafkaProducerProperties = new KafkaProducerProperties();

		public Expression getPartitionKeyExpression() {
			return this.producerProperties.getPartitionKeyExpression();
		}

		public void setPartitionKeyExpression(Expression partitionKeyExpression) {
			this.producerProperties.setPartitionKeyExpression(partitionKeyExpression);
		}

		public boolean isPartitioned() {
			return this.producerProperties.isPartitioned();
		}

		public Expression getPartitionSelectorExpression() {
			return this.producerProperties.getPartitionSelectorExpression();
		}

		public void setPartitionSelectorExpression(
				Expression partitionSelectorExpression) {
			this.producerProperties
					.setPartitionSelectorExpression(partitionSelectorExpression);
		}

		public @Min(value = 1, message = "Partition count should be greater than zero.") int getPartitionCount() {
			return this.producerProperties.getPartitionCount();
		}

		public void setPartitionCount(int partitionCount) {
			this.producerProperties.setPartitionCount(partitionCount);
		}

		public String[] getRequiredGroups() {
			return this.producerProperties.getRequiredGroups();
		}

		public void setRequiredGroups(String... requiredGroups) {
			this.producerProperties.setRequiredGroups(requiredGroups);
		}

		public @AssertTrue(message = "Partition key expression and partition key extractor class properties "
				+ "are mutually exclusive.") boolean isValidPartitionKeyProperty() {
			return this.producerProperties.isValidPartitionKeyProperty();
		}

		public @AssertTrue(message = "Partition selector class and partition selector expression "
				+ "properties are mutually exclusive.") boolean isValidPartitionSelectorProperty() {
			return this.producerProperties.isValidPartitionSelectorProperty();
		}

		public HeaderMode getHeaderMode() {
			return this.producerProperties.getHeaderMode();
		}

		public void setHeaderMode(HeaderMode headerMode) {
			this.producerProperties.setHeaderMode(headerMode);
		}

		public boolean isUseNativeEncoding() {
			return this.producerProperties.isUseNativeEncoding();
		}

		public void setUseNativeEncoding(boolean useNativeEncoding) {
			this.producerProperties.setUseNativeEncoding(useNativeEncoding);
		}

		public boolean isErrorChannelEnabled() {
			return this.producerProperties.isErrorChannelEnabled();
		}

		public void setErrorChannelEnabled(boolean errorChannelEnabled) {
			this.producerProperties.setErrorChannelEnabled(errorChannelEnabled);
		}

		public String getPartitionKeyExtractorName() {
			return this.producerProperties.getPartitionKeyExtractorName();
		}

		public void setPartitionKeyExtractorName(String partitionKeyExtractorName) {
			this.producerProperties
					.setPartitionKeyExtractorName(partitionKeyExtractorName);
		}

		public String getPartitionSelectorName() {
			return this.producerProperties.getPartitionSelectorName();
		}

		public void setPartitionSelectorName(String partitionSelectorName) {
			this.producerProperties.setPartitionSelectorName(partitionSelectorName);
		}

		public int getBufferSize() {
			return this.kafkaProducerProperties.getBufferSize();
		}

		public void setBufferSize(int bufferSize) {
			this.kafkaProducerProperties.setBufferSize(bufferSize);
		}

		public @NotNull CompressionType getCompressionType() {
			return this.kafkaProducerProperties.getCompressionType();
		}

		public void setCompressionType(CompressionType compressionType) {
			this.kafkaProducerProperties.setCompressionType(compressionType);
		}

		public boolean isSync() {
			return this.kafkaProducerProperties.isSync();
		}

		public void setSync(boolean sync) {
			this.kafkaProducerProperties.setSync(sync);
		}

		public int getBatchTimeout() {
			return this.kafkaProducerProperties.getBatchTimeout();
		}

		public void setBatchTimeout(int batchTimeout) {
			this.kafkaProducerProperties.setBatchTimeout(batchTimeout);
		}

		public Expression getMessageKeyExpression() {
			return this.kafkaProducerProperties.getMessageKeyExpression();
		}

		public void setMessageKeyExpression(Expression messageKeyExpression) {
			this.kafkaProducerProperties.setMessageKeyExpression(messageKeyExpression);
		}

		public String[] getHeaderPatterns() {
			return this.kafkaProducerProperties.getHeaderPatterns();
		}

		public void setHeaderPatterns(String[] headerPatterns) {
			this.kafkaProducerProperties.setHeaderPatterns(headerPatterns);
		}

		public Map<String, String> getConfiguration() {
			return this.kafkaProducerProperties.getConfiguration();
		}

		public void setConfiguration(Map<String, String> configuration) {
			this.kafkaProducerProperties.setConfiguration(configuration);
		}

		public KafkaTopicProperties getTopic() {
			return this.kafkaProducerProperties.getTopic();
		}

		public void setTopic(KafkaTopicProperties topic) {
			this.kafkaProducerProperties.setTopic(topic);
		}

		public KafkaProducerProperties getExtension() {
			return this.kafkaProducerProperties;
		}

	}

}
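Given the merge order documented on `mergedConsumerConfiguration()` (boot properties, then `configuration`, then `consumerProperties`), a later source overrides an earlier one. A hedged sketch of that precedence against the newer side of this diff; the property values are illustrative:

```java
import java.util.Map;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;

// Precedence sketch: Boot properties < binder `configuration` < `consumerProperties`.
KafkaBinderConfigurationProperties binderProps =
		new KafkaBinderConfigurationProperties(new KafkaProperties());
binderProps.getConfiguration().put("max.poll.records", "100");      // shared: consumers and producers
binderProps.getConsumerProperties().put("max.poll.records", "500"); // consumer-specific, wins

Map<String, Object> merged = binderProps.mergedConsumerConfiguration();
// merged.get("max.poll.records") -> "500"; bootstrap.servers falls back to the
// binder's broker list when Boot left it at its default.
```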
@@ -1,11 +1,11 @@
/*
 * Copyright 2016-2018 the original author or authors.
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,48 +16,28 @@

package org.springframework.cloud.stream.binder.kafka.properties;

import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;

/**
 * Container object for Kafka-specific extended producer and consumer binding properties.
 *
 * @author Marius Bogoevici
 * @author Oleg Zhurakousky
 */
public class KafkaBindingProperties implements BinderSpecificPropertiesProvider {
public class KafkaBindingProperties {

	/**
	 * Consumer specific binding properties. @see {@link KafkaConsumerProperties}.
	 */
	private KafkaConsumerProperties consumer = new KafkaConsumerProperties();

	/**
	 * Producer specific binding properties. @see {@link KafkaProducerProperties}.
	 */
	private KafkaProducerProperties producer = new KafkaProducerProperties();

	/**
	 * @return {@link KafkaConsumerProperties}
	 * Consumer specific binding properties. @see {@link KafkaConsumerProperties}.
	 */
	public KafkaConsumerProperties getConsumer() {
		return this.consumer;
		return consumer;
	}

	public void setConsumer(KafkaConsumerProperties consumer) {
		this.consumer = consumer;
	}

	/**
	 * @return {@link KafkaProducerProperties}
	 * Producer specific binding properties. @see {@link KafkaProducerProperties}.
	 */
	public KafkaProducerProperties getProducer() {
		return this.producer;
		return producer;
	}

	public void setProducer(KafkaProducerProperties producer) {
		this.producer = producer;
	}

}
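A sketch of populating this container programmatically, roughly what the per-binding `spring.cloud.stream.kafka.bindings.<binding>.*` properties would produce when bound; the chosen values are illustrative:

```java
// Hypothetical programmatic equivalent of, e.g.,
// spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
KafkaBindingProperties bindingProperties = new KafkaBindingProperties();
bindingProperties.getConsumer().setEnableDlq(true);
bindingProperties.getConsumer().setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
bindingProperties.getProducer().setSync(true); // block each send until acknowledged
```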
|
||||
|
||||
@@ -1,11 +1,11 @@
/*
* Copyright 2016-2019 the original author or authors.
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -20,33 +20,89 @@ import java.util.HashMap;
import java.util.Map;

/**
* Extended consumer properties for Kafka binder.
*
* @author Marius Bogoevici
* @author Ilayaperumal Gopinathan
* @author Soby Chacko
* @author Gary Russell
* @author Aldo Sinanaj
*
* <p>
* Thanks to Laszlo Szabo for providing the initial patch for generic property support.
* </p>
* <p>Thanks to Laszlo Szabo for providing the initial patch for generic property support.</p>
*/
public class KafkaConsumerProperties {

/**
* Enumeration for starting consumer offset.
*/
public enum StartOffset {
private boolean autoRebalanceEnabled = true;

/**
* Starting from earliest offset.
*/
earliest(-2L),
/**
* Starting from latest offset.
*/
latest(-1L);
private boolean autoCommitOffset = true;

private Boolean autoCommitOnError;

private boolean resetOffsets;

private StartOffset startOffset;

private boolean enableDlq;

private String dlqName;

private int recoveryInterval = 5000;

private Map<String, String> configuration = new HashMap<>();

public boolean isAutoCommitOffset() {
return this.autoCommitOffset;
}

public void setAutoCommitOffset(boolean autoCommitOffset) {
this.autoCommitOffset = autoCommitOffset;
}

public boolean isResetOffsets() {
return this.resetOffsets;
}

public void setResetOffsets(boolean resetOffsets) {
this.resetOffsets = resetOffsets;
}

public StartOffset getStartOffset() {
return this.startOffset;
}

public void setStartOffset(StartOffset startOffset) {
this.startOffset = startOffset;
}

public boolean isEnableDlq() {
return this.enableDlq;
}

public void setEnableDlq(boolean enableDlq) {
this.enableDlq = enableDlq;
}

public Boolean getAutoCommitOnError() {
return this.autoCommitOnError;
}

public void setAutoCommitOnError(Boolean autoCommitOnError) {
this.autoCommitOnError = autoCommitOnError;
}

public int getRecoveryInterval() {
return this.recoveryInterval;
}

public void setRecoveryInterval(int recoveryInterval) {
this.recoveryInterval = recoveryInterval;
}

public boolean isAutoRebalanceEnabled() {
return this.autoRebalanceEnabled;
}

public void setAutoRebalanceEnabled(boolean autoRebalanceEnabled) {
this.autoRebalanceEnabled = autoRebalanceEnabled;
}

public enum StartOffset {
earliest(-2L), latest(-1L);

private final long referencePoint;

@@ -57,278 +113,8 @@ public class KafkaConsumerProperties {
public long getReferencePoint() {
return this.referencePoint;
}

}

/**
* Standard headers for the message.
*/
public enum StandardHeaders {

/**
* No headers.
*/
none,
/**
* Message header representing ID.
*/
id,
/**
* Message header representing timestamp.
*/
timestamp,
/**
* Indicating both ID and timestamp headers.
*/
both

}

/**
* When true the offset is committed after each record, otherwise the offsets for the complete set of records
* received from the poll() are committed after all records have been processed.
*/
private boolean ackEachRecord;

/**
* When true, topic partitions is automatically rebalanced between the members of a consumer group.
* When false, each consumer is assigned a fixed set of partitions based on spring.cloud.stream.instanceCount and spring.cloud.stream.instanceIndex.
*/
private boolean autoRebalanceEnabled = true;

/**
* Whether to autocommit offsets when a message has been processed.
* If set to false, a header with the key kafka_acknowledgment of the type org.springframework.kafka.support.Acknowledgment header
* is present in the inbound message. Applications may use this header for acknowledging messages.
*/
private boolean autoCommitOffset = true;

/**
* Effective only if autoCommitOffset is set to true.
* If set to false, it suppresses auto-commits for messages that result in errors and commits only for successful messages.
* It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
* If set to true, it always auto-commits (if auto-commit is enabled).
* If not set (the default), it effectively has the same value as enableDlq,
* auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
*/
private Boolean autoCommitOnError;

/**
* The starting offset for new groups. Allowed values: earliest and latest.
*/
private StartOffset startOffset;

/**
* Whether to reset offsets on the consumer to the value provided by startOffset.
* Must be false if a KafkaRebalanceListener is provided.
*/
private boolean resetOffsets;

/**
* When set to true, it enables DLQ behavior for the consumer.
* By default, messages that result in errors are forwarded to a topic named error.name-of-destination.name-of-group.
* The DLQ topic name can be configurable by setting the dlqName property.
*/
private boolean enableDlq;

/**
* The name of the DLQ topic to receive the error messages.
*/
private String dlqName;

/**
* Number of partitions to use on the DLQ.
*/
private Integer dlqPartitions;

/**
* Using this, DLQ-specific producer properties can be set.
* All the properties available through kafka producer properties can be set through this property.
*/
private KafkaProducerProperties dlqProducerProperties = new KafkaProducerProperties();

/**
* @deprecated No longer used by the binder.
*/
@Deprecated
private int recoveryInterval = 5000;

/**
* List of trusted packages to provide the header mapper.
*/
private String[] trustedPackages;

/**
* Indicates which standard headers are populated by the inbound channel adapter.
* Allowed values: none, id, timestamp, or both.
*/
private StandardHeaders standardHeaders = StandardHeaders.none;

/**
* The name of a bean that implements RecordMessageConverter.
*/
private String converterBeanName;

/**
* The interval, in milliseconds, between events indicating that no messages have recently been received.
*/
private long idleEventInterval = 30_000;

/**
* When true, the destination is treated as a regular expression Pattern used to match topic names by the broker.
*/
private boolean destinationIsPattern;

/**
* Map with a key/value pair containing generic Kafka consumer properties.
* In addition to having Kafka consumer properties, other configuration properties can be passed here.
*/
private Map<String, String> configuration = new HashMap<>();

/**
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
*/
private KafkaTopicProperties topic = new KafkaTopicProperties();

/**
* Timeout used for polling in pollable consumers.
*/
private long pollTimeout = org.springframework.kafka.listener.ConsumerProperties.DEFAULT_POLL_TIMEOUT;

/**
* Transaction manager bean name - overrides the binder's transaction configuration.
*/
private String transactionManager;

/**
* @return if each record needs to be acknowledged.
*
* When true the offset is committed after each record, otherwise the offsets for the complete set of records
* received from the poll() are committed after all records have been processed.
*/
public boolean isAckEachRecord() {
return this.ackEachRecord;
}

public void setAckEachRecord(boolean ackEachRecord) {
this.ackEachRecord = ackEachRecord;
}

/**
* @return is autocommit offset enabled
*
* Whether to autocommit offsets when a message has been processed.
* If set to false, a header with the key kafka_acknowledgment of the type org.springframework.kafka.support.Acknowledgment header
* is present in the inbound message. Applications may use this header for acknowledging messages.
*/
public boolean isAutoCommitOffset() {
return this.autoCommitOffset;
}

public void setAutoCommitOffset(boolean autoCommitOffset) {
this.autoCommitOffset = autoCommitOffset;
}

/**
* @return start offset
*
* The starting offset for new groups. Allowed values: earliest and latest.
*/
public StartOffset getStartOffset() {
return this.startOffset;
}

public void setStartOffset(StartOffset startOffset) {
this.startOffset = startOffset;
}

/**
* @return if resetting offset is enabled
*
* Whether to reset offsets on the consumer to the value provided by startOffset.
* Must be false if a KafkaRebalanceListener is provided.
*/
public boolean isResetOffsets() {
return this.resetOffsets;
}

public void setResetOffsets(boolean resetOffsets) {
this.resetOffsets = resetOffsets;
}

/**
* @return is DLQ enabled.
*
* When set to true, it enables DLQ behavior for the consumer.
* By default, messages that result in errors are forwarded to a topic named error.name-of-destination.name-of-group.
* The DLQ topic name can be configurable by setting the dlqName property.
*/
public boolean isEnableDlq() {
return this.enableDlq;
}

public void setEnableDlq(boolean enableDlq) {
this.enableDlq = enableDlq;
}

/**
* @return is autocommit on error
*
* Effective only if autoCommitOffset is set to true.
* If set to false, it suppresses auto-commits for messages that result in errors and commits only for successful messages.
* It allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
* If set to true, it always auto-commits (if auto-commit is enabled).
* If not set (the default), it effectively has the same value as enableDlq,
* auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
*/
public Boolean getAutoCommitOnError() {
return this.autoCommitOnError;
}

public void setAutoCommitOnError(Boolean autoCommitOnError) {
this.autoCommitOnError = autoCommitOnError;
}

/**
* No longer used.
* @return the interval.
* @deprecated No longer used by the binder
*/
@Deprecated
public int getRecoveryInterval() {
return this.recoveryInterval;
}

/**
* No longer used.
* @param recoveryInterval the interval.
* @deprecated No longer needed by the binder
*/
@Deprecated
public void setRecoveryInterval(int recoveryInterval) {
this.recoveryInterval = recoveryInterval;
}

/**
* @return is auto rebalance enabled
*
* When true, topic partitions is automatically rebalanced between the members of a consumer group.
* When false, each consumer is assigned a fixed set of partitions based on spring.cloud.stream.instanceCount and spring.cloud.stream.instanceIndex.
*/
public boolean isAutoRebalanceEnabled() {
return this.autoRebalanceEnabled;
}

public void setAutoRebalanceEnabled(boolean autoRebalanceEnabled) {
this.autoRebalanceEnabled = autoRebalanceEnabled;
}

/**
* @return a map of configuration
*
* Map with a key/value pair containing generic Kafka consumer properties.
* In addition to having Kafka consumer properties, other configuration properties can be passed here.
*/
public Map<String, String> getConfiguration() {
return this.configuration;
}
@@ -337,149 +123,11 @@ public class KafkaConsumerProperties {
this.configuration = configuration;
}

/**
* @return dlq name
*
* The name of the DLQ topic to receive the error messages.
*/
public String getDlqName() {
return this.dlqName;
return dlqName;
}

public void setDlqName(String dlqName) {
this.dlqName = dlqName;
}

/**
* @return number of partitions on the DLQ topic
*
* Number of partitions to use on the DLQ.
*/
public Integer getDlqPartitions() {
return this.dlqPartitions;
}

public void setDlqPartitions(Integer dlqPartitions) {
this.dlqPartitions = dlqPartitions;
}

/**
* @return trusted packages
*
* List of trusted packages to provide the header mapper.
*/
public String[] getTrustedPackages() {
return this.trustedPackages;
}

public void setTrustedPackages(String[] trustedPackages) {
this.trustedPackages = trustedPackages;
}

/**
* @return dlq producer properties
*
* Using this, DLQ-specific producer properties can be set.
* All the properties available through kafka producer properties can be set through this property.
*/
public KafkaProducerProperties getDlqProducerProperties() {
return this.dlqProducerProperties;
}

public void setDlqProducerProperties(KafkaProducerProperties dlqProducerProperties) {
this.dlqProducerProperties = dlqProducerProperties;
}

/**
* @return standard headers
*
* Indicates which standard headers are populated by the inbound channel adapter.
* Allowed values: none, id, timestamp, or both.
*/
public StandardHeaders getStandardHeaders() {
return this.standardHeaders;
}

public void setStandardHeaders(StandardHeaders standardHeaders) {
this.standardHeaders = standardHeaders;
}

/**
* @return converter bean name
*
* The name of a bean that implements RecordMessageConverter.
*/
public String getConverterBeanName() {
return this.converterBeanName;
}

public void setConverterBeanName(String converterBeanName) {
this.converterBeanName = converterBeanName;
}

/**
* @return idle event interval
*
* The interval, in milliseconds, between events indicating that no messages have recently been received.
*/
public long getIdleEventInterval() {
return this.idleEventInterval;
}

public void setIdleEventInterval(long idleEventInterval) {
this.idleEventInterval = idleEventInterval;
}

/**
* @return is destination given through a pattern
*
* When true, the destination is treated as a regular expression Pattern used to match topic names by the broker.
*/
public boolean isDestinationIsPattern() {
return this.destinationIsPattern;
}

public void setDestinationIsPattern(boolean destinationIsPattern) {
this.destinationIsPattern = destinationIsPattern;
}

/**
* @return topic properties
*
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
*/
public KafkaTopicProperties getTopic() {
return this.topic;
}

public void setTopic(KafkaTopicProperties topic) {
this.topic = topic;
}

/**
* @return timeout in pollable consumers
*
* Timeout used for polling in pollable consumers.
*/
public long getPollTimeout() {
return this.pollTimeout;
}

public void setPollTimeout(long pollTimeout) {
this.pollTimeout = pollTimeout;
}

/**
* @return the transaction manager bean name.
*
* Transaction manager bean name (must be {@code KafkaAwareTransactionManager}.
*/
public String getTransactionManager() {
return this.transactionManager;
}

public void setTransactionManager(String transactionManager) {
this.transactionManager = transactionManager;
}

}

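A short sketch exercising the consumer properties introduced above; it assumes only the setters visible in this hunk, and all values are illustrative:

    KafkaConsumerProperties consumer = new KafkaConsumerProperties();
    consumer.setStartOffset(KafkaConsumerProperties.StartOffset.earliest);
    consumer.setAckEachRecord(true);   // commit after every record, not per poll()
    consumer.setEnableDlq(true);       // failed records go to a dead-letter topic
    consumer.setDlqPartitions(1);      // single-partition DLQ, per the new field
    // With no dlqName set, the binder derives error.<destination>.<group>,
    // as the javadoc above states.
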
@@ -1,11 +1,11 @@
/*
* Copyright 2016-2018 the original author or authors.
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,40 +16,46 @@

package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.HashMap;
import java.util.Map;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.cloud.stream.binder.AbstractExtendedBindingProperties;
import org.springframework.cloud.stream.binder.BinderSpecificPropertiesProvider;
import org.springframework.cloud.stream.binder.ExtendedBindingProperties;

/**
* Kafka specific extended binding properties class that extends from
* {@link AbstractExtendedBindingProperties}.
*
* @author Marius Bogoevici
* @author Gary Russell
* @author Soby Chacko
* @author Oleg Zhurakousky
*/
@ConfigurationProperties("spring.cloud.stream.kafka")
public class KafkaExtendedBindingProperties extends
AbstractExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties, KafkaBindingProperties> {
public class KafkaExtendedBindingProperties
implements ExtendedBindingProperties<KafkaConsumerProperties, KafkaProducerProperties> {

private static final String DEFAULTS_PREFIX = "spring.cloud.stream.kafka.default";
private Map<String, KafkaBindingProperties> bindings = new HashMap<>();

@Override
public String getDefaultsPrefix() {
return DEFAULTS_PREFIX;
}

@Override
public Map<String, KafkaBindingProperties> getBindings() {
return this.doGetBindings();
return this.bindings;
}

public void setBindings(Map<String, KafkaBindingProperties> bindings) {
this.bindings = bindings;
}

@Override
public Class<? extends BinderSpecificPropertiesProvider> getExtendedPropertiesEntryClass() {
return KafkaBindingProperties.class;
public KafkaConsumerProperties getExtendedConsumerProperties(String channelName) {
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getConsumer() != null) {
return this.bindings.get(channelName).getConsumer();
}
else {
return new KafkaConsumerProperties();
}
}

@Override
public KafkaProducerProperties getExtendedProducerProperties(String channelName) {
if (this.bindings.containsKey(channelName) && this.bindings.get(channelName).getProducer() != null) {
return this.bindings.get(channelName).getProducer();
}
else {
return new KafkaProducerProperties();
}
}
}

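The pre-AbstractExtendedBindingProperties implementation shown above falls back to fresh defaults when a channel has no binding entry; a sketch of that contract (the channel name is illustrative):

    KafkaExtendedBindingProperties extended = new KafkaExtendedBindingProperties();
    // "input" has no entry in the bindings map, so a default instance comes
    // back instead of null; callers never need a null check.
    KafkaConsumerProperties defaults = extended.getExtendedConsumerProperties("input");
    boolean autoCommit = defaults.isAutoCommitOffset();   // true, the field default
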
@@ -1,11 +1,11 @@
/*
* Copyright 2016-2018 the original author or authors.
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -16,100 +16,31 @@

package org.springframework.cloud.stream.binder.kafka.properties;

import org.springframework.expression.Expression;

import java.util.HashMap;
import java.util.Map;

import javax.validation.constraints.NotNull;

import org.springframework.expression.Expression;

/**
* Extended producer properties for Kafka binder.
*
* @author Marius Bogoevici
* @author Henryk Konsek
* @author Gary Russell
* @author Aldo Sinanaj
*/
public class KafkaProducerProperties {

/**
* Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
*/
private int bufferSize = 16384;

/**
* Set the compression.type producer property. Supported values are none, gzip, snappy and lz4.
* See {@link CompressionType} for more details.
*/
private CompressionType compressionType = CompressionType.none;

/**
* Whether the producer is synchronous.
*/
private boolean sync;

/**
* A SpEL expression evaluated against the outgoing message used to evaluate the time to wait
* for ack when synchronous publish is enabled.
*/
private Expression sendTimeoutExpression;

/**
* How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
*/
private int batchTimeout;

/**
* A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
*/
private Expression messageKeyExpression;

/**
* A comma-delimited list of simple patterns to match Spring messaging headers
* to be mapped to the Kafka Headers in the ProducerRecord.
*/
private String[] headerPatterns;

/**
* Map with a key/value pair containing generic Kafka producer properties.
*/
private Map<String, String> configuration = new HashMap<>();

/**
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
*/
private KafkaTopicProperties topic = new KafkaTopicProperties();

/**
* Set to true to override the default binding destination (topic name) with the value of the
* KafkaHeaders.TOPIC message header in the outbound message. If the header is not present,
* the default binding destination is used.
*/
private boolean useTopicHeader;

/**
* The bean name of a MessageChannel to which successful send results should be sent;
* the bean must exist in the application context.
*/
private String recordMetadataChannel;

/**
* Transaction manager bean name - overrides the binder's transaction configuration.
*/
private String transactionManager;

/*
* Timeout value in seconds for the duration to wait when closing the producer.
* If not set this defaults to 30 seconds.
*/
private int closeTimeout;

/**
* @return buffer size
*
* Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
*/
public int getBufferSize() {
return this.bufferSize;
}
@@ -118,12 +49,6 @@ public class KafkaProducerProperties {
this.bufferSize = bufferSize;
}

/**
* @return compression type {@link CompressionType}
*
* Set the compression.type producer property. Supported values are none, gzip, snappy and lz4.
* See {@link CompressionType} for more details.
*/
@NotNull
public CompressionType getCompressionType() {
return this.compressionType;
@@ -133,11 +58,6 @@ public class KafkaProducerProperties {
this.compressionType = compressionType;
}

/**
* @return if synchronous sending is enabled
*
* Whether the producer is synchronous.
*/
public boolean isSync() {
return this.sync;
}
@@ -146,25 +66,6 @@ public class KafkaProducerProperties {
this.sync = sync;
}

/**
* @return timeout expression for send
*
* A SpEL expression evaluated against the outgoing message used to evaluate the time to wait
* for ack when synchronous publish is enabled.
*/
public Expression getSendTimeoutExpression() {
return this.sendTimeoutExpression;
}

public void setSendTimeoutExpression(Expression sendTimeoutExpression) {
this.sendTimeoutExpression = sendTimeoutExpression;
}

/**
* @return batch timeout
*
* How long the producer waits to allow more messages to accumulate in the same batch before sending the messages.
*/
public int getBatchTimeout() {
return this.batchTimeout;
}
@@ -173,38 +74,14 @@ public class KafkaProducerProperties {
this.batchTimeout = batchTimeout;
}

/**
* @return message key expression
*
* A SpEL expression evaluated against the outgoing message used to populate the key of the produced Kafka message.
*/
public Expression getMessageKeyExpression() {
return this.messageKeyExpression;
return messageKeyExpression;
}

public void setMessageKeyExpression(Expression messageKeyExpression) {
this.messageKeyExpression = messageKeyExpression;
}

/**
* @return header patterns
*
* A comma-delimited list of simple patterns to match Spring messaging headers
* to be mapped to the Kafka Headers in the ProducerRecord.
*/
public String[] getHeaderPatterns() {
return this.headerPatterns;
}

public void setHeaderPatterns(String[] headerPatterns) {
this.headerPatterns = headerPatterns;
}

/**
* @return map of configuration
*
* Map with a key/value pair containing generic Kafka producer properties.
*/
public Map<String, String> getConfiguration() {
return this.configuration;
}
@@ -213,103 +90,9 @@ public class KafkaProducerProperties {
this.configuration = configuration;
}

/**
* @return topic properties
*
* Various topic level properties. @see {@link KafkaTopicProperties} for more details.
*/
public KafkaTopicProperties getTopic() {
return this.topic;
}

public void setTopic(KafkaTopicProperties topic) {
this.topic = topic;
}

/**
* @return if using topic header
*
* Set to true to override the default binding destination (topic name) with the value of the
* KafkaHeaders.TOPIC message header in the outbound message. If the header is not present,
* the default binding destination is used.
*/
public boolean isUseTopicHeader() {
return this.useTopicHeader;
}

public void setUseTopicHeader(boolean useTopicHeader) {
this.useTopicHeader = useTopicHeader;
}

/**
* @return record metadata channel
*
* The bean name of a MessageChannel to which successful send results should be sent;
* the bean must exist in the application context.
*/
public String getRecordMetadataChannel() {
return this.recordMetadataChannel;
}

public void setRecordMetadataChannel(String recordMetadataChannel) {
this.recordMetadataChannel = recordMetadataChannel;
}

/**
* @return the transaction manager bean name.
*
* Transaction manager bean name (must be {@code KafkaAwareTransactionManager}.
*/
public String getTransactionManager() {
return this.transactionManager;
}

public void setTransactionManager(String transactionManager) {
this.transactionManager = transactionManager;
}

/*
* @return timeout in seconds for closing the producer
*/
public int getCloseTimeout() {
return this.closeTimeout;
}

public void setCloseTimeout(int closeTimeout) {
this.closeTimeout = closeTimeout;
}

/**
* Enumeration for compression types.
*/
public enum CompressionType {

/**
* No compression.
*/
none,

/**
* gzip based compression.
*/
gzip,

/**
* snappy based compression.
*/
snappy,

/**
* lz4 compression.
*/
lz4,

// /** // TODO: uncomment and fix docs when kafka-clients 2.1.0 or newer is the
// default
// * zstd compression
// */
// zstd

snappy
}

}

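A sketch wiring the producer properties above; note that lz4 exists only on the newer side of this hunk, and the SpEL key expression is an illustrative assumption, not taken from this changeset:

    KafkaProducerProperties producer = new KafkaProducerProperties();
    producer.setCompressionType(KafkaProducerProperties.CompressionType.lz4);
    producer.setBufferSize(32768);      // batch up to 32 KB before sending
    producer.setBatchTimeout(50);       // or flush after 50 ms of accumulation
    producer.setMessageKeyExpression(
            new org.springframework.expression.spel.standard.SpelExpressionParser()
                    .parseExpression("headers['partitionKey']"));
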
@@ -1,62 +0,0 @@
/*
* Copyright 2019-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
* Properties for configuring topics.
*
* @author Aldo Sinanaj
* @since 2.2
*
*/
public class KafkaTopicProperties {

private Short replicationFactor;

private Map<Integer, List<Integer>> replicasAssignments = new HashMap<>();

private Map<String, String> properties = new HashMap<>();

public Short getReplicationFactor() {
return replicationFactor;
}

public void setReplicationFactor(Short replicationFactor) {
this.replicationFactor = replicationFactor;
}

public Map<Integer, List<Integer>> getReplicasAssignments() {
return replicasAssignments;
}

public void setReplicasAssignments(Map<Integer, List<Integer>> replicasAssignments) {
this.replicasAssignments = replicasAssignments;
}

public Map<String, String> getProperties() {
return properties;
}

public void setProperties(Map<String, String> properties) {
this.properties = properties;
}

}
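A sketch of the topic-level settings this new class carries; the broker ids and retention value are illustrative:

    KafkaTopicProperties topic = new KafkaTopicProperties();
    topic.setReplicationFactor((short) 3);
    // Pin partition 0 to brokers 0 and 1; when this map is non-empty the
    // provisioner below prefers it over the plain replication factor.
    topic.getReplicasAssignments().put(0, java.util.Arrays.asList(0, 1));
    topic.getProperties().put("retention.ms", "172800000");   // two days
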
@@ -1,11 +1,11 @@
/*
* Copyright 2014-2018 the original author or authors.
* Copyright 2014-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,101 +17,64 @@
package org.springframework.cloud.stream.binder.kafka.provisioning;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import kafka.common.ErrorMapping;
import kafka.utils.ZkUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.ListTopicsResult;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.errors.TopicExistsException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.security.JaasUtils;

import org.springframework.beans.factory.InitializingBean;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.BinderException;
import org.springframework.cloud.stream.binder.ExtendedConsumerProperties;
import org.springframework.cloud.stream.binder.ExtendedProducerProperties;
import org.springframework.cloud.stream.binder.kafka.admin.AdminUtilsOperation;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaConsumerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaProducerProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaTopicProperties;
import org.springframework.cloud.stream.binder.kafka.utils.KafkaTopicUtils;
import org.springframework.cloud.stream.provisioning.ConsumerDestination;
import org.springframework.cloud.stream.provisioning.ProducerDestination;
import org.springframework.cloud.stream.provisioning.ProvisioningException;
import org.springframework.cloud.stream.provisioning.ProvisioningProvider;
import org.springframework.retry.RetryCallback;
import org.springframework.retry.RetryContext;
import org.springframework.retry.RetryOperations;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;

/**
* Kafka implementation for {@link ProvisioningProvider}.
* Kafka implementation for {@link ProvisioningProvider}
*
* @author Soby Chacko
* @author Gary Russell
* @author Ilayaperumal Gopinathan
* @author Simon Flandergan
* @author Oleg Zhurakousky
* @author Aldo Sinanaj
*/
public class KafkaTopicProvisioner implements
// @checkstyle:off
ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>, ExtendedProducerProperties<KafkaProducerProperties>>,
// @checkstyle:on
InitializingBean {
public class KafkaTopicProvisioner implements ProvisioningProvider<ExtendedConsumerProperties<KafkaConsumerProperties>,
ExtendedProducerProperties<KafkaProducerProperties>>, InitializingBean {

private static final Log logger = LogFactory.getLog(KafkaTopicProvisioner.class);

private static final int DEFAULT_OPERATION_TIMEOUT = 30;
private final Log logger = LogFactory.getLog(getClass());

private final KafkaBinderConfigurationProperties configurationProperties;

private final int operationTimeout = DEFAULT_OPERATION_TIMEOUT;

private final Map<String, Object> adminClientProperties;
private final AdminUtilsOperation adminUtilsOperation;

private RetryOperations metadataRetryOperations;

/**
* Create an instance.
* @param kafkaBinderConfigurationProperties the binder configuration properties.
* @param kafkaProperties the boot Kafka properties used to build the
* {@link AdminClient}.
*/
public KafkaTopicProvisioner(
KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
KafkaProperties kafkaProperties) {
Assert.isTrue(kafkaProperties != null, "KafkaProperties cannot be null");
this.adminClientProperties = kafkaProperties.buildAdminProperties();
public KafkaTopicProvisioner(KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties,
AdminUtilsOperation adminUtilsOperation) {
this.configurationProperties = kafkaBinderConfigurationProperties;
normalalizeBootPropsWithBinder(this.adminClientProperties, kafkaProperties,
kafkaBinderConfigurationProperties);
this.adminUtilsOperation = adminUtilsOperation;
}

/**
* Mutator for metadata retry operations.
* @param metadataRetryOperations the retry configuration
*/
public void setMetadataRetryOperations(RetryOperations metadataRetryOperations) {
@@ -119,7 +82,7 @@ public class KafkaTopicProvisioner implements
}

@Override
public void afterPropertiesSet() {
public void afterPropertiesSet() throws Exception {
if (this.metadataRetryOperations == null) {
RetryTemplate retryTemplate = new RetryTemplate();

@@ -137,73 +100,27 @@ public class KafkaTopicProvisioner implements
}

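The afterPropertiesSet() hunk is cut off right after the RetryTemplate is created; given the ExponentialBackOffPolicy and SimpleRetryPolicy imports above, a plausible completion looks like the sketch below (the intervals and attempt count are assumptions, not values from this diff):

    RetryTemplate retryTemplate = new RetryTemplate();

    ExponentialBackOffPolicy backOff = new ExponentialBackOffPolicy();
    backOff.setInitialInterval(100);   // first retry after 100 ms (assumed)
    backOff.setMultiplier(2);          // double the delay on each attempt
    backOff.setMaxInterval(1000);      // never wait longer than 1 s (assumed)
    retryTemplate.setBackOffPolicy(backOff);

    retryTemplate.setRetryPolicy(new SimpleRetryPolicy(10));   // up to 10 tries
    this.metadataRetryOperations = retryTemplate;
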
@Override
public ProducerDestination provisionProducerDestination(final String name,
ExtendedProducerProperties<KafkaProducerProperties> properties) {

if (logger.isInfoEnabled()) {
logger.info("Using kafka topic for outbound: " + name);
public ProducerDestination provisionProducerDestination(final String name, ExtendedProducerProperties<KafkaProducerProperties> properties) {
if (this.logger.isInfoEnabled()) {
this.logger.info("Using kafka topic for outbound: " + name);
}
KafkaTopicUtils.validateTopicName(name);
try (AdminClient adminClient = AdminClient.create(this.adminClientProperties)) {
createTopic(adminClient, name, properties.getPartitionCount(), false,
properties.getExtension().getTopic());
int partitions = 0;
Map<String, TopicDescription> topicDescriptions = new HashMap<>();
if (this.configurationProperties.isAutoCreateTopics()) {
this.metadataRetryOperations.execute(context -> {
try {
if (logger.isDebugEnabled()) {
logger.debug("Attempting to retrieve the description for the topic: " + name);
}
DescribeTopicsResult describeTopicsResult = adminClient
.describeTopics(Collections.singletonList(name));
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
.all();
topicDescriptions.putAll(all.get(this.operationTimeout, TimeUnit.SECONDS));
}
catch (Exception ex) {
throw new ProvisioningException("Problems encountered with partitions finding", ex);
}
return null;
});
}
TopicDescription topicDescription = topicDescriptions.get(name);
if (topicDescription != null) {
partitions = topicDescription.partitions().size();
}
createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, properties.getPartitionCount(), false);
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
JaasUtils.isZkSecurityEnabled());
int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
return new KafkaProducerDestination(name, partitions);
}
else {
return new KafkaProducerDestination(name);
}
}

@Override
public ConsumerDestination provisionConsumerDestination(final String name,
final String group,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
if (!properties.isMultiplex()) {
return doProvisionConsumerDestination(name, group, properties);
}
else {
String[] destinations = StringUtils.commaDelimitedListToStringArray(name);
for (String destination : destinations) {
doProvisionConsumerDestination(destination.trim(), group, properties);
}
return new KafkaConsumerDestination(name);
}
}

private ConsumerDestination doProvisionConsumerDestination(final String name,
final String group,
ExtendedConsumerProperties<KafkaConsumerProperties> properties) {

if (properties.getExtension().isDestinationIsPattern()) {
Assert.isTrue(!properties.getExtension().isEnableDlq(),
"enableDLQ is not allowed when listening to topic patterns");
if (logger.isDebugEnabled()) {
logger.debug("Listening to a topic pattern - " + name
+ " - no provisioning performed");
}
return new KafkaConsumerDestination(name);
}
public ConsumerDestination provisionConsumerDestination(final String name, final String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
KafkaTopicUtils.validateTopicName(name);
boolean anonymous = !StringUtils.hasText(group);
Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
@@ -212,327 +129,146 @@ public class KafkaTopicProvisioner implements
throw new IllegalArgumentException("Instance count cannot be zero");
}
int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
ConsumerDestination consumerDestination = new KafkaConsumerDestination(name);
try (AdminClient adminClient = createAdminClient()) {
createTopic(adminClient, name, partitionCount,
properties.getExtension().isAutoRebalanceEnabled(),
properties.getExtension().getTopic());
if (this.configurationProperties.isAutoCreateTopics()) {
DescribeTopicsResult describeTopicsResult = adminClient
.describeTopics(Collections.singletonList(name));
KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
.all();
try {
Map<String, TopicDescription> topicDescriptions = all
.get(this.operationTimeout, TimeUnit.SECONDS);
TopicDescription topicDescription = topicDescriptions.get(name);
int partitions = topicDescription.partitions().size();
consumerDestination = createDlqIfNeedBe(adminClient, name, group,
properties, anonymous, partitions);
if (consumerDestination == null) {
consumerDestination = new KafkaConsumerDestination(name,
partitions);
}
}
catch (Exception ex) {
throw new ProvisioningException("provisioning exception", ex);
}
createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(name, partitionCount, properties.getExtension().isAutoRebalanceEnabled());
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
this.configurationProperties.getZkSessionTimeout(),
this.configurationProperties.getZkConnectionTimeout(),
JaasUtils.isZkSecurityEnabled());
int partitions = adminUtilsOperation.partitionSize(name, zkUtils);
if (properties.getExtension().isEnableDlq() && !anonymous) {
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName()) ?
properties.getExtension().getDlqName() : "error." + name + "." + group;
createTopicAndPartitions(dlqTopic, partitions, properties.getExtension().isAutoRebalanceEnabled());
return new KafkaConsumerDestination(name, partitions, dlqTopic);
}
return new KafkaConsumerDestination(name, partitions);
}
return consumerDestination;
return new KafkaConsumerDestination(name);
}

AdminClient createAdminClient() {
return AdminClient.create(this.adminClientProperties);
}

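Both versions of the consumer provisioning above derive the dead-letter topic the same way: an explicit dlqName wins, otherwise the binder builds error.<destination>.<group>. A condensed sketch of that rule, with illustrative names:

    String name = "orders";            // destination topic (illustrative)
    String group = "billing";          // consumer group (illustrative)
    String configuredDlqName = null;   // no explicit dlqName override set

    String dlqTopic = org.springframework.util.StringUtils.hasText(configuredDlqName)
            ? configuredDlqName
            : "error." + name + "." + group;   // -> "error.orders.billing"
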
/**
* In general, binder properties supersede boot kafka properties. The one exception is
* the bootstrap servers. In that case, we should only override the boot properties if
* (there is a binder property AND it is a non-default value) OR (if there is no boot
* property); this is needed because the binder property never returns a null value.
* @param adminProps the admin properties to normalize.
* @param bootProps the boot kafka properties.
* @param binderProps the binder kafka properties.
*/
public static void normalalizeBootPropsWithBinder(Map<String, Object> adminProps,
KafkaProperties bootProps, KafkaBinderConfigurationProperties binderProps) {
// First deal with the outlier
String kafkaConnectionString = binderProps.getKafkaConnectionString();
if (ObjectUtils
.isEmpty(adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG))
|| !kafkaConnectionString
.equals(binderProps.getDefaultKafkaConnectionString())) {
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
kafkaConnectionString);
private void createTopicsIfAutoCreateEnabledAndAdminUtilsPresent(final String topicName, final int partitionCount,
boolean tolerateLowerPartitionsOnBroker) {
if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation != null) {
createTopicAndPartitions(topicName, partitionCount, tolerateLowerPartitionsOnBroker);
}
// Now override any boot values with binder values
Map<String, String> binderProperties = binderProps.getConfiguration();
Set<String> adminConfigNames = AdminClientConfig.configNames();
binderProperties.forEach((key, value) -> {
if (key.equals(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)) {
throw new IllegalStateException(
"Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
}
if (adminConfigNames.contains(key)) {
Object replaced = adminProps.put(key, value);
if (replaced != null && KafkaTopicProvisioner.logger.isDebugEnabled()) {
KafkaTopicProvisioner.logger.debug("Overrode boot property: [" + key + "], from: ["
+ replaced + "] to: [" + value + "]");
}
}
});
}

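The precedence rule that normalalizeBootPropsWithBinder documents above can be hard to parse; a condensed sketch of the bootstrap-servers special case (the maps and values here are illustrative, not from this changeset):

    Map<String, Object> adminProps = new HashMap<>();   // built from boot properties
    String binderBrokers = "broker1:9092";              // binder 'brokers' setting
    String binderDefaultBrokers = "localhost:9092";     // the binder's default value

    // Boot wins for bootstrap.servers unless boot supplied nothing or the
    // binder value was explicitly changed away from its default.
    if (adminProps.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG) == null
            || !binderBrokers.equals(binderDefaultBrokers)) {
        adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, binderBrokers);
    }
    // Every other recognized admin key from the binder configuration map then
    // overwrites the boot value unconditionally.
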
private ConsumerDestination createDlqIfNeedBe(AdminClient adminClient, String name,
|
||||
String group, ExtendedConsumerProperties<KafkaConsumerProperties> properties,
|
||||
boolean anonymous, int partitions) {
|
||||
|
||||
if (properties.getExtension().isEnableDlq() && !anonymous) {
|
||||
String dlqTopic = StringUtils.hasText(properties.getExtension().getDlqName())
|
||||
? properties.getExtension().getDlqName()
|
||||
: "error." + name + "." + group;
|
||||
int dlqPartitions = properties.getExtension().getDlqPartitions() == null
|
||||
? partitions
|
||||
: properties.getExtension().getDlqPartitions();
|
||||
try {
|
||||
createTopicAndPartitions(adminClient, dlqTopic, dlqPartitions,
|
||||
properties.getExtension().isAutoRebalanceEnabled(),
|
||||
properties.getExtension().getTopic());
|
||||
}
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException("provisioning exception", throwable);
|
||||
}
|
||||
}
|
||||
return new KafkaConsumerDestination(name, partitions, dlqTopic);
|
||||
else if (this.configurationProperties.isAutoCreateTopics() && adminUtilsOperation == null) {
|
||||
this.logger.warn("Auto creation of topics is enabled, but Kafka AdminUtils class is not present on the classpath. " +
|
||||
"No topic will be created by the binder");
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private void createTopic(AdminClient adminClient, String name, int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker, KafkaTopicProperties properties) {
|
||||
try {
|
||||
createTopicIfNecessary(adminClient, name, partitionCount,
|
||||
tolerateLowerPartitionsOnBroker, properties);
|
||||
}
|
||||
// TODO: Remove catching Throwable. See this thread:
|
||||
// TODO:
|
||||
// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/pull/514#discussion_r241075940
|
||||
catch (Throwable throwable) {
|
||||
if (throwable instanceof Error) {
|
||||
throw (Error) throwable;
|
||||
}
|
||||
else {
|
||||
// TODO:
|
||||
// https://github.com/spring-cloud/spring-cloud-stream-binder-kafka/pull/514#discussion_r241075940
|
||||
throw new ProvisioningException("Provisioning exception", throwable);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void createTopicIfNecessary(AdminClient adminClient, final String topicName,
|
||||
final int partitionCount, boolean tolerateLowerPartitionsOnBroker,
|
||||
KafkaTopicProperties properties) throws Throwable {
|
||||
|
||||
if (this.configurationProperties.isAutoCreateTopics()) {
|
||||
createTopicAndPartitions(adminClient, topicName, partitionCount,
|
||||
tolerateLowerPartitionsOnBroker, properties);
|
||||
}
|
||||
else {
|
||||
logger.info("Auto creation of topics is disabled.");
|
||||
else if (!this.configurationProperties.isAutoCreateTopics()) {
|
||||
this.logger.info("Auto creation of topics is disabled.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Kafka topic if needed, or try to increase its partition count to the
|
||||
* desired number.
|
||||
* @param adminClient kafka admin client
|
||||
* @param topicName topic name
|
||||
* @param partitionCount partition count
|
||||
* @param tolerateLowerPartitionsOnBroker whether lower partitions count on broker is
|
||||
* tolerated ot not
|
||||
* @param topicProperties kafka topic properties
|
||||
* @throws Throwable from topic creation
|
||||
*/
|
||||
private void createTopicAndPartitions(AdminClient adminClient, final String topicName,
|
||||
final int partitionCount, boolean tolerateLowerPartitionsOnBroker,
|
||||
KafkaTopicProperties topicProperties) throws Throwable {
|
||||
private void createTopicAndPartitions(final String topicName, final int partitionCount,
|
||||
boolean tolerateLowerPartitionsOnBroker) {
|
||||
|
||||
ListTopicsResult listTopicsResult = adminClient.listTopics();
|
||||
KafkaFuture<Set<String>> namesFutures = listTopicsResult.names();
|
||||
final ZkUtils zkUtils = ZkUtils.apply(this.configurationProperties.getZkConnectionString(),
|
||||
this.configurationProperties.getZkSessionTimeout(),
|
||||
this.configurationProperties.getZkConnectionTimeout(),
|
||||
JaasUtils.isZkSecurityEnabled());
|
||||
try {
|
||||
short errorCode = adminUtilsOperation.errorCodeFromTopicMetadata(topicName, zkUtils);
|
||||
if (errorCode == ErrorMapping.NoError()) {
|
||||
// only consider minPartitionCount for resizing if autoAddPartitions is true
|
||||
int effectivePartitionCount = this.configurationProperties.isAutoAddPartitions()
|
||||
? Math.max(this.configurationProperties.getMinPartitionCount(), partitionCount)
|
||||
: partitionCount;
|
||||
int partitionSize = adminUtilsOperation.partitionSize(topicName, zkUtils);
|
||||
|
||||
Set<String> names = namesFutures.get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
if (names.contains(topicName)) {
|
||||
// only consider minPartitionCount for resizing if autoAddPartitions is true
|
||||
int effectivePartitionCount = this.configurationProperties
|
||||
.isAutoAddPartitions()
|
||||
? Math.max(
|
||||
this.configurationProperties.getMinPartitionCount(),
|
||||
partitionCount)
|
||||
: partitionCount;
|
||||
DescribeTopicsResult describeTopicsResult = adminClient
|
||||
.describeTopics(Collections.singletonList(topicName));
|
||||
KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = describeTopicsResult
|
||||
.all();
|
||||
Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture
|
||||
.get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
TopicDescription topicDescription = topicDescriptions.get(topicName);
|
||||
int partitionSize = topicDescription.partitions().size();
|
||||
if (partitionSize < effectivePartitionCount) {
|
||||
if (this.configurationProperties.isAutoAddPartitions()) {
|
||||
CreatePartitionsResult partitions = adminClient
|
||||
.createPartitions(Collections.singletonMap(topicName,
|
||||
NewPartitions.increaseTo(effectivePartitionCount)));
|
||||
partitions.all().get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
else if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: "
|
||||
+ partitionCount + ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ")
|
||||
+ "been found instead." + "There will be "
|
||||
+ (effectivePartitionCount - partitionSize)
|
||||
+ " idle consumers");
|
||||
}
|
||||
else {
|
||||
throw new ProvisioningException(
|
||||
"The number of expected partitions was: " + partitionCount
|
||||
+ ", but " + partitionSize
|
||||
+ (partitionSize > 1 ? " have " : " has ")
|
||||
+ "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling "
|
||||
+ "`autoAddPartitions`");
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
// always consider minPartitionCount for topic creation
|
||||
final int effectivePartitionCount = Math.max(
|
||||
this.configurationProperties.getMinPartitionCount(), partitionCount);
|
||||
this.metadataRetryOperations.execute((context) -> {
|
||||
|
||||
NewTopic newTopic;
|
||||
Map<Integer, List<Integer>> replicasAssignments = topicProperties
|
||||
.getReplicasAssignments();
|
||||
if (replicasAssignments != null && replicasAssignments.size() > 0) {
|
||||
newTopic = new NewTopic(topicName,
|
||||
topicProperties.getReplicasAssignments());
|
||||
}
|
||||
else {
|
||||
newTopic = new NewTopic(topicName, effectivePartitionCount,
|
||||
topicProperties.getReplicationFactor() != null
|
||||
? topicProperties.getReplicationFactor()
|
||||
: this.configurationProperties
|
||||
.getReplicationFactor());
|
||||
}
|
||||
if (topicProperties.getProperties().size() > 0) {
|
||||
newTopic.configs(topicProperties.getProperties());
|
||||
}
|
||||
CreateTopicsResult createTopicsResult = adminClient
|
||||
.createTopics(Collections.singletonList(newTopic));
|
||||
try {
|
||||
createTopicsResult.all().get(this.operationTimeout, TimeUnit.SECONDS);
|
||||
}
|
||||
catch (Exception ex) {
|
||||
if (ex instanceof ExecutionException) {
|
||||
if (ex.getCause() instanceof TopicExistsException) {
|
||||
if (logger.isWarnEnabled()) {
|
||||
logger.warn("Attempt to create topic: " + topicName
|
||||
+ ". Topic already exists.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
logger.error("Failed to create topics", ex.getCause());
|
||||
throw ex.getCause();
|
||||
}
|
||||
if (partitionSize < effectivePartitionCount) {
|
||||
if (this.configurationProperties.isAutoAddPartitions()) {
|
||||
adminUtilsOperation.invokeAddPartitions(zkUtils, topicName, effectivePartitionCount, null, false);
|
||||
}
|
||||
else if (tolerateLowerPartitionsOnBroker) {
|
||||
logger.warn("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "There will be " + (effectivePartitionCount - partitionSize) + " idle consumers");
|
||||
}
|
||||
else {
|
||||
logger.error("Failed to create topics", ex.getCause());
|
||||
throw ex.getCause();
|
||||
throw new ProvisioningException("The number of expected partitions was: " + partitionCount + ", but "
|
||||
+ partitionSize + (partitionSize > 1 ? " have " : " has ") + "been found instead."
|
||||
+ "Consider either increasing the partition count of the topic or enabling " +
|
||||
"`autoAddPartitions`");
|
||||
}
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
	else if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) {
		// always consider minPartitionCount for topic creation
		final int effectivePartitionCount = Math.max(this.configurationProperties.getMinPartitionCount(),
				partitionCount);

		this.metadataRetryOperations.execute(new RetryCallback<Object, RuntimeException>() {

			@Override
			public Object doWithRetry(RetryContext context) throws RuntimeException {

				try {
					adminUtilsOperation.invokeCreateTopic(zkUtils, topicName, effectivePartitionCount,
							configurationProperties.getReplicationFactor(), new Properties());
				}
				catch (Exception e) {
					String exceptionClass = e.getClass().getName();
					if (exceptionClass.equals("kafka.common.TopicExistsException")
							|| exceptionClass.equals("org.apache.kafka.common.errors.TopicExistsException")) {
						if (logger.isWarnEnabled()) {
							logger.warn("Attempt to create topic: " + topicName + ". Topic already exists.");
						}
					}
					else {
						throw e;
					}
				}
				return null;
			}
		});
	}
	else {
		throw new ProvisioningException("Error fetching Kafka topic metadata: ",
				ErrorMapping.exceptionFor(errorCode));
	}
}
finally {
	zkUtils.close();
}
}

/**
 * Check that the topic has the expected number of partitions and return the partition information.
 * @param partitionCount the expected count.
 * @param tolerateLowerPartitionsOnBroker if false, throw an exception if there are not enough partitions.
 * @param callable a Callable that will provide the partition information.
 * @param topicName the topic name.
 * @return the partition information.
 */
public Collection<PartitionInfo> getPartitionsForTopic(final int partitionCount,
		final boolean tolerateLowerPartitionsOnBroker,
		final Callable<Collection<PartitionInfo>> callable, final String topicName) {
	try {
		return this.metadataRetryOperations.execute((context) -> {
			Collection<PartitionInfo> partitions = Collections.emptyList();
			try {
				// This call may return null or throw an exception.
				partitions = callable.call();
			}
			catch (Exception ex) {
				// The above call can potentially throw exceptions such as timeout. If
				// we can determine that the exception was due to an unknown topic on
				// the broker, simply rethrow that.
				if (ex instanceof UnknownTopicOrPartitionException) {
					throw ex;
				}
				logger.error("Failed to obtain partition information", ex);
			}
			// In some cases, the above partition query may not throw an UnknownTopic..Exception for various reasons.
			// For that, we are forcing another query to ensure that the topic is present on the server.
			if (CollectionUtils.isEmpty(partitions)) {
				try (AdminClient adminClient = AdminClient
						.create(this.adminClientProperties)) {
					final DescribeTopicsResult describeTopicsResult = adminClient
							.describeTopics(Collections.singletonList(topicName));

					describeTopicsResult.all().get();
				}
				catch (ExecutionException ex) {
					if (ex.getCause() instanceof UnknownTopicOrPartitionException) {
						throw (UnknownTopicOrPartitionException) ex.getCause();
					}
					else {
						logger.warn("No partitions have been retrieved for the topic "
								+ "(" + topicName
								+ "). This will affect the health check.");
					}
				}
			}
			// do a sanity check on the partition set
			int partitionSize = CollectionUtils.isEmpty(partitions) ? 0 : partitions.size();
			if (partitionSize < partitionCount) {
				if (tolerateLowerPartitionsOnBroker) {
					logger.warn("The number of expected partitions was: "
							+ partitionCount + ", but " + partitionSize
							+ (partitionSize > 1 ? " have " : " has ")
							+ "been found instead." + "There will be "
							+ (partitionCount - partitionSize) + " idle consumers");
				}
				else {
					throw new IllegalStateException(
							"The number of expected partitions was: " + partitionCount
									+ ", but " + partitionSize
									+ (partitionSize > 1 ? " have " : " has ")
									+ "been found instead");
				}
			}
			return partitions;
		});
	}
	catch (Exception ex) {
		logger.error("Cannot initialize Binder", ex);
		throw new BinderException("Cannot initialize binder:", ex);
	}
}

@@ -542,6 +278,10 @@ public class KafkaTopicProvisioner implements

	private final int partitions;

	KafkaProducerDestination(String destinationName) {
		this(destinationName, 0);
	}

	KafkaProducerDestination(String destinationName, Integer partitions) {
		this.producerDestinationName = destinationName;
		this.partitions = partitions;
@@ -549,20 +289,21 @@ public class KafkaTopicProvisioner implements

	@Override
	public String getName() {
		return this.producerDestinationName;
		return producerDestinationName;
	}

	@Override
	public String getNameForPartition(int partition) {
		return this.producerDestinationName;
		return producerDestinationName;
	}

	@Override
	public String toString() {
		return "KafkaProducerDestination{" + "producerDestinationName='"
				+ producerDestinationName + '\'' + ", partitions=" + partitions + '}';
		return "KafkaProducerDestination{" +
				"producerDestinationName='" + producerDestinationName + '\'' +
				", partitions=" + partitions +
				'}';
	}

}

private static final class KafkaConsumerDestination implements ConsumerDestination {
@@ -581,8 +322,7 @@ public class KafkaTopicProvisioner implements
		this(consumerDestinationName, partitions, null);
	}

	KafkaConsumerDestination(String consumerDestinationName, Integer partitions,
			String dlqName) {
	KafkaConsumerDestination(String consumerDestinationName, Integer partitions, String dlqName) {
		this.consumerDestinationName = consumerDestinationName;
		this.partitions = partitions;
		this.dlqName = dlqName;
@@ -595,11 +335,11 @@ public class KafkaTopicProvisioner implements

	@Override
	public String toString() {
		return "KafkaConsumerDestination{" + "consumerDestinationName='"
				+ consumerDestinationName + '\'' + ", partitions=" + partitions
				+ ", dlqName='" + dlqName + '\'' + '}';
		return "KafkaConsumerDestination{" +
				"consumerDestinationName='" + consumerDestinationName + '\'' +
				", partitions=" + partitions +
				", dlqName='" + dlqName + '\'' +
				'}';
	}

}

}
@@ -1,76 +0,0 @@
/*
 * Copyright 2019-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.utils;

import org.apache.commons.logging.Log;
import org.apache.kafka.clients.consumer.ConsumerRecord;

import org.springframework.lang.Nullable;

/**
 * A TriFunction that takes a consumer group, consumer record, and throwable and returns
 * which partition to publish to the dead letter topic. Returning {@code null} means Kafka
 * will choose the partition.
 *
 * @author Gary Russell
 * @since 3.0
 *
 */
@FunctionalInterface
public interface DlqPartitionFunction {

	/**
	 * Returns the same partition as the original record.
	 */
	DlqPartitionFunction ORIGINAL_PARTITION = (group, rec, ex) -> rec.partition();

	/**
	 * Returns 0.
	 */
	DlqPartitionFunction PARTITION_ZERO = (group, rec, ex) -> 0;

	/**
	 * Apply the function.
	 * @param group the consumer group.
	 * @param record the consumer record.
	 * @param throwable the exception.
	 * @return the DLQ partition, or null.
	 */
	@Nullable
	Integer apply(String group, ConsumerRecord<?, ?> record, Throwable throwable);

	/**
	 * Determine the fallback function to use based on the dlq partition count if no
	 * {@link DlqPartitionFunction} bean is provided.
	 * @param dlqPartitions the partition count.
	 * @param logger the logger.
	 * @return the fallback.
	 */
	static DlqPartitionFunction determineFallbackFunction(@Nullable Integer dlqPartitions, Log logger) {
		if (dlqPartitions == null) {
			return ORIGINAL_PARTITION;
		}
		else if (dlqPartitions > 1) {
			logger.error("'dlqPartitions' is > 1 but a custom DlqPartitionFunction bean is not provided");
			return ORIGINAL_PARTITION;
		}
		else {
			return PARTITION_ZERO;
		}
	}

}
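For illustration only (not part of this changeset), a custom `DlqPartitionFunction` might route records by failure type rather than always reusing the original partition; the bean and routing rule below are hypothetical:

	@Bean
	public DlqPartitionFunction dlqPartitionFunction() {
		// Hypothetical rule: send deserialization failures to partition 1,
		// keep the original record's partition for everything else.
		return (group, record, throwable) ->
				(throwable instanceof org.apache.kafka.common.errors.SerializationException)
						? 1 : record.partition();
	}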
@@ -1,11 +1,11 @@
/*
 * Copyright 2016-2019 the original author or authors.
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
@@ -19,8 +19,6 @@ package org.springframework.cloud.stream.binder.kafka.utils;
import java.io.UnsupportedEncodingException;

/**
 * Utility methods related to Kafka topics.
 *
 * @author Soby Chacko
 */
public final class KafkaTopicUtils {
@@ -30,25 +28,22 @@ public final class KafkaTopicUtils {
	}

	/**
	 * Validate topic name. Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
	 * @param topicName name of the topic
	 * Allowed chars are ASCII alphanumerics, '.', '_' and '-'.
	 */
	public static void validateTopicName(String topicName) {
		try {
			byte[] utf8 = topicName.getBytes("UTF-8");
			for (byte b : utf8) {
				if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z')
						|| (b >= '0') && (b <= '9') || (b == '.') || (b == '-')
						|| (b == '_'))) {
				if (!((b >= 'a') && (b <= 'z') || (b >= 'A') && (b <= 'Z') || (b >= '0') && (b <= '9') || (b == '.')
						|| (b == '-') || (b == '_'))) {
					throw new IllegalArgumentException(
							"Topic name can only have ASCII alphanumerics, '.', '_' and '-', but was: '"
									+ topicName + "'");
							"Topic name can only have ASCII alphanumerics, '.', '_' and '-', but was: '" + topicName
									+ "'");
				}
			}
		}
		catch (UnsupportedEncodingException ex) {
			throw new AssertionError(ex); // Can't happen
		catch (UnsupportedEncodingException e) {
			throw new AssertionError(e); // Can't happen
		}
	}

}
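As a quick, illustrative usage note (not part of the diff), the validation accepts names built from the allowed character set and rejects anything else:

	KafkaTopicUtils.validateTopicName("orders.v1-2"); // passes
	KafkaTopicUtils.validateTopicName("orders/v1");   // throws IllegalArgumentException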
@@ -1,108 +0,0 @@
/*
 * Copyright 2018-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.properties;

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;

import static org.assertj.core.api.Assertions.assertThat;

public class KafkaBinderConfigurationPropertiesTest {

	@Test
	public void mergedConsumerConfigurationFiltersGroupIdFromKafkaProperties() {
		KafkaProperties kafkaProperties = new KafkaProperties();
		kafkaProperties.getConsumer().setGroupId("group1");
		KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
				new KafkaBinderConfigurationProperties(kafkaProperties);

		Map<String, Object> mergedConsumerConfiguration =
				kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

		assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
	}

	@Test
	public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaProperties() {
		KafkaProperties kafkaProperties = new KafkaProperties();
		kafkaProperties.getConsumer().setEnableAutoCommit(true);
		KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
				new KafkaBinderConfigurationProperties(kafkaProperties);

		Map<String, Object> mergedConsumerConfiguration =
				kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

		assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
	}

	@Test
	public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConfiguration() {
		KafkaProperties kafkaProperties = new KafkaProperties();
		KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
				new KafkaBinderConfigurationProperties(kafkaProperties);
		kafkaBinderConfigurationProperties
				.setConfiguration(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));

		Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

		assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
	}

	@Test
	public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConfiguration() {
		KafkaProperties kafkaProperties = new KafkaProperties();
		KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
				new KafkaBinderConfigurationProperties(kafkaProperties);
		kafkaBinderConfigurationProperties
				.setConfiguration(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));

		Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

		assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
	}

	@Test
	public void mergedConsumerConfigurationFiltersGroupIdFromKafkaBinderConfigurationPropertiesConsumerProperties() {
		KafkaProperties kafkaProperties = new KafkaProperties();
		KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
				new KafkaBinderConfigurationProperties(kafkaProperties);
		kafkaBinderConfigurationProperties
				.setConsumerProperties(Collections.singletonMap(ConsumerConfig.GROUP_ID_CONFIG, "group1"));

		Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

		assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.GROUP_ID_CONFIG);
	}

	@Test
	public void mergedConsumerConfigurationFiltersEnableAutoCommitFromKafkaBinderConfigurationPropertiesConsumerProps() {
		KafkaProperties kafkaProperties = new KafkaProperties();
		KafkaBinderConfigurationProperties kafkaBinderConfigurationProperties =
				new KafkaBinderConfigurationProperties(kafkaProperties);
		kafkaBinderConfigurationProperties
				.setConsumerProperties(Collections.singletonMap(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"));

		Map<String, Object> mergedConsumerConfiguration = kafkaBinderConfigurationProperties.mergedConsumerConfiguration();

		assertThat(mergedConsumerConfiguration).doesNotContainKeys(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG);
	}
}
@@ -1,118 +0,0 @@
/*
 * Copyright 2018-2019 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.cloud.stream.binder.kafka.provisioning;

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.network.SslChannelBuilder;
import org.junit.Test;

import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.cloud.stream.binder.kafka.properties.KafkaBinderConfigurationProperties;
import org.springframework.core.io.ClassPathResource;
import org.springframework.kafka.test.utils.KafkaTestUtils;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.fail;

/**
 * @author Gary Russell
 * @since 2.0
 *
 */
public class KafkaTopicProvisionerTests {

	@SuppressWarnings("rawtypes")
	@Test
	public void bootPropertiesOverriddenExceptServers() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
				"PLAINTEXT");
		bootConfig.setBootstrapServers(Collections.singletonList("localhost:1234"));
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
				bootConfig);
		binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG,
				"SSL");
		ClassPathResource ts = new ClassPathResource("test.truststore.ks");
		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
				ts.getFile().getAbsolutePath());
		binderConfig.setBrokers("localhost:9092");
		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
				bootConfig);
		AdminClient adminClient = provisioner.createAdminClient();
		assertThat(KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
		Map configs = KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder.configs", Map.class);
		assertThat(
				((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0))
						.isEqualTo("localhost:1234");
		adminClient.close();
	}

	@SuppressWarnings("rawtypes")
	@Test
	public void bootPropertiesOverriddenIncludingServers() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		bootConfig.getProperties().put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG,
				"PLAINTEXT");
		bootConfig.setBootstrapServers(Collections.singletonList("localhost:9092"));
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
				bootConfig);
		binderConfig.getConfiguration().put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG,
				"SSL");
		ClassPathResource ts = new ClassPathResource("test.truststore.ks");
		binderConfig.getConfiguration().put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
				ts.getFile().getAbsolutePath());
		binderConfig.setBrokers("localhost:1234");
		KafkaTopicProvisioner provisioner = new KafkaTopicProvisioner(binderConfig,
				bootConfig);
		AdminClient adminClient = provisioner.createAdminClient();
		assertThat(KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder")).isInstanceOf(SslChannelBuilder.class);
		Map configs = KafkaTestUtils.getPropertyValue(adminClient,
				"client.selector.channelBuilder.configs", Map.class);
		assertThat(
				((List) configs.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)).get(0))
						.isEqualTo("localhost:1234");
		adminClient.close();
	}

	@Test
	public void brokersInvalid() throws Exception {
		KafkaProperties bootConfig = new KafkaProperties();
		KafkaBinderConfigurationProperties binderConfig = new KafkaBinderConfigurationProperties(
				bootConfig);
		binderConfig.getConfiguration().put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG,
				"localhost:1234");
		try {
			new KafkaTopicProvisioner(binderConfig, bootConfig);
			fail("Expected illegal state");
		}
		catch (IllegalStateException e) {
			assertThat(e.getMessage()).isEqualTo(
					"Set binder bootstrap servers via the 'brokers' property, not 'configuration'");
		}
	}

}
337
spring-cloud-stream-binder-kafka-docs/pom.xml
Normal file
@@ -0,0 +1,337 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>

	<parent>
		<groupId>org.springframework.cloud</groupId>
		<artifactId>spring-cloud-stream-binder-kafka-parent</artifactId>
		<version>2.0.0.M1</version>
	</parent>

	<artifactId>spring-cloud-stream-binder-kafka-docs</artifactId>
	<name>spring-cloud-stream-binder-kafka-docs</name>
	<description>Spring Cloud Stream Kafka Binder Docs</description>
	<properties>
		<main.basedir>${basedir}/..</main.basedir>
	</properties>
	<dependencies>
		<dependency>
			<groupId>org.springframework.cloud</groupId>
			<artifactId>spring-cloud-stream-binder-kafka</artifactId>
			<version>${project.version}</version>
		</dependency>
	</dependencies>
	<profiles>
		<profile>
			<id>full</id>
			<build>
				<plugins>
					<plugin>
						<groupId>org.codehaus.mojo</groupId>
						<artifactId>xml-maven-plugin</artifactId>
						<version>1.0</version>
						<executions>
							<execution>
								<goals>
									<goal>transform</goal>
								</goals>
							</execution>
						</executions>
						<configuration>
							<transformationSets>
								<transformationSet>
									<dir>${project.build.directory}/external-resources</dir>
									<stylesheet>src/main/xslt/dependencyVersions.xsl</stylesheet>
									<fileMappers>
										<fileMapper implementation="org.codehaus.plexus.components.io.filemappers.FileExtensionMapper">
											<targetExtension>.adoc</targetExtension>
										</fileMapper>
									</fileMappers>
									<outputDir>${project.build.directory}/generated-resources</outputDir>
								</transformationSet>
							</transformationSets>
						</configuration>
					</plugin>
					<plugin>
						<groupId>org.apache.maven.plugins</groupId>
						<artifactId>maven-javadoc-plugin</artifactId>
						<executions>
							<execution>
								<id>attach-javadocs</id>
								<goals>
									<goal>jar</goal>
								</goals>
								<phase>prepare-package</phase>
								<configuration>
									<includeDependencySources>true</includeDependencySources>
									<dependencySourceIncludes>
										<dependencySourceInclude>${project.groupId}:*</dependencySourceInclude>
									</dependencySourceIncludes>
									<attach>false</attach>
									<quiet>true</quiet>
									<stylesheetfile>${basedir}/src/main/javadoc/spring-javadoc.css</stylesheetfile>
									<links>
										<link>http://docs.spring.io/spring-framework/docs/${spring.version}/javadoc-api/</link>
										<link>http://docs.spring.io/spring-shell/docs/current/api/</link>
									</links>
								</configuration>
							</execution>
						</executions>
					</plugin>
					<plugin>
						<groupId>org.asciidoctor</groupId>
						<artifactId>asciidoctor-maven-plugin</artifactId>
						<version>1.5.0</version>
						<executions>
							<execution>
								<id>generate-docbook</id>
								<phase>generate-resources</phase>
								<goals>
									<goal>process-asciidoc</goal>
								</goals>
								<configuration>
									<sourceDocumentName>index.adoc</sourceDocumentName>
									<backend>docbook5</backend>
									<doctype>book</doctype>
									<attributes>
										<docinfo>true</docinfo>
										<spring-cloud-stream-binder-kafka-version>${project.version}</spring-cloud-stream-binder-kafka-version>
										<spring-cloud-stream-binder-kafka-docs-version>${project.version}</spring-cloud-stream-binder-kafka-docs-version>
										<github-tag>${github-tag}</github-tag>
									</attributes>
								</configuration>
							</execution>
						</executions>
					</plugin>
					<plugin>
						<groupId>com.agilejava.docbkx</groupId>
						<artifactId>docbkx-maven-plugin</artifactId>
						<version>2.0.15</version>
						<configuration>
							<sourceDirectory>${basedir}/target/generated-docs</sourceDirectory>
							<includes>index.xml</includes>
							<xincludeSupported>true</xincludeSupported>
							<chunkedOutput>false</chunkedOutput>
							<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
							<useExtensions>1</useExtensions>
							<highlightSource>1</highlightSource>
							<highlightXslthlConfig>${basedir}/src/main/docbook/xsl/xslthl-config.xml</highlightXslthlConfig>
						</configuration>
						<dependencies>
							<dependency>
								<groupId>net.sf.xslthl</groupId>
								<artifactId>xslthl</artifactId>
								<version>2.1.0</version>
							</dependency>
							<dependency>
								<groupId>net.sf.docbook</groupId>
								<artifactId>docbook-xml</artifactId>
								<version>5.0-all</version>
								<classifier>resources</classifier>
								<type>zip</type>
								<scope>runtime</scope>
							</dependency>
						</dependencies>
						<executions>
							<execution>
								<id>html-single</id>
								<goals>
									<goal>generate-html</goal>
								</goals>
								<phase>generate-resources</phase>
								<configuration>
									<htmlCustomization>${basedir}/src/main/docbook/xsl/html-singlepage.xsl</htmlCustomization>
									<targetDirectory>${basedir}/target/docbook/htmlsingle</targetDirectory>
									<postProcess>
										<copy todir="${basedir}/target/contents/reference/htmlsingle">
											<fileset dir="${basedir}/target/docbook/htmlsingle">
												<include name="**/*.html" />
											</fileset>
										</copy>
										<copy todir="${basedir}/target/contents/reference/htmlsingle">
											<fileset dir="${basedir}/src/main/docbook">
												<include name="**/*.css" />
												<include name="**/*.png" />
												<include name="**/*.gif" />
												<include name="**/*.jpg" />
											</fileset>
										</copy>
										<copy todir="${basedir}/target/contents/reference/htmlsingle">
											<fileset dir="${basedir}/src/main/asciidoc">
												<include name="images/*.css" />
												<include name="images/*.png" />
												<include name="images/*.gif" />
												<include name="images/*.jpg" />
											</fileset>
										</copy>
									</postProcess>
								</configuration>
							</execution>
							<execution>
								<id>html</id>
								<goals>
									<goal>generate-html</goal>
								</goals>
								<phase>generate-resources</phase>
								<configuration>
									<htmlCustomization>${basedir}/src/main/docbook/xsl/html-multipage.xsl</htmlCustomization>
									<targetDirectory>${basedir}/target/docbook/html</targetDirectory>
									<chunkedOutput>true</chunkedOutput>
									<postProcess>
										<copy todir="${basedir}/target/contents/reference/html">
											<fileset dir="${basedir}/target/docbook/html">
												<include name="**/*.html" />
											</fileset>
										</copy>
										<copy todir="${basedir}/target/contents/reference/html">
											<fileset dir="${basedir}/src/main/docbook">
												<include name="**/*.css" />
												<include name="**/*.png" />
												<include name="**/*.gif" />
												<include name="**/*.jpg" />
											</fileset>
										</copy>
										<copy todir="${basedir}/target/contents/reference/html">
											<fileset dir="${basedir}/src/main/asciidoc">
												<include name="images/*.css" />
												<include name="images/*.png" />
												<include name="images/*.gif" />
												<include name="images/*.jpg" />
											</fileset>
										</copy>
									</postProcess>
								</configuration>
							</execution>
							<execution>
								<id>pdf</id>
								<goals>
									<goal>generate-pdf</goal>
								</goals>
								<phase>generate-resources</phase>
								<configuration>
									<foCustomization>${basedir}/src/main/docbook/xsl/pdf.xsl</foCustomization>
									<targetDirectory>${basedir}/target/docbook/pdf</targetDirectory>
									<postProcess>
										<copy todir="${basedir}/target/contents/reference">
											<fileset dir="${basedir}/target/docbook">
												<include name="**/*.pdf" />
											</fileset>
										</copy>
										<move file="${basedir}/target/contents/reference/pdf/index.pdf" tofile="${basedir}/target/contents/reference/pdf/spring-cloud-stream-reference.pdf" />
									</postProcess>
								</configuration>
							</execution>
							<execution>
								<id>epub</id>
								<goals>
									<goal>generate-epub3</goal>
								</goals>
								<phase>generate-resources</phase>
								<configuration>
									<epubCustomization>${basedir}/src/main/docbook/xsl/epub.xsl</epubCustomization>
									<targetDirectory>${basedir}/target/docbook/epub</targetDirectory>
									<postProcess>
										<copy todir="${basedir}/target/contents/reference/epub">
											<fileset dir="${basedir}/target/docbook">
												<include name="**/*.epub" />
											</fileset>
										</copy>
										<move file="${basedir}/target/contents/reference/epub/index.epub" tofile="${basedir}/target/contents/reference/epub/spring-cloud-stream-reference.epub" />
									</postProcess>
								</configuration>
							</execution>
						</executions>
					</plugin>
					<plugin>
						<groupId>org.apache.maven.plugins</groupId>
						<artifactId>maven-antrun-plugin</artifactId>
						<dependencies>
							<dependency>
								<groupId>ant-contrib</groupId>
								<artifactId>ant-contrib</artifactId>
								<version>1.0b3</version>
								<exclusions>
									<exclusion>
										<groupId>ant</groupId>
										<artifactId>ant</artifactId>
									</exclusion>
								</exclusions>
							</dependency>
							<dependency>
								<groupId>org.apache.ant</groupId>
								<artifactId>ant-nodeps</artifactId>
								<version>1.8.1</version>
							</dependency>
							<dependency>
								<groupId>org.tigris.antelope</groupId>
								<artifactId>antelopetasks</artifactId>
								<version>3.2.10</version>
							</dependency>
						</dependencies>
						<executions>
							<execution>
								<id>package-and-attach-docs-zip</id>
								<phase>package</phase>
								<goals>
									<goal>run</goal>
								</goals>
								<configuration>
									<target>
										<zip destfile="${project.build.directory}/${project.artifactId}-${project.version}.zip">
											<zipfileset src="${project.build.directory}/${project.artifactId}-${project.version}-javadoc.jar" prefix="api" />
											<fileset dir="${project.build.directory}/contents" />
										</zip>
									</target>
								</configuration>
							</execution>
							<execution>
								<id>setup-maven-properties</id>
								<phase>validate</phase>
								<goals>
									<goal>run</goal>
								</goals>
								<configuration>
									<exportAntProperties>true</exportAntProperties>
									<target>
										<taskdef resource="net/sf/antcontrib/antcontrib.properties" />
										<taskdef name="stringutil" classname="ise.antelope.tasks.StringUtilTask" />
										<var name="version-type" value="${project.version}" />
										<propertyregex property="version-type" override="true" input="${version-type}" regexp=".*\.(.*)" replace="\1" />
										<propertyregex property="version-type" override="true" input="${version-type}" regexp="(M)\d+" replace="MILESTONE" />
										<propertyregex property="version-type" override="true" input="${version-type}" regexp="(RC)\d+" replace="MILESTONE" />
										<propertyregex property="version-type" override="true" input="${version-type}" regexp="BUILD-(.*)" replace="SNAPSHOT" />
										<stringutil string="${version-type}" property="spring-boot-repo">
											<lowercase />
										</stringutil>
										<var name="github-tag" value="v${project.version}" />
										<propertyregex property="github-tag" override="true" input="${github-tag}" regexp=".*SNAPSHOT" replace="master" />
									</target>
								</configuration>
							</execution>
						</executions>
					</plugin>
					<plugin>
						<groupId>org.codehaus.mojo</groupId>
						<artifactId>build-helper-maven-plugin</artifactId>
						<executions>
							<execution>
								<id>attach-zip</id>
								<goals>
									<goal>attach-artifact</goal>
								</goals>
								<configuration>
									<artifacts>
										<artifact>
											<file>${project.build.directory}/${project.artifactId}-${project.version}.zip</file>
											<type>zip;zip.type=docs;zip.deployed=false</type>
										</artifact>
									</artifacts>
								</configuration>
							</execution>
						</executions>
					</plugin>
				</plugins>
			</build>
		</profile>
	</profiles>
</project>
@@ -34,7 +34,7 @@ source control.

The projects that require middleware generally include a
`docker-compose.yml`, so consider using
https://compose.docker.io/[Docker Compose] to run the middleware servers
http://compose.docker.io/[Docker Compose] to run the middleware servers
in Docker containers.

=== Documentation
@@ -43,13 +43,13 @@ There is a "full" profile that will generate documentation.

=== Working with the code
If you don't have an IDE preference we would recommend that you use
https://www.springsource.com/developer/sts[Spring Tools Suite] or
https://eclipse.org[Eclipse] when working with the code. We use the
https://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
http://www.springsource.com/developer/sts[Spring Tools Suite] or
http://eclipse.org[Eclipse] when working with the code. We use the
http://eclipse.org/m2e/[m2eclipse] eclipse plugin for maven support. Other IDEs and tools
should also work without issue.

==== Importing into eclipse with m2eclipse
We recommend the https://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
We recommend the http://eclipse.org/m2e/[m2eclipse] eclipse plugin when working with
eclipse. If you don't already have m2eclipse installed it is available from the "eclipse
marketplace".

@@ -24,7 +24,7 @@ added after the original pull request but before a merge.
`eclipse-code-formatter.xml` file from the
https://github.com/spring-cloud/build/tree/master/eclipse-coding-conventions.xml[Spring
Cloud Build] project. If using IntelliJ, you can use the
https://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
http://plugins.jetbrains.com/plugin/6546[Eclipse Code Formatter
Plugin] to import the same file.
* Make sure all new `.java` files have a simple Javadoc class comment with at least an
`@author` tag identifying you, and preferably at least a paragraph on what the class is
@@ -37,6 +37,6 @@ added after the original pull request but before a merge.
* A few unit tests would help a lot as well -- someone has to do it.
* If no-one else is using your branch, please rebase it against the current master (or
other target branch in the main project).
* When writing a commit message please follow https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
* When writing a commit message please follow http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html[these conventions],
if you are fixing an existing issue please add `Fixes gh-XXXX` at the end of the commit
message (where XXXX is the issue number).
@@ -1,49 +1,24 @@
[[kafka-dlq-processing]]
=== Dead-Letter Topic Processing
== Dead-Letter Topic Processing

[[dlq-partition-selection]]
==== Dead-Letter Topic Partition Selection

By default, records are published to the Dead-Letter topic using the same partition as the original record.
This means the Dead-Letter topic must have at least as many partitions as the original topic.

To change this behavior, add a `DlqPartitionFunction` implementation as a `@Bean` to the application context.
Only one such bean can be present.
The function is provided with the consumer group, the failed `ConsumerRecord`, and the exception.
For example, if you always want to route to partition 0, you might use:

====
[source, java]
----
@Bean
public DlqPartitionFunction partitionFunction() {
    return (group, record, ex) -> 0;
}
----
====

NOTE: If you set a consumer binding's `dlqPartitions` property to 1 (and the binder's `minPartitionCount` is equal to `1`), there is no need to supply a `DlqPartitionFunction`; the framework will always use partition 0.
If you set a consumer binding's `dlqPartitions` property to a value greater than `1` (or the binder's `minPartitionCount` is greater than `1`), you **must** provide a `DlqPartitionFunction` bean, even if the partition count is the same as the original topic's.
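For instance, a single-partition DLQ that needs no custom function could be configured as follows (the `input` binding name is illustrative):

====
[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
spring.cloud.stream.kafka.bindings.input.consumer.dlqPartitions=1
----
====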
[[dlq-handling]]
==== Handling Records in a Dead-Letter Topic

Because the framework cannot anticipate how users would want to dispose of dead-lettered messages, it does not provide any standard mechanism to handle them.
Because it can't be anticipated how users would want to dispose of dead-lettered messages, the framework does not provide any standard mechanism to handle them.
If the reason for the dead-lettering is transient, you may wish to route the messages back to the original topic.
However, if the problem is a permanent issue, that could cause an infinite loop.
The sample Spring Boot application within this topic is an example of how to route those messages back to the original topic, but it moves them to a "`parking lot`" topic after three attempts.
The application is another spring-cloud-stream application that reads from the dead-letter topic.
It exits when no messages are received for 5 seconds.
The following `spring-boot` application is an example of how to route those messages back to the original topic, but moves them to a third "parking lot" topic after three attempts.
The application is simply another spring-cloud-stream application that reads from the dead-letter topic.
It terminates when no messages are received for 5 seconds.

The examples assume the original destination is `so8400out` and the consumer group is `so8400`.

There are a couple of strategies to consider:
There are several considerations.

* Consider running the rerouting only when the main application is not running.
Otherwise, the retries for transient errors are used up very quickly.
* Alternatively, use a two-stage approach: Use this application to route to a third topic and another to route from there back to the main topic.

The following code listings show the sample application:
- Consider only running the rerouting when the main application is not running.
Otherwise, the retries for transient errors will be used up very quickly.
- Alternatively, use a two-stage approach - use this application to route to a third topic, and another to route from there back to the main topic.
- Since this technique uses a message header to keep track of retries, it won't work with `headerMode=raw`.
In that case, consider adding some data to the payload (that can be ignored by the main application).
- `x-retries` has to be added to the `headers` property `spring.cloud.stream.kafka.binder.headers=x-retries` on both this application and the main application so that the header is transported between the applications.
- Since Kafka is publish/subscribe, replayed messages will be sent to each consumer group, even those that successfully processed a message the first time around.

.application.properties
[source]
@@ -52,8 +27,10 @@ spring.cloud.stream.bindings.input.group=so8400replay
spring.cloud.stream.bindings.input.destination=error.so8400out.so8400

spring.cloud.stream.bindings.output.destination=so8400out
spring.cloud.stream.bindings.output.producer.partitioned=true

spring.cloud.stream.bindings.parkingLot.destination=so8400in.parkingLot
spring.cloud.stream.bindings.parkingLot.producer.partitioned=true

spring.cloud.stream.kafka.binder.configuration.auto.offset.reset=earliest

@@ -115,7 +92,7 @@ public class ReRouteDlqKApplication implements CommandLineRunner {
			int count = this.processed.get();
			Thread.sleep(5000);
			if (count == this.processed.get()) {
				System.out.println("Idle, exiting");
				System.out.println("Idle, terminating");
				return;
			}
		}
@@ -0,0 +1,34 @@
[[spring-cloud-stream-binder-kafka-reference]]
= Spring Cloud Stream Kafka Binder Reference Guide
Sabby Anandan, Marius Bogoevici, Eric Bottard, Mark Fisher, Ilayaperumal Gopinathan, Gunnar Hillert, Mark Pollack, Patrick Peralta, Glenn Renfro, Thomas Risberg, Dave Syer, David Turanski, Janne Valkealahti, Benjamin Klein, Henryk Konsek
:doctype: book
:toc:
:toclevels: 4
:source-highlighter: prettify
:numbered:
:icons: font
:hide-uri-scheme:
:spring-cloud-stream-binder-kafka-repo: snapshot
:github-tag: master
:spring-cloud-stream-binder-kafka-docs-version: current
:spring-cloud-stream-binder-kafka-docs: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/{spring-cloud-stream-binder-kafka-docs-version}/reference
:spring-cloud-stream-binder-kafka-docs-current: http://docs.spring.io/spring-cloud-stream-binder-kafka/docs/current-SNAPSHOT/reference/html/
:github-repo: spring-cloud/spring-cloud-stream-binder-kafka
:github-raw: http://raw.github.com/{github-repo}/{github-tag}
:github-code: http://github.com/{github-repo}/tree/{github-tag}
:github-wiki: http://github.com/{github-repo}/wiki
:github-master-code: http://github.com/{github-repo}/tree/master
:sc-ext: java
// ======================================================================================

= Reference Guide
include::overview.adoc[]
include::dlq.adoc[]
include::metrics.adoc[]

= Appendices
[appendix]
include::building.adoc[]
include::contributing.adoc[]

// ======================================================================================
@@ -0,0 +1,10 @@
[[kafka-metrics]]
== Kafka metrics

The Kafka binder module exposes the following metrics:

`spring.cloud.stream.binder.kafka.someGroup.someTopic.lag` - this metric indicates how many messages
have not yet been consumed from a given binder's topic (`someTopic`) by a given consumer group (`someGroup`).
For example, if the value of the metric `spring.cloud.stream.binder.kafka.myGroup.myTopic.lag` is `1000`, then
consumer group `myGroup` has `1000` messages waiting to be consumed from topic `myTopic`. This metric is
particularly useful for providing auto-scaling feedback to the PaaS platform of your choice.
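As a sketch of what to look for (the group, topic, and value here are the illustrative ones from the example above), the exposed metric takes the form:

[source]
----
spring.cloud.stream.binder.kafka.myGroup.myTopic.lag = 1000
----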
@@ -0,0 +1,436 @@
[partintro]
--
This guide describes the Apache Kafka implementation of the Spring Cloud Stream Binder.
It contains information about its design, usage, and configuration options, as well as information on how the Spring Cloud Stream concepts map onto Apache Kafka specific constructs.
--

== Usage

To use the Apache Kafka binder, add it to your Spring Cloud Stream application, using the following Maven coordinates:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-stream-binder-kafka</artifactId>
</dependency>
----

Alternatively, you can also use the Spring Cloud Stream Kafka Starter:

[source,xml]
----
<dependency>
  <groupId>org.springframework.cloud</groupId>
  <artifactId>spring-cloud-starter-stream-kafka</artifactId>
</dependency>
----

== Apache Kafka Binder Overview

A simplified diagram of how the Apache Kafka binder operates can be seen below.

.Kafka Binder
image::kafka-binder.png[width=300,scaledwidth="50%"]

The Apache Kafka Binder implementation maps each destination to an Apache Kafka topic.
The consumer group maps directly to the same Apache Kafka concept.
Partitioning also maps directly to Apache Kafka partitions.

== Configuration Options

This section contains the configuration options used by the Apache Kafka binder.

For common configuration options and properties pertaining to binders, refer to the <<binding-properties,core documentation>>.

=== Kafka Binder Properties

spring.cloud.stream.kafka.binder.brokers::
A list of brokers to which the Kafka binder will connect.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultBrokerPort::
`brokers` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the broker list.
+
Default: `9092`.
spring.cloud.stream.kafka.binder.zkNodes::
A list of ZooKeeper nodes to which the Kafka binder can connect.
+
Default: `localhost`.
spring.cloud.stream.kafka.binder.defaultZkPort::
`zkNodes` allows hosts specified with or without port information (e.g., `host1,host2:port2`).
This sets the default port when no port is configured in the node list.
+
Default: `2181`.
spring.cloud.stream.kafka.binder.configuration::
Key/Value map of client properties (both producers and consumers) passed to all clients created by the binder.
Because these properties will be used by both producers and consumers, usage should be restricted to common properties, especially security settings.
+
Default: Empty map.
spring.cloud.stream.kafka.binder.headers::
The list of custom headers that will be transported by the binder.
+
Default: empty.
spring.cloud.stream.kafka.binder.offsetUpdateTimeWindow::
The frequency, in milliseconds, with which offsets are saved.
Ignored if `0`.
+
Default: `10000`.
spring.cloud.stream.kafka.binder.offsetUpdateCount::
The frequency, in number of updates, with which consumed offsets are persisted.
Ignored if `0`.
Mutually exclusive with `offsetUpdateTimeWindow`.
+
Default: `0`.
spring.cloud.stream.kafka.binder.requiredAcks::
The number of required acks on the broker.
+
Default: `1`.
spring.cloud.stream.kafka.binder.minPartitionCount::
Effective only if `autoCreateTopics` or `autoAddPartitions` is set.
The global minimum number of partitions that the binder will configure on topics on which it produces/consumes data.
It can be superseded by the `partitionCount` setting of the producer or by the value of `instanceCount` * `concurrency` settings of the producer (if either is larger).
+
Default: `1`.
spring.cloud.stream.kafka.binder.replicationFactor::
The replication factor of auto-created topics if `autoCreateTopics` is active.
+
Default: `1`.
spring.cloud.stream.kafka.binder.autoCreateTopics::
If set to `true`, the binder will create new topics automatically.
If set to `false`, the binder will rely on the topics being already configured.
In the latter case, if the topics do not exist, the binder will fail to start.
Of note, this setting is independent of the `auto.topic.create.enable` setting of the broker and it does not influence it: if the server is set to auto-create topics, they may be created as part of the metadata retrieval request, with default broker settings.
+
Default: `true`.
spring.cloud.stream.kafka.binder.autoAddPartitions::
If set to `true`, the binder will add new partitions if required.
If set to `false`, the binder will rely on the partition size of the topic being already configured.
If the partition count of the target topic is smaller than the expected value, the binder will fail to start.
+
Default: `false`.
spring.cloud.stream.kafka.binder.socketBufferSize::
Size (in bytes) of the socket buffer to be used by the Kafka consumers.
+
Default: `2097152`.
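Putting a few of these together, a binder configuration might look like the following sketch (the host names and counts are illustrative):

[source]
----
spring.cloud.stream.kafka.binder.brokers=kafka1,kafka2:9093
spring.cloud.stream.kafka.binder.zkNodes=zk1:2181
spring.cloud.stream.kafka.binder.minPartitionCount=4
spring.cloud.stream.kafka.binder.autoAddPartitions=true
----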
=== Kafka Consumer Properties

The following properties are available for Kafka consumers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.consumer.`.

autoRebalanceEnabled::
When `true`, topic partitions will be automatically rebalanced between the members of a consumer group.
When `false`, each consumer will be assigned a fixed set of partitions based on `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex`.
This requires both the `spring.cloud.stream.instanceCount` and `spring.cloud.stream.instanceIndex` properties to be set appropriately on each launched instance.
The property `spring.cloud.stream.instanceCount` must typically be greater than 1 in this case.
+
Default: `true`.
autoCommitOffset::
Whether to autocommit offsets when a message has been processed.
If set to `false`, a header with the key `kafka_acknowledgment` of type `org.springframework.kafka.support.Acknowledgment` will be present in the inbound message.
Applications may use this header for acknowledging messages.
See the examples section for details.
When this property is set to `false`, the Kafka binder will set the ack mode to `org.springframework.kafka.listener.AbstractMessageListenerContainer.AckMode.MANUAL`.
+
Default: `true`.
autoCommitOnError::
Effective only if `autoCommitOffset` is set to `true`.
If set to `false`, it suppresses auto-commits for messages that result in errors and commits only successful messages. This allows a stream to automatically replay from the last successfully processed message, in case of persistent failures.
If set to `true`, it will always auto-commit (if auto-commit is enabled).
If not set (the default), it effectively has the same value as `enableDlq`, auto-committing erroneous messages if they are sent to a DLQ and not committing them otherwise.
+
Default: not set.
recoveryInterval::
The interval between connection recovery attempts, in milliseconds.
+
Default: `5000`.
resetOffsets::
Whether to reset offsets on the consumer to the value provided by `startOffset`.
+
Default: `false`.
startOffset::
The starting offset for new groups, or when `resetOffsets` is `true`.
Allowed values: `earliest`, `latest`.
If the consumer group is set explicitly for the consumer 'binding' (via `spring.cloud.stream.bindings.<channelName>.group`), then 'startOffset' is set to `earliest`; otherwise it is set to `latest` for the `anonymous` consumer group.
+
Default: null (equivalent to `earliest`).
enableDlq::
When set to `true`, it enables DLQ behavior for the consumer.
By default, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`.
The DLQ topic name can be configured via the property `dlqName`.
This provides an alternative option to the more common Kafka replay scenario for the case when the number of errors is relatively small and replaying the entire original topic may be too cumbersome.
+
Default: `false`.
configuration::
Map with a key/value pair containing generic Kafka consumer properties.
+
Default: Empty map.
dlqName::
The name of the DLQ topic to receive the error messages.
+
Default: null (if not specified, messages that result in errors will be forwarded to a topic named `error.<destination>.<group>`).
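For example, a consumer binding that disables auto-commit and dead-letters failed messages to a custom topic might be configured as follows (the channel and topic names are illustrative):

[source]
----
spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset=false
spring.cloud.stream.kafka.bindings.input.consumer.enableDlq=true
spring.cloud.stream.kafka.bindings.input.consumer.dlqName=orders-dlq
----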
=== Kafka Producer Properties

The following properties are available for Kafka producers only and
must be prefixed with `spring.cloud.stream.kafka.bindings.<channelName>.producer.`.

bufferSize::
Upper limit, in bytes, of how much data the Kafka producer attempts to batch before sending.
+
Default: `16384`.
sync::
Whether the producer is synchronous.
+
Default: `false`.
batchTimeout::
How long the producer waits before sending, in order to allow more messages to accumulate in the same batch.
(Normally, the producer does not wait at all and simply sends all the messages that accumulated while the previous send was in progress.) A non-zero value may increase throughput at the expense of latency.
+
Default: `0`.
messageKeyExpression::
A SpEL expression evaluated against the outgoing message, used to populate the key of the produced Kafka message.
For example, `headers.key` or `payload.myKey`.
+
Default: `none`.
configuration::
Map of key/value pairs containing generic Kafka producer properties.
+
Default: Empty map.

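As an illustrative sketch (the binding name and values are hypothetical, and `payload.orderId` assumes the payload exposes an `orderId` property), a producer binding named `output` might combine these properties as follows:

[source]
----
spring.cloud.stream.bindings.output.destination=orders
spring.cloud.stream.kafka.bindings.output.producer.sync=true
spring.cloud.stream.kafka.bindings.output.producer.batchTimeout=100
spring.cloud.stream.kafka.bindings.output.producer.messageKeyExpression=payload.orderId
----
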
[NOTE]
====
The Kafka binder uses the `partitionCount` setting of the producer as a hint to create a topic with the given partition count (in conjunction with `minPartitionCount`, the maximum of the two being the value used).
Exercise caution when configuring both `minPartitionCount` for a binder and `partitionCount` for an application, as the larger value is used.
If a topic already exists with a smaller partition count and `autoAddPartitions` is disabled (the default), the binder fails to start.
If a topic already exists with a smaller partition count and `autoAddPartitions` is enabled, new partitions are added.
If a topic already exists with a larger number of partitions than the maximum of `minPartitionCount` and `partitionCount`, the existing partition count is used.
====

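For example, with the following settings (the values are illustrative), the topic would be provisioned with the larger of the two values, that is, eight partitions:

[source]
----
spring.cloud.stream.kafka.binder.minPartitionCount=4
spring.cloud.stream.bindings.output.producer.partitionCount=8
----
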
=== Usage examples

In this section, we illustrate the use of the above properties for specific scenarios.

==== Example: Setting `autoCommitOffset` to `false` and relying on manual acking

This example illustrates how one may manually acknowledge offsets in a consumer application.

This example requires that `spring.cloud.stream.kafka.bindings.input.consumer.autoCommitOffset` is set to `false`.
Substitute your application's input channel name for `input` in the property above.

[source,java]
----
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.messaging.Sink;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.Message;

@SpringBootApplication
@EnableBinding(Sink.class)
public class ManuallyAcknowledgingConsumer {

    public static void main(String[] args) {
        SpringApplication.run(ManuallyAcknowledgingConsumer.class, args);
    }

    @StreamListener(Sink.INPUT)
    public void process(Message<?> message) {
        // With autoCommitOffset=false, the binder adds this header to every inbound message
        Acknowledgment acknowledgment = message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        if (acknowledgment != null) {
            System.out.println("Acknowledgment provided");
            acknowledgment.acknowledge();
        }
    }
}
----

==== Example: security configuration

Apache Kafka 0.9 supports secure connections between client and brokers.
To take advantage of this feature, follow the guidelines in the http://kafka.apache.org/090/documentation.html#security_configclients[Apache Kafka Documentation] as well as the Kafka 0.9 http://docs.confluent.io/2.0.0/kafka/security.html[security guidelines from the Confluent documentation].
Use the `spring.cloud.stream.kafka.binder.configuration` option to set security properties for all clients created by the binder.

For example, to set `security.protocol` to `SASL_SSL`, set:

[source]
----
spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_SSL
----

All the other security properties can be set in a similar manner.

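For instance, SSL truststore settings (the path and password below are placeholders) are passed the same way:

[source]
----
spring.cloud.stream.kafka.binder.configuration.ssl.truststore.location=/path/to/client.truststore.jks
spring.cloud.stream.kafka.binder.configuration.ssl.truststore.password=<truststore-password>
----
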
When using Kerberos, follow the instructions in the http://kafka.apache.org/090/documentation.html#security_sasl_clientconfig[reference documentation] for creating and referencing the JAAS configuration.

Spring Cloud Stream supports passing JAAS configuration information to the application by using a JAAS configuration file or Spring Boot properties.

===== Using JAAS configuration files

The JAAS and (optionally) krb5 file locations can be set for Spring Cloud Stream applications by using system properties.
Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos by using a JAAS configuration file:

[source]
----
java -Djava.security.auth.login.config=/path.to/kafka_client_jaas.conf -jar log.jar \
   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT
----

===== Using Spring Boot properties

As an alternative to having a JAAS configuration file, Spring Cloud Stream provides a mechanism for setting up the JAAS configuration for Spring Cloud Stream applications by using Spring Boot properties.

The following properties can be used to configure the login context of the Kafka client.

spring.cloud.stream.kafka.binder.jaas.loginModule::
The login module name. It is not necessary to set this in normal cases.
+
Default: `com.sun.security.auth.module.Krb5LoginModule`.
spring.cloud.stream.kafka.binder.jaas.controlFlag::
The control flag of the login module.
+
Default: `required`.
spring.cloud.stream.kafka.binder.jaas.options::
Map of key/value pairs containing the login module options.
+
Default: Empty map.

Here is an example of launching a Spring Cloud Stream application with SASL and Kerberos by using Spring Boot configuration properties:

[source]
----
java -jar log.jar \
   --spring.cloud.stream.kafka.binder.brokers=secure.server:9092 \
   --spring.cloud.stream.kafka.binder.zkNodes=secure.zookeeper:2181 \
   --spring.cloud.stream.bindings.input.destination=stream.ticktock \
   --spring.cloud.stream.kafka.binder.autoCreateTopics=false \
   --spring.cloud.stream.kafka.binder.configuration.security.protocol=SASL_PLAINTEXT \
   --spring.cloud.stream.kafka.binder.jaas.options.useKeyTab=true \
   --spring.cloud.stream.kafka.binder.jaas.options.storeKey=true \
   --spring.cloud.stream.kafka.binder.jaas.options.keyTab=/etc/security/keytabs/kafka_client.keytab \
   --spring.cloud.stream.kafka.binder.jaas.options.principal=kafka-client-1@EXAMPLE.COM
----

This represents the equivalent of the following JAAS file:

[source]
----
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/etc/security/keytabs/kafka_client.keytab"
    principal="kafka-client-1@EXAMPLE.COM";
};
----

If the required topics already exist on the broker or will be created by an administrator, autocreation can be turned off and only client JAAS properties need to be sent. As an alternative to setting `spring.cloud.stream.kafka.binder.autoCreateTopics`, you can simply remove the broker dependency from the application. See <<exclude-admin-utils>> for details.

[NOTE]
====
Do not mix JAAS configuration files and Spring Boot properties in the same application.
If the `-Djava.security.auth.login.config` system property is already present, Spring Cloud Stream ignores the Spring Boot properties.
====

[NOTE]
====
Exercise caution when using `autoCreateTopics` and `autoAddPartitions` with Kerberos.
Applications usually use principals that do not have administrative rights in Kafka and Zookeeper, so relying on Spring Cloud Stream to create or modify topics may fail.
In secure environments, we strongly recommend creating topics and managing ACLs administratively by using Kafka tooling.
====

==== Using the binder with Apache Kafka 0.10

The default Kafka support in the Spring Cloud Stream Kafka binder is for Kafka version 0.10.1.1. The binder also supports connecting to other 0.10-based versions and 0.9 clients.
In order to do this, when you create the project that contains your application, include `spring-cloud-starter-stream-kafka` as you normally would for the default binder.
Then add these dependencies at the top of the `<dependencies>` section in the pom.xml file to override the Kafka dependencies.

Here is an example of downgrading your application to 0.10.0.1. Since it is still on the 0.10 line, the default `spring-kafka` and `spring-integration-kafka` versions can be retained.

[source,xml]
----
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.10.0.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.0.1</version>
</dependency>
----

Here is another example, using version 0.9.0.1:

[source,xml]
----
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.0.5.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.springframework.integration</groupId>
    <artifactId>spring-integration-kafka</artifactId>
    <version>2.0.1.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.9.0.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.9.0.1</version>
</dependency>
----

[NOTE]
====
The versions above are provided only for the sake of the example.
For best results, we recommend using the most recent 0.10-compatible versions of the projects.
====

[[exclude-admin-utils]]
==== Excluding the Kafka broker jar from the classpath of the binder-based application

The Apache Kafka binder uses the administrative utilities that are part of the Apache Kafka server library to create and reconfigure topics.
If the inclusion of the Apache Kafka server library and its dependencies is not necessary at runtime because the application relies on the topics being configured administratively, the Kafka binder allows the Apache Kafka server dependency to be excluded from the application.

If you use non-default versions of the Kafka dependencies, as advised above, all you have to do is omit the Kafka broker dependency.
If you use the default Kafka version, then ensure that you exclude the Kafka broker jar from the `spring-cloud-starter-stream-kafka` dependency, as follows:

[source,xml]
----
<dependency>
    <groupId>org.springframework.cloud</groupId>
    <artifactId>spring-cloud-starter-stream-kafka</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
        </exclusion>
    </exclusions>
</dependency>
----

If you exclude the Apache Kafka server dependency and the topic is not present on the server, the Apache Kafka broker creates the topic if auto topic creation is enabled on the server.
Keep in mind that if you rely on this, the Kafka server uses the default number of partitions and replication factor.
On the other hand, if auto topic creation is disabled on the server, then care must be taken before running the application to create the topic with the desired number of partitions.

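For example, an administrator could pre-create the topic with Kafka's own tooling (the topic name, partition count, and replication factor below are illustrative):

[source]
----
kafka-topics.sh --create --zookeeper secure.zookeeper:2181 \
  --topic stream.ticktock --partitions 8 --replication-factor 1
----
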
If you want to have full control over how partitions are allocated, then leave the default settings as they are, that is, do not exclude the Kafka broker jar and ensure that `spring.cloud.stream.kafka.binder.autoCreateTopics` is set to `true` (the default).

@@ -0,0 +1,35 @@
/*
code highlight CSS resembling the Eclipse IDE default color scheme
@author Costin Leau
*/

.hl-keyword {
    color: #7F0055;
    font-weight: bold;
}

.hl-comment {
    color: #3F5F5F;
    font-style: italic;
}

.hl-multiline-comment {
    color: #3F5FBF;
    font-style: italic;
}

.hl-tag {
    color: #3F7F7F;
}

.hl-attribute {
    color: #7F007F;
}

.hl-value {
    color: #2A00FF;
}

.hl-string {
    color: #2A00FF;
}

@@ -0,0 +1,9 @@
@IMPORT url("manual.css");

body.firstpage {
    background: url("../images/background.png") no-repeat center top;
}

div.part h1 {
    border-top: none;
}

@@ -0,0 +1,6 @@
@IMPORT url("manual.css");

body {
    background: url("../images/background.png") no-repeat center top;
}

@@ -0,0 +1,344 @@
@IMPORT url("highlight.css");

html {
    padding: 0pt;
    margin: 0pt;
}

body {
    color: #333333;
    margin: 15px 30px;
    font-family: Helvetica, Arial, Freesans, Clean, Sans-serif;
    line-height: 1.6;
    -webkit-font-smoothing: antialiased;
}

code {
    font-size: 16px;
    font-family: Consolas, "Liberation Mono", Courier, monospace;
}

:not(a)>code {
    color: #6D180B;
}

:not(pre)>code {
    background-color: #F2F2F2;
    border: 1px solid #CCCCCC;
    border-radius: 4px;
    padding: 1px 3px 0;
    text-shadow: none;
    white-space: nowrap;
}

body>*:first-child {
    margin-top: 0 !important;
}

div {
    margin: 0pt;
}

hr {
    border: 1px solid #CCCCCC;
    background: #CCCCCC;
}

h1,h2,h3,h4,h5,h6 {
    color: #000000;
    cursor: text;
    font-weight: bold;
    margin: 30px 0 10px;
    padding: 0;
}

h1,h2,h3 {
    margin: 40px 0 10px;
}

h1 {
    margin: 70px 0 30px;
    padding-top: 20px;
}

div.part h1 {
    border-top: 1px dotted #CCCCCC;
}

h1,h1 code {
    font-size: 32px;
}

h2,h2 code {
    font-size: 24px;
}

h3,h3 code {
    font-size: 20px;
}

h4,h4 code,h5,h5 code,h6,h6 code {
    font-size: 18px;
}

div.book,div.chapter,div.appendix,div.part,div.preface {
    min-width: 300px;
    max-width: 1200px;
    margin: 0 auto;
}

p.releaseinfo {
    font-weight: bold;
    margin-bottom: 40px;
    margin-top: 40px;
}

div.authorgroup {
    line-height: 1;
}

p.copyright {
    line-height: 1;
    margin-bottom: -5px;
}

.legalnotice p {
    font-style: italic;
    font-size: 14px;
    line-height: 1;
}

div.titlepage+p {
    margin-top: 0;
}

pre {
    line-height: 1.0;
    color: black;
}

a {
    color: #4183C4;
    text-decoration: none;
}

p {
    margin: 15px 0;
    text-align: left;
}

ul,ol {
    padding-left: 30px;
}

li p {
    margin: 0;
}

div.table {
    margin: 1em;
    padding: 0.5em;
    text-align: center;
}

div.table table,div.informaltable table {
    display: table;
    width: 100%;
}

div.table td {
    padding-left: 7px;
    padding-right: 7px;
}

.sidebar {
    line-height: 1.4;
    padding: 0 20px;
    background-color: #F8F8F8;
    border: 1px solid #CCCCCC;
    border-radius: 3px 3px 3px 3px;
}

.sidebar p.title {
    color: #6D180B;
}

pre.programlisting,pre.screen {
    font-size: 15px;
    padding: 6px 10px;
    background-color: #F8F8F8;
    border: 1px solid #CCCCCC;
    border-radius: 3px 3px 3px 3px;
    clear: both;
    overflow: auto;
    line-height: 1.4;
    font-family: Consolas, "Liberation Mono", Courier, monospace;
}

table {
    border-collapse: collapse;
    border-spacing: 0;
    border: 1px solid #DDDDDD !important;
    border-radius: 4px !important;
    border-collapse: separate !important;
    line-height: 1.6;
}

table thead {
    background: #F5F5F5;
}

table tr {
    border: none;
    border-bottom: none;
}

table th {
    font-weight: bold;
}

table th,table td {
    border: none !important;
    padding: 6px 13px;
}

table tr:nth-child(2n) {
    background-color: #F8F8F8;
}

td p {
    margin: 0 0 15px 0;
}

div.table-contents td p {
    margin: 0;
}

div.important *,div.note *,div.tip *,div.warning *,div.navheader *,div.navfooter *,div.calloutlist * {
    border: none !important;
    background: none !important;
    margin: 0;
}

div.important p,div.note p,div.tip p,div.warning p {
    color: #6F6F6F;
    line-height: 1.6;
}

div.important code,div.note code,div.tip code,div.warning code {
    background-color: #F2F2F2 !important;
    border: 1px solid #CCCCCC !important;
    border-radius: 4px !important;
    padding: 1px 3px 0 !important;
    text-shadow: none !important;
    white-space: nowrap !important;
}

.note th,.tip th,.warning th {
    display: none;
}

.note tr:first-child td,.tip tr:first-child td,.warning tr:first-child td {
    border-right: 1px solid #CCCCCC !important;
    padding-top: 10px;
}

div.calloutlist p,div.calloutlist td {
    padding: 0;
    margin: 0;
}

div.calloutlist>table>tbody>tr>td:first-child {
    padding-left: 10px;
    width: 30px !important;
}

div.important,div.note,div.tip,div.warning {
    margin-left: 0px !important;
    margin-right: 20px !important;
    margin-top: 20px;
    margin-bottom: 20px;
    padding-top: 10px;
    padding-bottom: 10px;
}

div.toc {
    line-height: 1.2;
}

dl,dt {
    margin-top: 1px;
    margin-bottom: 0;
}

div.toc>dl>dt {
    font-size: 32px;
    font-weight: bold;
    margin: 30px 0 10px 0;
    display: block;
}

div.toc>dl>dd>dl>dt {
    font-size: 24px;
    font-weight: bold;
    margin: 20px 0 10px 0;
    display: block;
}

div.toc>dl>dd>dl>dd>dl>dt {
    font-weight: bold;
    font-size: 20px;
    margin: 10px 0 0 0;
}

tbody.footnotes * {
    border: none !important;
}

div.footnote p {
    margin: 0;
    line-height: 1;
}

div.footnote p sup {
    margin-right: 6px;
    vertical-align: middle;
}

div.navheader {
    border-bottom: 1px solid #CCCCCC;
}

div.navfooter {
    border-top: 1px solid #CCCCCC;
}

.title {
    margin-left: -1em;
    padding-left: 1em;
}

.title>a {
    position: absolute;
    visibility: hidden;
    display: block;
    font-size: 0.85em;
    margin-top: 0.05em;
    margin-left: -1em;
    vertical-align: text-top;
    color: black;
}

.title>a:before {
    content: "\00A7";
}

.title:hover>a,.title>a:hover,.title:hover>a:hover {
    visibility: visible;
}

.title:focus>a,.title>a:focus,.title:focus>a:focus {
    outline: 0;
}

@@ -0,0 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>

<!--
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements. See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership. The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied. See the License for the
    specific language governing permissions and limitations
    under the License.
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:xslthl="http://xslthl.sf.net"
                xmlns:d="http://docbook.org/ns/docbook"
                exclude-result-prefixes="xslthl d"
                version='1.0'>

    <!-- Extensions -->
    <xsl:param name="use.extensions">1</xsl:param>
    <xsl:param name="tablecolumns.extension">0</xsl:param>
    <xsl:param name="callout.extensions">1</xsl:param>

    <!-- Graphics -->
    <xsl:param name="admon.graphics" select="1"/>
    <xsl:param name="admon.graphics.path">images/</xsl:param>
    <xsl:param name="admon.graphics.extension">.png</xsl:param>

    <!-- Table of Contents -->
    <xsl:param name="generate.toc">book toc,title</xsl:param>
    <xsl:param name="toc.section.depth">3</xsl:param>

    <!-- Hide revhistory -->
    <xsl:template match="d:revhistory" mode="titlepage.mode"/>

</xsl:stylesheet>

@@ -0,0 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>

<!--
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements. See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership. The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied. See the License for the
    specific language governing permissions and limitations
    under the License.
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:xslthl="http://xslthl.sf.net"
                xmlns:d="http://docbook.org/ns/docbook"
                exclude-result-prefixes="xslthl d"
                version='1.0'>

    <xsl:import href="urn:docbkx:stylesheet"/>
    <xsl:import href="common.xsl"/>

</xsl:stylesheet>

@@ -0,0 +1,73 @@
<?xml version="1.0" encoding="UTF-8"?>

<!--
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements. See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership. The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied. See the License for the
    specific language governing permissions and limitations
    under the License.
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                version='1.0'>

    <xsl:import href="urn:docbkx:stylesheet"/>
    <xsl:import href="html.xsl"/>

    <xsl:param name="html.stylesheet">css/manual-multipage.css</xsl:param>

    <xsl:param name="chunk.section.depth">'5'</xsl:param>
    <xsl:param name="use.id.as.filename">'1'</xsl:param>

    <!-- Replace chunk-element-content from chunk-common to add firstpage class to body -->
    <xsl:template name="chunk-element-content">
        <xsl:param name="prev"/>
        <xsl:param name="next"/>
        <xsl:param name="nav.context"/>
        <xsl:param name="content">
            <xsl:apply-imports/>
        </xsl:param>

        <xsl:call-template name="user.preroot"/>

        <html>
            <xsl:call-template name="html.head">
                <xsl:with-param name="prev" select="$prev"/>
                <xsl:with-param name="next" select="$next"/>
            </xsl:call-template>
            <body>
                <xsl:if test="count($prev) = 0">
                    <xsl:attribute name="class">firstpage</xsl:attribute>
                </xsl:if>
                <xsl:call-template name="body.attributes"/>
                <xsl:call-template name="user.header.navigation"/>
                <xsl:call-template name="header.navigation">
                    <xsl:with-param name="prev" select="$prev"/>
                    <xsl:with-param name="next" select="$next"/>
                    <xsl:with-param name="nav.context" select="$nav.context"/>
                </xsl:call-template>
                <xsl:call-template name="user.header.content"/>
                <xsl:copy-of select="$content"/>
                <xsl:call-template name="user.footer.content"/>
                <xsl:call-template name="footer.navigation">
                    <xsl:with-param name="prev" select="$prev"/>
                    <xsl:with-param name="next" select="$next"/>
                    <xsl:with-param name="nav.context" select="$nav.context"/>
                </xsl:call-template>
                <xsl:call-template name="user.footer.navigation"/>
            </body>
        </html>
        <xsl:value-of select="$chunk.append"/>
    </xsl:template>
</xsl:stylesheet>

@@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>

<!--
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements. See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership. The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied. See the License for the
    specific language governing permissions and limitations
    under the License.
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                version='1.0'>

    <xsl:import href="urn:docbkx:stylesheet"/>
    <xsl:import href="html.xsl"/>

    <xsl:param name="html.stylesheet">css/manual-singlepage.css</xsl:param>

</xsl:stylesheet>

@@ -0,0 +1,141 @@
<?xml version="1.0" encoding="UTF-8"?>

<!--
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements. See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership. The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied. See the License for the
    specific language governing permissions and limitations
    under the License.
-->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                xmlns:xslthl="http://xslthl.sf.net"
                xmlns:d="http://docbook.org/ns/docbook"
                exclude-result-prefixes="xslthl"
                version='1.0'>

    <xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
    <xsl:import href="common.xsl"/>

    <!-- Only use scaling in FO -->
    <xsl:param name="ignore.image.scaling">1</xsl:param>

    <!-- Use code syntax highlighting -->
    <xsl:param name="highlight.source">1</xsl:param>

    <!-- Activate Graphics -->
    <xsl:param name="callout.graphics" select="1" />
    <xsl:param name="callout.defaultcolumn">120</xsl:param>
    <xsl:param name="callout.graphics.path">images/callouts/</xsl:param>
    <xsl:param name="callout.graphics.extension">.png</xsl:param>

    <xsl:param name="table.borders.with.css" select="1"/>
    <xsl:param name="html.stylesheet.type">text/css</xsl:param>

    <xsl:param name="admonition.title.properties">text-align: left</xsl:param>

    <!-- Leave image paths as relative when navigating XInclude -->
    <xsl:param name="keep.relative.image.uris" select="1"/>

    <!-- Label Chapters and Sections (numbering) -->
    <xsl:param name="chapter.autolabel" select="1"/>
    <xsl:param name="section.autolabel" select="1"/>
    <xsl:param name="section.autolabel.max.depth" select="2"/>
    <xsl:param name="section.label.includes.component.label" select="1"/>
    <xsl:param name="table.footnote.number.format" select="'1'"/>

    <!-- Remove "Chapter" from the Chapter titles... -->
    <xsl:param name="local.l10n.xml" select="document('')"/>
    <l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
        <l:l10n language="en">
            <l:context name="title-numbered">
                <l:template name="chapter" text="%n. %t"/>
                <l:template name="section" text="%n %t"/>
            </l:context>
        </l:l10n>
    </l:i18n>

    <!-- Syntax Highlighting -->
    <xsl:template match='xslthl:keyword' mode="xslthl">
        <span class="hl-keyword"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <xsl:template match='xslthl:comment' mode="xslthl">
        <span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <xsl:template match='xslthl:oneline-comment' mode="xslthl">
        <span class="hl-comment"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <xsl:template match='xslthl:multiline-comment' mode="xslthl">
        <span class="hl-multiline-comment"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <xsl:template match='xslthl:tag' mode="xslthl">
        <span class="hl-tag"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <xsl:template match='xslthl:attribute' mode="xslthl">
        <span class="hl-attribute"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <xsl:template match='xslthl:value' mode="xslthl">
        <span class="hl-value"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <xsl:template match='xslthl:string' mode="xslthl">
        <span class="hl-string"><xsl:apply-templates mode="xslthl"/></span>
    </xsl:template>

    <!-- Custom Title Page -->
    <xsl:template match="d:author" mode="titlepage.mode">
        <xsl:if test="name(preceding-sibling::*[1]) = 'author'">
            <xsl:text>, </xsl:text>
        </xsl:if>
        <span class="{name(.)}">
            <xsl:call-template name="person.name"/>
            <xsl:apply-templates mode="titlepage.mode" select="./contrib"/>
        </span>
    </xsl:template>
    <xsl:template match="d:authorgroup" mode="titlepage.mode">
        <div class="{name(.)}">
            <h2>Authors</h2>
            <xsl:apply-templates mode="titlepage.mode"/>
        </div>
    </xsl:template>

    <!-- Title Links -->
    <xsl:template name="anchor">
        <xsl:param name="node" select="."/>
        <xsl:param name="conditional" select="1"/>
        <xsl:variable name="id">
            <xsl:call-template name="object.id">
                <xsl:with-param name="object" select="$node"/>
            </xsl:call-template>
        </xsl:variable>
        <xsl:if test="$conditional = 0 or $node/@id or $node/@xml:id">
            <xsl:element name="a">
                <xsl:attribute name="name">
                    <xsl:value-of select="$id"/>
                </xsl:attribute>
                <xsl:attribute name="href">
                    <xsl:text>#</xsl:text>
                    <xsl:value-of select="$id"/>
                </xsl:attribute>
            </xsl:element>
        </xsl:if>
    </xsl:template>

</xsl:stylesheet>

@@ -0,0 +1,582 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
xmlns:fo="http://www.w3.org/1999/XSL/Format"
|
||||
xmlns:xslthl="http://xslthl.sf.net"
|
||||
xmlns:xlink='http://www.w3.org/1999/xlink'
|
||||
xmlns:exsl="http://exslt.org/common"
|
||||
exclude-result-prefixes="exsl xslthl d xlink"
|
||||
version='1.0'>
|
||||
|
||||
<xsl:import href="urn:docbkx:stylesheet"/>
|
||||
<xsl:import href="urn:docbkx:stylesheet/highlight.xsl"/>
|
||||
<xsl:import href="common.xsl"/>
|
||||
|
||||
<!-- Extensions -->
|
||||
<xsl:param name="fop1.extensions" select="1"/>
|
||||
|
||||
<xsl:param name="paper.type" select="'A4'"/>
|
||||
<xsl:param name="page.margin.top" select="'1cm'"/>
|
||||
<xsl:param name="region.before.extent" select="'1cm'"/>
|
||||
<xsl:param name="body.margin.top" select="'1.5cm'"/>
|
||||
|
||||
<xsl:param name="body.margin.bottom" select="'1.5cm'"/>
|
||||
<xsl:param name="region.after.extent" select="'1cm'"/>
|
||||
<xsl:param name="page.margin.bottom" select="'1cm'"/>
|
||||
<xsl:param name="title.margin.left" select="'0cm'"/>
|
||||
|
||||
<!-- allow break across pages -->
|
||||
<xsl:attribute-set name="formal.object.properties">
|
||||
<xsl:attribute name="keep-together.within-column">auto</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- use color links and sensible rendering -->
|
||||
<xsl:attribute-set name="xref.properties">
|
||||
<xsl:attribute name="text-decoration">underline</xsl:attribute>
|
||||
<xsl:attribute name="color">#204060</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
<xsl:param name="ulink.show" select="0"></xsl:param>
|
||||
<xsl:param name="ulink.footnotes" select="0"></xsl:param>
|
||||
|
||||
<!-- TITLE PAGE -->
|
||||
|
||||
<xsl:template name="book.titlepage.recto">
|
||||
<fo:block>
|
||||
<fo:table table-layout="fixed" width="175mm">
|
||||
<fo:table-column column-width="175mm"/>
|
||||
<fo:table-body>
|
||||
<fo:table-row>
|
||||
<fo:table-cell text-align="center">
|
||||
<fo:block>
|
||||
<fo:external-graphic src="images/logo.png" width="240px"
|
||||
height="auto" content-width="scale-to-fit"
|
||||
content-height="scale-to-fit"
|
||||
content-type="content-type:image/png" text-align="center"
|
||||
/>
|
||||
</fo:block>
|
||||
<fo:block font-family="Helvetica" font-size="20pt" font-weight="bold" padding="10mm">
|
||||
<xsl:value-of select="d:info/d:title"/>
|
||||
</fo:block>
|
||||
<fo:block font-family="Helvetica" font-size="14pt" padding-before="2mm">
|
||||
<xsl:value-of select="d:info/d:subtitle"/>
|
||||
</fo:block>
|
||||
<fo:block font-family="Helvetica" font-size="14pt" padding="2mm">
|
||||
<xsl:value-of select="d:info/d:releaseinfo"/>
|
||||
</fo:block>
|
||||
</fo:table-cell>
|
||||
</fo:table-row>
|
||||
<fo:table-row>
|
||||
<fo:table-cell text-align="center">
|
||||
<fo:block font-family="Helvetica" font-size="14pt" padding="5mm">
|
||||
<xsl:value-of select="d:info/d:pubdate"/>
|
||||
</fo:block>
|
||||
</fo:table-cell>
|
||||
</fo:table-row>
|
||||
<fo:table-row>
|
||||
<fo:table-cell text-align="center">
|
||||
<fo:block font-family="Helvetica" font-size="10pt" padding="10mm">
|
||||
<xsl:for-each select="d:info/d:authorgroup/d:author">
|
||||
<xsl:if test="position() > 1">
|
||||
<xsl:text>, </xsl:text>
|
||||
</xsl:if>
|
||||
<xsl:value-of select="."/>
|
||||
</xsl:for-each>
|
||||
</fo:block>
|
||||
|
||||
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm">
|
||||
<xsl:value-of select="d:info/d:pubdate"/>
|
||||
</fo:block>
|
||||
|
||||
<fo:block font-family="Helvetica" font-size="10pt" padding="5mm" padding-before="25em">
|
||||
<xsl:text>Copyright © </xsl:text><xsl:value-of select="d:info/d:copyright"/>
|
||||
</fo:block>
|
||||
|
||||
<fo:block font-family="Helvetica" font-size="8pt" padding="1mm">
|
||||
<xsl:value-of select="d:info/d:legalnotice"/>
|
||||
</fo:block>
|
||||
</fo:table-cell>
|
||||
</fo:table-row>
|
||||
</fo:table-body>
|
||||
</fo:table>
|
||||
</fo:block>
|
||||
</xsl:template>
|
||||
|
||||
<!-- Prevent blank pages in output -->
|
||||
<xsl:template name="book.titlepage.before.verso">
|
||||
</xsl:template>
|
||||
<xsl:template name="book.titlepage.verso">
|
||||
</xsl:template>
|
||||
<xsl:template name="book.titlepage.separator">
|
||||
</xsl:template>
|
||||
|
||||
<!-- HEADER -->
|
||||
|
||||
<!-- More space in the center header for long text -->
|
||||
<xsl:attribute-set name="header.content.properties">
|
||||
<xsl:attribute name="font-family">
|
||||
<xsl:value-of select="$body.font.family"/>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="margin-left">-5em</xsl:attribute>
|
||||
<xsl:attribute name="margin-right">-5em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">8pt</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:template name="header.content">
|
||||
<xsl:param name="pageclass" select="''"/>
|
||||
<xsl:param name="sequence" select="''"/>
|
||||
<xsl:param name="position" select="''"/>
|
||||
<xsl:param name="gentext-key" select="''"/>
|
||||
|
||||
<xsl:variable name="Version">
|
||||
<xsl:choose>
|
||||
<xsl:when test="//d:title">
|
||||
<xsl:value-of select="//d:title"/><xsl:text> </xsl:text>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:text>please define title in your docbook file!</xsl:text>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:choose>
|
||||
<xsl:when test="$sequence='blank'">
|
||||
<xsl:choose>
|
||||
<xsl:when test="$position='center'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$pageclass='titlepage'">
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$position='center'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:template>
|
||||
|
||||
<!-- FOOTER-->
|
||||
<xsl:attribute-set name="footer.content.properties">
|
||||
<xsl:attribute name="font-family">
|
||||
<xsl:value-of select="$body.font.family"/>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="font-size">8pt</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:template name="footer.content">
|
||||
<xsl:param name="pageclass" select="''"/>
|
||||
<xsl:param name="sequence" select="''"/>
|
||||
<xsl:param name="position" select="''"/>
|
||||
<xsl:param name="gentext-key" select="''"/>
|
||||
|
||||
<xsl:variable name="Version">
|
||||
<xsl:choose>
|
||||
<xsl:when test="//d:releaseinfo">
|
||||
<xsl:value-of select="//d:releaseinfo"/>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:variable name="Title">
|
||||
<xsl:choose>
|
||||
<xsl:when test="//d:productname">
|
||||
<xsl:value-of select="//d:productname"/><xsl:text> </xsl:text>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:text>please define title in your docbook file!</xsl:text>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:choose>
|
||||
<xsl:when test="$sequence='blank'">
|
||||
<xsl:choose>
|
||||
<xsl:when test="$double.sided != 0 and $position = 'left'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided = 0 and $position = 'center'">
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
<fo:page-number/>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$pageclass='titlepage'">
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='left'">
|
||||
<fo:page-number/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='right'">
|
||||
<fo:page-number/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided = 0 and $position='right'">
|
||||
<fo:page-number/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'odd' and $position='left'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided != 0 and $sequence = 'even' and $position='right'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$double.sided = 0 and $position='left'">
|
||||
<xsl:value-of select="$Version"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:when test="$position='center'">
|
||||
<xsl:value-of select="$Title"/>
|
||||
</xsl:when>
|
||||
|
||||
<xsl:otherwise>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:template>
|
||||
|
||||
<xsl:template match="processing-instruction('hard-pagebreak')">
|
||||
<fo:block break-before='page'/>
|
||||
</xsl:template>
|
||||
|
||||
|
||||
<!-- PAPER & PAGE SIZE -->
|
||||
|
||||
<!-- Paper type, no headers on blank pages, no double sided printing -->
|
||||
<xsl:param name="double.sided">0</xsl:param>
|
||||
<xsl:param name="headers.on.blank.pages">0</xsl:param>
|
||||
<xsl:param name="footers.on.blank.pages">0</xsl:param>
|
||||
|
||||
<!-- FONTS & STYLES -->
|
||||
|
||||
<xsl:param name="hyphenate">false</xsl:param>
|
||||
|
||||
<!-- Default Font size -->
|
||||
<xsl:param name="body.font.family">Helvetica</xsl:param>
|
||||
<xsl:param name="body.font.master">10</xsl:param>
|
||||
<xsl:param name="body.font.small">8</xsl:param>
|
||||
<xsl:param name="title.font.family">Helvetica</xsl:param>
|
||||
|
||||
<!-- Line height in body text -->
|
||||
<xsl:param name="line-height">1.4</xsl:param>
|
||||
|
||||
<!-- Chapter title size -->
|
||||
<xsl:attribute-set name="chapter.titlepage.recto.style">
|
||||
<xsl:attribute name="text-align">left</xsl:attribute>
|
||||
<xsl:attribute name="font-weight">bold</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.8"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- Why is the font-size for chapters hardcoded in the XSL FO templates?
|
||||
Let's remove it, so this sucker can use our attribute-set only... -->
|
||||
<xsl:template match="d:title" mode="chapter.titlepage.recto.auto.mode">
|
||||
<fo:block xmlns:fo="http://www.w3.org/1999/XSL/Format"
|
||||
xsl:use-attribute-sets="chapter.titlepage.recto.style">
|
||||
<xsl:call-template name="component.title">
|
||||
<xsl:with-param name="node" select="ancestor-or-self::d:chapter[1]"/>
|
||||
</xsl:call-template>
|
||||
</fo:block>
|
||||
</xsl:template>
|
||||
|
||||
<!-- Sections 1, 2 and 3 titles have a small bump factor and padding -->
|
||||
<xsl:attribute-set name="section.title.level1.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.6em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.6em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.6em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.5"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="section.title.level2.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.25"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="section.title.level3.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.4em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 1.0"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="section.title.level4.properties">
|
||||
<xsl:attribute name="space-before.optimum">0.3em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.3em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.3em</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
<xsl:value-of select="$body.font.master * 0.9"/>
|
||||
<xsl:text>pt</xsl:text>
|
||||
</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
|
||||
<!-- TABLES -->
|
||||
|
||||
<!-- Some padding inside tables -->
|
||||
<xsl:attribute-set name="table.cell.padding">
|
||||
<xsl:attribute name="padding-left">4pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-right">4pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-top">4pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-bottom">4pt</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- Only hairlines as frame and cell borders in tables -->
|
||||
<xsl:param name="table.frame.border.thickness">0.1pt</xsl:param>
|
||||
<xsl:param name="table.cell.border.thickness">0.1pt</xsl:param>
|
||||
|
||||
<!-- LABELS -->
|
||||
|
||||
<!-- Label Chapters and Sections (numbering) -->
|
||||
<xsl:param name="chapter.autolabel" select="1"/>
|
||||
<xsl:param name="section.autolabel" select="1"/>
|
||||
<xsl:param name="section.autolabel.max.depth" select="1"/>
|
||||
|
||||
<xsl:param name="section.label.includes.component.label" select="1"/>
|
||||
<xsl:param name="table.footnote.number.format" select="'1'"/>
|
||||
|
||||
<!-- PROGRAMLISTINGS -->
|
||||
|
||||
<!-- Verbatim text formatting (programlistings) -->
|
||||
<xsl:attribute-set name="monospace.verbatim.properties">
|
||||
<xsl:attribute name="font-size">7pt</xsl:attribute>
|
||||
<xsl:attribute name="wrap-option">wrap</xsl:attribute>
|
||||
<xsl:attribute name="keep-together.within-column">1</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="verbatim.properties">
|
||||
<xsl:attribute name="space-before.minimum">1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.optimum">1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
|
||||
<xsl:attribute name="border-color">#444444</xsl:attribute>
|
||||
<xsl:attribute name="border-style">solid</xsl:attribute>
|
||||
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
|
||||
<xsl:attribute name="padding-top">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="padding-left">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="padding-right">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="padding-bottom">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="margin-left">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="margin-right">0.5em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<!-- Shade (background) programlistings -->
|
||||
<xsl:param name="shade.verbatim">1</xsl:param>
|
||||
<xsl:attribute-set name="shade.verbatim.style">
|
||||
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="list.block.spacing">
|
||||
<xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="example.properties">
|
||||
<xsl:attribute name="space-before.minimum">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.optimum">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-before.maximum">0.5em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.minimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.optimum">0.1em</xsl:attribute>
|
||||
<xsl:attribute name="space-after.maximum">0.1em</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
<xsl:attribute-set name="sidebar.properties">
|
||||
<xsl:attribute name="border-color">#444444</xsl:attribute>
|
||||
<xsl:attribute name="border-style">solid</xsl:attribute>
|
||||
<xsl:attribute name="border-width">0.1pt</xsl:attribute>
|
||||
<xsl:attribute name="background-color">#F0F0F0</xsl:attribute>
|
||||
</xsl:attribute-set>
|
||||
|
||||
|
||||
<!-- TITLE INFORMATION FOR FIGURES, EXAMPLES ETC. -->
|
||||
|
||||
<xsl:attribute-set name="formal.title.properties" use-attribute-sets="normal.para.spacing">
|
||||
<xsl:attribute name="font-weight">normal</xsl:attribute>
|
||||
<xsl:attribute name="font-style">italic</xsl:attribute>
|
||||
<xsl:attribute name="font-size">
|
||||
    <xsl:value-of select="$body.font.master"/>
    <xsl:text>pt</xsl:text>
  </xsl:attribute>
  <xsl:attribute name="hyphenate">false</xsl:attribute>
  <xsl:attribute name="space-before.minimum">0.1em</xsl:attribute>
  <xsl:attribute name="space-before.optimum">0.1em</xsl:attribute>
  <xsl:attribute name="space-before.maximum">0.1em</xsl:attribute>
</xsl:attribute-set>

<!-- CALLOUTS -->

<!-- don't use images for callouts -->
<xsl:param name="callout.graphics">0</xsl:param>
<xsl:param name="callout.unicode">1</xsl:param>

<!-- Place callout marks at this column in annotated areas -->
<xsl:param name="callout.defaultcolumn">90</xsl:param>

<!-- MISC -->

<!-- Placement of titles -->
<xsl:param name="formal.title.placement">
  figure after
  example after
  equation before
  table before
  procedure before
</xsl:param>

<!-- Format Variable Lists as Blocks (prevents horizontal overflow) -->
<xsl:param name="variablelist.as.blocks">1</xsl:param>
<xsl:param name="body.start.indent">0pt</xsl:param>

<!-- Remove "Chapter" from the Chapter titles... -->
<xsl:param name="local.l10n.xml" select="document('')"/>
<l:i18n xmlns:l="http://docbook.sourceforge.net/xmlns/l10n/1.0">
  <l:l10n language="en">
    <l:context name="title-numbered">
      <l:template name="chapter" text="%n. %t"/>
      <l:template name="section" text="%n %t"/>
    </l:context>
    <l:context name="title">
      <l:template name="example" text="Example %n %t"/>
    </l:context>
  </l:l10n>
</l:i18n>

<!-- admon -->
<xsl:param name="admon.graphics" select="0"/>

<xsl:attribute-set name="nongraphical.admonition.properties">
  <xsl:attribute name="margin-left">0.1em</xsl:attribute>
  <xsl:attribute name="margin-right">2em</xsl:attribute>
  <xsl:attribute name="border-left-width">.75pt</xsl:attribute>
  <xsl:attribute name="border-left-style">solid</xsl:attribute>
  <xsl:attribute name="border-left-color">#5c5c4f</xsl:attribute>
  <xsl:attribute name="padding-left">0.5em</xsl:attribute>
  <xsl:attribute name="space-before.optimum">1.5em</xsl:attribute>
  <xsl:attribute name="space-before.minimum">1.5em</xsl:attribute>
  <xsl:attribute name="space-before.maximum">1.5em</xsl:attribute>
  <xsl:attribute name="space-after.optimum">1.5em</xsl:attribute>
  <xsl:attribute name="space-after.minimum">1.5em</xsl:attribute>
  <xsl:attribute name="space-after.maximum">1.5em</xsl:attribute>
</xsl:attribute-set>

<xsl:attribute-set name="admonition.title.properties">
  <xsl:attribute name="font-size">10pt</xsl:attribute>
  <xsl:attribute name="font-weight">bold</xsl:attribute>
  <xsl:attribute name="hyphenate">false</xsl:attribute>
  <xsl:attribute name="keep-with-next.within-column">always</xsl:attribute>
  <xsl:attribute name="margin-left">0</xsl:attribute>
</xsl:attribute-set>

<xsl:attribute-set name="admonition.properties">
  <xsl:attribute name="space-before.optimum">0em</xsl:attribute>
  <xsl:attribute name="space-before.minimum">0em</xsl:attribute>
  <xsl:attribute name="space-before.maximum">0em</xsl:attribute>
</xsl:attribute-set>

<!-- Asciidoc -->
<xsl:template match="processing-instruction('asciidoc-br')">
  <fo:block/>
</xsl:template>

<xsl:template match="processing-instruction('asciidoc-hr')">
  <fo:block space-after="1em">
    <fo:leader leader-pattern="rule" rule-thickness="0.5pt" rule-style="solid" leader-length.minimum="100%"/>
  </fo:block>
</xsl:template>

<xsl:template match="processing-instruction('asciidoc-pagebreak')">
  <fo:block break-after='page'/>
</xsl:template>

<!-- SYNTAX HIGHLIGHT -->

<xsl:template match='xslthl:keyword' mode="xslthl">
  <fo:inline font-weight="bold" color="#7F0055"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>

<xsl:template match='xslthl:string' mode="xslthl">
  <fo:inline font-weight="bold" font-style="italic" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>

<xsl:template match='xslthl:comment' mode="xslthl">
  <fo:inline font-style="italic" color="#3F5FBF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>

<xsl:template match='xslthl:tag' mode="xslthl">
  <fo:inline font-weight="bold" color="#3F7F7F"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>

<xsl:template match='xslthl:attribute' mode="xslthl">
  <fo:inline font-weight="bold" color="#7F007F"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>

<xsl:template match='xslthl:value' mode="xslthl">
  <fo:inline font-weight="bold" color="#2A00FF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>

</xsl:stylesheet>
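
Editor's note: the xslthl templates above cover keyword, string, comment, tag, attribute, and value tokens. The highlighter definitions added below also emit other style names, such as doccomment and directive, which currently fall through with default formatting; a template in the same pattern would handle any of them. A minimal sketch (the element name follows xslthl's style-name convention and the color choice is illustrative, not part of this commit):

<xsl:template match='xslthl:doccomment' mode="xslthl">
  <fo:inline font-style="italic" color="#3F5FBF"><xsl:apply-templates mode="xslthl"/></fo:inline>
</xsl:template>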

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<xslthl-config>
  <highlighter id="java" file="./xslthl/java-hl.xml" />
  <highlighter id="groovy" file="./xslthl/java-hl.xml" />
  <highlighter id="html" file="./xslthl/html-hl.xml" />
  <highlighter id="ini" file="./xslthl/ini-hl.xml" />
  <highlighter id="php" file="./xslthl/php-hl.xml" />
  <highlighter id="c" file="./xslthl/c-hl.xml" />
  <highlighter id="cpp" file="./xslthl/cpp-hl.xml" />
  <highlighter id="csharp" file="./xslthl/csharp-hl.xml" />
  <highlighter id="python" file="./xslthl/python-hl.xml" />
  <highlighter id="ruby" file="./xslthl/ruby-hl.xml" />
  <highlighter id="perl" file="./xslthl/perl-hl.xml" />
  <highlighter id="javascript" file="./xslthl/javascript-hl.xml" />
  <highlighter id="bash" file="./xslthl/bourne-hl.xml" />
  <highlighter id="css" file="./xslthl/css-hl.xml" />
  <highlighter id="sql" file="./xslthl/sql2003-hl.xml" />
  <highlighter id="asciidoc" file="./xslthl/asciidoc-hl.xml" />
  <highlighter id="properties" file="./xslthl/properties-hl.xml" />
  <highlighter id="json" file="./xslthl/json-hl.xml" />
  <highlighter id="yaml" file="./xslthl/yaml-hl.xml" />
  <namespace prefix="xslthl" uri="http://xslthl.sf.net" />
</xslthl-config>
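
Editor's note: this config maps source-language ids to highlighter definition files; note that both java and groovy reuse java-hl.xml. The config only takes effect when DocBook XSL's highlighting is enabled and the XSLT processor can locate the file; a common wiring (assumed here, verify against your build) is the highlight.source param plus the xslthl.config JVM system property:

<!-- in the FO customization layer -->
<xsl:param name="highlight.source" select="1"/>
<!-- and on the JVM running the transform, e.g.:
     -Dxslthl.config=file:///path/to/xslthl-config.xml -->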

@@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for AsciiDoc files

-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>////</start>
    <end>////</end>
  </highlighter>
  <highlighter type="oneline-comment">
    <start>//</start>
    <solitary/>
  </highlighter>
  <highlighter type="regex">
    <pattern>^(={1,6} .+)$</pattern>
    <style>heading</style>
    <flags>MULTILINE</flags>
  </highlighter>
  <highlighter type="regex">
    <pattern>^(\.[^\.\s].+)$</pattern>
    <style>title</style>
    <flags>MULTILINE</flags>
  </highlighter>
  <highlighter type="regex">
    <pattern>^(:!?\w.*?:)</pattern>
    <style>attribute</style>
    <flags>MULTILINE</flags>
  </highlighter>
  <highlighter type="regex">
    <pattern>^(-|\*{1,5}|\d*\.{1,5})(?= .+$)</pattern>
    <style>bullet</style>
    <flags>MULTILINE</flags>
  </highlighter>
  <highlighter type="regex">
    <pattern>^(\[.+\])$</pattern>
    <style>attribute</style>
    <flags>MULTILINE</flags>
  </highlighter>
</highlighters>
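
Editor's note: every AsciiDoc rule above is a line-anchored regex with the MULTILINE flag, so each rule matches whole lines independently of the others. New constructs slot in the same way; for example, a hypothetical rule (not part of this commit) that marks block delimiter lines:

<highlighter type="regex">
  <pattern>^(-{4,}|={4,}|\*{4,})$</pattern>
  <style>keyword</style>
  <flags>MULTILINE</flags>
</highlighter>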

@@ -0,0 +1,95 @@
<?xml version="1.0" encoding="utf-8"?>
<!--

Syntax highlighting definition for SH

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2010 Mathieu Malaterre

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

-->
<highlighters>
  <highlighter type="oneline-comment">#</highlighter>
  <highlighter type="heredoc">
    <start>&lt;&lt;</start>
    <quote>'</quote>
    <quote>"</quote>
    <flag>-</flag>
    <noWhiteSpace />
    <looseTerminator />
  </highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
    <spanNewLines />
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <pointStarts />
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <!-- reserved words -->
    <keyword>if</keyword>
    <keyword>then</keyword>
    <keyword>else</keyword>
    <keyword>elif</keyword>
    <keyword>fi</keyword>
    <keyword>case</keyword>
    <keyword>esac</keyword>
    <keyword>for</keyword>
    <keyword>while</keyword>
    <keyword>until</keyword>
    <keyword>do</keyword>
    <keyword>done</keyword>
    <!-- built-ins -->
    <keyword>exec</keyword>
    <keyword>shift</keyword>
    <keyword>exit</keyword>
    <keyword>times</keyword>
    <keyword>break</keyword>
    <keyword>export</keyword>
    <keyword>trap</keyword>
    <keyword>continue</keyword>
    <keyword>readonly</keyword>
    <keyword>wait</keyword>
    <keyword>eval</keyword>
    <keyword>return</keyword>
    <!-- other commands -->
    <keyword>cd</keyword>
    <keyword>echo</keyword>
    <keyword>hash</keyword>
    <keyword>pwd</keyword>
    <keyword>read</keyword>
    <keyword>set</keyword>
    <keyword>test</keyword>
    <keyword>type</keyword>
    <keyword>ulimit</keyword>
    <keyword>umask</keyword>
    <keyword>unset</keyword>
  </highlighter>
</highlighters>
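
Editor's note: the heredoc highlighter above recognizes &lt;&lt; with optional quoting, the - flag for &lt;&lt;- indentation stripping, and a loosely matched terminator. The keyword list stops at classic Bourne built-ins; additional names could be appended in a block of the same shape (illustrative, not part of this commit):

<highlighter type="keywords">
  <keyword>printf</keyword>
  <keyword>source</keyword>
  <keyword>local</keyword>
</highlighter>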

@@ -0,0 +1,117 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Syntax highlighting definition for C

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>
-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>/**</start>
    <end>*/</end>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="oneline-comment">
    <start><![CDATA[/// ]]></start>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="multiline-comment">
    <start>/*</start>
    <end>*/</end>
  </highlighter>
  <highlighter type="oneline-comment">//</highlighter>
  <highlighter type="oneline-comment">
    <!-- use the oneline-comment highlighter to detect directives -->
    <start>#</start>
    <lineBreakEscape>\</lineBreakEscape>
    <style>directive</style>
    <solitary />
  </highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <suffix>ul</suffix>
    <suffix>lu</suffix>
    <suffix>u</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <pointStarts />
    <exponent>e</exponent>
    <suffix>ul</suffix>
    <suffix>lu</suffix>
    <suffix>u</suffix>
    <suffix>f</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <keyword>auto</keyword>
    <keyword>_Bool</keyword>
    <keyword>break</keyword>
    <keyword>case</keyword>
    <keyword>char</keyword>
    <keyword>_Complex</keyword>
    <keyword>const</keyword>
    <keyword>continue</keyword>
    <keyword>default</keyword>
    <keyword>do</keyword>
    <keyword>double</keyword>
    <keyword>else</keyword>
    <keyword>enum</keyword>
    <keyword>extern</keyword>
    <keyword>float</keyword>
    <keyword>for</keyword>
    <keyword>goto</keyword>
    <keyword>if</keyword>
    <keyword>_Imaginary</keyword>
    <keyword>inline</keyword>
    <keyword>int</keyword>
    <keyword>long</keyword>
    <keyword>register</keyword>
    <keyword>restrict</keyword>
    <keyword>return</keyword>
    <keyword>short</keyword>
    <keyword>signed</keyword>
    <keyword>sizeof</keyword>
    <keyword>static</keyword>
    <keyword>struct</keyword>
    <keyword>switch</keyword>
    <keyword>typedef</keyword>
    <keyword>union</keyword>
    <keyword>unsigned</keyword>
    <keyword>void</keyword>
    <keyword>volatile</keyword>
    <keyword>while</keyword>
  </highlighter>
</highlighters>

@@ -0,0 +1,151 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for C++

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>/**</start>
    <end>*/</end>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="oneline-comment">
    <start><![CDATA[/// ]]></start>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="multiline-comment">
    <start>/*</start>
    <end>*/</end>
  </highlighter>
  <highlighter type="oneline-comment">//</highlighter>
  <highlighter type="oneline-comment">
    <!-- use the oneline-comment highlighter to detect directives -->
    <start>#</start>
    <lineBreakEscape>\</lineBreakEscape>
    <style>directive</style>
    <solitary/>
  </highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <suffix>ul</suffix>
    <suffix>lu</suffix>
    <suffix>u</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <pointStarts />
    <exponent>e</exponent>
    <suffix>ul</suffix>
    <suffix>lu</suffix>
    <suffix>u</suffix>
    <suffix>f</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <!-- C keywords -->
    <keyword>auto</keyword>
    <keyword>_Bool</keyword>
    <keyword>break</keyword>
    <keyword>case</keyword>
    <keyword>char</keyword>
    <keyword>_Complex</keyword>
    <keyword>const</keyword>
    <keyword>continue</keyword>
    <keyword>default</keyword>
    <keyword>do</keyword>
    <keyword>double</keyword>
    <keyword>else</keyword>
    <keyword>enum</keyword>
    <keyword>extern</keyword>
    <keyword>float</keyword>
    <keyword>for</keyword>
    <keyword>goto</keyword>
    <keyword>if</keyword>
    <keyword>_Imaginary</keyword>
    <keyword>inline</keyword>
    <keyword>int</keyword>
    <keyword>long</keyword>
    <keyword>register</keyword>
    <keyword>restrict</keyword>
    <keyword>return</keyword>
    <keyword>short</keyword>
    <keyword>signed</keyword>
    <keyword>sizeof</keyword>
    <keyword>static</keyword>
    <keyword>struct</keyword>
    <keyword>switch</keyword>
    <keyword>typedef</keyword>
    <keyword>union</keyword>
    <keyword>unsigned</keyword>
    <keyword>void</keyword>
    <keyword>volatile</keyword>
    <keyword>while</keyword>
    <!-- C++ keywords -->
    <keyword>asm</keyword>
    <keyword>dynamic_cast</keyword>
    <keyword>namespace</keyword>
    <keyword>reinterpret_cast</keyword>
    <keyword>try</keyword>
    <keyword>bool</keyword>
    <keyword>explicit</keyword>
    <keyword>new</keyword>
    <keyword>static_cast</keyword>
    <keyword>typeid</keyword>
    <keyword>catch</keyword>
    <keyword>false</keyword>
    <keyword>operator</keyword>
    <keyword>template</keyword>
    <keyword>typename</keyword>
    <keyword>class</keyword>
    <keyword>friend</keyword>
    <keyword>private</keyword>
    <keyword>this</keyword>
    <keyword>using</keyword>
    <keyword>const_cast</keyword>
    <keyword>inline</keyword>
    <keyword>public</keyword>
    <keyword>throw</keyword>
    <keyword>virtual</keyword>
    <keyword>delete</keyword>
    <keyword>mutable</keyword>
    <keyword>protected</keyword>
    <keyword>true</keyword>
    <keyword>wchar_t</keyword>
  </highlighter>
</highlighters>

@@ -0,0 +1,194 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for C#

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>/**</start>
    <end>*/</end>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="oneline-comment">
    <start>///</start>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="multiline-comment">
    <start>/*</start>
    <end>*/</end>
  </highlighter>
  <highlighter type="oneline-comment">//</highlighter>
  <highlighter type="annotation">
    <!-- annotations are called (custom) "attributes" in .NET -->
    <start>[</start>
    <end>]</end>
    <valueStart>(</valueStart>
    <valueEnd>)</valueEnd>
  </highlighter>
  <highlighter type="oneline-comment">
    <!-- C# supports a couple of directives -->
    <start>#</start>
    <lineBreakEscape>\</lineBreakEscape>
    <style>directive</style>
    <solitary/>
  </highlighter>
  <highlighter type="string">
    <!-- strings starting with an "@" can span multiple lines -->
    <string>@"</string>
    <endString>"</endString>
    <escape>\</escape>
    <spanNewLines />
  </highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <suffix>ul</suffix>
    <suffix>lu</suffix>
    <suffix>u</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <pointStarts />
    <exponent>e</exponent>
    <suffix>ul</suffix>
    <suffix>lu</suffix>
    <suffix>u</suffix>
    <suffix>f</suffix>
    <suffix>d</suffix>
    <suffix>m</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <keyword>abstract</keyword>
    <keyword>as</keyword>
    <keyword>base</keyword>
    <keyword>bool</keyword>
    <keyword>break</keyword>
    <keyword>byte</keyword>
    <keyword>case</keyword>
    <keyword>catch</keyword>
    <keyword>char</keyword>
    <keyword>checked</keyword>
    <keyword>class</keyword>
    <keyword>const</keyword>
    <keyword>continue</keyword>
    <keyword>decimal</keyword>
    <keyword>default</keyword>
    <keyword>delegate</keyword>
    <keyword>do</keyword>
    <keyword>double</keyword>
    <keyword>else</keyword>
    <keyword>enum</keyword>
    <keyword>event</keyword>
    <keyword>explicit</keyword>
    <keyword>extern</keyword>
    <keyword>false</keyword>
    <keyword>finally</keyword>
    <keyword>fixed</keyword>
    <keyword>float</keyword>
    <keyword>for</keyword>
    <keyword>foreach</keyword>
    <keyword>goto</keyword>
    <keyword>if</keyword>
    <keyword>implicit</keyword>
    <keyword>in</keyword>
    <keyword>int</keyword>
    <keyword>interface</keyword>
    <keyword>internal</keyword>
    <keyword>is</keyword>
    <keyword>lock</keyword>
    <keyword>long</keyword>
    <keyword>namespace</keyword>
    <keyword>new</keyword>
    <keyword>null</keyword>
    <keyword>object</keyword>
    <keyword>operator</keyword>
    <keyword>out</keyword>
    <keyword>override</keyword>
    <keyword>params</keyword>
    <keyword>private</keyword>
    <keyword>protected</keyword>
    <keyword>public</keyword>
    <keyword>readonly</keyword>
    <keyword>ref</keyword>
    <keyword>return</keyword>
    <keyword>sbyte</keyword>
    <keyword>sealed</keyword>
    <keyword>short</keyword>
    <keyword>sizeof</keyword>
    <keyword>stackalloc</keyword>
    <keyword>static</keyword>
    <keyword>string</keyword>
    <keyword>struct</keyword>
    <keyword>switch</keyword>
    <keyword>this</keyword>
    <keyword>throw</keyword>
    <keyword>true</keyword>
    <keyword>try</keyword>
    <keyword>typeof</keyword>
    <keyword>uint</keyword>
    <keyword>ulong</keyword>
    <keyword>unchecked</keyword>
    <keyword>unsafe</keyword>
    <keyword>ushort</keyword>
    <keyword>using</keyword>
    <keyword>virtual</keyword>
    <keyword>void</keyword>
    <keyword>volatile</keyword>
    <keyword>while</keyword>
  </highlighter>
  <highlighter type="keywords">
    <!-- special words, not really keywords -->
    <keyword>add</keyword>
    <keyword>alias</keyword>
    <keyword>from</keyword>
    <keyword>get</keyword>
    <keyword>global</keyword>
    <keyword>group</keyword>
    <keyword>into</keyword>
    <keyword>join</keyword>
    <keyword>orderby</keyword>
    <keyword>partial</keyword>
    <keyword>remove</keyword>
    <keyword>select</keyword>
    <keyword>set</keyword>
    <keyword>value</keyword>
    <keyword>where</keyword>
    <keyword>yield</keyword>
  </highlighter>
</highlighters>

@@ -0,0 +1,176 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for CSS files

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2011-2012 Martin Hujer, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Martin Hujer <mhujer at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

Reference: http://www.w3.org/TR/CSS21/propidx.html

-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>/*</start>
    <end>*/</end>
  </highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
    <spanNewLines/>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
    <spanNewLines/>
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <pointStarts />
  </highlighter>
  <highlighter type="word">
    <word>@charset</word>
    <word>@import</word>
    <word>@media</word>
    <word>@page</word>
    <style>directive</style>
  </highlighter>
  <highlighter type="keywords">
    <partChars>-</partChars>
    <keyword>azimuth</keyword>
    <keyword>background-attachment</keyword>
    <keyword>background-color</keyword>
    <keyword>background-image</keyword>
    <keyword>background-position</keyword>
    <keyword>background-repeat</keyword>
    <keyword>background</keyword>
    <keyword>border-collapse</keyword>
    <keyword>border-color</keyword>
    <keyword>border-spacing</keyword>
    <keyword>border-style</keyword>
    <keyword>border-top</keyword>
    <keyword>border-right</keyword>
    <keyword>border-bottom</keyword>
    <keyword>border-left</keyword>
    <keyword>border-top-color</keyword>
    <keyword>border-right-color</keyword>
    <keyword>border-bottom-color</keyword>
    <keyword>border-left-color</keyword>
    <keyword>border-top-style</keyword>
    <keyword>border-right-style</keyword>
    <keyword>border-bottom-style</keyword>
    <keyword>border-left-style</keyword>
    <keyword>border-top-width</keyword>
    <keyword>border-right-width</keyword>
    <keyword>border-bottom-width</keyword>
    <keyword>border-left-width</keyword>
    <keyword>border-width</keyword>
    <keyword>border</keyword>
    <keyword>bottom</keyword>
    <keyword>caption-side</keyword>
    <keyword>clear</keyword>
    <keyword>clip</keyword>
    <keyword>color</keyword>
    <keyword>content</keyword>
    <keyword>counter-increment</keyword>
    <keyword>counter-reset</keyword>
    <keyword>cue-after</keyword>
    <keyword>cue-before</keyword>
    <keyword>cue</keyword>
    <keyword>cursor</keyword>
    <keyword>direction</keyword>
    <keyword>display</keyword>
    <keyword>elevation</keyword>
    <keyword>empty-cells</keyword>
    <keyword>float</keyword>
    <keyword>font-family</keyword>
    <keyword>font-size</keyword>
    <keyword>font-style</keyword>
    <keyword>font-variant</keyword>
    <keyword>font-weight</keyword>
    <keyword>font</keyword>
    <keyword>height</keyword>
    <keyword>left</keyword>
    <keyword>letter-spacing</keyword>
    <keyword>line-height</keyword>
    <keyword>list-style-image</keyword>
    <keyword>list-style-position</keyword>
    <keyword>list-style-type</keyword>
    <keyword>list-style</keyword>
    <keyword>margin-right</keyword>
    <keyword>margin-left</keyword>
    <keyword>margin-top</keyword>
    <keyword>margin-bottom</keyword>
    <keyword>margin</keyword>
    <keyword>max-height</keyword>
    <keyword>max-width</keyword>
    <keyword>min-height</keyword>
    <keyword>min-width</keyword>
    <keyword>orphans</keyword>
    <keyword>outline-color</keyword>
    <keyword>outline-style</keyword>
    <keyword>outline-width</keyword>
    <keyword>outline</keyword>
    <keyword>overflow</keyword>
    <keyword>padding-top</keyword>
    <keyword>padding-right</keyword>
    <keyword>padding-bottom</keyword>
    <keyword>padding-left</keyword>
    <keyword>padding</keyword>
    <keyword>page-break-after</keyword>
    <keyword>page-break-before</keyword>
    <keyword>page-break-inside</keyword>
    <keyword>pause-after</keyword>
    <keyword>pause-before</keyword>
    <keyword>pause</keyword>
    <keyword>pitch-range</keyword>
    <keyword>pitch</keyword>
    <keyword>play-during</keyword>
    <keyword>position</keyword>
    <keyword>quotes</keyword>
    <keyword>richness</keyword>
    <keyword>right</keyword>
    <keyword>speak-header</keyword>
    <keyword>speak-numeral</keyword>
    <keyword>speak-punctuation</keyword>
    <keyword>speak</keyword>
    <keyword>speech-rate</keyword>
    <keyword>stress</keyword>
    <keyword>table-layout</keyword>
    <keyword>text-align</keyword>
    <keyword>text-decoration</keyword>
    <keyword>text-indent</keyword>
    <keyword>text-transform</keyword>
    <keyword>top</keyword>
    <keyword>unicode-bidi</keyword>
    <keyword>vertical-align</keyword>
    <keyword>visibility</keyword>
    <keyword>voice-family</keyword>
    <keyword>volume</keyword>
    <keyword>white-space</keyword>
    <keyword>widows</keyword>
    <keyword>width</keyword>
    <keyword>word-spacing</keyword>
    <keyword>z-index</keyword>
  </highlighter>
</highlighters>
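
Editor's note: the partChars element makes - count as part of a keyword, so hyphenated property names like border-left-width match as single tokens instead of stopping at the dash. The list follows the CSS 2.1 property index referenced in the header; newer properties would need to be added explicitly, e.g. (illustrative, not part of this commit):

<highlighter type="keywords">
  <partChars>-</partChars>
  <keyword>border-radius</keyword>
  <keyword>box-shadow</keyword>
</highlighter>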

@@ -0,0 +1,122 @@
<?xml version='1.0'?>
<!--

Bachelor's thesis: Syntax highlighting in XSLT
Michal Molhanec 2005

myxml-hl.xml - configuration for an XML highlighter that highlights
HTML elements and XSL elements separately

This file has been customized for the Asciidoctor project (http://asciidoctor.org).
-->
<highlighters>
  <highlighter type="xml">
    <elementSet>
      <style>htmltag</style>
      <element>a</element>
      <element>abbr</element>
      <element>address</element>
      <element>area</element>
      <element>article</element>
      <element>aside</element>
      <element>audio</element>
      <element>b</element>
      <element>base</element>
      <element>bdi</element>
      <element>blockquote</element>
      <element>body</element>
      <element>br</element>
      <element>button</element>
      <element>caption</element>
      <element>canvas</element>
      <element>cite</element>
      <element>code</element>
      <element>command</element>
      <element>col</element>
      <element>colgroup</element>
      <element>dd</element>
      <element>del</element>
      <element>dialog</element>
      <element>div</element>
      <element>dl</element>
      <element>dt</element>
      <element>em</element>
      <element>embed</element>
      <element>fieldset</element>
      <element>figcaption</element>
      <element>figure</element>
      <element>font</element>
      <element>form</element>
      <element>footer</element>
      <element>h1</element>
      <element>h2</element>
      <element>h3</element>
      <element>h4</element>
      <element>h5</element>
      <element>h6</element>
      <element>head</element>
      <element>header</element>
      <element>hr</element>
      <element>html</element>
      <element>i</element>
      <element>iframe</element>
      <element>img</element>
      <element>input</element>
      <element>ins</element>
      <element>kbd</element>
      <element>label</element>
      <element>legend</element>
      <element>li</element>
      <element>link</element>
      <element>map</element>
      <element>mark</element>
      <element>menu</element>
      <element>menu</element>
      <element>meta</element>
      <element>nav</element>
      <element>noscript</element>
      <element>object</element>
      <element>ol</element>
      <element>optgroup</element>
      <element>option</element>
      <element>p</element>
      <element>param</element>
      <element>pre</element>
      <element>q</element>
      <element>samp</element>
      <element>script</element>
      <element>section</element>
      <element>select</element>
      <element>small</element>
      <element>source</element>
      <element>span</element>
      <element>strong</element>
      <element>style</element>
      <element>sub</element>
      <element>summary</element>
      <element>sup</element>
      <element>table</element>
      <element>tbody</element>
      <element>td</element>
      <element>textarea</element>
      <element>tfoot</element>
      <element>th</element>
      <element>thead</element>
      <element>time</element>
      <element>title</element>
      <element>tr</element>
      <element>track</element>
      <element>u</element>
      <element>ul</element>
      <element>var</element>
      <element>video</element>
      <element>wbr</element>
      <element>xmp</element>
      <ignoreCase/>
    </elementSet>
    <elementPrefix>
      <style>namespace</style>
      <prefix>xsl:</prefix>
    </elementPrefix>
  </highlighter>
</highlighters>
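
Editor's note: the elementSet styles known HTML tag names, while elementPrefix styles any element in the xsl: prefix as a namespace token, which is how XSLT embedded in listings gets two-tone markup. Another prefix could be distinguished the same way, e.g. for XSL-FO (illustrative, not part of this commit):

<elementPrefix>
  <style>namespace</style>
  <prefix>fo:</prefix>
</elementPrefix>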

@@ -0,0 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for ini files

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="oneline-comment">;</highlighter>
  <highlighter type="regex">
    <!-- ini sections -->
    <pattern>^(\[.+\]\s*)$</pattern>
    <style>keyword</style>
    <flags>MULTILINE</flags>
  </highlighter>
  <highlighter type="regex">
    <!-- the keys in an ini section -->
    <pattern>^(.+)(?==)</pattern>
    <style>attribute</style>
    <flags>MULTILINE</flags>
  </highlighter>
</highlighters>
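
Editor's note: the key rule uses a lookahead, ^(.+)(?==), so only the key is styled and the = separator keeps default formatting. The properties-hl.xml added later in this commit applies the same idea with a lazy match and an alternation, which would also suit ini dialects that accept : as a separator:

<highlighter type="regex">
  <pattern>^(.+?)(?==|:)</pattern>
  <style>attribute</style>
  <flags>MULTILINE</flags>
</highlighter>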

@@ -0,0 +1,117 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for Java

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>/**</start>
    <end>*/</end>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="multiline-comment">
    <start>/*</start>
    <end>*/</end>
  </highlighter>
  <highlighter type="oneline-comment">//</highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="annotation">
    <start>@</start>
    <valueStart>(</valueStart>
    <valueEnd>)</valueEnd>
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <exponent>e</exponent>
    <suffix>f</suffix>
    <suffix>d</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <keyword>abstract</keyword>
    <keyword>boolean</keyword>
    <keyword>break</keyword>
    <keyword>byte</keyword>
    <keyword>case</keyword>
    <keyword>catch</keyword>
    <keyword>char</keyword>
    <keyword>class</keyword>
    <keyword>const</keyword>
    <keyword>continue</keyword>
    <keyword>default</keyword>
    <keyword>do</keyword>
    <keyword>double</keyword>
    <keyword>else</keyword>
    <keyword>extends</keyword>
    <keyword>final</keyword>
    <keyword>finally</keyword>
    <keyword>float</keyword>
    <keyword>for</keyword>
    <keyword>goto</keyword>
    <keyword>if</keyword>
    <keyword>implements</keyword>
    <keyword>import</keyword>
    <keyword>instanceof</keyword>
    <keyword>int</keyword>
    <keyword>interface</keyword>
    <keyword>long</keyword>
    <keyword>native</keyword>
    <keyword>new</keyword>
    <keyword>package</keyword>
    <keyword>private</keyword>
    <keyword>protected</keyword>
    <keyword>public</keyword>
    <keyword>return</keyword>
    <keyword>short</keyword>
    <keyword>static</keyword>
    <keyword>strictfp</keyword>
    <keyword>super</keyword>
    <keyword>switch</keyword>
    <keyword>synchronized</keyword>
    <keyword>this</keyword>
    <keyword>throw</keyword>
    <keyword>throws</keyword>
    <keyword>transient</keyword>
    <keyword>try</keyword>
    <keyword>void</keyword>
    <keyword>volatile</keyword>
    <keyword>while</keyword>
  </highlighter>
</highlighters>

@@ -0,0 +1,147 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for JavaScript

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>/*</start>
    <end>*/</end>
  </highlighter>
  <highlighter type="oneline-comment">//</highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <exponent>e</exponent>
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <keyword>break</keyword>
    <keyword>case</keyword>
    <keyword>catch</keyword>
    <keyword>continue</keyword>
    <keyword>default</keyword>
    <keyword>delete</keyword>
    <keyword>do</keyword>
    <keyword>else</keyword>
    <keyword>finally</keyword>
    <keyword>for</keyword>
    <keyword>function</keyword>
    <keyword>if</keyword>
    <keyword>in</keyword>
    <keyword>instanceof</keyword>
    <keyword>new</keyword>
    <keyword>return</keyword>
    <keyword>switch</keyword>
    <keyword>this</keyword>
    <keyword>throw</keyword>
    <keyword>try</keyword>
    <keyword>typeof</keyword>
    <keyword>var</keyword>
    <keyword>void</keyword>
    <keyword>while</keyword>
    <keyword>with</keyword>
    <!-- future keywords -->
    <keyword>abstract</keyword>
    <keyword>boolean</keyword>
    <keyword>byte</keyword>
    <keyword>char</keyword>
    <keyword>class</keyword>
    <keyword>const</keyword>
    <keyword>debugger</keyword>
    <keyword>double</keyword>
    <keyword>enum</keyword>
    <keyword>export</keyword>
    <keyword>extends</keyword>
    <keyword>final</keyword>
    <keyword>float</keyword>
    <keyword>goto</keyword>
    <keyword>implements</keyword>
    <keyword>import</keyword>
    <keyword>int</keyword>
    <keyword>interface</keyword>
    <keyword>long</keyword>
    <keyword>native</keyword>
    <keyword>package</keyword>
    <keyword>private</keyword>
    <keyword>protected</keyword>
    <keyword>public</keyword>
    <keyword>short</keyword>
    <keyword>static</keyword>
    <keyword>super</keyword>
    <keyword>synchronized</keyword>
    <keyword>throws</keyword>
    <keyword>transient</keyword>
    <keyword>volatile</keyword>
  </highlighter>
  <highlighter type="keywords">
    <keyword>prototype</keyword>
    <!-- Global Objects -->
    <keyword>Array</keyword>
    <keyword>Boolean</keyword>
    <keyword>Date</keyword>
    <keyword>Error</keyword>
    <keyword>EvalError</keyword>
    <keyword>Function</keyword>
    <keyword>Math</keyword>
    <keyword>Number</keyword>
    <keyword>Object</keyword>
    <keyword>RangeError</keyword>
    <keyword>ReferenceError</keyword>
    <keyword>RegExp</keyword>
    <keyword>String</keyword>
    <keyword>SyntaxError</keyword>
    <keyword>TypeError</keyword>
    <keyword>URIError</keyword>
    <!-- Global functions -->
    <keyword>decodeURI</keyword>
    <keyword>decodeURIComponent</keyword>
    <keyword>encodeURI</keyword>
    <keyword>encodeURIComponent</keyword>
    <keyword>eval</keyword>
    <keyword>isFinite</keyword>
    <keyword>isNaN</keyword>
    <keyword>parseFloat</keyword>
    <keyword>parseInt</keyword>
    <!-- Global properties -->
    <keyword>Infinity</keyword>
    <keyword>NaN</keyword>
    <keyword>undefined</keyword>
  </highlighter>
</highlighters>

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<highlighters>
  <highlighter type="oneline-comment">#</highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="annotation">
    <start>@</start>
    <valueStart>(</valueStart>
    <valueEnd>)</valueEnd>
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <exponent>e</exponent>
    <suffix>f</suffix>
    <suffix>d</suffix>
    <suffix>l</suffix>
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <keyword>true</keyword>
    <keyword>false</keyword>
  </highlighter>
  <highlighter type="word">
    <word>{</word>
    <word>}</word>
    <word>,</word>
    <word>[</word>
    <word>]</word>
    <style>keyword</style>
  </highlighter>
</highlighters>
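
Editor's note: structural punctuation is handled here by a word highlighter that borrows the keyword style for braces, brackets, and commas. The name/value separator is not listed; it could be styled the same way (illustrative, not part of this commit):

<highlighter type="word">
  <word>:</word>
  <style>keyword</style>
</highlighter>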

@@ -0,0 +1,120 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for Perl

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="oneline-comment">#</highlighter>
  <highlighter type="heredoc">
    <start>&lt;&lt;</start>
    <quote>'</quote>
    <quote>"</quote>
    <noWhiteSpace/>
  </highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
    <spanNewLines/>
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <pointStarts />
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <keyword>if</keyword>
    <keyword>unless</keyword>
    <keyword>while</keyword>
    <keyword>until</keyword>
    <keyword>foreach</keyword>
    <keyword>else</keyword>
    <keyword>elsif</keyword>
    <keyword>for</keyword>
    <keyword>when</keyword>
    <keyword>default</keyword>
    <keyword>given</keyword>
    <!-- Keywords related to the control flow of your perl program -->
    <keyword>caller</keyword>
    <keyword>continue</keyword>
    <keyword>die</keyword>
    <keyword>do</keyword>
    <keyword>dump</keyword>
    <keyword>eval</keyword>
    <keyword>exit</keyword>
    <keyword>goto</keyword>
    <keyword>last</keyword>
    <keyword>next</keyword>
    <keyword>redo</keyword>
    <keyword>return</keyword>
    <keyword>sub</keyword>
    <keyword>wantarray</keyword>
    <!-- Keywords related to scoping -->
    <keyword>caller</keyword>
    <keyword>import</keyword>
    <keyword>local</keyword>
    <keyword>my</keyword>
    <keyword>package</keyword>
    <keyword>use</keyword>
    <!-- Keywords related to perl modules -->
    <keyword>do</keyword>
    <keyword>import</keyword>
    <keyword>no</keyword>
    <keyword>package</keyword>
    <keyword>require</keyword>
    <keyword>use</keyword>
    <!-- Keywords related to classes and object-orientedness -->
    <keyword>bless</keyword>
    <keyword>dbmclose</keyword>
    <keyword>dbmopen</keyword>
    <keyword>package</keyword>
    <keyword>ref</keyword>
    <keyword>tie</keyword>
    <keyword>tied</keyword>
    <keyword>untie</keyword>
    <keyword>use</keyword>
    <!-- operators -->
    <keyword>and</keyword>
    <keyword>or</keyword>
    <keyword>not</keyword>
    <keyword>eq</keyword>
    <keyword>ne</keyword>
    <keyword>lt</keyword>
    <keyword>gt</keyword>
    <keyword>le</keyword>
    <keyword>ge</keyword>
    <keyword>cmp</keyword>
  </highlighter>
</highlighters>

@@ -0,0 +1,154 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for PHP

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="multiline-comment">
    <start>/**</start>
    <end>*/</end>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="oneline-comment">
    <start><![CDATA[/// ]]></start>
    <style>doccomment</style>
  </highlighter>
  <highlighter type="multiline-comment">
    <start>/*</start>
    <end>*/</end>
  </highlighter>
  <highlighter type="oneline-comment">//</highlighter>
  <highlighter type="oneline-comment">#</highlighter>
  <highlighter type="string">
    <string>"</string>
    <escape>\</escape>
    <spanNewLines />
  </highlighter>
  <highlighter type="string">
    <string>'</string>
    <escape>\</escape>
    <spanNewLines />
  </highlighter>
  <highlighter type="heredoc">
    <start>&lt;&lt;&lt;</start>
  </highlighter>
  <highlighter type="hexnumber">
    <prefix>0x</prefix>
    <ignoreCase />
  </highlighter>
  <highlighter type="number">
    <point>.</point>
    <exponent>e</exponent>
    <ignoreCase />
  </highlighter>
  <highlighter type="keywords">
    <keyword>and</keyword>
    <keyword>or</keyword>
    <keyword>xor</keyword>
    <keyword>__FILE__</keyword>
    <keyword>exception</keyword>
    <keyword>__LINE__</keyword>
    <keyword>array</keyword>
    <keyword>as</keyword>
    <keyword>break</keyword>
    <keyword>case</keyword>
    <keyword>class</keyword>
    <keyword>const</keyword>
    <keyword>continue</keyword>
    <keyword>declare</keyword>
    <keyword>default</keyword>
    <keyword>die</keyword>
    <keyword>do</keyword>
    <keyword>echo</keyword>
    <keyword>else</keyword>
    <keyword>elseif</keyword>
    <keyword>empty</keyword>
    <keyword>enddeclare</keyword>
    <keyword>endfor</keyword>
    <keyword>endforeach</keyword>
    <keyword>endif</keyword>
    <keyword>endswitch</keyword>
    <keyword>endwhile</keyword>
    <keyword>eval</keyword>
    <keyword>exit</keyword>
    <keyword>extends</keyword>
    <keyword>for</keyword>
    <keyword>foreach</keyword>
    <keyword>function</keyword>
    <keyword>global</keyword>
    <keyword>if</keyword>
    <keyword>include</keyword>
    <keyword>include_once</keyword>
    <keyword>isset</keyword>
    <keyword>list</keyword>
    <keyword>new</keyword>
    <keyword>print</keyword>
    <keyword>require</keyword>
    <keyword>require_once</keyword>
    <keyword>return</keyword>
    <keyword>static</keyword>
    <keyword>switch</keyword>
    <keyword>unset</keyword>
    <keyword>use</keyword>
    <keyword>var</keyword>
    <keyword>while</keyword>
    <keyword>__FUNCTION__</keyword>
    <keyword>__CLASS__</keyword>
    <keyword>__METHOD__</keyword>
    <keyword>final</keyword>
    <keyword>php_user_filter</keyword>
    <keyword>interface</keyword>
    <keyword>implements</keyword>
    <keyword>extends</keyword>
    <keyword>public</keyword>
    <keyword>private</keyword>
    <keyword>protected</keyword>
    <keyword>abstract</keyword>
    <keyword>clone</keyword>
    <keyword>try</keyword>
    <keyword>catch</keyword>
    <keyword>throw</keyword>
    <keyword>cfunction</keyword>
    <keyword>old_function</keyword>
    <keyword>true</keyword>
    <keyword>false</keyword>
    <!-- PHP 5.3 -->
    <keyword>namespace</keyword>
    <keyword>__NAMESPACE__</keyword>
    <keyword>goto</keyword>
    <keyword>__DIR__</keyword>
    <ignoreCase />
  </highlighter>
  <highlighter type="word">
    <!-- highlight the php open and close tags as directives -->
    <word>?&gt;</word>
    <word>&lt;?php</word>
    <word>&lt;?=</word>
    <style>directive</style>
  </highlighter>
</highlighters>

@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for properties files

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
   claim that you wrote the original software. If you use this software
   in a product, an acknowledgment in the product documentation would be
   appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
   misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
  <highlighter type="oneline-comment">#</highlighter>
  <highlighter type="regex">
    <pattern>^(.+?)(?==|:)</pattern>
    <style>attribute</style>
    <flags>MULTILINE</flags>
  </highlighter>
</highlighters>
|
||||
@@ -0,0 +1,100 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for Python

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
<highlighter type="annotation">
<!-- these are actually called decorators -->
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"""</string>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>'''</string>
<spanNewLines />
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>and</keyword>
<keyword>del</keyword>
<keyword>from</keyword>
<keyword>not</keyword>
<keyword>while</keyword>
<keyword>as</keyword>
<keyword>elif</keyword>
<keyword>global</keyword>
<keyword>or</keyword>
<keyword>with</keyword>
<keyword>assert</keyword>
<keyword>else</keyword>
<keyword>if</keyword>
<keyword>pass</keyword>
<keyword>yield</keyword>
<keyword>break</keyword>
<keyword>except</keyword>
<keyword>import</keyword>
<keyword>print</keyword>
<keyword>class</keyword>
<keyword>exec</keyword>
<keyword>in</keyword>
<keyword>raise</keyword>
<keyword>continue</keyword>
<keyword>finally</keyword>
<keyword>is</keyword>
<keyword>return</keyword>
<keyword>def</keyword>
<keyword>for</keyword>
<keyword>lambda</keyword>
<keyword>try</keyword>
</highlighter>
</highlighters>
@@ -0,0 +1,109 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--

Syntax highlighting definition for Ruby

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2005-2008 Michal Molhanec, Jirka Kosek, Michiel Hendriks

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

Michal Molhanec <mol1111 at users.sourceforge.net>
Jirka Kosek <kosek at users.sourceforge.net>
Michiel Hendriks <elmuerte at users.sourceforge.net>

-->
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="heredoc">
<start>&lt;&lt;</start>
<noWhiteSpace/>
</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%Q{</string>
<endString>}</endString>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%/</string>
<endString>/</endString>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>%q{</string>
<endString>}</endString>
<escape>\</escape>
</highlighter>
<highlighter type="hexnumber">
<prefix>0x</prefix>
<ignoreCase />
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>alias</keyword>
<keyword>and</keyword>
<keyword>BEGIN</keyword>
<keyword>begin</keyword>
<keyword>break</keyword>
<keyword>case</keyword>
<keyword>class</keyword>
<keyword>def</keyword>
<keyword>defined</keyword>
<keyword>do</keyword>
<keyword>else</keyword>
<keyword>elsif</keyword>
<keyword>END</keyword>
<keyword>end</keyword>
<keyword>ensure</keyword>
<keyword>false</keyword>
<keyword>for</keyword>
<keyword>if</keyword>
<keyword>in</keyword>
<keyword>module</keyword>
<keyword>next</keyword>
<keyword>nil</keyword>
<keyword>not</keyword>
<keyword>or</keyword>
<keyword>redo</keyword>
<keyword>rescue</keyword>
<keyword>retry</keyword>
<keyword>return</keyword>
<keyword>self</keyword>
<keyword>super</keyword>
<keyword>then</keyword>
<keyword>true</keyword>
<keyword>undef</keyword>
<keyword>unless</keyword>
<keyword>until</keyword>
<keyword>when</keyword>
<keyword>while</keyword>
<keyword>yield</keyword>
</highlighter>
</highlighters>
@@ -0,0 +1,565 @@
<?xml version="1.0" encoding="utf-8"?>
<!--

Syntax highlighting definition for SQL:1999

xslthl - XSLT Syntax Highlighting
http://sourceforge.net/projects/xslthl/
Copyright (C) 2012 Michiel Hendriks, Martin Hujer, k42b3

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.

Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

-->
<highlighters>
<highlighter type="oneline-comment">--</highlighter>
<highlighter type="multiline-comment">
<start>/*</start>
<end>*/</end>
</highlighter>
<highlighter type="string">
<string>'</string>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>U'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>B'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>N'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="string">
<string>X'</string>
<endString>'</endString>
<doubleEscapes />
</highlighter>
<highlighter type="number">
<point>.</point>
<pointStarts />
<exponent>e</exponent>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<ignoreCase />
<!-- reserved -->
<keyword>A</keyword>
<keyword>ABS</keyword>
<keyword>ABSOLUTE</keyword>
<keyword>ACTION</keyword>
<keyword>ADA</keyword>
<keyword>ADMIN</keyword>
<keyword>AFTER</keyword>
<keyword>ALWAYS</keyword>
<keyword>ASC</keyword>
<keyword>ASSERTION</keyword>
<keyword>ASSIGNMENT</keyword>
<keyword>ATTRIBUTE</keyword>
<keyword>ATTRIBUTES</keyword>
<keyword>AVG</keyword>
<keyword>BEFORE</keyword>
<keyword>BERNOULLI</keyword>
<keyword>BREADTH</keyword>
<keyword>C</keyword>
<keyword>CARDINALITY</keyword>
<keyword>CASCADE</keyword>
<keyword>CATALOG_NAME</keyword>
<keyword>CATALOG</keyword>
<keyword>CEIL</keyword>
<keyword>CEILING</keyword>
<keyword>CHAIN</keyword>
<keyword>CHAR_LENGTH</keyword>
<keyword>CHARACTER_LENGTH</keyword>
<keyword>CHARACTER_SET_CATALOG</keyword>
<keyword>CHARACTER_SET_NAME</keyword>
<keyword>CHARACTER_SET_SCHEMA</keyword>
<keyword>CHARACTERISTICS</keyword>
<keyword>CHARACTERS</keyword>
<keyword>CHECKED</keyword>
<keyword>CLASS_ORIGIN</keyword>
<keyword>COALESCE</keyword>
<keyword>COBOL</keyword>
<keyword>CODE_UNITS</keyword>
<keyword>COLLATION_CATALOG</keyword>
<keyword>COLLATION_NAME</keyword>
<keyword>COLLATION_SCHEMA</keyword>
<keyword>COLLATION</keyword>
<keyword>COLLECT</keyword>
<keyword>COLUMN_NAME</keyword>
<keyword>COMMAND_FUNCTION_CODE</keyword>
<keyword>COMMAND_FUNCTION</keyword>
<keyword>COMMITTED</keyword>
<keyword>CONDITION_NUMBER</keyword>
<keyword>CONDITION</keyword>
<keyword>CONNECTION_NAME</keyword>
<keyword>CONSTRAINT_CATALOG</keyword>
<keyword>CONSTRAINT_NAME</keyword>
<keyword>CONSTRAINT_SCHEMA</keyword>
<keyword>CONSTRAINTS</keyword>
<keyword>CONSTRUCTORS</keyword>
<keyword>CONTAINS</keyword>
<keyword>CONVERT</keyword>
<keyword>CORR</keyword>
<keyword>COUNT</keyword>
<keyword>COVAR_POP</keyword>
<keyword>COVAR_SAMP</keyword>
<keyword>CUME_DIST</keyword>
<keyword>CURRENT_COLLATION</keyword>
<keyword>CURSOR_NAME</keyword>
<keyword>DATA</keyword>
<keyword>DATETIME_INTERVAL_CODE</keyword>
<keyword>DATETIME_INTERVAL_PRECISION</keyword>
<keyword>DEFAULTS</keyword>
<keyword>DEFERRABLE</keyword>
<keyword>DEFERRED</keyword>
<keyword>DEFINED</keyword>
<keyword>DEFINER</keyword>
<keyword>DEGREE</keyword>
<keyword>DENSE_RANK</keyword>
<keyword>DEPTH</keyword>
<keyword>DERIVED</keyword>
<keyword>DESC</keyword>
<keyword>DESCRIPTOR</keyword>
<keyword>DIAGNOSTICS</keyword>
<keyword>DISPATCH</keyword>
<keyword>DOMAIN</keyword>
<keyword>DYNAMIC_FUNCTION_CODE</keyword>
<keyword>DYNAMIC_FUNCTION</keyword>
<keyword>EQUALS</keyword>
<keyword>EVERY</keyword>
<keyword>EXCEPTION</keyword>
<keyword>EXCLUDE</keyword>
<keyword>EXCLUDING</keyword>
<keyword>EXP</keyword>
<keyword>EXTRACT</keyword>
<keyword>FINAL</keyword>
<keyword>FIRST</keyword>
<keyword>FLOOR</keyword>
<keyword>FOLLOWING</keyword>
<keyword>FORTRAN</keyword>
<keyword>FOUND</keyword>
<keyword>FUSION</keyword>
<keyword>G</keyword>
<keyword>GENERAL</keyword>
<keyword>GO</keyword>
<keyword>GOTO</keyword>
<keyword>GRANTED</keyword>
<keyword>HIERARCHY</keyword>
<keyword>IMPLEMENTATION</keyword>
<keyword>INCLUDING</keyword>
<keyword>INCREMENT</keyword>
<keyword>INITIALLY</keyword>
<keyword>INSTANCE</keyword>
<keyword>INSTANTIABLE</keyword>
<keyword>INTERSECTION</keyword>
<keyword>INVOKER</keyword>
<keyword>ISOLATION</keyword>
<keyword>K</keyword>
<keyword>KEY_MEMBER</keyword>
<keyword>KEY_TYPE</keyword>
<keyword>KEY</keyword>
<keyword>LAST</keyword>
<keyword>LENGTH</keyword>
<keyword>LEVEL</keyword>
<keyword>LN</keyword>
<keyword>LOCATOR</keyword>
<keyword>LOWER</keyword>
<keyword>M</keyword>
<keyword>MAP</keyword>
<keyword>MATCHED</keyword>
<keyword>MAX</keyword>
<keyword>MAXVALUE</keyword>
<keyword>MESSAGE_LENGTH</keyword>
<keyword>MESSAGE_OCTET_LENGTH</keyword>
<keyword>MESSAGE_TEXT</keyword>
<keyword>MIN</keyword>
<keyword>MINVALUE</keyword>
<keyword>MOD</keyword>
<keyword>MORE</keyword>
<keyword>MUMPS</keyword>
<keyword>NAME</keyword>
<keyword>NAMES</keyword>
<keyword>NESTING</keyword>
<keyword>NEXT</keyword>
<keyword>NORMALIZE</keyword>
<keyword>NORMALIZED</keyword>
<keyword>NULLABLE</keyword>
<keyword>NULLIF</keyword>
<keyword>NULLS</keyword>
<keyword>NUMBER</keyword>
<keyword>OBJECT</keyword>
<keyword>OCTET_LENGTH</keyword>
<keyword>OCTETS</keyword>
<keyword>OPTION</keyword>
<keyword>OPTIONS</keyword>
<keyword>ORDERING</keyword>
<keyword>ORDINALITY</keyword>
<keyword>OTHERS</keyword>
<keyword>OVERLAY</keyword>
<keyword>OVERRIDING</keyword>
<keyword>PAD</keyword>
<keyword>PARAMETER_MODE</keyword>
<keyword>PARAMETER_NAME</keyword>
<keyword>PARAMETER_ORDINAL_POSITION</keyword>
<keyword>PARAMETER_SPECIFIC_CATALOG</keyword>
<keyword>PARAMETER_SPECIFIC_NAME</keyword>
<keyword>PARAMETER_SPECIFIC_SCHEMA</keyword>
<keyword>PARTIAL</keyword>
<keyword>PASCAL</keyword>
<keyword>PATH</keyword>
<keyword>PERCENT_RANK</keyword>
<keyword>PERCENTILE_CONT</keyword>
<keyword>PERCENTILE_DISC</keyword>
<keyword>PLACING</keyword>
<keyword>PLI</keyword>
<keyword>POSITION</keyword>
<keyword>POWER</keyword>
<keyword>PRECEDING</keyword>
<keyword>PRESERVE</keyword>
<keyword>PRIOR</keyword>
<keyword>PRIVILEGES</keyword>
<keyword>PUBLIC</keyword>
<keyword>RANK</keyword>
<keyword>READ</keyword>
<keyword>RELATIVE</keyword>
<keyword>REPEATABLE</keyword>
<keyword>RESTART</keyword>
<keyword>RETURNED_CARDINALITY</keyword>
<keyword>RETURNED_LENGTH</keyword>
<keyword>RETURNED_OCTET_LENGTH</keyword>
<keyword>RETURNED_SQLSTATE</keyword>
<keyword>ROLE</keyword>
<keyword>ROUTINE_CATALOG</keyword>
<keyword>ROUTINE_NAME</keyword>
<keyword>ROUTINE_SCHEMA</keyword>
<keyword>ROUTINE</keyword>
<keyword>ROW_COUNT</keyword>
<keyword>ROW_NUMBER</keyword>
<keyword>SCALE</keyword>
<keyword>SCHEMA_NAME</keyword>
<keyword>SCHEMA</keyword>
<keyword>SCOPE_CATALOG</keyword>
<keyword>SCOPE_NAME</keyword>
<keyword>SCOPE_SCHEMA</keyword>
<keyword>SECTION</keyword>
<keyword>SECURITY</keyword>
<keyword>SELF</keyword>
<keyword>SEQUENCE</keyword>
<keyword>SERIALIZABLE</keyword>
<keyword>SERVER_NAME</keyword>
<keyword>SESSION</keyword>
<keyword>SETS</keyword>
<keyword>SIMPLE</keyword>
<keyword>SIZE</keyword>
<keyword>SOURCE</keyword>
<keyword>SPACE</keyword>
<keyword>SPECIFIC_NAME</keyword>
<keyword>SQRT</keyword>
<keyword>STATE</keyword>
<keyword>STATEMENT</keyword>
<keyword>STDDEV_POP</keyword>
<keyword>STDDEV_SAMP</keyword>
<keyword>STRUCTURE</keyword>
<keyword>STYLE</keyword>
<keyword>SUBCLASS_ORIGIN</keyword>
<keyword>SUBSTRING</keyword>
<keyword>SUM</keyword>
<keyword>TABLE_NAME</keyword>
<keyword>TABLESAMPLE</keyword>
<keyword>TEMPORARY</keyword>
<keyword>TIES</keyword>
<keyword>TOP_LEVEL_COUNT</keyword>
<keyword>TRANSACTION_ACTIVE</keyword>
<keyword>TRANSACTION</keyword>
<keyword>TRANSACTIONS_COMMITTED</keyword>
<keyword>TRANSACTIONS_ROLLED_BACK</keyword>
<keyword>TRANSFORM</keyword>
<keyword>TRANSFORMS</keyword>
<keyword>TRANSLATE</keyword>
<keyword>TRIGGER_CATALOG</keyword>
<keyword>TRIGGER_NAME</keyword>
<keyword>TRIGGER_SCHEMA</keyword>
<keyword>TRIM</keyword>
<keyword>TYPE</keyword>
<keyword>UNBOUNDED</keyword>
<keyword>UNCOMMITTED</keyword>
<keyword>UNDER</keyword>
<keyword>UNNAMED</keyword>
<keyword>USAGE</keyword>
<keyword>USER_DEFINED_TYPE_CATALOG</keyword>
<keyword>USER_DEFINED_TYPE_CODE</keyword>
<keyword>USER_DEFINED_TYPE_NAME</keyword>
<keyword>USER_DEFINED_TYPE_SCHEMA</keyword>
<keyword>VIEW</keyword>
<keyword>WORK</keyword>
<keyword>WRITE</keyword>
<keyword>ZONE</keyword>
<!-- non reserved -->
<keyword>ADD</keyword>
<keyword>ALL</keyword>
<keyword>ALLOCATE</keyword>
<keyword>ALTER</keyword>
<keyword>AND</keyword>
<keyword>ANY</keyword>
<keyword>ARE</keyword>
<keyword>ARRAY</keyword>
<keyword>AS</keyword>
<keyword>ASENSITIVE</keyword>
<keyword>ASYMMETRIC</keyword>
<keyword>AT</keyword>
<keyword>ATOMIC</keyword>
<keyword>AUTHORIZATION</keyword>
<keyword>BEGIN</keyword>
<keyword>BETWEEN</keyword>
<keyword>BIGINT</keyword>
<keyword>BINARY</keyword>
<keyword>BLOB</keyword>
<keyword>BOOLEAN</keyword>
<keyword>BOTH</keyword>
<keyword>BY</keyword>
<keyword>CALL</keyword>
<keyword>CALLED</keyword>
<keyword>CASCADED</keyword>
<keyword>CASE</keyword>
<keyword>CAST</keyword>
<keyword>CHAR</keyword>
<keyword>CHARACTER</keyword>
<keyword>CHECK</keyword>
<keyword>CLOB</keyword>
<keyword>CLOSE</keyword>
<keyword>COLLATE</keyword>
<keyword>COLUMN</keyword>
<keyword>COMMIT</keyword>
<keyword>CONNECT</keyword>
<keyword>CONSTRAINT</keyword>
<keyword>CONTINUE</keyword>
<keyword>CORRESPONDING</keyword>
<keyword>CREATE</keyword>
<keyword>CROSS</keyword>
<keyword>CUBE</keyword>
<keyword>CURRENT_DATE</keyword>
<keyword>CURRENT_DEFAULT_TRANSFORM_GROUP</keyword>
<keyword>CURRENT_PATH</keyword>
<keyword>CURRENT_ROLE</keyword>
<keyword>CURRENT_TIME</keyword>
<keyword>CURRENT_TIMESTAMP</keyword>
<keyword>CURRENT_TRANSFORM_GROUP_FOR_TYPE</keyword>
<keyword>CURRENT_USER</keyword>
<keyword>CURRENT</keyword>
<keyword>CURSOR</keyword>
<keyword>CYCLE</keyword>
<keyword>DATE</keyword>
<keyword>DAY</keyword>
<keyword>DEALLOCATE</keyword>
<keyword>DEC</keyword>
<keyword>DECIMAL</keyword>
<keyword>DECLARE</keyword>
<keyword>DEFAULT</keyword>
<keyword>DELETE</keyword>
<keyword>DEREF</keyword>
<keyword>DESCRIBE</keyword>
<keyword>DETERMINISTIC</keyword>
<keyword>DISCONNECT</keyword>
<keyword>DISTINCT</keyword>
<keyword>DOUBLE</keyword>
<keyword>DROP</keyword>
<keyword>DYNAMIC</keyword>
<keyword>EACH</keyword>
<keyword>ELEMENT</keyword>
<keyword>ELSE</keyword>
<keyword>END</keyword>
<keyword>END-EXEC</keyword>
<keyword>ESCAPE</keyword>
<keyword>EXCEPT</keyword>
<keyword>EXEC</keyword>
<keyword>EXECUTE</keyword>
<keyword>EXISTS</keyword>
<keyword>EXTERNAL</keyword>
<keyword>FALSE</keyword>
<keyword>FETCH</keyword>
<keyword>FILTER</keyword>
<keyword>FLOAT</keyword>
<keyword>FOR</keyword>
<keyword>FOREIGN</keyword>
<keyword>FREE</keyword>
<keyword>FROM</keyword>
<keyword>FULL</keyword>
<keyword>FUNCTION</keyword>
<keyword>GET</keyword>
<keyword>GLOBAL</keyword>
<keyword>GRANT</keyword>
<keyword>GROUP</keyword>
<keyword>GROUPING</keyword>
<keyword>HAVING</keyword>
<keyword>HOLD</keyword>
<keyword>HOUR</keyword>
<keyword>IDENTITY</keyword>
<keyword>IMMEDIATE</keyword>
<keyword>IN</keyword>
<keyword>INDICATOR</keyword>
<keyword>INNER</keyword>
<keyword>INOUT</keyword>
<keyword>INPUT</keyword>
<keyword>INSENSITIVE</keyword>
<keyword>INSERT</keyword>
<keyword>INT</keyword>
<keyword>INTEGER</keyword>
<keyword>INTERSECT</keyword>
<keyword>INTERVAL</keyword>
<keyword>INTO</keyword>
<keyword>IS</keyword>
<keyword>ISOLATION</keyword>
<keyword>JOIN</keyword>
<keyword>LANGUAGE</keyword>
<keyword>LARGE</keyword>
<keyword>LATERAL</keyword>
<keyword>LEADING</keyword>
<keyword>LEFT</keyword>
<keyword>LIKE</keyword>
<keyword>LOCAL</keyword>
<keyword>LOCALTIME</keyword>
<keyword>LOCALTIMESTAMP</keyword>
<keyword>MATCH</keyword>
<keyword>MEMBER</keyword>
<keyword>MERGE</keyword>
<keyword>METHOD</keyword>
<keyword>MINUTE</keyword>
<keyword>MODIFIES</keyword>
<keyword>MODULE</keyword>
<keyword>MONTH</keyword>
<keyword>MULTISET</keyword>
<keyword>NATIONAL</keyword>
<keyword>NATURAL</keyword>
<keyword>NCHAR</keyword>
<keyword>NCLOB</keyword>
<keyword>NEW</keyword>
<keyword>NO</keyword>
<keyword>NONE</keyword>
<keyword>NOT</keyword>
<keyword>NULL</keyword>
<keyword>NUMERIC</keyword>
<keyword>OF</keyword>
<keyword>OLD</keyword>
<keyword>ON</keyword>
<keyword>ONLY</keyword>
<keyword>OPEN</keyword>
<keyword>OR</keyword>
<keyword>ORDER</keyword>
<keyword>OUT</keyword>
<keyword>OUTER</keyword>
<keyword>OUTPUT</keyword>
<keyword>OVER</keyword>
<keyword>OVERLAPS</keyword>
<keyword>PARAMETER</keyword>
<keyword>PARTITION</keyword>
<keyword>PRECISION</keyword>
<keyword>PREPARE</keyword>
<keyword>PRIMARY</keyword>
<keyword>PROCEDURE</keyword>
<keyword>RANGE</keyword>
<keyword>READS</keyword>
<keyword>REAL</keyword>
<keyword>RECURSIVE</keyword>
<keyword>REF</keyword>
<keyword>REFERENCES</keyword>
<keyword>REFERENCING</keyword>
<keyword>REGR_AVGX</keyword>
<keyword>REGR_AVGY</keyword>
<keyword>REGR_COUNT</keyword>
<keyword>REGR_INTERCEPT</keyword>
<keyword>REGR_R2</keyword>
<keyword>REGR_SLOPE</keyword>
<keyword>REGR_SXX</keyword>
<keyword>REGR_SXY</keyword>
<keyword>REGR_SYY</keyword>
<keyword>RELEASE</keyword>
<keyword>RESULT</keyword>
<keyword>RETURN</keyword>
<keyword>RETURNS</keyword>
<keyword>REVOKE</keyword>
<keyword>RIGHT</keyword>
<keyword>ROLLBACK</keyword>
<keyword>ROLLUP</keyword>
<keyword>ROW</keyword>
<keyword>ROWS</keyword>
<keyword>SAVEPOINT</keyword>
<keyword>SCROLL</keyword>
<keyword>SEARCH</keyword>
<keyword>SECOND</keyword>
<keyword>SELECT</keyword>
<keyword>SENSITIVE</keyword>
<keyword>SESSION_USER</keyword>
<keyword>SET</keyword>
<keyword>SIMILAR</keyword>
<keyword>SMALLINT</keyword>
<keyword>SOME</keyword>
<keyword>SPECIFIC</keyword>
<keyword>SPECIFICTYPE</keyword>
<keyword>SQL</keyword>
<keyword>SQLEXCEPTION</keyword>
<keyword>SQLSTATE</keyword>
<keyword>SQLWARNING</keyword>
<keyword>START</keyword>
<keyword>STATIC</keyword>
<keyword>SUBMULTISET</keyword>
<keyword>SYMMETRIC</keyword>
<keyword>SYSTEM_USER</keyword>
<keyword>SYSTEM</keyword>
<keyword>TABLE</keyword>
<keyword>THEN</keyword>
<keyword>TIME</keyword>
<keyword>TIMESTAMP</keyword>
<keyword>TIMEZONE_HOUR</keyword>
<keyword>TIMEZONE_MINUTE</keyword>
<keyword>TO</keyword>
<keyword>TRAILING</keyword>
<keyword>TRANSLATION</keyword>
<keyword>TREAT</keyword>
<keyword>TRIGGER</keyword>
<keyword>TRUE</keyword>
<keyword>UESCAPE</keyword>
<keyword>UNION</keyword>
<keyword>UNIQUE</keyword>
<keyword>UNKNOWN</keyword>
<keyword>UNNEST</keyword>
<keyword>UPDATE</keyword>
<keyword>UPPER</keyword>
<keyword>USER</keyword>
<keyword>USING</keyword>
<keyword>VALUE</keyword>
<keyword>VALUES</keyword>
<keyword>VAR_POP</keyword>
<keyword>VAR_SAMP</keyword>
<keyword>VARCHAR</keyword>
<keyword>VARYING</keyword>
<keyword>WHEN</keyword>
<keyword>WHENEVER</keyword>
<keyword>WHERE</keyword>
<keyword>WIDTH_BUCKET</keyword>
<keyword>WINDOW</keyword>
<keyword>WITH</keyword>
<keyword>WITHIN</keyword>
<keyword>WITHOUT</keyword>
<keyword>YEAR</keyword>
</highlighter>
</highlighters>
@@ -0,0 +1,47 @@
<?xml version="1.0" encoding="UTF-8"?>
<highlighters>
<highlighter type="oneline-comment">#</highlighter>
<highlighter type="string">
<string>"</string>
<escape>\</escape>
</highlighter>
<highlighter type="string">
<string>'</string>
<escape>\</escape>
</highlighter>
<highlighter type="annotation">
<start>@</start>
<valueStart>(</valueStart>
<valueEnd>)</valueEnd>
</highlighter>
<highlighter type="number">
<point>.</point>
<exponent>e</exponent>
<suffix>f</suffix>
<suffix>d</suffix>
<suffix>l</suffix>
<ignoreCase />
</highlighter>
<highlighter type="keywords">
<keyword>true</keyword>
<keyword>false</keyword>
</highlighter>
<highlighter type="word">
<word>{</word>
<word>}</word>
<word>,</word>
<word>[</word>
<word>]</word>
<style>keyword</style>
</highlighter>
<highlighter type="regex">
<pattern>^(---)$</pattern>
<style>comment</style>
<flags>MULTILINE</flags>
</highlighter>
<highlighter type="regex">
<pattern>^(.+?)(?==|:)</pattern>
<style>attribute</style>
<flags>MULTILINE</flags>
</highlighter>
</highlighters>
@@ -0,0 +1,599 @@
/* Javadoc style sheet */
/*
Overall document style
*/

@import url('resources/fonts/dejavu.css');

body {
background-color:#ffffff;
color:#353833;
font-family:'DejaVu Sans', Arial, Helvetica, sans-serif;
font-size:14px;
margin:0;
}
a:link, a:visited {
text-decoration:none;
color:#4A6782;
}
a:hover, a:focus {
text-decoration:none;
color:#bb7a2a;
}
a:active {
text-decoration:none;
color:#4A6782;
}
a[name] {
color:#353833;
}
a[name]:hover {
text-decoration:none;
color:#353833;
}
pre {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
}
h1 {
font-size:20px;
}
h2 {
font-size:18px;
}
h3 {
font-size:16px;
font-style:italic;
}
h4 {
font-size:13px;
}
h5 {
font-size:12px;
}
h6 {
font-size:11px;
}
ul {
list-style-type:disc;
}
code, tt {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
padding-top:4px;
margin-top:8px;
line-height:1.4em;
}
dt code {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
padding-top:4px;
}
table tr td dt code {
font-family:'DejaVu Sans Mono', monospace;
font-size:14px;
vertical-align:top;
padding-top:4px;
}
sup {
font-size:8px;
}
/*
Document title and Copyright styles
*/
.clear {
clear:both;
height:0px;
overflow:hidden;
}
.aboutLanguage {
float:right;
padding:0px 21px;
font-size:11px;
z-index:200;
margin-top:-9px;
}
.legalCopy {
margin-left:.5em;
}
.bar a, .bar a:link, .bar a:visited, .bar a:active {
color:#FFFFFF;
text-decoration:none;
}
.bar a:hover, .bar a:focus {
color:#bb7a2a;
}
.tab {
background-color:#0066FF;
color:#ffffff;
padding:8px;
width:5em;
font-weight:bold;
}
/*
Navigation bar styles
*/
.bar {
background-color:#4D7A97;
color:#FFFFFF;
padding:.8em .5em .4em .8em;
height:auto;/*height:1.8em;*/
font-size:11px;
margin:0;
}
.topNav {
background-color:#4D7A97;
color:#FFFFFF;
float:left;
padding:0;
width:100%;
clear:right;
height:2.8em;
padding-top:10px;
overflow:hidden;
font-size:12px;
}
.bottomNav {
margin-top:10px;
background-color:#4D7A97;
color:#FFFFFF;
float:left;
padding:0;
width:100%;
clear:right;
height:2.8em;
padding-top:10px;
overflow:hidden;
font-size:12px;
}
.subNav {
background-color:#dee3e9;
float:left;
width:100%;
overflow:hidden;
font-size:12px;
}
.subNav div {
clear:left;
float:left;
padding:0 0 5px 6px;
text-transform:uppercase;
}
ul.navList, ul.subNavList {
float:left;
margin:0 25px 0 0;
padding:0;
}
ul.navList li{
list-style:none;
float:left;
padding: 5px 6px;
text-transform:uppercase;
}
ul.subNavList li{
list-style:none;
float:left;
}
.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited {
color:#FFFFFF;
text-decoration:none;
text-transform:uppercase;
}
.topNav a:hover, .bottomNav a:hover {
text-decoration:none;
color:#bb7a2a;
text-transform:uppercase;
}
.navBarCell1Rev {
background-color:#F8981D;
color:#253441;
margin: auto 5px;
}
.skipNav {
position:absolute;
top:auto;
left:-9999px;
overflow:hidden;
}
/*
Page header and footer styles
*/
.header, .footer {
clear:both;
margin:0 20px;
padding:5px 0 0 0;
}
.indexHeader {
margin:10px;
position:relative;
}
.indexHeader span{
margin-right:15px;
}
.indexHeader h1 {
font-size:13px;
}
.title {
color:#2c4557;
margin:10px 0;
}
.subTitle {
margin:5px 0 0 0;
}
.header ul {
margin:0 0 15px 0;
padding:0;
}
.footer ul {
margin:20px 0 5px 0;
}
.header ul li, .footer ul li {
list-style:none;
font-size:13px;
}
/*
Heading styles
*/
div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 {
background-color:#dee3e9;
border:1px solid #d0d9e0;
margin:0 0 6px -8px;
padding:7px 5px;
}
ul.blockList ul.blockList ul.blockList li.blockList h3 {
background-color:#dee3e9;
border:1px solid #d0d9e0;
margin:0 0 6px -8px;
padding:7px 5px;
}
ul.blockList ul.blockList li.blockList h3 {
padding:0;
margin:15px 0;
}
ul.blockList li.blockList h2 {
padding:0px 0 20px 0;
}
/*
Page layout container styles
*/
.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer {
clear:both;
padding:10px 20px;
position:relative;
}
.indexContainer {
margin:10px;
position:relative;
font-size:12px;
}
.indexContainer h2 {
font-size:13px;
padding:0 0 3px 0;
}
.indexContainer ul {
margin:0;
padding:0;
}
.indexContainer ul li {
list-style:none;
padding-top:2px;
}
.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt {
font-size:12px;
font-weight:bold;
margin:10px 0 0 0;
color:#4E4E4E;
}
.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd {
margin:5px 0 10px 0px;
font-size:14px;
font-family:'DejaVu Sans Mono',monospace;
}
.serializedFormContainer dl.nameValue dt {
margin-left:1px;
font-size:1.1em;
display:inline;
font-weight:bold;
}
.serializedFormContainer dl.nameValue dd {
margin:0 0 0 1px;
font-size:1.1em;
display:inline;
}
/*
List styles
*/
ul.horizontal li {
display:inline;
font-size:0.9em;
}
ul.inheritance {
margin:0;
padding:0;
}
ul.inheritance li {
display:inline;
list-style:none;
}
ul.inheritance li ul.inheritance {
margin-left:15px;
padding-left:15px;
padding-top:1px;
}
ul.blockList, ul.blockListLast {
margin:10px 0 10px 0;
padding:0;
}
ul.blockList li.blockList, ul.blockListLast li.blockList {
list-style:none;
margin-bottom:15px;
line-height:1.4;
}
ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast li.blockList {
padding:0px 20px 5px 10px;
border:1px solid #ededed;
background-color:#f8f8f8;
}
ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList {
padding:0 0 5px 8px;
background-color:#ffffff;
border:none;
}
ul.blockList ul.blockList ul.blockList ul.blockList li.blockList {
margin-left:0;
padding-left:0;
padding-bottom:15px;
border:none;
}
ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast {
list-style:none;
border-bottom:none;
padding-bottom:0;
}
table tr td dl, table tr td dl dt, table tr td dl dd {
margin-top:0;
margin-bottom:1px;
}
/*
Table styles
*/
.overviewSummary, .memberSummary, .typeSummary, .useSummary, .constantsSummary, .deprecatedSummary {
width:100%;
border-left:1px solid #EEE;
border-right:1px solid #EEE;
border-bottom:1px solid #EEE;
}
.overviewSummary, .memberSummary {
padding:0px;
}
.overviewSummary caption, .memberSummary caption, .typeSummary caption,
.useSummary caption, .constantsSummary caption, .deprecatedSummary caption {
position:relative;
text-align:left;
background-repeat:no-repeat;
color:#253441;
font-weight:bold;
clear:none;
overflow:hidden;
padding:0px;
padding-top:10px;
padding-left:1px;
margin:0px;
white-space:pre;
}
.overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link,
.useSummary caption a:link, .constantsSummary caption a:link, .deprecatedSummary caption a:link,
.overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover,
.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover,
.overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active,
.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active,
.overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited,
.useSummary caption a:visited, .constantsSummary caption a:visited, .deprecatedSummary caption a:visited {
color:#FFFFFF;
}
.overviewSummary caption span, .memberSummary caption span, .typeSummary caption span,
.useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
padding-bottom:7px;
display:inline-block;
float:left;
background-color:#F8981D;
border: none;
height:16px;
}
.memberSummary caption span.activeTableTab span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
margin-right:3px;
display:inline-block;
float:left;
background-color:#F8981D;
height:16px;
}
.memberSummary caption span.tableTab span {
white-space:nowrap;
padding-top:5px;
padding-left:12px;
padding-right:12px;
margin-right:3px;
display:inline-block;
float:left;
background-color:#4D7A97;
height:16px;
}
.memberSummary caption span.tableTab, .memberSummary caption span.activeTableTab {
padding-top:0px;
padding-left:0px;
padding-right:0px;
background-image:none;
float:none;
display:inline;
}
.overviewSummary .tabEnd, .memberSummary .tabEnd, .typeSummary .tabEnd,
.useSummary .tabEnd, .constantsSummary .tabEnd, .deprecatedSummary .tabEnd {
display:none;
width:5px;
position:relative;
float:left;
background-color:#F8981D;
}
.memberSummary .activeTableTab .tabEnd {
display:none;
width:5px;
margin-right:3px;
position:relative;
float:left;
background-color:#F8981D;
}
.memberSummary .tableTab .tabEnd {
display:none;
width:5px;
margin-right:3px;
position:relative;
background-color:#4D7A97;
float:left;

}
.overviewSummary td, .memberSummary td, .typeSummary td,
.useSummary td, .constantsSummary td, .deprecatedSummary td {
text-align:left;
padding:0px 0px 12px 10px;
width:100%;
}
th.colOne, th.colFirst, th.colLast, .useSummary th, .constantsSummary th,
td.colOne, td.colFirst, td.colLast, .useSummary td, .constantsSummary td{
vertical-align:top;
padding-right:0px;
padding-top:8px;
padding-bottom:3px;
}
th.colFirst, th.colLast, th.colOne, .constantsSummary th {
background:#dee3e9;
text-align:left;
padding:8px 3px 3px 7px;
}
td.colFirst, th.colFirst {
white-space:nowrap;
font-size:13px;
}
td.colLast, th.colLast {
font-size:13px;
}
td.colOne, th.colOne {
font-size:13px;
}
.overviewSummary td.colFirst, .overviewSummary th.colFirst,
.overviewSummary td.colOne, .overviewSummary th.colOne,
.memberSummary td.colFirst, .memberSummary th.colFirst,
.memberSummary td.colOne, .memberSummary th.colOne,
.typeSummary td.colFirst{
width:25%;
vertical-align:top;
}
td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover {
font-weight:bold;
}
.tableSubHeadingColor {
background-color:#EEEEFF;
}
.altColor {
background-color:#FFFFFF;
}
.rowColor {
background-color:#EEEEEF;
}
/*
Content styles
*/
.description pre {
margin-top:0;
}
.deprecatedContent {
margin:0;
padding:10px 0;
}
.docSummary {
padding:0;
}

ul.blockList ul.blockList ul.blockList li.blockList h3 {
font-style:normal;
}

div.block {
font-size:14px;
font-family:'DejaVu Serif', Georgia, "Times New Roman", Times, serif;
}

td.colLast div {
padding-top:0px;
}


td.colLast a {
padding-bottom:3px;
}
/*
Formatting effect styles
*/
.sourceLineNo {
color:green;
padding:0 30px 0 0;
}
h1.hidden {
visibility:hidden;
overflow:hidden;
font-size:10px;
}
.block {
display:block;
margin:3px 10px 2px 0px;
color:#474747;
}
.deprecatedLabel, .descfrmTypeLabel, .memberNameLabel, .memberNameLink,
.overrideSpecifyLabel, .packageHierarchyLabel, .paramLabel, .returnLabel,
.seeLabel, .simpleTagLabel, .throwsLabel, .typeNameLabel, .typeNameLink {
font-weight:bold;
}
.deprecationComment, .emphasizedPhrase, .interfaceName {
font-style:italic;
}

div.block div.block span.deprecationComment, div.block div.block span.emphasizedPhrase,
div.block div.block span.interfaceName {
font-style:normal;
}

div.contentContainer ul.blockList li.blockList h2{
padding-bottom:0px;
}



/*
Spring
*/

pre.code {
background-color: #F8F8F8;
border: 1px solid #CCCCCC;
border-radius: 3px 3px 3px 3px;
overflow: auto;
padding: 10px;
margin: 4px 20px 2px 0px;
}

pre.code code, pre.code code * {
font-size: 1em;
}

pre.code code, pre.code code * {
padding: 0 !important;
margin: 0 !important;
}

@@ -0,0 +1,28 @@
<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:mvn="http://maven.apache.org/POM/4.0.0"
version="1.0">

<xsl:output method="text" encoding="UTF-8" indent="no"/>

<xsl:template match="/">
<xsl:text>|===
</xsl:text>
<xsl:text>| Group ID | Artifact ID | Version
</xsl:text>
<xsl:for-each select="//mvn:dependency">
<xsl:sort select="mvn:groupId"/>
<xsl:sort select="mvn:artifactId"/>
<xsl:text>
</xsl:text>
<xsl:text>| `</xsl:text>
<xsl:copy-of select="mvn:groupId"/>
<xsl:text>`
</xsl:text>
<xsl:text>| `</xsl:text>
<xsl:copy-of select="mvn:artifactId"/>
<xsl:text>`
</xsl:text>
<xsl:text>| </xsl:text>
<xsl:copy-of select="mvn:version"/>
<xsl:text>
</xsl:text>
</xsl:for-each>
<xsl:text>|===</xsl:text>
</xsl:template>

</xsl:stylesheet>